SuperCS committed on
Commit
e31e7b4
·
verified ·
1 Parent(s): 202dc86

Add files using upload-large-folder tool

Files changed (50)
  1. dataset_code/cp_high_motion.py +171 -0
  2. dataset_code/get_length_num.py +31 -0
  3. dataset_code/get_res_num.py +29 -0
  4. dataset_code/move_bad_pt.py +46 -0
  5. dataset_code/move_bad_pt_mp4.py +241 -0
  6. dataset_code/run.sh +85 -0
  7. dataset_code/sekai/offload/dummy_dataloader.py +476 -0
  8. dataset_code/sekai/offload/dummy_dataloader_official.py +472 -0
  9. dataset_code/sekai/offload/get_ffmpeg.sh +4 -0
  10. dataset_code/sekai/offload/get_temp_csv.py +118 -0
  11. dataset_code/sekai/offload/kill.sh +3 -0
  12. dataset_code/sekai/offload/offoload_features_hv.py +326 -0
  13. dataset_code/sekai/offload/offoload_features_hv_official.py +307 -0
  14. dataset_code/sekai/offload/run.sh +85 -0
  15. dataset_code/sekai/offload/utils_framepack.py +1229 -0
  16. dataset_code/sekai/preprocess/0.sh +53 -0
  17. dataset_code/sekai/preprocess/1.sh +284 -0
  18. dataset_code/sekai/preprocess/2.sh +282 -0
  19. dataset_code/sekai/preprocess/3.sh +282 -0
  20. dataset_code/sekai/preprocess/4.sh +282 -0
  21. dataset_code/sekai/preprocess/5.sh +282 -0
  22. dataset_code/sekai/preprocess/6.sh +282 -0
  23. dataset_code/sekai/preprocess/add_config.py +221 -0
  24. dataset_code/sekai/preprocess/cut_video.py +292 -0
  25. dataset_code/sekai/preprocess/get_caption.py +281 -0
  26. dataset_code/sekai/preprocess/get_caption_keye.py +326 -0
  27. dataset_code/sekai/preprocess/get_temp_input_csv.py +173 -0
  28. dataset_code/sekai/preprocess/install.sh +15 -0
  29. dataset_code/sekai/preprocess/kill.sh +8 -0
  30. dataset_code/sekai/preprocess/merge_csv.py +217 -0
  31. dataset_code/sekai/preprocess/temp.py +25 -0
  32. dataset_code/sekai/preprocess/temp.sh +155 -0
  33. dataset_code/sft_sftnews/offload/app.py +32 -0
  34. dataset_code/sft_sftnews/offload/example_run.sh +153 -0
  35. dataset_code/sft_sftnews/offload/install.sh +119 -0
  36. dataset_code/sft_sftnews/offload/kill.sh +11 -0
  37. dataset_code/sft_sftnews/offload/offoload_features_backup.py +185 -0
  38. dataset_code/sft_sftnews/offload/offoload_features_hv.py +352 -0
  39. dataset_code/sft_sftnews/offload/offoload_features_hv_save_videos.py +255 -0
  40. dataset_code/sft_sftnews/offload/offoload_features_wan.py +417 -0
  41. dataset_code/sft_sftnews/offload/part0.yaml +101 -0
  42. dataset_code/sft_sftnews/offload/part1.yaml +101 -0
  43. dataset_code/sft_sftnews/offload/part2.yaml +101 -0
  44. dataset_code/sft_sftnews/offload/part3.yaml +101 -0
  45. dataset_code/sft_sftnews/offload/part4.yaml +101 -0
  46. dataset_code/sft_sftnews/offload/part5.yaml +101 -0
  47. dataset_code/test.sh +16 -0
  48. dataset_code/vae_decode_hv.py +92 -0
  49. dataset_code/vae_decode_hv_batch.py +118 -0
  50. dataset_code/vae_decode_wan.py +32 -0
dataset_code/cp_high_motion.py ADDED
@@ -0,0 +1,171 @@
+ import os
+ import shutil
+ from pathlib import Path
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from tqdm import tqdm
+ import threading
+
+ def copy_single_file(pt_file, output_folder):
+     """
+     Copy a single file.
+
+     Args:
+         pt_file: path to the .pt file
+         output_folder: path to the output folder
+
+     Returns:
+         tuple: (success flag, filename or error message)
+     """
+     try:
+         pt_filename = pt_file.name
+         destination = Path(output_folder) / pt_filename
+         shutil.copy2(pt_file, destination)
+         return True, pt_filename
+     except Exception as e:
+         return False, f"Failed to copy {pt_file.name}: {str(e)}"
+
+ def copy_matching_pt_files(json_folder, pt_folder, matched_output_folder, unmatched_output_folder, max_workers=4):
+     """
+     Based on which JSON files exist, copy the corresponding .pt files into separate folders using multiple threads.
+
+     Args:
+         json_folder: folder containing the JSON files
+         pt_folder: folder containing the .pt files
+         matched_output_folder: output folder for matched files
+         unmatched_output_folder: output folder for unmatched files
+         max_workers: maximum number of threads (default 4)
+     """
+
+     # Create the output folders if they do not exist
+     os.makedirs(matched_output_folder, exist_ok=True)
+     os.makedirs(unmatched_output_folder, exist_ok=True)
+
+     # Collect the IDs of all JSON files
+     print("Scanning JSON files...")
+     json_files = list(Path(json_folder).glob("*.json"))
+     json_ids = set()
+
+     for json_file in tqdm(json_files, desc="Scanning JSON files"):
+         # Use the filename (without extension) as the ID
+         file_id = json_file.stem
+         json_ids.add(file_id)
+
+     print(f"Found {len(json_ids)} JSON files")
+
+     # Split the .pt files into matched and unmatched sets
+     print("Classifying .pt files...")
+     pt_files = list(Path(pt_folder).glob("*.pt"))
+     matching_files = []
+     unmatching_files = []
+
+     for pt_file in tqdm(pt_files, desc="Classifying .pt files"):
+         pt_filename = pt_file.name
+
+         # Check whether the .pt filename starts with any JSON ID
+         is_matched = False
+         for json_id in json_ids:
+             if pt_filename.startswith(json_id):
+                 matching_files.append(pt_file)
+                 is_matched = True
+                 break  # stop the inner loop once a match is found
+
+         if not is_matched:
+             unmatching_files.append(pt_file)
+
+     print(f"Found {len(matching_files)} matched .pt files")
+     print(f"Found {len(unmatching_files)} unmatched .pt files")
+
+     # Protect the counters with a lock
+     copy_lock = threading.Lock()
+     matched_copied = 0
+     matched_failed = 0
+     unmatched_copied = 0
+     unmatched_failed = 0
+
+     def process_files(files, output_folder, file_type):
+         nonlocal matched_copied, matched_failed, unmatched_copied, unmatched_failed
+
+         if not files:
+             print(f"No {file_type} files to copy")
+             return
+
+         with ThreadPoolExecutor(max_workers=max_workers) as executor:
+             # Submit all copy tasks
+             future_to_file = {
+                 executor.submit(copy_single_file, pt_file, output_folder): pt_file
+                 for pt_file in files
+             }
+
+             # Show progress with tqdm
+             with tqdm(total=len(files), desc=f"Copying {file_type} files") as pbar:
+                 for future in as_completed(future_to_file):
+                     success, result = future.result()
+
+                     with copy_lock:
+                         if success:
+                             if file_type == "matched":
+                                 matched_copied += 1
+                                 pbar.set_postfix({
+                                     'copied': matched_copied,
+                                     'failed': matched_failed,
+                                     'current': result
+                                 })
+                             else:
+                                 unmatched_copied += 1
+                                 pbar.set_postfix({
+                                     'copied': unmatched_copied,
+                                     'failed': unmatched_failed,
+                                     'current': result
+                                 })
+                         else:
+                             if file_type == "matched":
+                                 matched_failed += 1
+                                 pbar.set_postfix({
+                                     'copied': matched_copied,
+                                     'failed': matched_failed
+                                 })
+                             else:
+                                 unmatched_failed += 1
+                                 pbar.set_postfix({
+                                     'copied': unmatched_copied,
+                                     'failed': unmatched_failed
+                                 })
+                             print(f"\nError: {result}")
+
+                     pbar.update(1)
+
+     # Copy the matched files
+     if matching_files:
+         print("\nCopying matched files...")
+         process_files(matching_files, matched_output_folder, "matched")
+
+     # Copy the unmatched files
+     if unmatching_files:
+         print("\nCopying unmatched files...")
+         process_files(unmatching_files, unmatched_output_folder, "unmatched")
+
+     # Print the final statistics
+     print("\nCopy finished!")
+     print(f"Matched files - copied: {matched_copied}, failed: {matched_failed}")
+     print(f"Unmatched files - copied: {unmatched_copied}, failed: {unmatched_failed}")
+     print(f"Matched output directory: {matched_output_folder}")
+     print(f"Unmatched output directory: {unmatched_output_folder}")
+
+ # Usage example
+ if __name__ == "__main__":
+     # Folder paths
+     json_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/sft_sftnews_videos/new_metadata/high_motion"
+     pt_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents"
+     matched_output_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents/high_motion"
+     unmatched_output_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents/low_motion"
+
+     os.makedirs(matched_output_folder, exist_ok=True)
+     os.makedirs(unmatched_output_folder, exist_ok=True)
+     # Run the copy (adjust max_workers to control the number of threads)
+     copy_matching_pt_files(
+         json_folder,
+         pt_folder,
+         matched_output_folder,
+         unmatched_output_folder,
+         max_workers=32
+     )
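Note: the matching rule above is a plain filename-prefix test. The sketch below, with made-up names, shows how a latent is classified: a .pt file counts as "high motion" as soon as its name starts with the stem of any JSON metadata file, so latents that append frame count and resolution to the clip ID still match.

json_ids = {"00100200002_0021815_0022008"}              # stem of a hypothetical JSON metadata file
pt_name = "00100200002_0021815_0022008_193_384_640.pt"  # hypothetical latent name with frames/height/width suffix
is_high_motion = any(pt_name.startswith(json_id) for json_id in json_ids)
print(is_high_motion)  # True -> this file would be copied into the high_motion folder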
dataset_code/get_length_num.py ADDED
@@ -0,0 +1,31 @@
+ import os
+ from collections import Counter
+ from tqdm import tqdm
+
+ def count_lengths(folder_path):
+     lengths = []
+
+     for filename in os.listdir(folder_path):
+         if filename.endswith('.pt'):
+             parts = filename.split('_')
+             if len(parts) >= 4:
+                 try:
+                     length = int(parts[-3])
+                     resolution = f"{length}"
+                     lengths.append(resolution)
+                 except ValueError:
+                     print(f"Could not parse file: {filename}")
+
+     counter = Counter(lengths)
+
+     print("Counts per length:")
+     for length, count in sorted(counter.items(), key=lambda x: x[1], reverse=True):
+         print(f"{length}: {count} files")
+
+     total_files = sum(counter.values())
+     print(f"\nTotal: {total_files} files")
+
+     return counter
+
+ # Usage:
+ count_lengths("/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents")
dataset_code/get_res_num.py ADDED
@@ -0,0 +1,29 @@
+ import os
+ from collections import Counter
+ from tqdm import tqdm
+
+ def count_resolutions(folder_path):
+     resolutions = []
+
+     for filename in os.listdir(folder_path):
+         if filename.endswith('.pt'):
+             parts = filename.split('_')
+             if len(parts) >= 4:
+                 try:
+                     height = int(parts[-2])
+                     width = int(parts[-1].split('.')[0])  # strip the .pt suffix
+                     resolution = f"{width}×{height}"
+                     resolutions.append(resolution)
+                 except ValueError:
+                     print(f"Could not parse file: {filename}")
+
+     counter = Counter(resolutions)
+
+     print("Counts per resolution:")
+     for resolution, count in sorted(counter.items()):
+         print(f"{resolution}: {count} files")
+
+     return counter
+
+ # Usage:
+ count_resolutions("/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents")
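Note: both counters above assume the latent filename ends with "<num_frames>_<height>_<width>.pt". A minimal parsing sketch, using an invented name:

name = "00100200002_0021815_0022008_193_384_640.pt"  # hypothetical latent filename
parts = name[:-3].split('_')                         # drop ".pt", then split on "_"
num_frames, height, width = int(parts[-3]), int(parts[-2]), int(parts[-1])
print(num_frames, f"{width}×{height}")               # 193 640×384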
dataset_code/move_bad_pt.py ADDED
@@ -0,0 +1,46 @@
+ import os
+ import shutil
+ import torch
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from tqdm import tqdm
+
+ # Paths
+ src_dirs = [
+     "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-drone",
+     "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-drone",
+     "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193",
+     "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386",
+     "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193",
+     "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386",
+ ]
+ # src_dir = '/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents'
+ bad_dir = '/mnt/bn/yufan-dev-my/ysh/Datasets/bad_pt'
+
+ for src_dir in src_dirs:
+     # Create the bad_pt directory if it does not exist
+     os.makedirs(bad_dir, exist_ok=True)
+
+     # Check a file and move it if it is corrupted
+     def check_and_move(file):
+         file_path = os.path.join(src_dir, file)
+         try:
+             torch.load(file_path, map_location='cpu', weights_only=False)
+             return (file, True)  # loaded successfully
+         except Exception:
+             shutil.move(file_path, os.path.join(bad_dir, file))
+             return (file, False)
+
+     # Collect all .pt files
+     pt_files = [f for f in os.listdir(src_dir) if f.endswith('.pt')]
+
+     # Progress-bar wrapper
+     results = []
+     with ThreadPoolExecutor(max_workers=8) as executor:
+         futures = {executor.submit(check_and_move, file): file for file in pt_files}
+         for future in tqdm(as_completed(futures), total=len(futures), desc="Checking"):
+             file, ok = future.result()
+             if not ok:
+                 print(f"❌ corrupted: {file}")
+             # Uncomment below to also log healthy files
+             # else:
+             #     print(f"✅ ok: {file}")
dataset_code/move_bad_pt_mp4.py ADDED
@@ -0,0 +1,241 @@
+ import os
+ import shutil
+ import torch
+ import cv2
+ from pathlib import Path
+ import logging
+ from tqdm import tqdm
+ import concurrent.futures
+ from threading import Lock
+ import time
+
+ class FileChecker:
+     def __init__(self, source_dir, corrupted_dir, max_workers=32):
+         self.source_dir = Path(source_dir)
+         self.corrupted_dir = Path(corrupted_dir)
+         self.max_workers = max_workers
+         self.lock = Lock()
+
+         # Statistics
+         self.stats = {
+             'total_pt': 0,
+             'total_mp4': 0,
+             'corrupted_pt': 0,
+             'corrupted_mp4': 0,
+             'moved_files': [],
+             'failed_moves': []
+         }
+
+         self.setup_logging()
+
+     def setup_logging(self):
+         """Configure logging."""
+         logging.basicConfig(
+             level=logging.INFO,
+             format='%(asctime)s - %(levelname)s - %(message)s',
+             handlers=[
+                 logging.FileHandler('file_check.log'),
+                 logging.StreamHandler()
+             ]
+         )
+         self.logger = logging.getLogger(__name__)
+
+     def check_pt_file(self, file_path):
+         """Check whether a .pt file is corrupted."""
+         try:
+             # Try to load the torch file
+             data = torch.load(file_path, map_location='cpu')
+             # Extra check: make sure the data is not empty
+             if data is None:
+                 return False
+             return True
+         except Exception as e:
+             return False
+
+     def check_mp4_file(self, file_path):
+         """Check whether an .mp4 file is corrupted."""
+         try:
+             # Try to open the video file
+             cap = cv2.VideoCapture(str(file_path))
+             if not cap.isOpened():
+                 return False
+
+             # Read the video properties
+             frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+             fps = cap.get(cv2.CAP_PROP_FPS)
+             width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+             height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+             # Basic sanity checks
+             if frame_count <= 0 or fps <= 0 or width <= 0 or height <= 0:
+                 cap.release()
+                 return False
+
+             # Try to read a few frames to verify the stream
+             frames_to_check = min(3, frame_count)
+             for i in range(frames_to_check):
+                 ret, frame = cap.read()
+                 if not ret or frame is None:
+                     cap.release()
+                     return False
+
+             cap.release()
+             return True
+         except Exception as e:
+             return False
+
+     def move_corrupted_file(self, file_path, file_type):
+         """Move a corrupted file."""
+         try:
+             # Preserve the original directory structure
+             relative_path = file_path.relative_to(self.source_dir)
+             new_path = self.corrupted_dir / relative_path
+             new_path.parent.mkdir(parents=True, exist_ok=True)
+
+             # Move the file
+             shutil.move(str(file_path), str(new_path))
+
+             with self.lock:
+                 self.stats['moved_files'].append(str(file_path))
+                 if file_type == 'pt':
+                     self.stats['corrupted_pt'] += 1
+                 else:
+                     self.stats['corrupted_mp4'] += 1
+
+             self.logger.info(f"Moved corrupted file: {file_path} -> {new_path}")
+             return True
+
+         except Exception as e:
+             with self.lock:
+                 self.stats['failed_moves'].append(str(file_path))
+             self.logger.error(f"Failed to move file {file_path}: {e}")
+             return False
+
+     def process_pt_file(self, file_path):
+         """Process a single .pt file."""
+         with self.lock:
+             self.stats['total_pt'] += 1
+
+         if not self.check_pt_file(file_path):
+             self.logger.warning(f"Found corrupted .pt file: {file_path}")
+             return self.move_corrupted_file(file_path, 'pt')
+         return True
+
+     def process_mp4_file(self, file_path):
+         """Process a single .mp4 file."""
+         with self.lock:
+             self.stats['total_mp4'] += 1
+
+         if not self.check_mp4_file(file_path):
+             self.logger.warning(f"Found corrupted .mp4 file: {file_path}")
+             return self.move_corrupted_file(file_path, 'mp4')
+         return True
+
+     def process_files(self):
+         """Process files with multiple threads."""
+         # Create the directory for corrupted files
+         self.corrupted_dir.mkdir(parents=True, exist_ok=True)
+
+         # Collect all target files
+         pt_files = list(self.source_dir.rglob('*.pt'))
+         # mp4_files = list(self.source_dir.rglob('*.mp4'))
+
+         self.logger.info(f"Found {len(pt_files)} .pt files")
+         # self.logger.info(f"Found {len(mp4_files)} .mp4 files")
+         self.logger.info(f"Processing with {self.max_workers} threads")
+
+         start_time = time.time()
+
+         # Process the .pt files
+         if pt_files:
+             self.logger.info("Starting multithreaded check of .pt files...")
+             with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+                 # Submit all tasks
+                 future_to_file = {executor.submit(self.process_pt_file, file_path): file_path
+                                   for file_path in pt_files}
+
+                 # Show progress with tqdm
+                 for future in tqdm(concurrent.futures.as_completed(future_to_file),
+                                    total=len(pt_files), desc="Checking .pt files"):
+                     file_path = future_to_file[future]
+                     try:
+                         future.result()
+                     except Exception as e:
+                         self.logger.error(f"Error while processing file {file_path}: {e}")
+
+         # # Process the .mp4 files
+         # if mp4_files:
+         #     self.logger.info("Starting multithreaded check of .mp4 files...")
+         #     with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+         #         # Submit all tasks
+         #         future_to_file = {executor.submit(self.process_mp4_file, file_path): file_path
+         #                           for file_path in mp4_files}
+
+         #         # Show progress with tqdm
+         #         for future in tqdm(concurrent.futures.as_completed(future_to_file),
+         #                            total=len(mp4_files), desc="Checking .mp4 files"):
+         #             file_path = future_to_file[future]
+         #             try:
+         #                 future.result()
+         #             except Exception as e:
+         #                 self.logger.error(f"Error while processing file {file_path}: {e}")
+
+         end_time = time.time()
+         processing_time = end_time - start_time
+
+         # Print the statistics
+         self.print_statistics(processing_time)
+
+         return self.stats
+
+     def print_statistics(self, processing_time):
+         """Print the final statistics."""
+         self.logger.info("=" * 60)
+         self.logger.info("Check finished! Statistics:")
+         self.logger.info(f"Processing time: {processing_time:.2f} s")
+         self.logger.info(f"Threads used: {self.max_workers}")
+         self.logger.info(f"Total .pt files: {self.stats['total_pt']}")
+         self.logger.info(f"Corrupted .pt files: {self.stats['corrupted_pt']}")
+         self.logger.info(f"Total .mp4 files: {self.stats['total_mp4']}")
+         self.logger.info(f"Corrupted .mp4 files: {self.stats['corrupted_mp4']}")
+         self.logger.info(f"Files moved successfully: {len(self.stats['moved_files'])}")
+         self.logger.info(f"Failed moves: {len(self.stats['failed_moves'])}")
+
+         if self.stats['total_pt'] + self.stats['total_mp4'] > 0:
+             total_files = self.stats['total_pt'] + self.stats['total_mp4']
+             files_per_second = total_files / processing_time
+             self.logger.info(f"Average throughput: {files_per_second:.2f} files/s")
+
+         self.logger.info("=" * 60)
+
+ def main():
+     # Configuration
+     source_dir = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset"
+     corrupted_dir = "/mnt/bn/yufan-dev-my/ysh/Datasets/corrupted_files"
+     max_workers = 8
+
+     print(f"Source directory: {source_dir}")
+     print(f"Corrupted files will be moved to: {corrupted_dir}")
+     print(f"Number of worker threads: {max_workers}")
+     print("=" * 50)
+
+     # Create the file checker and run it
+     checker = FileChecker(source_dir, corrupted_dir, max_workers)
+     stats = checker.process_files()
+
+     # Save the list of moved files
+     if stats['moved_files']:
+         with open('moved_files_list.txt', 'w') as f:
+             for file_path in stats['moved_files']:
+                 f.write(f"{file_path}\n")
+         print("Saved the list of moved files to moved_files_list.txt")
+
+     # Save the list of failed moves
+     if stats['failed_moves']:
+         with open('failed_moves_list.txt', 'w') as f:
+             for file_path in stats['failed_moves']:
+                 f.write(f"{file_path}\n")
+         print("Saved the list of failed moves to failed_moves_list.txt")
+
+ if __name__ == "__main__":
+     main()
dataset_code/run.sh ADDED
@@ -0,0 +1,85 @@
1
+ # export CUDA_VISIBLE_DEVICES=1,2,3,4,5,6
2
+
3
+ export OMNISTORE_LOAD_STRICT_MODE=0
4
+ export OMNISTORE_LOGGING_LEVEL=ERROR
5
+ #################################################################
6
+ ## Torch
7
+ #################################################################
8
+ export TOKENIZERS_PARALLELISM=false
9
+ export TORCH_LOGS="+dynamo,recompiles,graph_breaks"
10
+ export TORCHDYNAMO_VERBOSE=1
11
+ export TORCH_NCCL_ENABLE_MONITORING=1
12
+ export PYTORCH_CUDA_ALLOC_CONF="expandable_segments:True,garbage_collection_threshold:0.9"
13
+ #################################################################
14
+
15
+
16
+ #################################################################
17
+ ## NCCL
18
+ #################################################################
19
+ export NCCL_IB_GID_INDEX=3
20
+ export NCCL_IB_HCA=$ARNOLD_RDMA_DEVICE
21
+ export NCCL_SOCKET_IFNAME=eth0
22
+ export NCCL_SOCKET_TIMEOUT=3600000
23
+
24
+ export NCCL_DEBUG=WARN # disable the verbose NCCL logs
25
+ export NCCL_P2P_DISABLE=0
26
+ export NCCL_IB_DISABLE=0 # was 1
27
+ export NCCL_SHM_DISABLE=0 # was 1
28
+ export NCCL_P2P_LEVEL=NVL
29
+
30
+ export NCCL_PXN_DISABLE=0
31
+ export NCCL_NET_GDR_LEVEL=2
32
+ export NCCL_IB_QPS_PER_CONNECTION=4
33
+ export NCCL_IB_TC=160
34
+ export NCCL_IB_TIMEOUT=22
35
+ #################################################################
36
+
37
+ #################################################################
38
+ ## DIST
39
+ #################################################################
40
+ MASTER_ADDR=$ARNOLD_WORKER_0_HOST
41
+ ports=(`echo $METIS_WORKER_0_PORT | tr ',' ' '`)
42
+ MASTER_PORT=${ports[0]}
43
+ NNODES=$ARNOLD_WORKER_NUM
44
+ NODE_RANK=$ARNOLD_ID
45
+ GPUS_PER_NODE=$ARNOLD_WORKER_GPU
46
+ # GPUS_PER_NODE=5
47
+ # NNODES=1
48
+ # NODE_RANK=0
49
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
50
+
51
+ DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
52
+ if [ ! -z $RDZV_BACKEND ]; then
53
+ DISTRIBUTED_ARGS="${DISTRIBUTED_ARGS} --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_id 9863 --rdzv_backend c10d"
54
+ export NCCL_SHM_DISABLE=1
55
+ fi
56
+
57
+ echo -e "\033[31mDISTRIBUTED_ARGS: ${DISTRIBUTED_ARGS}\033[0m"
58
+
59
+ #################################################################
60
+ #
61
+ # torchrun $DISTRIBUTED_ARGS offoload_features_hv_official.py \
62
+ # --stride 2 \
63
+ # --batch_size 4 \
64
+ # --dataloader_num_workers 8 \
65
+ # --csv_file "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-drone_updated.csv" \
66
+ # --video_folder "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-drone" \
67
+ # --output_latent_folder "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-drone/latents_stride2"
68
+ # torchrun $DISTRIBUTED_ARGS offoload_features_hv_official.py \
69
+ # --stride 2 \
70
+ # --batch_size 4 \
71
+ # --dataloader_num_workers 8 \
72
+ # --csv_file "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-drone_updated.csv" \
73
+ # --video_folder "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-drone" \
74
+ # --output_latent_folder "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-drone/latents_stride2"
75
+ #
76
+
77
+ #
78
+ torchrun $DISTRIBUTED_ARGS offoload_features_hv_official.py \
79
+ --stride 1 \
80
+ --batch_size 4 \
81
+ --dataloader_num_workers 8 \
82
+ --csv_file "/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final/data/SpatialVID_HQ_step2_filtered.csv" \
83
+ --video_folder "/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final" \
84
+ --output_latent_folder "/mnt/bn/icvg/users/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final/latents_stride1_new"
85
+ #
dataset_code/sekai/offload/dummy_dataloader.py ADDED
@@ -0,0 +1,476 @@
1
+ import os
2
+ import pickle
3
+ import random
4
+ import numpy as np
5
+ import pandas as pd
6
+ from collections import defaultdict
7
+
8
+ import torch
9
+ import torchvision
10
+ from torch.utils.data import DataLoader, Dataset, Sampler
11
+
12
+ from video_reader import PyVideoReader
13
+
14
+ from diffusers.utils import export_to_video
15
+ from diffusers.training_utils import free_memory
16
+
17
+ # 5: (21, 41, 61, 81, 101)
18
+ # 6: (25, 49, 73, 97, 121)
19
+ # 7: (29, 57, 85, 113, 141)
20
+ # 8: (33, 65, 97, 129, 161)
21
+ # 9: (37, 73, 109, 145, 181)
22
+ # 10: (41, 81, 121, 161, 201)
23
+ # 11: (45, 89, 133, 177, 221)
24
+ # 12: (49, 97, 145, 193, 241)
25
+
26
+ # 1: (21 - 1) * 4 + 1 = 81, 162
27
+ # 2: (22 - 1) * 4 + 1 = 85, 170
28
+ # 3: (23 - 1) * 4 + 1 = 89, 178
29
+ # 4: (24 - 1) * 4 + 1 = 93, 186
30
+ # 5: (25 - 1) * 4 + 1 = 97, 194
31
+ # 6: (26 - 1) * 4 + 1 = 101, 202
32
+ # 7: (27 - 1) * 4 + 1 = 105, 210
33
+ # 8: (28 - 1) * 4 + 1 = 109, 218
34
+ # 9: (29 - 1) * 4 + 1 = 113, 226
35
+ # 10: (30 - 1) * 4 + 1 = 117, 234
36
+ # 11: (31 - 1) * 4 + 1 = 121, 242
37
+ # 12: (32 - 1) * 4 + 1 = 125, 250
38
+ # 13: (33 - 1) * 4 + 1 = 129, 258
39
+ # 14: (34 - 1) * 4 + 1 = 133, 266
40
+ # 15: (35 - 1) * 4 + 1 = 137, 274
41
+ # 16: (36 - 1) * 4 + 1 = 141, 282
42
+
43
+ resolution_bucket_options = {
44
+ 640: [
45
+ (768, 320),
46
+ (768, 384),
47
+ (640, 384),
48
+ (768, 512),
49
+ (576, 448),
50
+ (512, 512),
51
+ (448, 576),
52
+ (512, 768),
53
+ (384, 640),
54
+ (384, 768),
55
+ (320, 768),
56
+ ],
57
+ }
58
+
59
+ length_bucket_options = {
60
+ 1: [321, 301, 281, 261, 241, 221, 193, 181, 161, 141, 121, 101, 81, 61, 41, 21],
61
+ 2: [193, 177, 161, 156, 145, 133, 129, 121, 113, 109, 97, 85, 81, 73, 65, 61, 49, 37, 25],
62
+ }
63
+
64
+ def find_nearest_resolution_bucket(h, w, resolution=640):
65
+ min_metric = float('inf')
66
+ best_bucket = None
67
+ for (bucket_h, bucket_w) in resolution_bucket_options[resolution]:
68
+ metric = abs(h * bucket_w - w * bucket_h)
69
+ if metric <= min_metric:
70
+ min_metric = metric
71
+ best_bucket = (bucket_h, bucket_w)
72
+ return best_bucket
73
+
74
+ def find_nearest_length_bucket(length, stride=1):
75
+ buckets = length_bucket_options[stride]
76
+ min_bucket = min(buckets)
77
+ if length < min_bucket:
78
+ return length
79
+ valid_buckets = [bucket for bucket in buckets if bucket <= length]
80
+ return max(valid_buckets)
81
+
82
+ def read_cut_crop_and_resize(video_path, f_prime, h_prime, w_prime, stride=1):
83
+ vr = PyVideoReader(video_path, threads=0) # 0 means auto (let ffmpeg pick the optimal number)
84
+
85
+ filename = os.path.splitext(os.path.basename(video_path))[0]
86
+ parts = filename.split('_')
87
+ total_frames = int(parts[-1]) - int(parts[-2])
88
+
89
+ if stride != 1:
90
+ required_span = stride * (f_prime - 1)
91
+ try:
92
+ start_frame = max(0, total_frames - required_span - 1)
93
+ frame_indices = list(range(start_frame, total_frames, stride))
94
+ assert len(frame_indices) == f_prime
95
+ # frames = torch.from_numpy(vr.get_batch(frame_indices)).float()
96
+ frames = torch.from_numpy(np.stack(vr.decode_fast(start_frame=0, end_frame=total_frames))).float()
97
+ frames = frames[frame_indices]
98
+ except:
99
+ start_frame = max(0, total_frames - required_span - 2)
100
+ frame_indices = list(range(start_frame, total_frames, stride))
101
+ assert len(frame_indices) == f_prime
102
+ # frames = torch.from_numpy(vr.get_batch(frame_indices)).float()
103
+ frames = torch.from_numpy(np.stack(vr.decode_fast(start_frame=0, end_frame=total_frames))).float()
104
+ frames = frames[frame_indices]
105
+ else:
106
+ start_frame = max(0, total_frames - f_prime)
107
+ # frame_indices = list(range(start_frame, total_frames))
108
+ frames = torch.from_numpy(np.stack(vr.decode_fast(start_frame=start_frame, end_frame=total_frames))).float()
109
+
110
+ # total_frames = len(vr)
111
+ # start_frame = max(0, total_frames - f_prime)
112
+ # # frame_indices = list(range(start_frame, total_frames))
113
+ # # frames = torch.from_numpy(vr.get_batch(frame_indices)).float()
114
+ # frames = torch.from_numpy(np.stack(vr.decode_fast(start_frame=start_frame, end_frame=total_frames))).float()
115
+
116
+ frames = (frames / 127.5) - 1
117
+ video = frames.permute(0, 3, 1, 2)
118
+
119
+ frames, channels, h, w = video.shape
120
+ aspect_ratio_original = h / w
121
+ aspect_ratio_target = h_prime / w_prime
122
+
123
+ if aspect_ratio_original >= aspect_ratio_target:
124
+ new_h = int(w * aspect_ratio_target)
125
+ top = (h - new_h) // 2
126
+ bottom = top + new_h
127
+ left = 0
128
+ right = w
129
+ else:
130
+ new_w = int(h / aspect_ratio_target)
131
+ left = (w - new_w) // 2
132
+ right = left + new_w
133
+ top = 0
134
+ bottom = h
135
+
136
+ # Crop the video
137
+ cropped_video = video[:, :, top:bottom, left:right]
138
+ # Resize the cropped video
139
+ resized_video = torchvision.transforms.functional.resize(cropped_video, (h_prime, w_prime))
140
+ return resized_video
141
+
142
+ def save_frames(frame_raw, fps=24, video_path="1.mp4"):
143
+ save_list = []
144
+ for frame in frame_raw:
145
+ frame = (frame + 1) / 2 * 255
146
+ frame = torchvision.transforms.transforms.ToPILImage()(frame.to(torch.uint8)).convert("RGB")
147
+ save_list.append(frame)
148
+ frame = None
149
+ del frame
150
+ export_to_video(save_list, video_path, fps=fps)
151
+
152
+ save_list = None
153
+ del save_list
154
+ free_memory()
155
+
156
+ class BucketedFeatureDataset(Dataset):
157
+ def __init__(self, csv_file, video_folder, stride=1, cache_file=None, force_rebuild=True):
158
+ self.csv_file = csv_file
159
+ self.video_folder = video_folder
160
+ self.stride = stride
161
+
162
+ if cache_file is None:
163
+ cache_file = os.path.join(video_folder, f"dataset_cache_stride{stride}.pkl")
164
+
165
+ if force_rebuild or not os.path.exists(cache_file):
166
+ print("Building metadata cache...")
167
+ self._build_metadata()
168
+ self._save_cache(cache_file)
169
+ else:
170
+ print("Loading cached metadata...")
171
+ with open(cache_file, "rb") as f:
172
+ cached_data = pickle.load(f)
173
+ if cached_data.get("stride", 1) != stride:
174
+ print(f"Stride mismatch in cache (cached: {cached_data.get('stride', 1)}, current: {stride}). Rebuilding...")
175
+ self._build_metadata()
176
+ self._save_cache(cache_file)
177
+ else:
178
+ self.samples = cached_data["samples"]
179
+ self.buckets = cached_data["buckets"]
180
+ print(f"Loaded {len(self.samples)} samples from cache")
181
+
182
+
183
+ def _save_cache(self, cache_file):
184
+ print("Saving metadata cache...")
185
+ cached_data = {
186
+ "samples": self.samples,
187
+ "buckets": self.buckets,
188
+ "stride": self.stride
189
+ }
190
+ with open(cache_file, "wb") as f:
191
+ pickle.dump(cached_data, f)
192
+ print(f"Cached {len(self.samples)} samples with stride={self.stride}")
193
+
194
+ # def _build_metadata(self):
195
+ # self.feature_files = [f for f in os.listdir(self.video_folder) if f.endswith(".mp4")]
196
+ # self.samples = []
197
+ # self.buckets = defaultdict(list)
198
+ # sample_idx = 0
199
+
200
+ # print(f"Processing {len(self.feature_files)} files...")
201
+ # for i, feature_file in enumerate(self.feature_files):
202
+ # if i % 10000 == 0:
203
+ # print(f"Processed {i}/{len(self.feature_files)} files")
204
+
205
+ # video_path = os.path.join(self.video_folder, feature_file)
206
+
207
+ # # Parse filename
208
+ # parts = feature_file.split("_")[:4]
209
+ # uttid = parts[0]
210
+ # num_frame = int(parts[1])
211
+ # height = int(parts[2])
212
+ # width = int(parts[3].replace(".mp4", ""))
213
+
214
+ def _build_metadata(self):
215
+ self.df = pd.read_csv(self.csv_file)
216
+
217
+ self.samples = []
218
+ self.buckets = defaultdict(list)
219
+ sample_idx = 0
220
+
221
+ print(f"Processing {len(self.df)} records from CSV with stride={self.stride}...")
222
+ for i, row in self.df.iterrows():
223
+ if i % 10000 == 0:
224
+ print(f"Processed {i}/{len(self.df)} records")
225
+
226
+ uttid = os.path.basename(row['videoFile']).replace(".mp4", "")
227
+ video_file = row['videoFile']
228
+ video_path = os.path.join(self.video_folder, video_file)
229
+ prompt = row["caption"]
230
+ # num_frame = row["num_frame"]
231
+
232
+ filename = os.path.splitext(os.path.basename(video_path))[0]
233
+ parts = filename.split('_')
234
+ num_frame = int(parts[-1]) - int(parts[-2])
235
+
236
+ height = row["height"]
237
+ width = row["width"]
238
+ fps = row["fps"]
239
+
240
+ # # keep length >= 121
241
+ # if num_frame < 121:
242
+ # continue
243
+
244
+ effective_num_frame = (num_frame + self.stride - 1) // self.stride
245
+ bucket_height, bucket_width = find_nearest_resolution_bucket(height, width, resolution=640)
246
+ bucket_num_frame = find_nearest_length_bucket(effective_num_frame, stride=self.stride)
247
+ bucket_key = (bucket_num_frame, bucket_height, bucket_width)
248
+
249
+ sample_info = {
250
+ "uttid": uttid,
251
+ "bucket_key": bucket_key,
252
+ "video_path": video_path,
253
+ "prompt": prompt,
254
+ "fps": fps,
255
+ "stride": self.stride,
256
+ "effective_num_frame": effective_num_frame,
257
+ "num_frame": num_frame,
258
+ "height": height,
259
+ "width": width,
260
+ "bucket_num_frame": bucket_num_frame,
261
+ "bucket_height": bucket_height,
262
+ "bucket_width": bucket_width,
263
+ }
264
+
265
+ self.samples.append(sample_info)
266
+ self.buckets[bucket_key].append(sample_idx)
267
+ sample_idx += 1
268
+
269
+ def __len__(self):
270
+ return len(self.samples)
271
+
272
+ def __getitem__(self, idx):
273
+ sample_info = self.samples[idx]
274
+ video_data = read_cut_crop_and_resize(
275
+ video_path=sample_info["video_path"],
276
+ f_prime=sample_info["bucket_num_frame"],
277
+ h_prime=sample_info["bucket_height"],
278
+ w_prime=sample_info["bucket_width"],
279
+ stride=self.stride,
280
+ )
281
+ # while True:
282
+ # sample_info = self.samples[idx]
283
+ # try:
284
+ # video_data = read_cut_crop_and_resize(
285
+ # video_path=sample_info["video_path"],
286
+ # f_prime=sample_info["bucket_num_frame"],
287
+ # h_prime=sample_info["bucket_height"],
288
+ # w_prime=sample_info["bucket_width"],
289
+ # stride=self.stride,
290
+ # )
291
+ # break
292
+ # except Exception:
293
+ # idx = random.randint(0, len(self.samples) - 1)
294
+ # print(f"Error loading {sample_info['video_path']}, retrying...")
295
+
296
+ return {
297
+ "uttid": sample_info["uttid"],
298
+ "bucket_key": sample_info["bucket_key"],
299
+ "video_metadata": {
300
+ "num_frames": sample_info["bucket_num_frame"],
301
+ "height": sample_info["bucket_height"],
302
+ "width": sample_info["bucket_width"],
303
+ "fps": sample_info["fps"],
304
+ "stride": self.stride,
305
+ "effective_num_frame": sample_info["effective_num_frame"],
306
+ },
307
+ "videos": video_data,
308
+ "prompts": sample_info["prompt"],
309
+ "first_frames_images": (video_data[0] + 1) / 2 * 255,
310
+ }
311
+
312
+ class BucketedSampler(Sampler):
313
+ def __init__(self, dataset, batch_size, drop_last=False, shuffle=True, seed=42):
314
+ self.dataset = dataset
315
+ self.batch_size = batch_size
316
+ self.drop_last = drop_last
317
+ self.shuffle = shuffle
318
+ self.seed = seed
319
+ self.generator = torch.Generator()
320
+ self.buckets = dataset.buckets
321
+ self._epoch = 0
322
+
323
+ def set_epoch(self, epoch):
324
+ self._epoch = epoch
325
+
326
+ def __iter__(self):
327
+ if self.shuffle:
328
+ self.generator.manual_seed(self.seed + self._epoch)
329
+ else:
330
+ self.generator.manual_seed(self.seed)
331
+
332
+ bucket_iterators = {}
333
+ bucket_batches = {}
334
+
335
+ for bucket_key, sample_indices in self.buckets.items():
336
+ indices = sample_indices.copy()
337
+ if self.shuffle:
338
+ indices = torch.randperm(len(indices), generator=self.generator).tolist()
339
+ indices = [sample_indices[i] for i in indices]
340
+
341
+ batches = []
342
+ for i in range(0, len(indices), self.batch_size):
343
+ batch = indices[i : i + self.batch_size]
344
+ if len(batch) == self.batch_size or not self.drop_last:
345
+ batches.append(batch)
346
+
347
+ if batches:
348
+ bucket_batches[bucket_key] = batches
349
+ bucket_iterators[bucket_key] = iter(batches)
350
+
351
+ remaining_buckets = list(bucket_iterators.keys())
352
+
353
+ while remaining_buckets:
354
+ idx = torch.randint(len(remaining_buckets), (1,), generator=self.generator).item()
355
+ bucket_key = remaining_buckets[idx]
356
+
357
+ bucket_iter = bucket_iterators[bucket_key]
358
+
359
+ try:
360
+ batch = next(bucket_iter)
361
+ yield batch
362
+ except StopIteration:
363
+ remaining_buckets.remove(bucket_key)
364
+
365
+ def __len__(self):
366
+ total_batches = 0
367
+ for sample_indices in self.buckets.values():
368
+ num_batches = len(sample_indices) // self.batch_size
369
+ if not self.drop_last and len(sample_indices) % self.batch_size != 0:
370
+ num_batches += 1
371
+ total_batches += num_batches
372
+ return total_batches
373
+
374
+
375
+ def collate_fn(batch):
376
+ def collate_dict(data_list):
377
+ if isinstance(data_list[0], dict):
378
+ return {
379
+ key: collate_dict([d[key] for d in data_list])
380
+ for key in data_list[0]
381
+ }
382
+ elif isinstance(data_list[0], torch.Tensor):
383
+ return torch.stack(data_list)
384
+ else:
385
+ return data_list
386
+
387
+ return {
388
+ key: collate_dict([d[key] for d in batch])
389
+ for key in batch[0]
390
+ }
391
+
392
+
393
+ if __name__ == "__main__":
394
+ from accelerate import Accelerator
395
+
396
+ base_name = "sekai-game-drone"
397
+ csv_file = f"/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls{base_name}_updated.csv"
398
+ video_folder = f"/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/{base_name}"
399
+ stride = 2
400
+ batch_size = 2
401
+ num_train_epochs = 1
402
+ seed = 0
403
+ output_dir = "accelerate_checkpoints"
404
+ checkpoint_dirs = (
405
+ [
406
+ d
407
+ for d in os.listdir(output_dir)
408
+ if d.startswith("checkpoint-") and os.path.isdir(os.path.join(output_dir, d))
409
+ ]
410
+ if os.path.exists(output_dir)
411
+ else []
412
+ )
413
+
414
+ dataset = BucketedFeatureDataset(csv_file=csv_file, video_folder=video_folder, stride=stride)
415
+ sampler = BucketedSampler(dataset, batch_size=2, drop_last=False, shuffle=True, seed=seed)
416
+ dataloader = DataLoader(dataset, batch_sampler=sampler, collate_fn=collate_fn, num_workers=0)
417
+
418
+ print(len(dataset), len(dataloader))
419
+ accelerator = Accelerator()
420
+ dataloader = accelerator.prepare(dataloader)
421
+ print(f"Dataset size: {len(dataset)}, Dataloader batches: {len(dataloader)}")
422
+ print(f"Process index: {accelerator.process_index}, World size: {accelerator.num_processes}")
423
+
424
+ step = 0
425
+ global_step = 0
426
+ first_epoch = 0
427
+ num_update_steps_per_epoch = len(dataloader)
428
+
429
+ print("Testing dataloader...")
430
+ step = global_step
431
+ for epoch in range(first_epoch, num_train_epochs):
432
+ sampler.set_epoch(epoch)
433
+ skip_steps = 0
434
+ printed_skip_log = False
435
+ for i, batch in enumerate(dataloader):
436
+ if epoch == first_epoch and skip_steps < (global_step % num_update_steps_per_epoch):
437
+ skip_steps += 1
438
+ continue
439
+ if epoch == first_epoch and not printed_skip_log:
440
+ print(f"Skip {skip_steps} steps in epoch {epoch}")
441
+ printed_skip_log = True
442
+
443
+ # Get metadata
444
+ uttid = batch["uttid"]
445
+ bucket_key = batch["bucket_key"]
446
+ num_frame = batch["video_metadata"]["num_frames"]
447
+ height = batch["video_metadata"]["height"]
448
+ width = batch["video_metadata"]["width"]
449
+
450
+ # Get feature
451
+ video_data = batch["videos"]
452
+ prompt = batch["prompts"]
453
+ first_frames_images = batch["first_frames_images"]
454
+ first_frames_images = [torchvision.transforms.ToPILImage()(x.to(torch.uint8)) for x in first_frames_images]
455
+
456
+ # save_frames(video_data[0].squeeze(0))
457
+
458
+ if accelerator.process_index == 0:
459
+ # print info
460
+ print(f" Step {step}:")
461
+ print(f" Batch {i}:")
462
+ print(f" Batch size: {len(uttid)}")
463
+ print(f" Uttids: {uttid}")
464
+ print(f" Dimensions - frames: {num_frame[0]}, height: {height[0]}, width: {width[0]}")
465
+ print(f" Bucket key: {bucket_key[0]}")
466
+ print(f" Videos shape: {video_data.shape}")
467
+ print(f" Cpation: {prompt}")
468
+
469
+ # verify
470
+ assert all(nf == num_frame[0] for nf in num_frame), "Frame numbers not consistent in batch"
471
+ assert all(h == height[0] for h in height), "Heights not consistent in batch"
472
+ assert all(w == width[0] for w in width), "Widths not consistent in batch"
473
+
474
+ print(" ✓ Batch dimensions are consistent")
475
+
476
+ step += 1
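Note: a worked example of the bucketing logic above, with invented numbers. A 720x1280 clip with 170 raw frames at stride 2 is first reduced to ceil(170/2) = 85 sampled frames, which lands exactly on the 85 length bucket (matching the "(22 - 1) * 4 + 1 = 85, 170" row in the comment block), and its aspect ratio maps to the (384, 640) resolution bucket because that pair minimizes |h * bucket_w - w * bucket_h|.

# Illustrative only: the same selection logic as find_nearest_length_bucket /
# find_nearest_resolution_bucket, applied to one made-up sample.
length_buckets = [193, 177, 161, 156, 145, 133, 129, 121, 113, 109, 97, 85, 81, 73, 65, 61, 49, 37, 25]  # stride 2
res_buckets = [(768, 320), (768, 384), (640, 384), (768, 512), (576, 448), (512, 512),
               (448, 576), (512, 768), (384, 640), (384, 768), (320, 768)]  # (h, w) buckets at the 640 tier
num_frame, stride, h, w = 170, 2, 720, 1280
effective = (num_frame + stride - 1) // stride                                    # ceil(170 / 2) = 85
bucket_len = max(b for b in length_buckets if b <= effective)                     # -> 85
bucket_h, bucket_w = min(res_buckets, key=lambda hw: abs(h * hw[1] - w * hw[0]))  # -> (384, 640)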
dataset_code/sekai/offload/dummy_dataloader_official.py ADDED
@@ -0,0 +1,472 @@
1
+ import os
2
+ import pickle
3
+ import random
4
+ import numpy as np
5
+ import pandas as pd
6
+ from collections import defaultdict
7
+
8
+ import torch
9
+ import torchvision
10
+ from torch.utils.data import DataLoader, Dataset, Sampler
11
+
12
+ from video_reader import PyVideoReader
13
+
14
+ from diffusers.utils import export_to_video
15
+ from diffusers.training_utils import free_memory
16
+
17
+ # 5: (21, 41, 61, 81, 101)
18
+ # 6: (25, 49, 73, 97, 121)
19
+ # 7: (29, 57, 85, 113, 141)
20
+ # 8: (33, 65, 97, 129, 161)
21
+ # 9: (37, 73, 109, 145, 181)
22
+ # 10: (41, 81, 121, 161, 201)
23
+ # 11: (45, 89, 133, 177, 221)
24
+ # 12: (49, 97, 145, 193, 241)
25
+
26
+ # 1: (21 - 1) * 4 + 1 = 81, 162
27
+ # 2: (22 - 1) * 4 + 1 = 85, 170
28
+ # 3: (23 - 1) * 4 + 1 = 89, 178
29
+ # 4: (24 - 1) * 4 + 1 = 93, 186
30
+ # 5: (25 - 1) * 4 + 1 = 97, 194
31
+ # 6: (26 - 1) * 4 + 1 = 101, 202
32
+ # 7: (27 - 1) * 4 + 1 = 105, 210
33
+ # 8: (28 - 1) * 4 + 1 = 109, 218
34
+ # 9: (29 - 1) * 4 + 1 = 113, 226
35
+ # 10: (30 - 1) * 4 + 1 = 117, 234
36
+ # 11: (31 - 1) * 4 + 1 = 121, 242
37
+ # 12: (32 - 1) * 4 + 1 = 125, 250
38
+ # 13: (33 - 1) * 4 + 1 = 129, 258
39
+ # 14: (34 - 1) * 4 + 1 = 133, 266
40
+ # 15: (35 - 1) * 4 + 1 = 137, 274
41
+ # 16: (36 - 1) * 4 + 1 = 141, 282
42
+
43
+ resolution_bucket_options = {
44
+ 640: [
45
+ (768, 320),
46
+ (768, 384),
47
+ (640, 384),
48
+ (768, 512),
49
+ (576, 448),
50
+ (512, 512),
51
+ (448, 576),
52
+ (512, 768),
53
+ (384, 640),
54
+ (384, 768),
55
+ (320, 768),
56
+ ],
57
+ }
58
+
59
+ length_bucket_options = {
60
+ 1: [321, 301, 281, 261, 241, 221, 193, 181, 161, 141, 121, 101, 81, 61, 41, 21],
61
+ 2: [193, 177, 161, 156, 145, 133, 129, 121, 113, 109, 97, 85, 81, 73, 65, 61, 49, 37, 25],
62
+ }
63
+
64
+ def find_nearest_resolution_bucket(h, w, resolution=640):
65
+ min_metric = float('inf')
66
+ best_bucket = None
67
+ for (bucket_h, bucket_w) in resolution_bucket_options[resolution]:
68
+ metric = abs(h * bucket_w - w * bucket_h)
69
+ if metric <= min_metric:
70
+ min_metric = metric
71
+ best_bucket = (bucket_h, bucket_w)
72
+ return best_bucket
73
+
74
+ def find_nearest_length_bucket(length, stride=1):
75
+ buckets = length_bucket_options[stride]
76
+ min_bucket = min(buckets)
77
+ if length < min_bucket:
78
+ return length
79
+ valid_buckets = [bucket for bucket in buckets if bucket <= length]
80
+ return max(valid_buckets)
81
+
82
+ def read_cut_crop_and_resize(video_path, f_prime, h_prime, w_prime, stride=1):
83
+ vr = PyVideoReader(video_path, threads=0) # 0 means auto (let ffmpeg pick the optimal number)
84
+ total_frames = len(vr)
85
+
86
+ if stride != 1:
87
+ required_span = stride * (f_prime - 1)
88
+ start_frame = max(0, total_frames - required_span - 1)
89
+ else:
90
+ start_frame = max(0, total_frames - f_prime)
91
+
92
+ frame_indices = list(range(start_frame, total_frames, stride))
93
+ assert len(frame_indices) == f_prime
94
+ frames = torch.from_numpy(vr.get_batch(frame_indices)).float()
95
+
96
+
97
+ # if stride != 1:
98
+ # required_span = stride * (f_prime - 1)
99
+ # start_frame = max(0, total_frames - required_span - 1)
100
+ # frame_indices = list(range(start_frame, total_frames, stride))
101
+ # assert len(frame_indices) == f_prime
102
+ # frames = torch.from_numpy(np.stack(vr.decode_fast(start_frame=0, end_frame=total_frames))).float()
103
+ # frames = frames[frame_indices]
104
+ # else:
105
+ # start_frame = max(0, total_frames - f_prime)
106
+ # frames = torch.from_numpy(np.stack(vr.decode_fast(start_frame=start_frame, end_frame=total_frames))).float()
107
+
108
+
109
+ # total_frames = len(vr)
110
+ # start_frame = max(0, total_frames - f_prime)
111
+ # # frame_indices = list(range(start_frame, total_frames))
112
+ # # frames = torch.from_numpy(vr.get_batch(frame_indices)).float()
113
+ # frames = torch.from_numpy(np.stack(vr.decode_fast(start_frame=start_frame, end_frame=total_frames))).float()
114
+
115
+
116
+ frames = (frames / 127.5) - 1
117
+ video = frames.permute(0, 3, 1, 2)
118
+
119
+ frames, channels, h, w = video.shape
120
+ aspect_ratio_original = h / w
121
+ aspect_ratio_target = h_prime / w_prime
122
+
123
+ if aspect_ratio_original >= aspect_ratio_target:
124
+ new_h = int(w * aspect_ratio_target)
125
+ top = (h - new_h) // 2
126
+ bottom = top + new_h
127
+ left = 0
128
+ right = w
129
+ else:
130
+ new_w = int(h / aspect_ratio_target)
131
+ left = (w - new_w) // 2
132
+ right = left + new_w
133
+ top = 0
134
+ bottom = h
135
+
136
+ # Crop the video
137
+ cropped_video = video[:, :, top:bottom, left:right]
138
+ # Resize the cropped video
139
+ resized_video = torchvision.transforms.functional.resize(cropped_video, (h_prime, w_prime))
140
+ return resized_video
141
+
142
+ def save_frames(frame_raw, fps=24, video_path="1.mp4"):
143
+ save_list = []
144
+ for frame in frame_raw:
145
+ frame = (frame + 1) / 2 * 255
146
+ frame = torchvision.transforms.transforms.ToPILImage()(frame.to(torch.uint8)).convert("RGB")
147
+ save_list.append(frame)
148
+ frame = None
149
+ del frame
150
+ export_to_video(save_list, video_path, fps=fps)
151
+
152
+ save_list = None
153
+ del save_list
154
+ free_memory()
155
+
156
+ class BucketedFeatureDataset(Dataset):
157
+ def __init__(self, csv_file, video_folder, stride=1, cache_file=None, force_rebuild=False):
158
+ self.csv_file = csv_file
159
+ self.video_folder = video_folder
160
+ self.stride = stride
161
+
162
+ if cache_file is None:
163
+ cache_file = os.path.join(video_folder, f"dataset_cache_stride{stride}.pkl")
164
+
165
+ if force_rebuild or not os.path.exists(cache_file):
166
+ print("Building metadata cache...")
167
+ self._build_metadata()
168
+ self._save_cache(cache_file)
169
+ else:
170
+ print("Loading cached metadata...")
171
+ with open(cache_file, "rb") as f:
172
+ cached_data = pickle.load(f)
173
+ if cached_data.get("stride", 1) != stride:
174
+ print(f"Stride mismatch in cache (cached: {cached_data.get('stride', 1)}, current: {stride}). Rebuilding...")
175
+ self._build_metadata()
176
+ self._save_cache(cache_file)
177
+ else:
178
+ self.samples = cached_data["samples"]
179
+ self.buckets = cached_data["buckets"]
180
+ print(f"Loaded {len(self.samples)} samples from cache")
181
+
182
+
183
+ def _save_cache(self, cache_file):
184
+ print("Saving metadata cache...")
185
+ cached_data = {
186
+ "samples": self.samples,
187
+ "buckets": self.buckets,
188
+ "stride": self.stride
189
+ }
190
+ with open(cache_file, "wb") as f:
191
+ pickle.dump(cached_data, f)
192
+ print(f"Cached {len(self.samples)} samples with stride={self.stride}")
193
+
194
+ # def _build_metadata(self):
195
+ # self.feature_files = [f for f in os.listdir(self.video_folder) if f.endswith(".mp4")]
196
+ # self.samples = []
197
+ # self.buckets = defaultdict(list)
198
+ # sample_idx = 0
199
+
200
+ # print(f"Processing {len(self.feature_files)} files...")
201
+ # for i, feature_file in enumerate(self.feature_files):
202
+ # if i % 10000 == 0:
203
+ # print(f"Processed {i}/{len(self.feature_files)} files")
204
+
205
+ # video_path = os.path.join(self.video_folder, feature_file)
206
+
207
+ # # Parse filename
208
+ # parts = feature_file.split("_")[:4]
209
+ # uttid = parts[0]
210
+ # num_frame = int(parts[1])
211
+ # height = int(parts[2])
212
+ # width = int(parts[3].replace(".mp4", ""))
213
+
214
+ def _build_metadata(self):
215
+ self.df = pd.read_csv(self.csv_file)
216
+
217
+ self.samples = []
218
+ self.buckets = defaultdict(list)
219
+ sample_idx = 0
220
+
221
+ print(f"Processing {len(self.df)} records from CSV with stride={self.stride}...")
222
+ for i, row in self.df.iterrows():
223
+ if i % 10000 == 0:
224
+ print(f"Processed {i}/{len(self.df)} records")
225
+
226
+ uttid = os.path.basename(row['videoFile']).replace(".mp4", "")
227
+ video_file = row['videoFile']
228
+ video_path = os.path.join(self.video_folder, video_file)
229
+ prompt = row["caption"]
230
+ num_frame = row["num_frame"]
231
+ height = row["height"]
232
+ width = row["width"]
233
+ fps = row["fps"]
234
+
235
+ # # keep length >= 121
236
+ # if num_frame < 121:
237
+ # continue
238
+
239
+ effective_num_frame = (num_frame + self.stride - 1) // self.stride
240
+ bucket_height, bucket_width = find_nearest_resolution_bucket(height, width, resolution=640)
241
+ bucket_num_frame = find_nearest_length_bucket(effective_num_frame, stride=self.stride)
242
+ bucket_key = (bucket_num_frame, bucket_height, bucket_width)
243
+
244
+ sample_info = {
245
+ "uttid": uttid,
246
+ "bucket_key": bucket_key,
247
+ "video_path": video_path,
248
+ "prompt": prompt,
249
+ "fps": fps,
250
+ "stride": self.stride,
251
+ "effective_num_frame": effective_num_frame,
252
+ "num_frame": num_frame,
253
+ "height": height,
254
+ "width": width,
255
+ "bucket_num_frame": bucket_num_frame,
256
+ "bucket_height": bucket_height,
257
+ "bucket_width": bucket_width,
258
+ }
259
+
260
+ self.samples.append(sample_info)
261
+ self.buckets[bucket_key].append(sample_idx)
262
+ sample_idx += 1
263
+
264
+ def __len__(self):
265
+ return len(self.samples)
266
+
267
+ def __getitem__(self, idx):
268
+ # sample_info = self.samples[idx]
269
+ # video_data = read_cut_crop_and_resize(
270
+ # video_path=sample_info["video_path"],
271
+ # f_prime=sample_info["bucket_num_frame"],
272
+ # h_prime=sample_info["bucket_height"],
273
+ # w_prime=sample_info["bucket_width"],
274
+ # stride=self.stride,
275
+ # )
276
+ while True:
277
+ sample_info = self.samples[idx]
278
+ try:
279
+ video_data = read_cut_crop_and_resize(
280
+ video_path=sample_info["video_path"],
281
+ f_prime=sample_info["bucket_num_frame"],
282
+ h_prime=sample_info["bucket_height"],
283
+ w_prime=sample_info["bucket_width"],
284
+ stride=self.stride,
285
+ )
286
+ break
287
+ except Exception:
288
+ idx = random.randint(0, len(self.samples) - 1)
289
+ print(f"Error loading {sample_info['video_path']}, retrying...")
290
+
291
+ return {
292
+ "uttid": sample_info["uttid"],
293
+ "bucket_key": sample_info["bucket_key"],
294
+ "video_metadata": {
295
+ "num_frames": sample_info["bucket_num_frame"],
296
+ "height": sample_info["bucket_height"],
297
+ "width": sample_info["bucket_width"],
298
+ "fps": sample_info["fps"],
299
+ "stride": self.stride,
300
+ "effective_num_frame": sample_info["effective_num_frame"],
301
+ },
302
+ "videos": video_data,
303
+ "prompts": sample_info["prompt"],
304
+ "first_frames_images": (video_data[0] + 1) / 2 * 255,
305
+ }
306
+
307
+ class BucketedSampler(Sampler):
308
+ def __init__(self, dataset, batch_size, drop_last=False, shuffle=False, seed=42):
309
+ self.dataset = dataset
310
+ self.batch_size = batch_size
311
+ self.drop_last = drop_last
312
+ self.shuffle = shuffle
313
+ self.seed = seed
314
+ self.generator = torch.Generator()
315
+ self.buckets = dataset.buckets
316
+ self._epoch = 0
317
+
318
+ def set_epoch(self, epoch):
319
+ self._epoch = epoch
320
+
321
+ def __iter__(self):
322
+ if self.shuffle:
323
+ self.generator.manual_seed(self.seed + self._epoch)
324
+ else:
325
+ self.generator.manual_seed(self.seed)
326
+
327
+ bucket_iterators = {}
328
+ bucket_batches = {}
329
+
330
+ for bucket_key, sample_indices in self.buckets.items():
331
+ indices = sample_indices.copy()
332
+ if self.shuffle:
333
+ indices = torch.randperm(len(indices), generator=self.generator).tolist()
334
+ indices = [sample_indices[i] for i in indices]
335
+
336
+ batches = []
337
+ for i in range(0, len(indices), self.batch_size):
338
+ batch = indices[i : i + self.batch_size]
339
+ if len(batch) == self.batch_size or not self.drop_last:
340
+ batches.append(batch)
341
+
342
+ if batches:
343
+ bucket_batches[bucket_key] = batches
344
+ bucket_iterators[bucket_key] = iter(batches)
345
+
346
+ remaining_buckets = list(bucket_iterators.keys())
347
+
348
+ while remaining_buckets:
349
+ idx = torch.randint(len(remaining_buckets), (1,), generator=self.generator).item()
350
+ bucket_key = remaining_buckets[idx]
351
+
352
+ bucket_iter = bucket_iterators[bucket_key]
353
+
354
+ try:
355
+ batch = next(bucket_iter)
356
+ yield batch
357
+ except StopIteration:
358
+ remaining_buckets.remove(bucket_key)
359
+
360
+ def __len__(self):
361
+ total_batches = 0
362
+ for sample_indices in self.buckets.values():
363
+ num_batches = len(sample_indices) // self.batch_size
364
+ if not self.drop_last and len(sample_indices) % self.batch_size != 0:
365
+ num_batches += 1
366
+ total_batches += num_batches
367
+ return total_batches
368
+
369
+
370
+ def collate_fn(batch):
371
+ def collate_dict(data_list):
372
+ if isinstance(data_list[0], dict):
373
+ return {
374
+ key: collate_dict([d[key] for d in data_list])
375
+ for key in data_list[0]
376
+ }
377
+ elif isinstance(data_list[0], torch.Tensor):
378
+ return torch.stack(data_list)
379
+ else:
380
+ return data_list
381
+
382
+ return {
383
+ key: collate_dict([d[key] for d in batch])
384
+ for key in batch[0]
385
+ }
386
+
387
+
388
+ if __name__ == "__main__":
389
+ from accelerate import Accelerator
390
+
391
+ base_name = "sekai-game-drone"
392
+ csv_file = f"/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/{base_name}_updated.csv"
393
+ video_folder = f"/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/{base_name}"
394
+ stride = 1
395
+ batch_size = 2
396
+ num_train_epochs = 1
397
+ seed = 0
398
+ output_dir = "accelerate_checkpoints"
399
+ checkpoint_dirs = (
400
+ [
401
+ d
402
+ for d in os.listdir(output_dir)
403
+ if d.startswith("checkpoint-") and os.path.isdir(os.path.join(output_dir, d))
404
+ ]
405
+ if os.path.exists(output_dir)
406
+ else []
407
+ )
408
+
409
+ dataset = BucketedFeatureDataset(csv_file=csv_file, video_folder=video_folder, stride=stride)
410
+ sampler = BucketedSampler(dataset, batch_size=2, drop_last=False, shuffle=True, seed=seed)
411
+ dataloader = DataLoader(dataset, batch_sampler=sampler, collate_fn=collate_fn, num_workers=8)
412
+
413
+ print(len(dataset), len(dataloader))
414
+ accelerator = Accelerator()
415
+ dataloader = accelerator.prepare(dataloader)
416
+ print(f"Dataset size: {len(dataset)}, Dataloader batches: {len(dataloader)}")
417
+ print(f"Process index: {accelerator.process_index}, World size: {accelerator.num_processes}")
418
+
419
+ step = 0
420
+ global_step = 0
421
+ first_epoch = 0
422
+ num_update_steps_per_epoch = len(dataloader)
423
+
424
+ print("Testing dataloader...")
425
+ step = global_step
426
+ for epoch in range(first_epoch, num_train_epochs):
427
+ sampler.set_epoch(epoch)
428
+ skip_steps = 0
429
+ printed_skip_log = False
430
+ for i, batch in enumerate(dataloader):
431
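+ # When resuming inside an epoch, fast-forward past the batches already consumed before global_step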
+ if epoch == first_epoch and skip_steps < (global_step % num_update_steps_per_epoch):
432
+ skip_steps += 1
433
+ continue
434
+ if epoch == first_epoch and not printed_skip_log:
435
+ print(f"Skip {skip_steps} steps in epoch {epoch}")
436
+ printed_skip_log = True
437
+
438
+ # Get metadata
439
+ uttid = batch["uttid"]
440
+ bucket_key = batch["bucket_key"]
441
+ num_frame = batch["video_metadata"]["num_frames"]
442
+ height = batch["video_metadata"]["height"]
443
+ width = batch["video_metadata"]["width"]
444
+
445
+ # Get feature
446
+ video_data = batch["videos"]
447
+ prompt = batch["prompts"]
448
+ first_frames_images = batch["first_frames_images"]
449
+ first_frames_images = [torchvision.transforms.ToPILImage()(x.to(torch.uint8)) for x in first_frames_images]
450
+
451
+ # import pdb;pdb.set_trace()
452
+ # save_frames(video_data[0].squeeze(0), video_path="1.mp4")
453
+
454
+ if accelerator.process_index == 0:
455
+ # print info
456
+ print(f" Step {step}:")
457
+ print(f" Batch {i}:")
458
+ print(f" Batch size: {len(uttid)}")
459
+ print(f" Uttids: {uttid}")
460
+ print(f" Dimensions - frames: {num_frame[0]}, height: {height[0]}, width: {width[0]}")
461
+ print(f" Bucket key: {bucket_key[0]}")
462
+ print(f" Videos shape: {video_data.shape}")
463
+ print(f" Cpation: {prompt}")
464
+
465
+ # verify
466
+ assert all(nf == num_frame[0] for nf in num_frame), "Frame numbers not consistent in batch"
467
+ assert all(h == height[0] for h in height), "Heights not consistent in batch"
468
+ assert all(w == width[0] for w in width), "Widths not consistent in batch"
469
+
470
+ print(" ✓ Batch dimensions are consistent")
471
+
472
+ step += 1
dataset_code/sekai/offload/get_ffmpeg.sh ADDED
@@ -0,0 +1,4 @@
1
+
2
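+ # Count the exact number of decoded video frames in the segment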
+ ffprobe -v quiet -count_frames -select_streams v:0 -show_entries stream=nb_frames -of csv=p=0 /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/dummy_segments/00100200002_0021815_0022008.mp4
3
+
4
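+ # Report the nominal frame rate of the video stream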
+ ffprobe -v quiet -select_streams v:0 -show_entries stream=r_frame_rate -of csv=p=0 /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/dummy_segments/00100200002_0021815_0022008.mp4
dataset_code/sekai/offload/get_temp_csv.py ADDED
@@ -0,0 +1,118 @@
1
+ import os
2
+ import pandas as pd
3
+ import argparse
4
+ from tqdm import tqdm
5
+
6
+ def extract_uttid_from_video_file(video_file):
7
+ """
8
+ Extract the uttid from the videoFile column (strip the .mp4 suffix).
9
+ """
10
+ if video_file.endswith('.mp4'):
11
+ return video_file[:-4] # strip the .mp4 suffix
12
+ return video_file
13
+
14
+ def create_filtered_csv(csv_file, output_latent_folder, output_csv_file):
15
+ """
16
+ Create a filtered CSV that only contains the samples that still need processing.
17
+ Matching is done on uttid only, without relying on any other metadata.
18
+ """
19
+ # Read the original CSV
20
+ df = pd.read_csv(csv_file)
21
+ print(f"Original dataset size: {len(df)}")
22
+
23
+ # Collect the latent files that already exist
24
+ existing_files = set()
25
+ if os.path.exists(output_latent_folder):
26
+ for filename in os.listdir(output_latent_folder):
27
+ if filename.endswith('.pt'):
28
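+ # Latent files are named "{uttid}_{num_frames}_{height}_{width}.pt"; the last three fields are metadata, the rest forms the uttid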
+ parts = filename[:-3].split('_')
29
+ if len(parts) >= 4: # need at least the uttid plus 3 metadata fields
30
+ uttid_parts = parts[:-3]
31
+ uttid = '_'.join(uttid_parts)
32
+ existing_files.add(uttid)
33
+
34
+ print(f"Found {len(existing_files)} existing latent files")
35
+
36
+ df_uttids = df['videoFile'].apply(extract_uttid_from_video_file)
37
+ mask = ~df_uttids.isin(existing_files)
38
+ filtered_df = df[mask]
39
+
40
+ # Save to a new CSV file
41
+ os.makedirs(os.path.dirname(output_csv_file), exist_ok=True)
42
+ filtered_df.to_csv(output_csv_file, index=False)
43
+
44
+ print(f"Filtered dataset size: {len(filtered_df)}")
45
+ print(f"Filtered CSV saved to: {output_csv_file}")
46
+
47
+ return len(filtered_df)
48
+
49
+ def create_all_filtered_csvs():
50
+ """
51
+ Create filtered CSV files for all datasets.
52
+ """
53
+ base_csv_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/"
54
+ base_output_latent_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/"
55
+
56
+ csv_paths = [
57
+ "sekai-game-walking-193_updated.csv",
58
+ "sekai-real-walking-hq-193_updated.csv",
59
+ "sekai-real-walking-hq-386_updated.csv",
60
+ "sekai-game-walking-386_updated.csv"
61
+ ]
62
+ output_latent_paths = [
63
+ "sekai-game-walking-193/latents_stride1",
64
+ "sekai-real-walking-hq-193/latents_stride1",
65
+ "sekai-real-walking-hq-386/latents_stride2",
66
+ "sekai-game-walking-386/latents_stride2"
67
+ ]
68
+
69
+ for csv_path, output_latent_path in zip(csv_paths, output_latent_paths):
70
+ original_csv = os.path.join(base_csv_path, csv_path)
71
+ output_latent_folder = os.path.join(base_output_latent_path, output_latent_path)
72
+
73
+ # Build the filtered CSV file name
74
+ filtered_csv_name = csv_path.replace('_updated.csv', '_filtered.csv')
75
+ filtered_csv_path = os.path.join(base_csv_path, filtered_csv_name)
76
+
77
+ print(f"\nProcessing: {csv_path}")
78
+
79
+ filtered_count = create_filtered_csv(
80
+ csv_file=original_csv,
81
+ output_latent_folder=output_latent_folder,
82
+ output_csv_file=filtered_csv_path
83
+ )
84
+
85
+ print(f"Created filtered CSV: {filtered_csv_path} with {filtered_count} samples")
86
+
87
+ def main():
88
+ parser = argparse.ArgumentParser(description="Create filtered CSV for processing")
89
+ # parser.add_argument("--csv_file", type=str, help="Original CSV file path")
90
+ # parser.add_argument("--output_latent_folder", type=str, help="Output latent folder path")
91
+ # parser.add_argument("--output_csv_file", type=str, help="Output filtered CSV file path")
92
+ parser.add_argument("--batch", action="store_true", help="Process all datasets in batch")
93
+
94
+ args = parser.parse_args()
95
+ create_all_filtered_csvs()
96
+
97
+ # if args.batch:
98
+ # # Process all datasets in batch
99
+ # create_all_filtered_csvs()
100
+ # else:
101
+ # # Process a single dataset
102
+ # if not all([args.csv_file, args.output_latent_folder, args.output_csv_file]):
103
+ # print("Error: For single processing, --csv_file, --output_latent_folder, and --output_csv_file are required")
104
+ # return
105
+
106
+ # filtered_count = create_filtered_csv(
107
+ # csv_file=args.csv_file,
108
+ # output_latent_folder=args.output_latent_folder,
109
+ # output_csv_file=args.output_csv_file
110
+ # )
111
+
112
+ # if filtered_count == 0:
113
+ # print("No samples need processing!")
114
+ # else:
115
+ # print(f"Ready to process {filtered_count} samples")
116
+
117
+ if __name__ == "__main__":
118
+ main()
dataset_code/sekai/offload/kill.sh ADDED
@@ -0,0 +1,3 @@
1
+ pkill -9 -f run.sh
2
+ pkill -9 -f offoload_features_hv.py
3
+ pkill -9 -f offoload_features_hv_official.py
dataset_code/sekai/offload/offoload_features_hv.py ADDED
@@ -0,0 +1,326 @@
1
+ import argparse
2
+ import os
3
+ from tqdm import tqdm
4
+ from diffusers import AutoencoderKLHunyuanVideo
5
+ from transformers import (
6
+ CLIPTextModel,
7
+ CLIPTokenizer,
8
+ LlamaModel,
9
+ LlamaTokenizerFast,
10
+ SiglipImageProcessor,
11
+ SiglipVisionModel,
12
+ )
13
+ from diffusers.video_processor import VideoProcessor
14
+ from diffusers.utils import export_to_video, load_image
15
+
16
+ from dummy_dataloader import BucketedFeatureDataset, BucketedSampler, collate_fn
17
+ from torch.utils.data import DataLoader
18
+
19
+ import torch
20
+ import torch.distributed as dist
21
+ import torch.nn as nn
22
+ from torch.nn.parallel import DistributedDataParallel as DDP
23
+ from torch.utils.data.distributed import DistributedSampler
24
+ from torch.utils.data import Subset
25
+ import torchvision.transforms as transforms
26
+ import numpy as np
27
+ import matplotlib.pyplot as plt
28
+ from matplotlib.animation import FuncAnimation
29
+ from IPython.display import HTML, display
30
+ from IPython.display import clear_output
31
+
32
+ from accelerate import Accelerator, DistributedType
33
+ from accelerate.logging import get_logger
34
+ from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
35
+ from diffusers.training_utils import free_memory
36
+
37
+ from accelerate import Accelerator
38
+ from utils_framepack import encode_image, encode_prompt
39
+
40
+ def setup_distributed_env():
41
+ dist.init_process_group(backend="nccl")
42
+ torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
43
+
44
+ def cleanup_distributed_env():
45
+ dist.destroy_process_group()
46
+
47
+ def main(rank, world_size, global_rank, stride, batch_size, dataloader_num_workers, csv_file, video_folder, output_latent_folder, pretrained_model_name_or_path, siglip_model_name_or_path):
48
+ weight_dtype = torch.bfloat16
49
+ device = rank
50
+ seed = 42
51
+
52
+ # Load the tokenizers
53
+ tokenizer_one = LlamaTokenizerFast.from_pretrained(
54
+ pretrained_model_name_or_path,
55
+ subfolder="tokenizer",
56
+ )
57
+ tokenizer_two = CLIPTokenizer.from_pretrained(
58
+ pretrained_model_name_or_path,
59
+ subfolder="tokenizer_2",
60
+ )
61
+ feature_extractor = SiglipImageProcessor.from_pretrained(
62
+ siglip_model_name_or_path,
63
+ subfolder="feature_extractor",
64
+
65
+ )
66
+
67
+ vae = AutoencoderKLHunyuanVideo.from_pretrained(
68
+ pretrained_model_name_or_path,
69
+ subfolder="vae",
70
+ torch_dtype=torch.float32,
71
+ )
72
+ vae_scale_factor_spatial = vae.spatial_compression_ratio
73
+ video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)
74
+
75
+ text_encoder_one = LlamaModel.from_pretrained(
76
+ pretrained_model_name_or_path,
77
+ subfolder="text_encoder",
78
+ torch_dtype=weight_dtype,
79
+ )
80
+ text_encoder_two = CLIPTextModel.from_pretrained(
81
+ pretrained_model_name_or_path,
82
+ subfolder="text_encoder_2",
83
+ torch_dtype=weight_dtype,
84
+ )
85
+ image_encoder = SiglipVisionModel.from_pretrained(
86
+ siglip_model_name_or_path,
87
+ subfolder="image_encoder",
88
+ torch_dtype=weight_dtype,
89
+ )
90
+
91
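+ # All encoders are frozen and used in inference mode only; they are then moved to this rank's GPU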
+ vae.requires_grad_(False)
92
+ text_encoder_one.requires_grad_(False)
93
+ text_encoder_two.requires_grad_(False)
94
+ image_encoder.requires_grad_(False)
95
+ vae.eval()
96
+ text_encoder_one.eval()
97
+ text_encoder_two.eval()
98
+ image_encoder.eval()
99
+
100
+ vae = vae.to(device)
101
+ text_encoder_one = text_encoder_one.to(device)
102
+ text_encoder_two = text_encoder_two.to(device)
103
+ image_encoder = image_encoder.to(device)
104
+
105
+ # dist.barrier()
106
+ dataset = BucketedFeatureDataset(csv_file=csv_file, video_folder=video_folder, stride=stride)
107
+ sampler = BucketedSampler(dataset, batch_size=batch_size, drop_last=True if batch_size != 1 else False, shuffle=False, seed=seed)
108
+ dataloader = DataLoader(
109
+ dataset,
110
+ batch_sampler=sampler,
111
+ collate_fn=collate_fn,
112
+ num_workers=dataloader_num_workers,
113
+ # pin_memory=True,
114
+ prefetch_factor=2 if dataloader_num_workers != 0 else None,
115
+ # persistent_workers=True if dataloader_num_workers > 0 else False,
116
+ )
117
+
118
+ print(len(dataset), len(dataloader))
119
+ accelerator = Accelerator()
120
+ dataloader = accelerator.prepare(dataloader)
121
+ print(f"Dataset size: {len(dataset)}, Dataloader batches: {len(dataloader)}")
122
+ print(f"Process index: {accelerator.process_index}, World size: {accelerator.num_processes}")
123
+
124
+ sampler.set_epoch(0)
125
+ if rank==0:
126
+ pbar = tqdm(total=len(dataloader), desc="Processing")
127
+ # dist.barrier()
128
+ for idx, batch in enumerate(dataloader):
129
+ free_memory()
130
+
131
+ valid_indices = []
132
+ valid_uttids = []
133
+ valid_num_frames = []
134
+ valid_heights = []
135
+ valid_widths = []
136
+ valid_videos = []
137
+ valid_prompts = []
138
+ valid_first_frames_images = []
139
+
140
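+ # Keep only samples whose latent file does not exist yet; the output name encodes uttid, frame count, height and width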
+ for i, (uttid, num_frame, height, width) in enumerate(zip(batch["uttid"], batch["video_metadata"]["num_frames"], batch["video_metadata"]["height"], batch["video_metadata"]["width"])):
141
+ os.makedirs(output_latent_folder, exist_ok=True)
142
+ output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
143
+ if not os.path.exists(output_path):
144
+ valid_indices.append(i)
145
+ valid_uttids.append(uttid)
146
+ valid_num_frames.append(num_frame)
147
+ valid_heights.append(height)
148
+ valid_widths.append(width)
149
+ valid_videos.append(batch["videos"][i])
150
+ valid_prompts.append(batch["prompts"][i])
151
+ valid_first_frames_images.append(batch["first_frames_images"][i])
152
+ else:
153
+ print(f"skipping {uttid}")
154
+
155
+ if not valid_indices:
156
+ print("skipping entire batch!")
157
+ if rank==0:
158
+ pbar.update(1)
159
+ pbar.set_postfix({"batch": idx})
160
+ continue
161
+
162
+ batch = None
163
+ del batch
164
+ free_memory()
165
+
166
+ batch = {
167
+ "uttid": valid_uttids,
168
+ "video_metadata": {
169
+ "num_frames": valid_num_frames,
170
+ "height": valid_heights,
171
+ "width": valid_widths
172
+ },
173
+ "videos": torch.stack(valid_videos),
174
+ "prompts": valid_prompts,
175
+ "first_frames_images": torch.stack(valid_first_frames_images),
176
+ }
177
+
178
+ if len(batch["uttid"]) == 0:
179
+ print("All samples in this batch are already processed, skipping!")
180
+ continue
181
+
182
+ with torch.no_grad():
183
+ # VAE-encode the clip: swap the frame/channel axes to (B, C, F, H, W), encode, then apply the scaling factor
184
+ pixel_values = batch["videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
185
+ vae_latents = vae.encode(pixel_values).latent_dist.sample()
186
+ vae_latents = vae_latents * vae.config.scaling_factor
187
+
188
+ # Encode prompts
189
+ prompts = batch["prompts"]
190
+ prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = encode_prompt(
191
+ tokenizer=tokenizer_one,
192
+ text_encoder=text_encoder_one,
193
+ tokenizer_2=tokenizer_two,
194
+ text_encoder_2=text_encoder_two,
195
+ prompt=prompts,
196
+ device=device,
197
+ )
198
+
199
+ # Prepare images
200
+ image_tensor = batch["first_frames_images"]
201
+ images = [transforms.ToPILImage()(x.to(torch.uint8)) for x in image_tensor]
202
+ image = video_processor.preprocess(image=images, height=batch["videos"].shape[-2], width=batch["videos"].shape[-1])
203
+ image_embeds = encode_image(
204
+ feature_extractor,
205
+ image_encoder,
206
+ image,
207
+ device=device,
208
+ dtype=weight_dtype,
209
+ )
210
+
211
+ for uttid, num_frame, height, width, cur_vae_latent, cur_prompt_embed, cur_pooled_prompt_embed, cur_prompt_attention_mask, cur_image_embed in zip(batch["uttid"], batch["video_metadata"]["num_frames"], batch["video_metadata"]["height"], batch["video_metadata"]["width"], vae_latents, prompt_embeds, pooled_prompt_embeds, prompt_attention_mask, image_embeds):
212
+ output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
213
+ temp_to_save = {
214
+ "vae_latent": cur_vae_latent.cpu().detach(),
215
+ "prompt_embed": cur_prompt_embed.cpu().detach(),
216
+ "pooled_prompt_embeds": cur_pooled_prompt_embed.cpu().detach(),
217
+ "prompt_attention_mask": cur_prompt_attention_mask.cpu().detach(),
218
+ "image_embeds": cur_image_embed.cpu().detach(),
219
+ }
220
+ torch.save(
221
+ temp_to_save,
222
+ output_path
223
+ )
224
+ print(f"save latent to: {output_path}")
225
+
226
+ if rank==0:
227
+ pbar.update(1)
228
+ pbar.set_postfix({"batch": idx})
229
+
230
+
231
+ pixel_values = None
232
+ prompts = None
233
+ image_tensor = None
234
+ images = None
235
+ vae_latents = None
236
+ vae_latents_2 = None
237
+ image_embeds = None
238
+ prompt_embeds = None
239
+ pooled_prompt_embeds = None
240
+ prompt_attention_mask = None
241
+ batch = None
242
+ valid_indices = None
243
+ valid_uttids = None
244
+ valid_num_frames = None
245
+ valid_heights = None
246
+ valid_widths = None
247
+ valid_videos = None
248
+ valid_prompts = None
249
+ valid_first_frames_images = None
250
+ temp_to_save = None
251
+
252
+ del pixel_values
253
+ del prompts
254
+ del image_tensor
255
+ del images
256
+ del vae_latents
257
+ del vae_latents_2
258
+ del image_embeds
259
+ del batch
260
+ del valid_indices
261
+ del valid_uttids
262
+ del valid_num_frames
263
+ del valid_heights
264
+ del valid_widths
265
+ del valid_videos
266
+ del valid_prompts
267
+ del valid_first_frames_images
268
+ del temp_to_save
269
+
270
+ free_memory()
271
+
272
+ # dist.barrier()
273
+
274
+ if __name__ == "__main__":
275
+ parser = argparse.ArgumentParser(description="Script for running model training and data processing.")
276
+ parser.add_argument("--stride", type=int, default=2, help="Batch size for processing")
277
+ parser.add_argument("--batch_size", type=int, default=1, help="Batch size for processing")
278
+ parser.add_argument("--dataloader_num_workers", type=int, default=0, help="Number of workers for data loading")
279
+ parser.add_argument("--csv_file", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/train/sekai-game-drone_updated.csv", help="Path to the config file")
280
+ parser.add_argument("--video_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/sekai-game-drone", help="Path to the config file")
281
+ parser.add_argument("--output_latent_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/sekai-game-drone/latents", help="Folder to store output latents")
282
+ parser.add_argument("--pretrained_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo", help="Pretrained model path")
283
+ parser.add_argument("--siglip_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl", help="Siglip model path")
284
+ args = parser.parse_args()
285
+
286
+
287
+ setup_distributed_env()
288
+
289
+ global_rank = dist.get_rank()
290
+ local_rank = int(os.environ["LOCAL_RANK"])
291
+ device = torch.cuda.current_device()
292
+ world_size = dist.get_world_size()
293
+
294
+ base_csv_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/"
295
+ base_video_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/"
296
+ base_output_latent_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/"
297
+
298
+ strides = [1, 1, 2, 2]
299
+ batch_sizes = [1, 1, 1, 1]
300
+ # csv_paths = ["sekai-game-walking-193_updated.csv", "sekai-real-walking-hq-193_updated.csv", "sekai-real-walking-hq-386_updated.csv", "sekai-game-walking-386_updated.csv"]
301
+ csv_paths = ["sekai-game-walking-193_filtered.csv", "sekai-real-walking-hq-193_filtered.csv", "sekai-real-walking-hq-386_filtered.csv", "sekai-game-walking-386_filtered.csv"]
302
+ video_paths = ["sekai-game-walking-193", "sekai-real-walking-hq-193", "sekai-real-walking-hq-386", "sekai-game-walking-386"]
303
+ output_latent_paths = ["sekai-game-walking-193/latents_stride1", "sekai-real-walking-hq-193/latents_stride1", "sekai-real-walking-hq-386/latents_stride2", "sekai-game-walking-386/latents_stride2"]
304
+
305
+ for stride, batch_size, csv_path, video_path, output_latent_path in zip(strides, batch_sizes, csv_paths, video_paths, output_latent_paths):
306
+ args.stride = stride
307
+ args.batch_size = batch_size
308
+ args.csv_file = os.path.join(base_csv_path, csv_path)
309
+ args.video_folder = os.path.join(base_video_path, video_path)
310
+ args.output_latent_folder = os.path.join(base_output_latent_path, output_latent_path)
311
+
312
+ main(
313
+ rank=device,
314
+ world_size=world_size,
315
+ global_rank=global_rank,
316
+ stride=args.stride,
317
+ batch_size=args.batch_size,
318
+ dataloader_num_workers=args.dataloader_num_workers,
319
+ csv_file=args.csv_file,
320
+ video_folder=args.video_folder,
321
+ output_latent_folder=args.output_latent_folder,
322
+ pretrained_model_name_or_path=args.pretrained_model_name_or_path,
323
+ siglip_model_name_or_path=args.siglip_model_name_or_path,
324
+ )
325
+
326
+ dist.destroy_process_group()
dataset_code/sekai/offload/offoload_features_hv_official.py ADDED
@@ -0,0 +1,307 @@
1
+ import argparse
2
+ import os
3
+ from tqdm import tqdm
4
+ from diffusers import AutoencoderKLHunyuanVideo
5
+ from transformers import (
6
+ CLIPTextModel,
7
+ CLIPTokenizer,
8
+ LlamaModel,
9
+ LlamaTokenizerFast,
10
+ SiglipImageProcessor,
11
+ SiglipVisionModel,
12
+ )
13
+ from diffusers.video_processor import VideoProcessor
14
+ from diffusers.utils import export_to_video, load_image
15
+
16
+ from dummy_dataloader_official import BucketedFeatureDataset, BucketedSampler, collate_fn
17
+ from torch.utils.data import DataLoader
18
+
19
+ import torch
20
+ import torch.distributed as dist
21
+ import torch.nn as nn
22
+ from torch.nn.parallel import DistributedDataParallel as DDP
23
+ from torch.utils.data.distributed import DistributedSampler
24
+ from torch.utils.data import Subset
25
+ import torchvision.transforms as transforms
26
+ import numpy as np
27
+ import matplotlib.pyplot as plt
28
+ from matplotlib.animation import FuncAnimation
29
+ from IPython.display import HTML, display
30
+ from IPython.display import clear_output
31
+
32
+ from accelerate import Accelerator, DistributedType
33
+ from accelerate.logging import get_logger
34
+ from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
35
+ from diffusers.training_utils import free_memory
36
+
37
+ from accelerate import Accelerator
38
+ from utils_framepack import encode_image, encode_prompt
39
+
40
+ def setup_distributed_env():
41
+ dist.init_process_group(backend="nccl")
42
+ torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
43
+
44
+ def cleanup_distributed_env():
45
+ dist.destroy_process_group()
46
+
47
+ def main(rank, world_size, global_rank, stride, batch_size, dataloader_num_workers, csv_file, video_folder, output_latent_folder, pretrained_model_name_or_path, siglip_model_name_or_path):
48
+ weight_dtype = torch.bfloat16
49
+ device = rank
50
+ seed = 42
51
+
52
+ # Load the tokenizers
53
+ tokenizer_one = LlamaTokenizerFast.from_pretrained(
54
+ pretrained_model_name_or_path,
55
+ subfolder="tokenizer",
56
+ )
57
+ tokenizer_two = CLIPTokenizer.from_pretrained(
58
+ pretrained_model_name_or_path,
59
+ subfolder="tokenizer_2",
60
+ )
61
+ feature_extractor = SiglipImageProcessor.from_pretrained(
62
+ siglip_model_name_or_path,
63
+ subfolder="feature_extractor",
64
+
65
+ )
66
+
67
+ vae = AutoencoderKLHunyuanVideo.from_pretrained(
68
+ pretrained_model_name_or_path,
69
+ subfolder="vae",
70
+ torch_dtype=torch.float32,
71
+ )
72
+ vae_scale_factor_spatial = vae.spatial_compression_ratio
73
+ video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)
74
+
75
+ text_encoder_one = LlamaModel.from_pretrained(
76
+ pretrained_model_name_or_path,
77
+ subfolder="text_encoder",
78
+ torch_dtype=weight_dtype,
79
+ )
80
+ text_encoder_two = CLIPTextModel.from_pretrained(
81
+ pretrained_model_name_or_path,
82
+ subfolder="text_encoder_2",
83
+ torch_dtype=weight_dtype,
84
+ )
85
+ image_encoder = SiglipVisionModel.from_pretrained(
86
+ siglip_model_name_or_path,
87
+ subfolder="image_encoder",
88
+ torch_dtype=weight_dtype,
89
+ )
90
+
91
+ vae.requires_grad_(False)
92
+ text_encoder_one.requires_grad_(False)
93
+ text_encoder_two.requires_grad_(False)
94
+ image_encoder.requires_grad_(False)
95
+ vae.eval()
96
+ text_encoder_one.eval()
97
+ text_encoder_two.eval()
98
+ image_encoder.eval()
99
+
100
+ vae = vae.to(device)
101
+ text_encoder_one = text_encoder_one.to(device)
102
+ text_encoder_two = text_encoder_two.to(device)
103
+ image_encoder = image_encoder.to(device)
104
+
105
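+ # Wait for every rank to finish loading the models before building the dataset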
+ dist.barrier()
106
+ dataset = BucketedFeatureDataset(csv_file=csv_file, video_folder=video_folder, stride=stride)
107
+ sampler = BucketedSampler(dataset, batch_size=batch_size, drop_last=False, shuffle=False, seed=seed)
108
+ dataloader = DataLoader(
109
+ dataset,
110
+ batch_sampler=sampler,
111
+ collate_fn=collate_fn,
112
+ num_workers=dataloader_num_workers,
113
+ # pin_memory=True,
114
+ prefetch_factor=2 if dataloader_num_workers != 0 else None,
115
+ # persistent_workers=True if dataloader_num_workers > 0 else False,
116
+ )
117
+
118
+ print(len(dataset), len(dataloader))
119
+ accelerator = Accelerator()
120
+ dataloader = accelerator.prepare(dataloader)
121
+ print(f"Dataset size: {len(dataset)}, Dataloader batches: {len(dataloader)}")
122
+ print(f"Process index: {accelerator.process_index}, World size: {accelerator.num_processes}")
123
+
124
+ sampler.set_epoch(0)
125
+ if rank==0:
126
+ pbar = tqdm(total=len(dataloader), desc="Processing")
127
+ dist.barrier()
128
+ for idx, batch in enumerate(dataloader):
129
+ free_memory()
130
+
131
+ valid_indices = []
132
+ valid_uttids = []
133
+ valid_num_frames = []
134
+ valid_heights = []
135
+ valid_widths = []
136
+ valid_videos = []
137
+ valid_prompts = []
138
+ valid_first_frames_images = []
139
+
140
+ for i, (uttid, num_frame, height, width) in enumerate(zip(batch["uttid"], batch["video_metadata"]["num_frames"], batch["video_metadata"]["height"], batch["video_metadata"]["width"])):
141
+ os.makedirs(output_latent_folder, exist_ok=True)
142
+ output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
143
+ if not os.path.exists(output_path):
144
+ valid_indices.append(i)
145
+ valid_uttids.append(uttid)
146
+ valid_num_frames.append(num_frame)
147
+ valid_heights.append(height)
148
+ valid_widths.append(width)
149
+ valid_videos.append(batch["videos"][i])
150
+ valid_prompts.append(batch["prompts"][i])
151
+ valid_first_frames_images.append(batch["first_frames_images"][i])
152
+ else:
153
+ print(f"skipping {uttid}")
154
+
155
+ if not valid_indices:
156
+ print("skipping entire batch!")
157
+ if rank==0:
158
+ pbar.update(1)
159
+ pbar.set_postfix({"batch": idx})
160
+ continue
161
+
162
+ batch = None
163
+ del batch
164
+ free_memory()
165
+
166
+ batch = {
167
+ "uttid": valid_uttids,
168
+ "video_metadata": {
169
+ "num_frames": valid_num_frames,
170
+ "height": valid_heights,
171
+ "width": valid_widths
172
+ },
173
+ "videos": torch.stack(valid_videos),
174
+ "prompts": valid_prompts,
175
+ "first_frames_images": torch.stack(valid_first_frames_images),
176
+ }
177
+
178
+ if len(batch["uttid"]) == 0:
179
+ print("All samples in this batch are already processed, skipping!")
180
+ continue
181
+
182
+ with torch.no_grad():
183
+ # VAE-encode the clip: swap the frame/channel axes to (B, C, F, H, W), encode, then apply the scaling factor
184
+ pixel_values = batch["videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
185
+ vae_latents = vae.encode(pixel_values).latent_dist.sample()
186
+ vae_latents = vae_latents * vae.config.scaling_factor
187
+
188
+ # Encode prompts
189
+ prompts = batch["prompts"]
190
+ prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = encode_prompt(
191
+ tokenizer=tokenizer_one,
192
+ text_encoder=text_encoder_one,
193
+ tokenizer_2=tokenizer_two,
194
+ text_encoder_2=text_encoder_two,
195
+ prompt=prompts,
196
+ device=device,
197
+ )
198
+
199
+ # Prepare images
200
+ image_tensor = batch["first_frames_images"]
201
+ images = [transforms.ToPILImage()(x.to(torch.uint8)) for x in image_tensor]
202
+ image = video_processor.preprocess(image=images, height=batch["videos"].shape[-2], width=batch["videos"].shape[-1])
203
+ image_embeds = encode_image(
204
+ feature_extractor,
205
+ image_encoder,
206
+ image,
207
+ device=device,
208
+ dtype=weight_dtype,
209
+ )
210
+
211
+ for uttid, num_frame, height, width, cur_vae_latent, cur_prompt_embed, cur_pooled_prompt_embed, cur_prompt_attention_mask, cur_image_embed in zip(batch["uttid"], batch["video_metadata"]["num_frames"], batch["video_metadata"]["height"], batch["video_metadata"]["width"], vae_latents, prompt_embeds, pooled_prompt_embeds, prompt_attention_mask, image_embeds):
212
+ output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
213
+ temp_to_save = {
214
+ "vae_latent": cur_vae_latent.cpu().detach(),
215
+ "prompt_embed": cur_prompt_embed.cpu().detach(),
216
+ "pooled_prompt_embeds": cur_pooled_prompt_embed.cpu().detach(),
217
+ "prompt_attention_mask": cur_prompt_attention_mask.cpu().detach(),
218
+ "image_embeds": cur_image_embed.cpu().detach(),
219
+ }
220
+ torch.save(
221
+ temp_to_save,
222
+ output_path
223
+ )
224
+ print(f"save latent to: {output_path}")
225
+
226
+ if rank==0:
227
+ pbar.update(1)
228
+ pbar.set_postfix({"batch": idx})
229
+
230
+
231
+ pixel_values = None
232
+ prompts = None
233
+ image_tensor = None
234
+ images = None
235
+ vae_latents = None
236
+ vae_latents_2 = None
237
+ image_embeds = None
238
+ prompt_embeds = None
239
+ pooled_prompt_embeds = None
240
+ prompt_attention_mask = None
241
+ batch = None
242
+ valid_indices = None
243
+ valid_uttids = None
244
+ valid_num_frames = None
245
+ valid_heights = None
246
+ valid_widths = None
247
+ valid_videos = None
248
+ valid_prompts = None
249
+ valid_first_frames_images = None
250
+ temp_to_save = None
251
+
252
+ del pixel_values
253
+ del prompts
254
+ del image_tensor
255
+ del images
256
+ del vae_latents
257
+ del vae_latents_2
258
+ del image_embeds
259
+ del batch
260
+ del valid_indices
261
+ del valid_uttids
262
+ del valid_num_frames
263
+ del valid_heights
264
+ del valid_widths
265
+ del valid_videos
266
+ del valid_prompts
267
+ del valid_first_frames_images
268
+ del temp_to_save
269
+
270
+ free_memory()
271
+ dist.barrier()
272
+ # dist.barrier()
273
+ dist.destroy_process_group()
274
+
275
+ if __name__ == "__main__":
276
+ parser = argparse.ArgumentParser(description="Script for running model training and data processing.")
277
+ parser.add_argument("--stride", type=int, default=2, help="Batch size for processing")
278
+ parser.add_argument("--batch_size", type=int, default=1, help="Batch size for processing")
279
+ parser.add_argument("--dataloader_num_workers", type=int, default=0, help="Number of workers for data loading")
280
+ parser.add_argument("--csv_file", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/train/sekai-game-drone_updated.csv", help="Path to the config file")
281
+ parser.add_argument("--video_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/sekai-game-drone", help="Path to the config file")
282
+ parser.add_argument("--output_latent_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/sekai-game-drone/latents", help="Folder to store output latents")
283
+ parser.add_argument("--pretrained_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo", help="Pretrained model path")
284
+ parser.add_argument("--siglip_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl", help="Siglip model path")
285
+ args = parser.parse_args()
286
+
287
+
288
+ setup_distributed_env()
289
+
290
+ global_rank = dist.get_rank()
291
+ local_rank = int(os.environ["LOCAL_RANK"])
292
+ device = torch.cuda.current_device()
293
+ world_size = dist.get_world_size()
294
+
295
+ main(
296
+ rank=device,
297
+ world_size=world_size,
298
+ global_rank=global_rank,
299
+ stride=args.stride,
300
+ batch_size=args.batch_size,
301
+ dataloader_num_workers=args.dataloader_num_workers,
302
+ csv_file=args.csv_file,
303
+ video_folder=args.video_folder,
304
+ output_latent_folder=args.output_latent_folder,
305
+ pretrained_model_name_or_path=args.pretrained_model_name_or_path,
306
+ siglip_model_name_or_path=args.siglip_model_name_or_path,
307
+ )
dataset_code/sekai/offload/run.sh ADDED
@@ -0,0 +1,85 @@
1
+ # export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6
2
+
3
+ export OMNISTORE_LOAD_STRICT_MODE=0
4
+ export OMNISTORE_LOGGING_LEVEL=ERROR
5
+ #################################################################
6
+ ## Torch
7
+ #################################################################
8
+ export TOKENIZERS_PARALLELISM=false
9
+ export TORCH_LOGS="+dynamo,recompiles,graph_breaks"
10
+ export TORCHDYNAMO_VERBOSE=1
11
+ export TORCH_NCCL_ENABLE_MONITORING=1
12
+ export PYTORCH_CUDA_ALLOC_CONF="expandable_segments:True,garbage_collection_threshold:0.9"
13
+ #################################################################
14
+
15
+
16
+ #################################################################
17
+ ## NCCL
18
+ #################################################################
19
+ export NCCL_IB_GID_INDEX=3
20
+ export NCCL_IB_HCA=$ARNOLD_RDMA_DEVICE
21
+ export NCCL_SOCKET_IFNAME=eth0
22
+ export NCCL_SOCKET_TIMEOUT=3600000
23
+
24
+ export NCCL_DEBUG=WARN # disable the verbose NCCL logs
25
+ export NCCL_P2P_DISABLE=0
26
+ export NCCL_IB_DISABLE=0 # was 1
27
+ export NCCL_SHM_DISABLE=0 # was 1
28
+ export NCCL_P2P_LEVEL=NVL
29
+
30
+ export NCCL_PXN_DISABLE=0
31
+ export NCCL_NET_GDR_LEVEL=2
32
+ export NCCL_IB_QPS_PER_CONNECTION=4
33
+ export NCCL_IB_TC=160
34
+ export NCCL_IB_TIMEOUT=22
35
+ #################################################################
36
+
37
+ #################################################################
38
+ ## DIST
39
+ #################################################################
40
+ MASTER_ADDR=$ARNOLD_WORKER_0_HOST
41
+ ports=(`echo $METIS_WORKER_0_PORT | tr ',' ' '`)
42
+ MASTER_PORT=${ports[0]}
43
+ NNODES=$ARNOLD_WORKER_NUM
44
+ NODE_RANK=$ARNOLD_ID
45
+ GPUS_PER_NODE=$ARNOLD_WORKER_GPU
46
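+ # NOTE: the cluster-provided values above are overridden below to force a single-node, single-GPU run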
+ GPUS_PER_NODE=1
47
+ NNODES=1
48
+ NODE_RANK=0
49
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
50
+
51
+ DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
52
+ if [ ! -z $RDZV_BACKEND ]; then
53
+ DISTRIBUTED_ARGS="${DISTRIBUTED_ARGS} --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_id 9863 --rdzv_backend c10d"
54
+ export NCCL_SHM_DISABLE=1
55
+ fi
56
+
57
+ echo -e "\033[31mDISTRIBUTED_ARGS: ${DISTRIBUTED_ARGS}\033[0m"
58
+
59
+ #################################################################
60
+ #
61
+ # torchrun $DISTRIBUTED_ARGS offoload_features_hv_official.py \
62
+ # --stride 2 \
63
+ # --batch_size 4 \
64
+ # --dataloader_num_workers 8 \
65
+ # --csv_file "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-drone_updated.csv" \
66
+ # --video_folder "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-drone" \
67
+ # --output_latent_folder "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-drone/latents_stride2"
68
+ # torchrun $DISTRIBUTED_ARGS offoload_features_hv_official.py \
69
+ # --stride 2 \
70
+ # --batch_size 4 \
71
+ # --dataloader_num_workers 8 \
72
+ # --csv_file "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-drone_updated.csv" \
73
+ # --video_folder "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-drone" \
74
+ # --output_latent_folder "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-drone/latents_stride2"
75
+ #
76
+
77
+ #
78
+ torchrun $DISTRIBUTED_ARGS offoload_features_hv.py \
79
+ --stride 1 \
80
+ --batch_size 1 \
81
+ --dataloader_num_workers 8 \
82
+ --csv_file "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193_updated.csv" \
83
+ --video_folder "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
84
+ --output_latent_folder "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193/latents_stride1"
85
+ #
dataset_code/sekai/offload/utils_framepack.py ADDED
@@ -0,0 +1,1229 @@
1
+ import math
2
+ import random
3
+ from typing import Any, Dict, List, Optional, Tuple, Union
4
+
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from einops import rearrange, repeat
8
+
9
+ from diffusers.training_utils import compute_density_for_timestep_sampling
10
+
11
+
12
+ DEFAULT_PROMPT_TEMPLATE = {
13
+ "template": (
14
+ "<|start_header_id|>system<|end_header_id|>\n\nDescribe the video by detailing the following aspects: "
15
+ "1. The main content and theme of the video."
16
+ "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
17
+ "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
18
+ "4. background environment, light, style and atmosphere."
19
+ "5. camera angles, movements, and transitions used in the video:<|eot_id|>"
20
+ "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
21
+ ),
22
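+ # Number of template tokens cropped from the start of the encoded prompt (the system-instruction prefix)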
+ "crop_start": 95,
23
+ }
24
+
25
+ def get_config_value(args, name):
26
+ if hasattr(args, name):
27
+ return getattr(args, name)
28
+ elif hasattr(args, 'training_config') and hasattr(args.training_config, name):
29
+ return getattr(args.training_config, name)
30
+ else:
31
+ raise AttributeError(f"Neither args nor args.training_config has attribute '{name}'")
32
+
33
+ # Copied from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video.HunyuanVideoPipeline._get_llama_prompt_embeds
34
+ def _get_llama_prompt_embeds(
35
+ tokenizer,
36
+ text_encoder,
37
+ prompt: Union[str, List[str]],
38
+ prompt_template: Dict[str, Any],
39
+ num_videos_per_prompt: int = 1,
40
+ device: Optional[torch.device] = None,
41
+ dtype: Optional[torch.dtype] = None,
42
+ max_sequence_length: int = 256,
43
+ num_hidden_layers_to_skip: int = 2,
44
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
45
+ device = device
46
+ dtype = dtype
47
+
48
+ prompt = [prompt] if isinstance(prompt, str) else prompt
49
+ batch_size = len(prompt)
50
+
51
+ prompt = [prompt_template["template"].format(p) for p in prompt]
52
+
53
+ crop_start = prompt_template.get("crop_start", None)
54
+ if crop_start is None:
55
+ prompt_template_input = tokenizer(
56
+ prompt_template["template"],
57
+ padding="max_length",
58
+ return_tensors="pt",
59
+ return_length=False,
60
+ return_overflowing_tokens=False,
61
+ return_attention_mask=False,
62
+ )
63
+ crop_start = prompt_template_input["input_ids"].shape[-1]
64
+ # Remove <|eot_id|> token and placeholder {}
65
+ crop_start -= 2
66
+
67
+ max_sequence_length += crop_start
68
+ text_inputs = tokenizer(
69
+ prompt,
70
+ max_length=max_sequence_length,
71
+ padding="max_length",
72
+ truncation=True,
73
+ return_tensors="pt",
74
+ return_length=False,
75
+ return_overflowing_tokens=False,
76
+ return_attention_mask=True,
77
+ )
78
+ text_input_ids = text_inputs.input_ids.to(device=device)
79
+ prompt_attention_mask = text_inputs.attention_mask.to(device=device)
80
+
81
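+ # Take an intermediate hidden state, skipping the last num_hidden_layers_to_skip decoder layers (as in the HunyuanVideo pipeline)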
+ prompt_embeds = text_encoder(
82
+ input_ids=text_input_ids,
83
+ attention_mask=prompt_attention_mask,
84
+ output_hidden_states=True,
85
+ ).hidden_states[-(num_hidden_layers_to_skip + 1)]
86
+ prompt_embeds = prompt_embeds.to(dtype=dtype)
87
+
88
+ if crop_start is not None and crop_start > 0:
89
+ prompt_embeds = prompt_embeds[:, crop_start:]
90
+ prompt_attention_mask = prompt_attention_mask[:, crop_start:]
91
+
92
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
93
+ _, seq_len, _ = prompt_embeds.shape
94
+ prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
95
+ prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
96
+ prompt_attention_mask = prompt_attention_mask.repeat(1, num_videos_per_prompt)
97
+ prompt_attention_mask = prompt_attention_mask.view(batch_size * num_videos_per_prompt, seq_len)
98
+
99
+ return prompt_embeds, prompt_attention_mask
100
+
101
+
102
+ # Copied from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video.HunyuanVideoPipeline._get_clip_prompt_embeds
103
+ def _get_clip_prompt_embeds(
104
+ tokenizer_2,
105
+ text_encoder_2,
106
+ prompt: Union[str, List[str]],
107
+ num_videos_per_prompt: int = 1,
108
+ device: Optional[torch.device] = None,
109
+ dtype: Optional[torch.dtype] = None,
110
+ max_sequence_length: int = 77,
111
+ ) -> torch.Tensor:
112
+ device = device
113
+ dtype = dtype
114
+
115
+ prompt = [prompt] if isinstance(prompt, str) else prompt
116
+ batch_size = len(prompt)
117
+
118
+ text_inputs = tokenizer_2(
119
+ prompt,
120
+ padding="max_length",
121
+ max_length=max_sequence_length,
122
+ truncation=True,
123
+ return_tensors="pt",
124
+ )
125
+
126
+ text_input_ids = text_inputs.input_ids
127
+ untruncated_ids = tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids
128
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
129
+ _ = tokenizer_2.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
130
+
131
+ prompt_embeds = text_encoder_2(text_input_ids.to(device), output_hidden_states=False).pooler_output
132
+
133
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
134
+ prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt)
135
+ prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, -1)
136
+
137
+ return prompt_embeds
138
+
139
+
140
+ # Copied from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video.HunyuanVideoPipeline.encode_prompt
141
+ def encode_prompt(
142
+ tokenizer,
143
+ text_encoder,
144
+ tokenizer_2,
145
+ text_encoder_2,
146
+ prompt: Union[str, List[str]],
147
+ prompt_2: Union[str, List[str]] = None,
148
+ prompt_template: Dict[str, Any] = DEFAULT_PROMPT_TEMPLATE,
149
+ num_videos_per_prompt: int = 1,
150
+ prompt_embeds: Optional[torch.Tensor] = None,
151
+ pooled_prompt_embeds: Optional[torch.Tensor] = None,
152
+ prompt_attention_mask: Optional[torch.Tensor] = None,
153
+ device: Optional[torch.device] = None,
154
+ dtype: Optional[torch.dtype] = None,
155
+ max_sequence_length: int = 256,
156
+ ):
157
+ if prompt_embeds is None:
158
+ prompt_embeds, prompt_attention_mask = _get_llama_prompt_embeds(
159
+ tokenizer,
160
+ text_encoder,
161
+ prompt,
162
+ prompt_template,
163
+ num_videos_per_prompt,
164
+ device=device,
165
+ dtype=dtype,
166
+ max_sequence_length=max_sequence_length,
167
+ )
168
+
169
+ if pooled_prompt_embeds is None:
170
+ if prompt_2 is None:
171
+ prompt_2 = prompt
172
+ pooled_prompt_embeds = _get_clip_prompt_embeds(
173
+ tokenizer_2,
174
+ text_encoder_2,
175
+ prompt,
176
+ num_videos_per_prompt,
177
+ device=device,
178
+ dtype=dtype,
179
+ max_sequence_length=77,
180
+ )
181
+
182
+ return prompt_embeds, pooled_prompt_embeds, prompt_attention_mask
183
+
184
+
185
+ def encode_image(
186
+ feature_extractor,
187
+ image_encoder,
188
+ image: torch.Tensor,
189
+ device: Optional[torch.device] = None,
190
+ dtype: Optional[torch.dtype] = None,
191
+ ):
192
+ device = device
193
+ image = (image + 1) / 2.0 # [-1, 1] -> [0, 1]
194
+ image = feature_extractor(images=image, return_tensors="pt", do_rescale=False).to(
195
+ device=device, dtype=image_encoder.dtype
196
+ )
197
+ image_embeds = image_encoder(**image).last_hidden_state
198
+ return image_embeds.to(dtype=dtype)
199
+
200
+
201
+ def get_framepack_input_t2v(
202
+ vae,
203
+ pixel_values, # [-1, 1], (B, C, F, H, W)
204
+ latent_window_size: int = 9,
205
+ vanilla_sampling: bool = False,
206
+ dtype: Optional[torch.dtype] = None,
207
+ is_keep_x0=False,
208
+ ):
209
+ # calculate latent frame count from original frame count (4n+1)
210
+ latent_f = (pixel_values.shape[2] - 1) // 4 + 1
211
+ # assert latent_f % latent_window_size == 0
212
+
213
+ # calculate the total number of sections (latent frame count divided by the window size)
214
+ total_latent_sections = math.floor(latent_f / latent_window_size) # 2.0
215
+ if total_latent_sections < 1:
216
+ min_frames_needed = latent_window_size * 4 + 1
217
+ raise ValueError(
218
+ f"Not enough frames for FramePack: {pixel_values.shape[2]} frames ({latent_f} latent frames), minimum required: {min_frames_needed} frames ({latent_window_size + 1} latent frames)"
219
+ )
220
+
221
+ # actual latent frame count (aligned to section boundaries)
222
+ latent_f_aligned = total_latent_sections * latent_window_size
223
+
224
+ # actual video frame count
225
+ frame_count_aligned = (latent_f_aligned - 1) * 4 + 1 # 73
226
+ if frame_count_aligned != pixel_values.shape[2]: # 73 != 89
227
+ print(
228
+ f"Frame count mismatch: required={frame_count_aligned} != actual={pixel_values.shape[2]}, trimming to {frame_count_aligned}"
229
+ )
230
+ pixel_values = pixel_values[
231
+ :, :, :frame_count_aligned, :, :
232
+ ] # torch.Size([1, 3, 89, 480, 832]) -> torch.Size([1, 3, 73, 480, 832])
233
+
234
+ latent_f = latent_f_aligned # Update to the aligned value
235
+
236
+ # VAE encode
237
+ pixel_values = pixel_values.to(device=vae.device, dtype=vae.dtype)
238
+ latents = vae.encode(pixel_values).latent_dist.sample()
239
+ latents = latents * vae.config.scaling_factor
240
+ latents = latents.to(dtype=dtype)
241
+
242
+ all_target_latents = []
243
+ all_target_latent_indices = []
244
+ all_clean_latents = []
245
+ all_clean_latent_indices = []
246
+ all_clean_latents_2x = []
247
+ all_clean_latent_2x_indices = []
248
+ all_clean_latents_4x = []
249
+ all_clean_latent_4x_indices = []
250
+ section_to_video_idx = []
251
+
252
+ if vanilla_sampling:
253
+ # Vanilla Sampling Logic
254
+ if is_keep_x0:
255
+ for b in range(latents.shape[0]):
256
+ video_lat = latents[b : b + 1] # Keep batch dim: 1, C, F_aligned, H, W
257
+
258
+ for section_index in range(total_latent_sections):
259
+ target_start_f = section_index * latent_window_size
260
+ target_end_f = target_start_f + latent_window_size
261
+ start_latent = video_lat[:, :, 0:1, :, :]
262
+ target_latents = video_lat[:, :, target_start_f:target_end_f, :, :]
263
+
264
+ # Clean latents preparation (Vanilla)
265
+ if section_index == 0:
266
+ clean_latents_total_count = 2 + 2 + 16
267
+ else:
268
+ clean_latents_total_count = 1 + 2 + 16
269
+ history_latents = torch.zeros(
270
+ size=(
271
+ 1,
272
+ 16,
273
+ clean_latents_total_count,
274
+ video_lat.shape[-2],
275
+ video_lat.shape[-1],
276
+ ),
277
+ device=video_lat.device,
278
+ dtype=video_lat.dtype,
279
+ )
280
+
281
+ history_start_f = 0
282
+ video_start_f = target_start_f - clean_latents_total_count
283
+ copy_count = clean_latents_total_count
284
+
285
+ if video_start_f < 0:
286
+ history_start_f = -video_start_f
287
+ copy_count = clean_latents_total_count - history_start_f
288
+ video_start_f = 0
289
+ if copy_count > 0:
290
+ history_latents[:, :, history_start_f:] = video_lat[
291
+ :, :, video_start_f : video_start_f + copy_count, :, :
292
+ ]
293
+
294
+ # indices generation (Vanilla): copy from FramePack-F1
295
+ if section_index == 0:
296
+ indices = torch.arange(0, sum([16, 2, 2, latent_window_size])).unsqueeze(0)
297
+ (
298
+ clean_latent_4x_indices,
299
+ clean_latent_2x_indices,
300
+ clean_latent_indices,
301
+ latent_indices,
302
+ ) = indices.split([16, 2, 2, latent_window_size], dim=1)
303
+ clean_latents_4x, clean_latents_2x, clean_latents = history_latents.split([16, 2, 2], dim=2)
304
+ else:
305
+ indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
306
+ (
307
+ clean_latent_indices_start,
308
+ clean_latent_4x_indices,
309
+ clean_latent_2x_indices,
310
+ clean_latent_1x_indices,
311
+ latent_indices,
312
+ ) = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
313
+ clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
314
+
315
+ clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents.split([16, 2, 1], dim=2)
316
+ clean_latents = torch.cat([start_latent, clean_latents_1x], dim=2)
317
+
318
+ all_target_latents.append(target_latents)
319
+ all_target_latent_indices.append(latent_indices)
320
+ all_clean_latents.append(clean_latents)
321
+ all_clean_latent_indices.append(clean_latent_indices)
322
+ all_clean_latents_2x.append(clean_latents_2x)
323
+ all_clean_latent_2x_indices.append(clean_latent_2x_indices)
324
+ all_clean_latents_4x.append(clean_latents_4x)
325
+ all_clean_latent_4x_indices.append(clean_latent_4x_indices)
326
+ section_to_video_idx.append(b)
327
+ else:
328
+ for b in range(latents.shape[0]):
329
+ video_lat = latents[b : b + 1] # Keep batch dim: 1, C, F_aligned, H, W
330
+
331
+ for section_index in range(total_latent_sections):
332
+ target_start_f = section_index * latent_window_size
333
+ target_end_f = target_start_f + latent_window_size
334
+ target_latents = video_lat[:, :, target_start_f:target_end_f, :, :]
335
+
336
+ # Clean latents preparation (Vanilla)
337
+ clean_latents_total_count = 2 + 2 + 16
338
+ history_latents = torch.zeros(
339
+ size=(
340
+ 1,
341
+ 16,
342
+ clean_latents_total_count,
343
+ video_lat.shape[-2],
344
+ video_lat.shape[-1],
345
+ ),
346
+ device=video_lat.device,
347
+ dtype=video_lat.dtype,
348
+ )
349
+
350
+ history_start_f = 0
351
+ video_start_f = target_start_f - clean_latents_total_count
352
+ copy_count = clean_latents_total_count
353
+
354
+ if video_start_f < 0:
355
+ history_start_f = -video_start_f
356
+ copy_count = clean_latents_total_count - history_start_f
357
+ video_start_f = 0
358
+ if copy_count > 0:
359
+ history_latents[:, :, history_start_f:] = video_lat[
360
+ :, :, video_start_f : video_start_f + copy_count, :, :
361
+ ]
362
+
363
+ # indices generation (Vanilla): copy from FramePack-F1
364
+ indices = torch.arange(0, sum([16, 2, 2, latent_window_size])).unsqueeze(0)
365
+ (
366
+ clean_latent_4x_indices,
367
+ clean_latent_2x_indices,
368
+ clean_latent_indices,
369
+ latent_indices,
370
+ ) = indices.split([16, 2, 2, latent_window_size], dim=1)
371
+ clean_latents_4x, clean_latents_2x, clean_latents = history_latents.split([16, 2, 2], dim=2)
372
+
373
+ all_target_latents.append(target_latents)
374
+ all_target_latent_indices.append(latent_indices)
375
+ all_clean_latents.append(clean_latents)
376
+ all_clean_latent_indices.append(clean_latent_indices)
377
+ all_clean_latents_2x.append(clean_latents_2x)
378
+ all_clean_latent_2x_indices.append(clean_latent_2x_indices)
379
+ all_clean_latents_4x.append(clean_latents_4x)
380
+ all_clean_latent_4x_indices.append(clean_latent_4x_indices)
381
+ section_to_video_idx.append(b)
382
+ else:
383
+ pass
384
+
385
+ # Stack all sections into batches
386
+ batched_target_latents = torch.cat(all_target_latents, dim=0)
387
+ batched_target_latent_indices = torch.cat(all_target_latent_indices, dim=0)
388
+ batched_clean_latents = torch.cat(all_clean_latents, dim=0)
389
+ batched_clean_latent_indices = torch.cat(all_clean_latent_indices, dim=0)
390
+ batched_clean_latents_2x = torch.cat(all_clean_latents_2x, dim=0)
391
+ batched_clean_latent_2x_indices = torch.cat(all_clean_latent_2x_indices, dim=0)
392
+ batched_clean_latents_4x = torch.cat(all_clean_latents_4x, dim=0)
393
+ batched_clean_latent_4x_indices = torch.cat(all_clean_latent_4x_indices, dim=0)
394
+
395
+ return (
396
+ batched_target_latents,
397
+ batched_target_latent_indices,
398
+ batched_clean_latents,
399
+ batched_clean_latent_indices,
400
+ batched_clean_latents_2x,
401
+ batched_clean_latent_2x_indices,
402
+ batched_clean_latents_4x,
403
+ batched_clean_latent_4x_indices,
404
+ section_to_video_idx,
405
+ )
406
+
407
+
408
+ def get_framepack_input_i2v(
409
+ vae,
410
+ pixel_values, # [-1, 1], (B, C, F, H, W)
411
+ latent_window_size: int = 9,
412
+ vanilla_sampling: bool = False,
413
+ dtype: Optional[torch.dtype] = None,
414
+ ):
415
+ # calculate latent frame count from original frame count (4n+1)
416
+ latent_f = (pixel_values.shape[2] - 1) // 4 + 1
417
+
418
+ # calculate the total number of sections (excluding the first frame, divided by window size)
419
+ total_latent_sections = math.floor((latent_f - 1) / latent_window_size) # 2.0
420
+ if total_latent_sections < 1:
421
+ min_frames_needed = latent_window_size * 4 + 1
422
+ raise ValueError(
423
+ f"Not enough frames for FramePack: {pixel_values.shape[2]} frames ({latent_f} latent frames), minimum required: {min_frames_needed} frames ({latent_window_size + 1} latent frames)"
424
+ )
425
+
426
+ # actual latent frame count (aligned to section boundaries)
427
+ latent_f_aligned = total_latent_sections * latent_window_size + 1
428
+
429
+ # actual video frame count
430
+ frame_count_aligned = (latent_f_aligned - 1) * 4 + 1 # 73
431
+ if frame_count_aligned != pixel_values.shape[2]: # 73 != 89
432
+ print(
433
+ f"Frame count mismatch: required={frame_count_aligned} != actual={pixel_values.shape[2]}, trimming to {frame_count_aligned}"
434
+ )
435
+ pixel_values = pixel_values[
436
+ :, :, :frame_count_aligned, :, :
437
+ ] # torch.Size([1, 3, 89, 480, 832]) -> torch.Size([1, 3, 73, 480, 832])
438
+
439
+ latent_f = latent_f_aligned # Update to the aligned value
440
+
441
+ # VAE encode
442
+ pixel_values = pixel_values.to(device=vae.device, dtype=vae.dtype)
443
+ latents = vae.encode(pixel_values).latent_dist.sample()
444
+ latents = latents * vae.config.scaling_factor
445
+ latents = latents.to(dtype=dtype)
446
+
447
+ all_target_latents = []
448
+ all_target_latent_indices = []
449
+ all_clean_latents = []
450
+ all_clean_latent_indices = []
451
+ all_clean_latents_2x = []
452
+ all_clean_latent_2x_indices = []
453
+ all_clean_latents_4x = []
454
+ all_clean_latent_4x_indices = []
455
+ section_to_video_idx = []
456
+
457
+ if vanilla_sampling:
458
+ # Vanilla Sampling Logic
459
+ for b in range(latents.shape[0]):
460
+ video_lat = latents[b : b + 1] # Keep batch dim: 1, C, F_aligned, H, W
461
+
462
+ for section_index in range(total_latent_sections):
463
+ target_start_f = section_index * latent_window_size + 1
464
+ target_end_f = target_start_f + latent_window_size
465
+ target_latents = video_lat[:, :, target_start_f:target_end_f, :, :]
466
+ start_latent = video_lat[:, :, 0:1, :, :]
467
+
468
+ # Clean latents preparation (Vanilla)
469
+ clean_latents_total_count = 1 + 2 + 16
470
+ history_latents = torch.zeros(
471
+ size=(
472
+ 1,
473
+ 16,
474
+ clean_latents_total_count,
475
+ video_lat.shape[-2],
476
+ video_lat.shape[-1],
477
+ ),
478
+ device=video_lat.device,
479
+ dtype=video_lat.dtype,
480
+ )
481
+
482
+ history_start_f = 0
483
+ video_start_f = target_start_f - clean_latents_total_count
484
+ copy_count = clean_latents_total_count
485
+
486
+ if video_start_f < 0:
487
+ history_start_f = -video_start_f
488
+ copy_count = clean_latents_total_count - history_start_f
489
+ video_start_f = 0
490
+ if copy_count > 0:
491
+ history_latents[:, :, history_start_f:] = video_lat[
492
+ :, :, video_start_f : video_start_f + copy_count, :, :
493
+ ]
494
+
495
+ # indices generation (Vanilla): copy from FramePack-F1
496
+ indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
497
+ (
498
+ clean_latent_indices_start,
499
+ clean_latent_4x_indices,
500
+ clean_latent_2x_indices,
501
+ clean_latent_1x_indices,
502
+ latent_indices,
503
+ ) = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
504
+ clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
505
+
506
+ clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents.split([16, 2, 1], dim=2)
507
+ clean_latents = torch.cat([start_latent, clean_latents_1x], dim=2)
508
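+
+ # Worked example of the F1-style split above (illustrative only, assuming latent_window_size=9):
+ # indices = arange(0, 1 + 16 + 2 + 1 + 9) = [0 .. 28]
+ #   clean_latent_indices_start = [0]
+ #   clean_latent_4x_indices    = [1 .. 16]
+ #   clean_latent_2x_indices    = [17, 18]
+ #   clean_latent_1x_indices    = [19]
+ #   latent_indices             = [20 .. 28]
+ # so clean_latent_indices = cat([0], [19]) = [0, 19]: the start frame plus the most recent history frame.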
+
509
+ all_target_latents.append(target_latents)
510
+ all_target_latent_indices.append(latent_indices)
511
+ all_clean_latents.append(clean_latents)
512
+ all_clean_latent_indices.append(clean_latent_indices)
513
+ all_clean_latents_2x.append(clean_latents_2x)
514
+ all_clean_latent_2x_indices.append(clean_latent_2x_indices)
515
+ all_clean_latents_4x.append(clean_latents_4x)
516
+ all_clean_latent_4x_indices.append(clean_latent_4x_indices)
517
+ section_to_video_idx.append(b)
518
+ else:
519
+ # padding is reversed for inference (future to past)
520
+ latent_paddings = list(reversed(range(total_latent_sections))) # [1, 0]
521
+ # Note: The padding trick for inference. See the paper for details.
522
+ if total_latent_sections > 4:
523
+ latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0]
524
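+
+ # Illustrative examples of the padding schedule above (added note, not from the original commit):
+ #   total_latent_sections = 2 -> latent_paddings = [1, 0]
+ #   total_latent_sections = 6 -> latent_paddings = [3, 2, 2, 2, 1, 0]
+ # Sections are processed in inference order (future to past); the last iteration (padding 0)
+ # corresponds to the first section in time.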
+
525
+ for b in range(latents.shape[0]):
526
+ video_lat = latents[
527
+ b : b + 1
528
+ ] # keep batch dim, (1, C, F, H, W) # torch.Size([1, 16, 19, 60, 104])
529
+
530
+ # emulate inference step (history latents)
531
+ # Note: In inference, history_latents stores *generated* future latents.
532
+ # Here, for caching, we just need its shape and type for clean_* tensors.
533
+ # The actual content doesn't matter much as clean_* will be overwritten.
534
+ history_latents = torch.zeros(
535
+ (
536
+ 1,
537
+ video_lat.shape[1],
538
+ 1 + 2 + 16,
539
+ video_lat.shape[3],
540
+ video_lat.shape[4],
541
+ ),
542
+ dtype=video_lat.dtype,
543
+ ).to(video_lat.device) # torch.Size([1, 16, 19, 60, 104])
544
+
545
+ latent_f_index = latent_f - latent_window_size # Start from the last section # 19 - 9 = 10
546
+ section_index = total_latent_sections - 1 # 2 - 1 = 1
547
+
548
+ for latent_padding in latent_paddings:
549
+ is_last_section = (
550
+ section_index == 0
551
+ ) # the last section in inference order == the first section in time
552
+ latent_padding_size = latent_padding * latent_window_size
553
+ if is_last_section:
554
+ assert latent_f_index == 1, "Last section should be starting from frame 1"
555
+
556
+ # indices generation (same as inference)
557
+ indices = torch.arange(0, sum([1, latent_padding_size, latent_window_size, 1, 2, 16])).unsqueeze(0)
558
+ (
559
+ clean_latent_indices_pre, # Index for start_latent
560
+ blank_indices, # Indices for padding (future context in inference)
561
+ latent_indices, # Indices for the target latents to predict
562
+ clean_latent_indices_post, # Index for the most recent history frame
563
+ clean_latent_2x_indices, # Indices for the next 2 history frames
564
+ clean_latent_4x_indices, # Indices for the next 16 history frames
565
+ ) = indices.split([1, latent_padding_size, latent_window_size, 1, 2, 16], dim=1)
566
+
567
+ # Indices for clean_latents (start + recent history)
568
+ clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1)
569
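+
+ # Worked example of the split above (illustrative, assuming latent_window_size=9 and latent_padding=1,
+ # so latent_padding_size=9): indices = [0 .. 37], split as [1, 9, 9, 1, 2, 16]:
+ #   clean_latent_indices_pre  = [0]
+ #   blank_indices             = [1 .. 9]
+ #   latent_indices            = [10 .. 18]
+ #   clean_latent_indices_post = [19]
+ #   clean_latent_2x_indices   = [20, 21]
+ #   clean_latent_4x_indices   = [22 .. 37]
+ # giving clean_latent_indices = [0, 19].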
+
570
+ # clean latents preparation (emulating inference)
571
+ clean_latents_pre = video_lat[:, :, 0:1, :, :] # Always the first frame (start_latent)
572
+ clean_latents_post, clean_latents_2x, clean_latents_4x = history_latents[
573
+ :, :, : 1 + 2 + 16, :, :
574
+ ].split([1, 2, 16], dim=2)
575
+ clean_latents = torch.cat(
576
+ [clean_latents_pre, clean_latents_post], dim=2
577
+ ) # Combine start frame + placeholder
578
+
579
+ # Target latents for this section (ground truth)
580
+ target_latents = video_lat[:, :, latent_f_index : latent_f_index + latent_window_size, :, :]
581
+
582
+ all_target_latents.append(target_latents)
583
+ all_target_latent_indices.append(latent_indices)
584
+ all_clean_latents.append(clean_latents)
585
+ all_clean_latent_indices.append(clean_latent_indices)
586
+ all_clean_latents_2x.append(clean_latents_2x)
587
+ all_clean_latent_2x_indices.append(clean_latent_2x_indices)
588
+ all_clean_latents_4x.append(clean_latents_4x)
589
+ all_clean_latent_4x_indices.append(clean_latent_4x_indices)
590
+ section_to_video_idx.append(b)
591
+
592
+ if is_last_section: # If this was the first section generated in inference (time=0)
593
+ # History gets the start frame + the generated first section
594
+ generated_latents_for_history = video_lat[:, :, : latent_window_size + 1, :, :]
595
+ else:
596
+ # History gets the generated current section
597
+ generated_latents_for_history = target_latents # Use true latents as stand-in for generated
598
+
599
+ history_latents = torch.cat([generated_latents_for_history, history_latents], dim=2)
600
+
601
+ section_index -= 1
602
+ latent_f_index -= latent_window_size
603
+
604
+ # Stack all sections into batches
605
+ batched_target_latents = torch.cat(all_target_latents, dim=0)
606
+ batched_target_latent_indices = torch.cat(all_target_latent_indices, dim=0)
607
+ batched_clean_latents = torch.cat(all_clean_latents, dim=0)
608
+ batched_clean_latent_indices = torch.cat(all_clean_latent_indices, dim=0)
609
+ batched_clean_latents_2x = torch.cat(all_clean_latents_2x, dim=0)
610
+ batched_clean_latent_2x_indices = torch.cat(all_clean_latent_2x_indices, dim=0)
611
+ batched_clean_latents_4x = torch.cat(all_clean_latents_4x, dim=0)
612
+ batched_clean_latent_4x_indices = torch.cat(all_clean_latent_4x_indices, dim=0)
613
+
614
+ return (
615
+ batched_target_latents,
616
+ batched_target_latent_indices,
617
+ batched_clean_latents,
618
+ batched_clean_latent_indices,
619
+ batched_clean_latents_2x,
620
+ batched_clean_latent_2x_indices,
621
+ batched_clean_latents_4x,
622
+ batched_clean_latent_4x_indices,
623
+ section_to_video_idx,
624
+ )
625
+
626
+
627
+ def get_pyramid_input(
628
+ args,
629
+ scheduler,
630
+ latents, # [b c t h w]
631
+ pyramid_stage_num=3,
632
+ pyramid_sample_ratios=[1, 2, 1],
633
+ pyramid_sample_mode="efficient", # ["efficient", "full", "diffusion_forcing", "stream_sample"]
634
+ pyramid_stream_inference_steps=[10, 10, 10],
635
+ stream_chunk_size=5,
636
+ ):
637
+ assert pyramid_stage_num == len(pyramid_sample_ratios)
638
+ if pyramid_sample_mode not in ["efficient", "full", "diffusion_forcing", "stream_sample"]:
639
+ raise ValueError(
640
+ f"Invalid pyramid_sample_mode: {pyramid_sample_mode}. Must be one of ['efficient', 'full', 'diffusion_forcing', 'stream_sample']."
641
+ )
642
+
643
+ # Get clean pyramid latent list
644
+ pyramid_latent_list = []
645
+ pyramid_latent_list.append(latents)
646
+ num_frames, height, width = latents.shape[-3], latents.shape[-2], latents.shape[-1]
647
+ for _ in range(pyramid_stage_num - 1):
648
+ height //= 2
649
+ width //= 2
650
+ latents = rearrange(latents, "b c t h w -> (b t) c h w")
651
+ latents = torch.nn.functional.interpolate(latents, size=(height, width), mode="bilinear")
652
+ latents = rearrange(latents, "(b t) c h w -> b c t h w", t=num_frames)
653
+ pyramid_latent_list.append(latents)
654
+ pyramid_latent_list = list(reversed(pyramid_latent_list))
655
+
656
+ # Get pyramid noise list
657
+ noise = torch.randn_like(pyramid_latent_list[-1])
658
+ device = noise.device
659
+ dtype = pyramid_latent_list[-1].dtype
660
+ latent_frame_num = noise.shape[2]
661
+ input_video_num = noise.shape[0]
662
+
663
+ height, width = noise.shape[-2], noise.shape[-1]
664
+ noise_list = [noise]
665
+ cur_noise = noise
666
+ for i_s in range(pyramid_stage_num - 1):
667
+ height //= 2
668
+ width //= 2
669
+ cur_noise = rearrange(cur_noise, "b c t h w -> (b t) c h w")
670
+ cur_noise = F.interpolate(cur_noise, size=(height, width), mode="bilinear") * 2
671
+ cur_noise = rearrange(cur_noise, "(b t) c h w -> b c t h w", t=latent_frame_num)
672
+ noise_list.append(cur_noise)
673
+ noise_list = list(reversed(noise_list)) # make sure from low res to high res
674
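+
+ # Note (added for clarity, not in the original commit): the `* 2` after the bilinear downsample above
+ # presumably compensates for the variance lost by averaging — 2x bilinear downsampling of i.i.d. unit
+ # Gaussian noise roughly averages 4 samples, halving the std, so scaling by 2 keeps each pyramid
+ # stage's noise at approximately unit variance.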
+
675
+ # Get pyramid target list
676
+ if pyramid_sample_mode == "efficient":
677
+ assert input_video_num % (int(sum(pyramid_sample_ratios))) == 0
678
+ # To calculate the padding batchsize and column size
679
+ bsz = input_video_num // int(sum(pyramid_sample_ratios))
680
+ column_size = int(sum(pyramid_sample_ratios))
681
+ column_to_stage = {}
682
+ i_sum = 0
683
+ for i_s, column_num in enumerate(pyramid_sample_ratios):
684
+ for index in range(i_sum, i_sum + column_num):
685
+ column_to_stage[index] = i_s
686
+ i_sum += column_num
687
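+
+ # Illustrative example of the column layout above (not from the original commit):
+ # with pyramid_sample_ratios = [1, 2, 1], column_size = 4 and column_to_stage = {0: 0, 1: 1, 2: 1, 3: 2};
+ # with input_video_num = 8, bsz = 2, and pyramid_latent_list[i_s][index::column_size] assigns every
+ # 4th video in the batch to column `index`, so each stage sees a disjoint slice of the batch
+ # in proportion to its sample ratio.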
+
688
+ # from low resolution to high resolution
689
+ noisy_latents_list = []
690
+ sigmas_list = []
691
+ targets_list = []
692
+ timesteps_list = []
693
+ training_steps = scheduler.config.num_train_timesteps
694
+ for index in range(column_size):
695
+ i_s = column_to_stage[index]
696
+ clean_latent = pyramid_latent_list[i_s][index::column_size] # [bs, c, t, h, w]
697
+ last_clean_latent = None if i_s == 0 else pyramid_latent_list[i_s - 1][index::column_size]
698
+ start_sigma = scheduler.start_sigmas[i_s]
699
+ end_sigma = scheduler.end_sigmas[i_s]
700
+
701
+ if i_s == 0:
702
+ start_point = noise_list[i_s][index::column_size]
703
+ else:
704
+ # Get the upsampled latent
705
+ last_clean_latent = rearrange(last_clean_latent, "b c t h w -> (b t) c h w")
706
+ last_clean_latent = F.interpolate(
707
+ last_clean_latent,
708
+ size=(
709
+ last_clean_latent.shape[-2] * 2,
710
+ last_clean_latent.shape[-1] * 2,
711
+ ),
712
+ mode="nearest",
713
+ )
714
+ last_clean_latent = rearrange(last_clean_latent, "(b t) c h w -> b c t h w", t=latent_frame_num)
715
+ start_point = start_sigma * noise_list[i_s][index::column_size] + (1 - start_sigma) * last_clean_latent
716
+
717
+ if i_s == pyramid_stage_num - 1:
718
+ end_point = clean_latent
719
+ else:
720
+ end_point = end_sigma * noise_list[i_s][index::column_size] + (1 - end_sigma) * clean_latent
721
+
722
+ # Sample a random timestep for each image
723
+ # for weighting schemes where we sample timesteps non-uniformly
724
+ u = compute_density_for_timestep_sampling(
725
+ weighting_scheme=get_config_value(args, 'weighting_scheme'),
726
+ batch_size=bsz,
727
+ logit_mean=get_config_value(args, 'logit_mean'),
728
+ logit_std=get_config_value(args, 'logit_std'),
729
+ mode_scale=get_config_value(args, 'mode_scale'),
730
+ )
731
+ indices = (u * training_steps).long() # Totally 1000 training steps per stage
732
+ indices = indices.clamp(0, training_steps - 1)
733
+ timesteps = scheduler.timesteps_per_stage[i_s][indices].to(device=device)
734
+
735
+ # Add noise according to flow matching.
736
+ # zt = (1 - texp) * x + texp * z1
737
+ sigmas = scheduler.sigmas_per_stage[i_s][indices].to(device=device)
738
+ while len(sigmas.shape) < start_point.ndim:
739
+ sigmas = sigmas.unsqueeze(-1)
740
+
741
+ noisy_latents = sigmas * start_point + (1 - sigmas) * end_point
742
+
743
+ # [stage1_latent, stage2_latent, ..., stagen_latent], which will be concat after patching
744
+ noisy_latents_list.append([noisy_latents.to(dtype)])
745
+ sigmas_list.append(sigmas.to(dtype))
746
+ timesteps_list.append(timesteps.to(dtype))
747
+ targets_list.append(start_point - end_point) # The standard rectified flow matching objective
748
+ elif pyramid_sample_mode == "full":
749
+ # To calculate the batchsize
750
+ bsz = input_video_num
751
+
752
+ # from low resolution to high resolution
753
+ noisy_latents_list = []
754
+ sigmas_list = []
755
+ targets_list = []
756
+ timesteps_list = []
757
+ training_steps = scheduler.config.num_train_timesteps
758
+ for i_s, cur_sample_ratio in zip(range(pyramid_stage_num), pyramid_sample_ratios):
759
+ clean_latent = pyramid_latent_list[i_s] # [bs, c, t, h, w]
760
+ last_clean_latent = None if i_s == 0 else pyramid_latent_list[i_s - 1]
761
+ start_sigma = scheduler.start_sigmas[i_s]
762
+ end_sigma = scheduler.end_sigmas[i_s]
763
+
764
+ if i_s == 0:
765
+ start_point = noise_list[i_s]
766
+ else:
767
+ # Get the upsampled latent
768
+ last_clean_latent = rearrange(last_clean_latent, "b c t h w -> (b t) c h w")
769
+ last_clean_latent = F.interpolate(
770
+ last_clean_latent,
771
+ size=(
772
+ last_clean_latent.shape[-2] * 2,
773
+ last_clean_latent.shape[-1] * 2,
774
+ ),
775
+ mode="nearest",
776
+ )
777
+ last_clean_latent = rearrange(last_clean_latent, "(b t) c h w -> b c t h w", t=latent_frame_num)
778
+ start_point = start_sigma * noise_list[i_s] + (1 - start_sigma) * last_clean_latent
779
+
780
+ if i_s == pyramid_stage_num - 1:
781
+ end_point = clean_latent
782
+ else:
783
+ end_point = end_sigma * noise_list[i_s] + (1 - end_sigma) * clean_latent
784
+
785
+ for _ in range(cur_sample_ratio):
786
+ # Sample a random timestep for each image
787
+ # for weighting schemes where we sample timesteps non-uniformly
788
+ u = compute_density_for_timestep_sampling(
789
+ weighting_scheme=get_config_value(args, 'weighting_scheme'),
790
+ batch_size=bsz,
791
+ logit_mean=get_config_value(args, 'logit_mean'),
792
+ logit_std=get_config_value(args, 'logit_std'),
793
+ mode_scale=get_config_value(args, 'mode_scale'),
794
+ )
795
+ indices = (u * training_steps).long() # Totally 1000 training steps per stage
796
+ indices = indices.clamp(0, training_steps - 1)
797
+ timesteps = scheduler.timesteps_per_stage[i_s][indices].to(device=device)
798
+
799
+ # Add noise according to flow matching.
800
+ # zt = (1 - texp) * x + texp * z1
801
+ sigmas = scheduler.sigmas_per_stage[i_s][indices].to(device=device)
802
+ while len(sigmas.shape) < start_point.ndim:
803
+ sigmas = sigmas.unsqueeze(-1)
804
+
805
+ noisy_latents = sigmas * start_point + (1 - sigmas) * end_point
806
+
807
+ # [stage1_latent, stage2_latent, ..., stagen_latent]
808
+ noisy_latents_list.append(noisy_latents.to(dtype))
809
+ sigmas_list.append(sigmas.to(dtype))
810
+ timesteps_list.append(timesteps.to(dtype))
811
+ targets_list.append(start_point - end_point) # The standard rectified flow matching objective
812
+ elif pyramid_sample_mode == "diffusion_forcing":
813
+ # To calculate the batchsize
814
+ bsz = input_video_num
815
+ latent_chunk_num = latent_frame_num // stream_chunk_size
816
+ assert latent_frame_num % stream_chunk_size == 0
817
+
818
+ # from low resolution to high resolution
819
+ noisy_latents_list = []
820
+ sigmas_list = []
821
+ targets_list = []
822
+ timesteps_list = []
823
+ training_steps = scheduler.config.num_train_timesteps
824
+ for i_s, cur_sample_ratio in zip(range(pyramid_stage_num), pyramid_sample_ratios):
825
+ clean_latent = pyramid_latent_list[i_s] # [bs, c, t, h, w]
826
+ last_clean_latent = None if i_s == 0 else pyramid_latent_list[i_s - 1]
827
+ start_sigma = scheduler.start_sigmas[i_s]
828
+ end_sigma = scheduler.end_sigmas[i_s]
829
+
830
+ if i_s == 0:
831
+ start_point = noise_list[i_s]
832
+ else:
833
+ # Get the upsampled latent
834
+ last_clean_latent = rearrange(last_clean_latent, "b c t h w -> (b t) c h w")
835
+ last_clean_latent = F.interpolate(
836
+ last_clean_latent,
837
+ size=(
838
+ last_clean_latent.shape[-2] * 2,
839
+ last_clean_latent.shape[-1] * 2,
840
+ ),
841
+ mode="nearest",
842
+ )
843
+ last_clean_latent = rearrange(last_clean_latent, "(b t) c h w -> b c t h w", t=latent_frame_num)
844
+ start_point = start_sigma * noise_list[i_s] + (1 - start_sigma) * last_clean_latent
845
+
846
+ if i_s == pyramid_stage_num - 1:
847
+ end_point = clean_latent
848
+ else:
849
+ end_point = end_sigma * noise_list[i_s] + (1 - end_sigma) * clean_latent
850
+
851
+ for _ in range(cur_sample_ratio):
852
+ # Sample a random timestep for each image
853
+ # for weighting schemes where we sample timesteps non-uniformly
854
+ u = compute_density_for_timestep_sampling(
855
+ weighting_scheme=get_config_value(args, 'weighting_scheme'),
856
+ batch_size=bsz * latent_chunk_num,
857
+ logit_mean=get_config_value(args, 'logit_mean'),
858
+ logit_std=get_config_value(args, 'logit_std'),
859
+ mode_scale=get_config_value(args, 'mode_scale'),
860
+ )
861
+ indices = (u * training_steps).long() # Totally 1000 training steps per stage
862
+ indices = indices.clamp(0, training_steps - 1)
863
+
864
+ timesteps = scheduler.timesteps_per_stage[i_s][indices].to(device=device)
865
+ timesteps = timesteps.view(bsz, latent_chunk_num) # [bsz, latent_chunk_num]
866
+ sigmas = scheduler.sigmas_per_stage[i_s][indices].to(device=device)
867
+ sigmas = sigmas.view(bsz, latent_chunk_num) # [bsz, latent_chunk_num]
868
+
869
+ chunk_index = (
870
+ torch.arange(latent_frame_num, device=device).unsqueeze(0).expand(bsz, -1) // stream_chunk_size
871
+ )
872
+ chunk_index = chunk_index.clamp(max=latent_chunk_num - 1)
873
+ sigmas = torch.gather(sigmas, 1, chunk_index) # [bsz, t]
874
+ timesteps = torch.gather(timesteps, 1, chunk_index)
875
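+
+ # Worked example of the gather above (illustrative only): with latent_frame_num = 10 and
+ # stream_chunk_size = 5, latent_chunk_num = 2 and chunk_index per frame is
+ # [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], so the 2 per-chunk sigmas/timesteps sampled for each video are
+ # broadcast to 10 per-frame values — every frame in a chunk shares the same noise level.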
+
876
+ # Add noise according to flow matching.
877
+ # zt = (1 - texp) * x + texp * z1
878
+ sigmas = (
879
+ sigmas.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
880
+ ) # reshape to [bsz, 1, t, 1, 1] for broadcasting
881
+ noisy_latents = sigmas * start_point + (1 - sigmas) * end_point
882
+
883
+ # [stage1_latent, stage2_latent, ..., stagen_latent]
884
+ noisy_latents_list.append(noisy_latents.to(dtype)) # torch.Size([2, 16, 10, 12, 20])
885
+ sigmas_list.append(sigmas.to(dtype)) # torch.Size([2, 1, 10, 1, 1])
886
+ timesteps_list.append(timesteps.to(dtype)) # torch.Size([2, 10])
887
+ targets_list.append(start_point - end_point) # The standard rectified flow matching objective
888
+ elif pyramid_sample_mode == "stream_sample":
889
+ # training_all_progressive_timesteps
890
+ # skip 0. (1, max_inference_steps):[1.3850, 44.1200, 86.8550, 129.5900, 172.3250,
891
+ # 215.0600, 257.7950, 300.5300, 343.2650, 386.0000,
892
+ # 386.3580, 426.0960, 465.8340, 505.5720, 545.3100,
893
+ # 585.0480, 624.7860, 664.5240, 704.2620, 744.0000,
894
+ # 744.2560, 772.6720, 801.0880, 829.5040, 857.9200,
895
+ # 886.3360, 914.7520, 943.1680, 971.5840, 1000.0000]
896
+
897
+ # progressive_timesteps_stages
898
+ # stream_chunk_size=3:
899
+ # [ 386., 386., 386., 744., 744., 744., 1000., 1000., 1000.] high, mid, low
900
+ # [343.2650, 343.2650, 343.2650, 704.2620, 704.2620, 704.2620, 971.5840, 971.5840, 971.5840] high, mid, low
901
+ # [300.5300, 300.5300, 300.5300, 664.5240, 664.5240, 664.5240, 943.1680, 943.1680, 943.1680] high, mid, low
902
+ # [257.7950, 257.7950, 257.7950, 624.7860, 624.7860, 624.7860, 914.7520, 914.7520, 914.7520] high, mid, low
903
+ # [215.0600, 215.0600, 215.0600, 585.0480, 585.0480, 585.0480, 886.3360, 886.3360, 886.3360] high, mid, low
904
+ # [172.3250, 172.3250, 172.3250, 545.3100, 545.3100, 545.3100, 857.9200, 857.9200, 857.9200] high, mid, low
905
+ # [129.5900, 129.5900, 129.5900, 505.5720, 505.5720, 505.5720, 829.5040, 829.5040, 829.5040] high, mid, low
906
+ # [ 86.8550, 86.8550, 86.8550, 465.8340, 465.8340, 465.8340, 801.0880, 801.0880, 801.0880] high, mid, low
907
+ # [ 44.1200, 44.1200, 44.1200, 426.0960, 426.0960, 426.0960, 772.6720, 772.6720, 772.6720] high, mid, low
908
+ # [ 1.3850, 1.3850, 1.3850, 386.3580, 386.3580, 386.3580, 744.2560, 744.2560, 744.2560] high, mid, low
909
+
910
+ # stream_chunk_size=5, shape = (training_num_steps_to_be_saved, latent_frame_num):
911
+ # [545.3100, 545.3100, 545.3100, 545.3100, 545.3100, 1000.0000, 1000.0000, 1000.0000, 1000.0000, 1000.0000] mid, low
912
+ # [505.5720, 505.5720, 505.5720, 505.5720, 505.5720, 971.5840, 971.5840, 971.5840, 971.5840, 971.5840] mid, low
913
+ # [465.8340, 465.8340, 465.8340, 465.8340, 465.8340, 943.1680, 943.1680, 943.1680, 943.1680, 943.1680] mid, low
914
+ # [426.0960, 426.0960, 426.0960, 426.0960, 426.0960, 914.7520, 914.7520, 914.7520, 914.7520, 914.7520] mid, low
915
+ # [386.3580, 386.3580, 386.3580, 386.3580, 386.3580, 886.3360, 886.3360, 886.3360, 886.3360, 886.3360] mid, low
916
+ # [386.0000, 386.0000, 386.0000, 386.0000, 386.0000, 857.9200, 857.9200, 857.9200, 857.9200, 857.9200] high, low
917
+ # [343.2650, 343.2650, 343.2650, 343.2650, 343.2650, 829.5040, 829.5040, 829.5040, 829.5040, 829.5040] high, low
918
+ # [300.5300, 300.5300, 300.5300, 300.5300, 300.5300, 801.0880, 801.0880, 801.0880, 801.0880, 801.0880] high, low
919
+ # [257.7950, 257.7950, 257.7950, 257.7950, 257.7950, 772.6720, 772.6720, 772.6720, 772.6720, 772.6720] high, low
920
+ # [215.0600, 215.0600, 215.0600, 215.0600, 215.0600, 744.2560, 744.2560, 744.2560, 744.2560, 744.2560] high, low
921
+ # [172.3250, 172.3250, 172.3250, 172.3250, 172.3250, 744.0000, 744.0000, 744.0000, 744.0000, 744.0000] high, mid
922
+ # [129.5900, 129.5900, 129.5900, 129.5900, 129.5900, 704.2620, 704.2620, 704.2620, 704.2620, 704.2620] high, mid
923
+ # [ 86.8550, 86.8550, 86.8550, 86.8550, 86.8550, 664.5240, 664.5240, 664.5240, 664.5240, 664.5240] high, mid
924
+ # [ 44.1200, 44.1200, 44.1200, 44.1200, 44.1200, 624.7860, 624.7860, 624.7860, 624.7860, 624.7860] high, mid
925
+ # [ 1.3850, 1.3850, 1.3850, 1.3850, 1.3850, 585.0480, 585.0480, 585.0480, 585.0480, 585.0480] high, mid
926
+
927
+ # To calculate the batchsize
928
+ bsz = input_video_num
929
+
930
+ # Get multi stage timesteps for streamgen
931
+ (
932
+ training_num_steps_to_be_saved,
933
+ training_all_timesteps_stage_ids,
934
+ training_all_progressive_timesteps,
935
+ progressive_timesteps_stages,
936
+ ) = get_stream_sample(
937
+ scheduler=scheduler,
938
+ max_latent_frame_num=latent_frame_num,
939
+ stream_chunk_size=stream_chunk_size,
940
+ pyramid_stage_num=pyramid_stage_num,
941
+ pyramid_stream_inference_steps=pyramid_stream_inference_steps,
942
+ )
943
+ timestep_to_stage = {
944
+ float(t.item()): int(stage.item())
945
+ for t, stage in zip(training_all_progressive_timesteps[0], training_all_timesteps_stage_ids[0])
946
+ }
947
+
948
+ while True:
949
+ initialization = random.choice([True, False])
950
+ termination = random.choice([True, False])
951
+ if not (initialization and termination): # Make sure not both are True
952
+ break
953
+
954
+ stage_i = random.randint(0, training_num_steps_to_be_saved - 1)
955
+ timesteps = progressive_timesteps_stages[stage_i].clone().repeat(bsz, 1) # (b, f)
956
+ if initialization: # get the ending timesteps, [999]x5 from [91, 192, ..., 999]x5
957
+ timesteps = timesteps[:, -latent_frame_num:]
958
+ elif termination: # get the starting timesteps, [91]x5 from [91, ..., 999]x5
959
+ timesteps = timesteps[:, :latent_frame_num]
960
+
961
+ # For stage mapping / Get sigmas
962
+ sigmas, stage_latent_mapping = get_sigmas_from_pyramid_timesteps(scheduler, timesteps, timestep_to_stage)
963
+
964
+ # To device
965
+ timesteps = timesteps.to(device)
966
+ sigmas = sigmas.to(device)
967
+
968
+ # Get pyramid stage points
969
+ stage_point_list = []
970
+ for i_s in range(pyramid_stage_num):
971
+ clean_latent = pyramid_latent_list[i_s] # [bs, c, t, h, w]
972
+ last_clean_latent = None if i_s == 0 else pyramid_latent_list[i_s - 1]
973
+ start_sigma = scheduler.start_sigmas[i_s]
974
+ end_sigma = scheduler.end_sigmas[i_s]
975
+
976
+ if i_s == 0:
977
+ start_point = noise_list[i_s]
978
+ else:
979
+ # Get the upsampled latent
980
+ last_clean_latent = rearrange(last_clean_latent, "b c t h w -> (b t) c h w")
981
+ last_clean_latent = F.interpolate(
982
+ last_clean_latent,
983
+ size=(
984
+ last_clean_latent.shape[-2] * 2,
985
+ last_clean_latent.shape[-1] * 2,
986
+ ),
987
+ mode="nearest",
988
+ )
989
+ last_clean_latent = rearrange(last_clean_latent, "(b t) c h w -> b c t h w", t=latent_frame_num)
990
+ start_point = start_sigma * noise_list[i_s] + (1 - start_sigma) * last_clean_latent
991
+
992
+ if i_s == pyramid_stage_num - 1:
993
+ end_point = clean_latent
994
+ else:
995
+ end_point = end_sigma * noise_list[i_s] + (1 - end_sigma) * clean_latent
996
+
997
+ stage_point_list.append((start_point, end_point))
998
+
999
+ noisy_latents_list = [] # torch.Size([2, 16, 10, 12, 20])
1000
+ targets_list = [] # torch.Size([2, 16, 10, 12, 20])
1001
+ sigmas_list = [] # torch.Size([2, 1, 10, 1, 1])
1002
+ timesteps_list = [] # torch.Size([2, 10])
1003
+ temp_noisy_latents_list = []
1004
+ temp_targets_list = []
1005
+
1006
+ unique_elements = list(map(int, torch.unique(stage_latent_mapping)))
1007
+ for cur_stage in reversed(unique_elements):
1008
+ stage_indices = torch.nonzero(stage_latent_mapping == cur_stage, as_tuple=True)
1009
+ start_index = stage_indices[1][0].item()
1010
+ end_index = start_index + stream_chunk_size
1011
+
1012
+ start_point, end_point = stage_point_list[cur_stage]
1013
+ start_point_slice = start_point[:, :, start_index:end_index, :, :]
1014
+ end_point_slice = end_point[:, :, start_index:end_index, :, :]
1015
+
1016
+ sigmas_slice = sigmas[:, :, start_index:end_index, :, :]
1017
+ noisy_latents = sigmas_slice * start_point_slice + (1 - sigmas_slice) * end_point_slice
1018
+ target = start_point_slice - end_point_slice
1019
+
1020
+ temp_noisy_latents_list.append(noisy_latents.to(dtype))
1021
+ temp_targets_list.append(target)
1022
+
1023
+ noisy_latents_list.append(temp_noisy_latents_list)
1024
+ targets_list.append(temp_targets_list)
1025
+ sigmas_list.append(sigmas.to(dtype))
1026
+ timesteps_list.append(timesteps.to(dtype=dtype))
1027
+
1028
+ return noisy_latents_list, sigmas_list, timesteps_list, targets_list
1029
+
1030
+
1031
+ def get_sigmas_from_pyramid_timesteps(scheduler, timesteps, timestep_to_stage):
1032
+ # For stage mapping
1033
+ flat_timesteps = timesteps.flatten()
1034
+ stage_latent_mapping = torch.tensor(
1035
+ [timestep_to_stage.get(float(t.item()), -1) for t in flat_timesteps],
1036
+ device=timesteps.device,
1037
+ ).view(timesteps.shape)
1038
+
1039
+ # Get sigmas
1040
+ sigmas = torch.full_like(timesteps, -1.0)
1041
+ for i in range(timesteps.shape[0]):
1042
+ for j in range(timesteps.shape[1]):
1043
+ temp_stage_mapping = int(stage_latent_mapping[i, j])
1044
+ target_value = timesteps[i, j]
1045
+ temp_indice = (
1046
+ (
1047
+ torch.isclose(
1048
+ scheduler.timesteps_per_stage[temp_stage_mapping],
1049
+ target_value.clone().detach().to(scheduler.timesteps_per_stage[temp_stage_mapping].dtype),
1050
+ )
1051
+ )
1052
+ .nonzero(as_tuple=True)[0]
1053
+ .item()
1054
+ )
1055
+ sigmas[i, j] = scheduler.sigmas_per_stage[temp_stage_mapping][temp_indice]
1056
+ sigmas = sigmas.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
1057
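+
+ # Shape note (added for clarity): sigmas goes from (b, f) to (b, 1, f, 1, 1) here so it can be
+ # broadcast against latents of shape (b, c, f, h, w) when mixing start and end points per frame.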
+
1058
+ return sigmas, stage_latent_mapping
1059
+
1060
+
1061
+ def get_stream_sample(
1062
+ scheduler,
1063
+ max_latent_frame_num,
1064
+ stream_chunk_size,
1065
+ pyramid_stage_num=3,
1066
+ pyramid_stream_inference_steps=[10, 10, 10],
1067
+ ):
1068
+ max_inference_steps = sum(pyramid_stream_inference_steps)
1069
+
1070
+ # Set training all progressive timesteps and stage mapping
1071
+ all_progressive_timesteps_list = []
1072
+ timestep_stage_list = []
1073
+ for stage_idx in range(pyramid_stage_num):
1074
+ scheduler.set_timesteps(pyramid_stream_inference_steps[stage_idx], stage_idx)
1075
+ temp_timesteps = scheduler.timesteps # shape: (n_i,)
1076
+ all_progressive_timesteps_list.append(temp_timesteps)
1077
+ timestep_stage_list.append(
1078
+ torch.full_like(temp_timesteps, fill_value=stage_idx)
1079
+ ) # same shape, filled with stage_idx
1080
+ all_progressive_timesteps = torch.cat(all_progressive_timesteps_list).unsqueeze(0).flip(1) # (1, T)
1081
+ all_timesteps_stage_ids = torch.cat(timestep_stage_list).unsqueeze(0).flip(1)
1082
+
1083
+ # Set training progressive timesteps stages
1084
+ # every stream_chunk_size frames is treated as one, using the same noise level. f' = f / c
1085
+ assert max_latent_frame_num % stream_chunk_size == 0, (
1086
+ f"num_frames should be multiple of stream_chunk_size, {max_latent_frame_num} % {stream_chunk_size} != 0"
1087
+ )
1088
+ assert max_inference_steps % (max_latent_frame_num // stream_chunk_size) == 0, (
1089
+ f"max_inference_steps should be multiple of max_latent_frame_num // stream_chunk_size, {max_inference_steps} % {max_latent_frame_num // stream_chunk_size} != 0"
1090
+ )
1091
+ num_steps_to_be_saved = max_inference_steps // (
1092
+ max_latent_frame_num // stream_chunk_size
1093
+ ) # every m steps, save stream_chunk_size frames. m = t / f' = t / (f / c) = c * (t / f)
1094
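+
+ # Illustrative numbers for the formula above (added note, not from the original commit): with
+ # pyramid_stream_inference_steps = [10, 10, 10] (max_inference_steps = 30),
+ # max_latent_frame_num = 15 and stream_chunk_size = 5, there are f' = 3 chunks and
+ # num_steps_to_be_saved = 30 // 3 = 10, i.e. each saved schedule keeps every 10th timestep
+ # and repeats it stream_chunk_size times along the frame axis (see the `repeat(...)` below).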
+
1095
+ # (b, t) -> [(b, t / m) in reverse range(m)] -> [(b, f) in reverse range(m)]
1096
+ progressive_timesteps_stages = [
1097
+ repeat(
1098
+ all_progressive_timesteps[:, (num_steps_to_be_saved - 1) - s :: num_steps_to_be_saved],
1099
+ "b f -> b f c",
1100
+ c=stream_chunk_size,
1101
+ ).flatten(1, 2)
1102
+ for s in range(num_steps_to_be_saved)
1103
+ ]
1104
+
1105
+ return num_steps_to_be_saved, all_timesteps_stage_ids, all_progressive_timesteps, progressive_timesteps_stages
1106
+
1107
+
1108
+ if __name__ == "__main__":
1109
+ import argparse
1110
+
1111
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
1112
+ parser.add_argument(
1113
+ "--weighting_scheme",
1114
+ type=str,
1115
+ default="logit_normal",
1116
+ choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"],
1117
+ help=('We default to the "none" weighting scheme for uniform sampling and uniform loss'),
1118
+ )
1119
+ parser.add_argument(
1120
+ "--logit_mean",
1121
+ type=float,
1122
+ default=0.0,
1123
+ help="mean to use when using the `'logit_normal'` weighting scheme.",
1124
+ )
1125
+ parser.add_argument(
1126
+ "--logit_std",
1127
+ type=float,
1128
+ default=1.0,
1129
+ help="std to use when using the `'logit_normal'` weighting scheme.",
1130
+ )
1131
+ parser.add_argument(
1132
+ "--mode_scale",
1133
+ type=float,
1134
+ default=1.29,
1135
+ help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.",
1136
+ )
1137
+ args = parser.parse_args()
1138
+
1139
+ device = "cuda"
1140
+
1141
+ import sys
1142
+
1143
+ sys.path.append("../")
1144
+ from scheduler.scheduling_flow_matching_pyramid import PyramidFlowMatchEulerDiscreteScheduler
1145
+
1146
+ stages = [1, 2, 4]
1147
+ timestep_shift = 1.0
1148
+ stage_range = [0, 1 / 3, 2 / 3, 1]
1149
+ scheduler_gamma = 1 / 3
1150
+ scheduler = PyramidFlowMatchEulerDiscreteScheduler(
1151
+ shift=timestep_shift,
1152
+ stages=len(stages),
1153
+ stage_range=stage_range,
1154
+ gamma=scheduler_gamma,
1155
+ )
1156
+ print(
1157
+ f"The start sigmas and end sigmas of each stage is Start: {scheduler.start_sigmas}, End: {scheduler.end_sigmas}, Ori_start: {scheduler.ori_start_sigmas}"
1158
+ )
1159
+
1160
+ # Test get_framepack_input
1161
+ from diffusers import AutoencoderKLHunyuanVideo
1162
+
1163
+ # 5: (21, 41, 61, 81, 101)
1164
+ # 6: (25, 49, 73, 97, 121)
1165
+ # 7: (29, 57, 85, 113, 141)
1166
+ # 8: (33, 65, 97, 129, 161)
1167
+ # 9: (37, 73, 109, 145, 181)
1168
+ # 10: (41, 81, 121, 161, 201)
1169
+ # 11: (45, 89, 133, 177, 221)
1170
+ # 12: (49, 97, 145, 193, 241)
1171
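+
+ # (Added note, hedged): each row above appears to list the valid pixel frame counts for a given
+ # latent_window_size, i.e. frames = latent_window_size * 4 * k + 1 for k = 1..5 sections
+ # (e.g. window 12, k = 5 -> 12 * 4 * 5 + 1 = 241, matching the test input below).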
+
1172
+ pixel_values = torch.randn([2, 3, 241, 384, 640], device=device).clamp(-1, 1)
1173
+ pixel_values = pixel_values.to(torch.bfloat16)
1174
+ vae = AutoencoderKLHunyuanVideo.from_pretrained(
1175
+ "/mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo/",
1176
+ subfolder="vae",
1177
+ weight_dtype=torch.bfloat16,
1178
+ ).to(device)
1179
+ vae.requires_grad_(False)
1180
+ vae.eval()
1181
+
1182
+ (
1183
+ model_input, # torch.Size([2, 16, 9, 60, 104])
1184
+ indices_latents, # torch.Size([2, 9])
1185
+ latents_clean, # torch.Size([2, 16, 2, 60, 104])
1186
+ indices_clean_latents, # torch.Size([2, 2])
1187
+ latents_history_2x, # torch.Size([2, 16, 2, 60, 104])
1188
+ indices_latents_history_2x, # torch.Size([2, 2])
1189
+ latents_history_4x, # torch.Size([2, 16, 16, 60, 104])
1190
+ indices_latents_history_4x, # torch.Size([2, 16])
1191
+ section_to_video_idx,
1192
+ ) = get_framepack_input_i2v(
1193
+ vae=vae,
1194
+ pixel_values=pixel_values, # torch.Size([1, 3, 73, 480, 832])
1195
+ latent_window_size=12,
1196
+ vanilla_sampling=False,
1197
+ dtype=torch.bfloat16,
1198
+ )
1199
+
1200
+ print(indices_latents, "\n", indices_clean_latents, "\n", indices_latents_history_2x, "\n", indices_latents_history_4x)
1201
+
1202
+ # print(
1203
+ # indices_latents,
1204
+ # "\n",
1205
+ # indices_clean_latents,
1206
+ # "\n",
1207
+ # indices_latents_history_2x,
1208
+ # "\n",
1209
+ # indices_latents_history_4x,
1210
+ # )
1211
+
1212
+ # Test get_pyramid_input
1213
+ # model_input = torch.randn([2, 16, 10, 48, 80], device=device)
1214
+ # noisy_model_input_list, sigmas_list, timesteps_list, targets_list = get_pyramid_input(
1215
+ # args=args,
1216
+ # scheduler=scheduler,
1217
+ # latents=model_input,
1218
+ # pyramid_stage_num=3,
1219
+ # pyramid_sample_ratios=[1, 2, 1],
1220
+ # pyramid_sample_mode="stream_sample",
1221
+ # stream_chunk_size=3,
1222
+ # pyramid_stream_inference_steps=[10, 10, 10],
1223
+ # )
1224
+
1225
+ # if isinstance(noisy_model_input_list[0], list):
1226
+ # total_sample_count = sum(y.shape[0] for x in noisy_model_input_list for y in x)
1227
+ # else:
1228
+ # total_sample_count = sum(x.shape[0] for x in noisy_model_input_list)
1229
+ # batch_size = model_input.shape[0]
dataset_code/sekai/preprocess/0.sh ADDED
@@ -0,0 +1,53 @@
1
+ # CUDA_VISIBLE_DEVICES=5 python get_caption.py \
2
+ # --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
3
+ # --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
4
+ # --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
5
+ # --num_workers 8 \
6
+ # --part 0 \
7
+ # --total_part 1
8
+
9
+ # CUDA_VISIBLE_DEVICES=5 python get_caption.py \
10
+ # --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
11
+ # --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
12
+ # --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
13
+ # --num_workers 8 \
14
+ # --part 0 \
15
+ # --total_part 1
16
+
17
+
18
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
19
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
20
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
21
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
22
+ --num_workers 8 \
23
+ --part 0 \
24
+ --total_part 4 &
25
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
26
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
27
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
28
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
29
+ --num_workers 8 \
30
+ --part 2 \
31
+ --total_part 4 &
32
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
33
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
34
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
35
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
36
+ --num_workers 8 \
37
+ --part 2 \
38
+ --total_part 4 &
39
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
40
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
41
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
42
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
43
+ --num_workers 8 \
44
+ --part 3 \
45
+ --total_part 4
46
+
47
+ # CUDA_VISIBLE_DEVICES=5 python get_caption.py \
48
+ # --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
49
+ # --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
50
+ # --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
51
+ # --num_workers 8 \
52
+ # --part 0 \
53
+ # --total_part 1
dataset_code/sekai/preprocess/1.sh ADDED
@@ -0,0 +1,284 @@
1
+ bash install.sh
2
+
3
+ # python cut_video.py \
4
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
5
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193 \
6
+ # --frames-per-segment 193 \
7
+ # --max-workers 32 \
8
+ # --cur-part 1 \
9
+ # --total-part 6 \
10
+
11
+
12
+ # python cut_video.py \
13
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
14
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386 \
15
+ # --frames-per-segment 386 \
16
+ # --max-workers 32 \
17
+ # --cur-part 1 \
18
+ # --total-part 6 \
19
+
20
+ export PYTHONMULTIPROCESSING_START_METHOD=fork
21
+ export VLLM_WORKER_MULTIPROC_METHO=spawn
22
+
23
+ # python get_caption.py
24
+
25
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
26
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
27
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
28
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
29
+ --num_workers 8 \
30
+ --part 0 \
31
+ --total_part 32 &
32
+ sleep 20
33
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
34
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
35
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
36
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
37
+ --num_workers 8 \
38
+ --part 1 \
39
+ --total_part 32 &
40
+ sleep 20
41
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
42
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
43
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
44
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
45
+ --num_workers 8 \
46
+ --part 2 \
47
+ --total_part 32 &
48
+ sleep 20
49
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
50
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
51
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
52
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
53
+ --num_workers 8 \
54
+ --part 3 \
55
+ --total_part 32 &
56
+ sleep 20
57
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
58
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
59
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
60
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
61
+ --num_workers 8 \
62
+ --part 4 \
63
+ --total_part 32 &
64
+ sleep 20
65
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
66
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
67
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
68
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
69
+ --num_workers 8 \
70
+ --part 5 \
71
+ --total_part 32 &
72
+ sleep 20
73
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
74
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
75
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
76
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
77
+ --num_workers 8 \
78
+ --part 6 \
79
+ --total_part 32 &
80
+ sleep 20
81
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
82
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
83
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
84
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
85
+ --num_workers 8 \
86
+ --part 7 \
87
+ --total_part 32
88
+
89
+
90
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
91
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
92
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
93
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
94
+ --num_workers 8 \
95
+ --part 0 \
96
+ --total_part 32 &
97
+ sleep 20
98
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
99
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
100
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
101
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
102
+ --num_workers 8 \
103
+ --part 1 \
104
+ --total_part 32 &
105
+ sleep 20
106
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
107
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
108
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
109
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
110
+ --num_workers 8 \
111
+ --part 2 \
112
+ --total_part 32 &
113
+ sleep 20
114
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
115
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
116
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
117
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
118
+ --num_workers 8 \
119
+ --part 3 \
120
+ --total_part 32 &
121
+ sleep 20
122
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
123
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
124
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
125
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
126
+ --num_workers 8 \
127
+ --part 4 \
128
+ --total_part 32 &
129
+ sleep 20
130
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
131
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
132
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
133
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
134
+ --num_workers 8 \
135
+ --part 5 \
136
+ --total_part 32 &
137
+ sleep 20
138
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
139
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
140
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
141
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
142
+ --num_workers 8 \
143
+ --part 6 \
144
+ --total_part 32 &
145
+ sleep 20
146
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
147
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
148
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
149
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
150
+ --num_workers 8 \
151
+ --part 7 \
152
+ --total_part 32
153
+
154
+
155
+
156
+
157
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
158
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
159
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
160
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
161
+ --num_workers 8 \
162
+ --part 0 \
163
+ --total_part 32 &
164
+ sleep 20
165
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
166
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
167
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
168
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
169
+ --num_workers 8 \
170
+ --part 1 \
171
+ --total_part 32 &
172
+ sleep 20
173
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
174
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
175
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
176
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
177
+ --num_workers 8 \
178
+ --part 2 \
179
+ --total_part 32 &
180
+ sleep 20
181
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
182
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
183
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
184
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
185
+ --num_workers 8 \
186
+ --part 3 \
187
+ --total_part 32 &
188
+ sleep 20
189
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
190
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
191
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
192
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
193
+ --num_workers 8 \
194
+ --part 4 \
195
+ --total_part 32 &
196
+ sleep 20
197
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
198
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
199
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
200
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
201
+ --num_workers 8 \
202
+ --part 5 \
203
+ --total_part 32 &
204
+ sleep 20
205
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
206
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
207
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
208
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
209
+ --num_workers 8 \
210
+ --part 6 \
211
+ --total_part 32 &
212
+ sleep 20
213
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
214
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
215
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
216
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
217
+ --num_workers 8 \
218
+ --part 7 \
219
+ --total_part 32
220
+
221
+
222
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
223
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
224
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
225
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
226
+ --num_workers 8 \
227
+ --part 0 \
228
+ --total_part 32 &
229
+ sleep 20
230
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
231
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
232
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
233
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
234
+ --num_workers 8 \
235
+ --part 1 \
236
+ --total_part 32 &
237
+ sleep 20
238
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
239
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
240
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
241
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
242
+ --num_workers 8 \
243
+ --part 2 \
244
+ --total_part 32 &
245
+ sleep 20
246
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
247
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
248
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
249
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
250
+ --num_workers 8 \
251
+ --part 3 \
252
+ --total_part 32 &
253
+ sleep 20
254
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
255
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
256
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
257
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
258
+ --num_workers 8 \
259
+ --part 4 \
260
+ --total_part 32 &
261
+ sleep 20
262
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
263
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
264
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
265
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
266
+ --num_workers 8 \
267
+ --part 5 \
268
+ --total_part 32 &
269
+ sleep 20
270
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
271
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
272
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
273
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
274
+ --num_workers 8 \
275
+ --part 6 \
276
+ --total_part 32 &
277
+ sleep 20
278
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
279
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
280
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
281
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
282
+ --num_workers 8 \
283
+ --part 7 \
284
+ --total_part 32
dataset_code/sekai/preprocess/2.sh ADDED
@@ -0,0 +1,282 @@
1
+ bash install.sh
2
+
3
+ # python cut_video.py \
4
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
5
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193 \
6
+ # --frames-per-segment 193 \
7
+ # --max-workers 32 \
8
+ # --cur-part 2 \
9
+ # --total-part 6 \
10
+
11
+
12
+ # python cut_video.py \
13
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
14
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386 \
15
+ # --frames-per-segment 386 \
16
+ # --max-workers 32 \
17
+ # --cur-part 2 \
18
+ # --total-part 6 \
19
+
20
+ export PYTHONMULTIPROCESSING_START_METHOD=fork
21
+ export VLLM_WORKER_MULTIPROC_METHOD=spawn
22
+
23
+ # python get_caption.py
24
+
25
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
26
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
27
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
28
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
29
+ --num_workers 8 \
30
+ --part 8 \
31
+ --total_part 32 &
32
+ sleep 20
33
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
34
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
35
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
36
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
37
+ --num_workers 8 \
38
+ --part 9 \
39
+ --total_part 32 &
40
+ sleep 20
41
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
42
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
43
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
44
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
45
+ --num_workers 8 \
46
+ --part 10 \
47
+ --total_part 32 &
48
+ sleep 20
49
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
50
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
51
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
52
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
53
+ --num_workers 8 \
54
+ --part 11 \
55
+ --total_part 32 &
56
+ sleep 20
57
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
58
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
59
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
60
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
61
+ --num_workers 8 \
62
+ --part 12 \
63
+ --total_part 32 &
64
+ sleep 20
65
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
66
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
67
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
68
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
69
+ --num_workers 8 \
70
+ --part 13 \
71
+ --total_part 32 &
72
+ sleep 20
73
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
74
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
75
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
76
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
77
+ --num_workers 8 \
78
+ --part 14 \
79
+ --total_part 32 &
80
+ sleep 20
81
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
82
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
83
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
84
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
85
+ --num_workers 8 \
86
+ --part 15 \
87
+ --total_part 32
88
+
89
+
90
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
91
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
92
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
93
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
94
+ --num_workers 8 \
95
+ --part 8 \
96
+ --total_part 32 &
97
+ sleep 20
98
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
99
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
100
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
101
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
102
+ --num_workers 8 \
103
+ --part 9 \
104
+ --total_part 32 &
105
+ sleep 20
106
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
107
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
108
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
109
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
110
+ --num_workers 8 \
111
+ --part 10 \
112
+ --total_part 32 &
113
+ sleep 20
114
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
115
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
116
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
117
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
118
+ --num_workers 8 \
119
+ --part 11 \
120
+ --total_part 32 &
121
+ sleep 20
122
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
123
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
124
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
125
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
126
+ --num_workers 8 \
127
+ --part 12 \
128
+ --total_part 32 &
129
+ sleep 20
130
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
131
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
132
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
133
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
134
+ --num_workers 8 \
135
+ --part 13 \
136
+ --total_part 32 &
137
+ sleep 20
138
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
139
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
140
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
141
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
142
+ --num_workers 8 \
143
+ --part 14 \
144
+ --total_part 32 &
145
+ sleep 20
146
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
147
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
148
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
149
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
150
+ --num_workers 8 \
151
+ --part 15 \
152
+ --total_part 32
153
+
154
+
155
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
156
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
157
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
158
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
159
+ --num_workers 8 \
160
+ --part 8 \
161
+ --total_part 32 &
162
+ sleep 20
163
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
164
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
165
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
166
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
167
+ --num_workers 8 \
168
+ --part 9 \
169
+ --total_part 32 &
170
+ sleep 20
171
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
172
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
173
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
174
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
175
+ --num_workers 8 \
176
+ --part 10 \
177
+ --total_part 32 &
178
+ sleep 20
179
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
180
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
181
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
182
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
183
+ --num_workers 8 \
184
+ --part 11 \
185
+ --total_part 32 &
186
+ sleep 20
187
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
188
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
189
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
190
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
191
+ --num_workers 8 \
192
+ --part 12 \
193
+ --total_part 32 &
194
+ sleep 20
195
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
196
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
197
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
198
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
199
+ --num_workers 8 \
200
+ --part 13 \
201
+ --total_part 32 &
202
+ sleep 20
203
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
204
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
205
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
206
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
207
+ --num_workers 8 \
208
+ --part 14 \
209
+ --total_part 32 &
210
+ sleep 20
211
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
212
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
213
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
214
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
215
+ --num_workers 8 \
216
+ --part 15 \
217
+ --total_part 32
218
+
219
+
220
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
221
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
222
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
223
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
224
+ --num_workers 8 \
225
+ --part 8 \
226
+ --total_part 32 &
227
+ sleep 20
228
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
229
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
230
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
231
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
232
+ --num_workers 8 \
233
+ --part 9 \
234
+ --total_part 32 &
235
+ sleep 20
236
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
237
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
238
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
239
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
240
+ --num_workers 8 \
241
+ --part 10 \
242
+ --total_part 32 &
243
+ sleep 20
244
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
245
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
246
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
247
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
248
+ --num_workers 8 \
249
+ --part 11 \
250
+ --total_part 32 &
251
+ sleep 20
252
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
253
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
254
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
255
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
256
+ --num_workers 8 \
257
+ --part 12 \
258
+ --total_part 32 &
259
+ sleep 20
260
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
261
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
262
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
263
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
264
+ --num_workers 8 \
265
+ --part 13 \
266
+ --total_part 32 &
267
+ sleep 20
268
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
269
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
270
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
271
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
272
+ --num_workers 8 \
273
+ --part 14 \
274
+ --total_part 32 &
275
+ sleep 20
276
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
277
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
278
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
279
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
280
+ --num_workers 8 \
281
+ --part 15 \
282
+ --total_part 32
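The launch pattern in each block is the same: pin one process to each of the eight GPUs via CUDA_VISIBLE_DEVICES, background the first seven with `&`, stagger start-up with `sleep 20`, and leave the eighth in the foreground so the block finishes before the next dataset's block begins. Because only the final foreground job is awaited, a backgrounded job that outlives it will still be running when the next block starts. A Python equivalent that waits on all eight processes is sketched below; it is illustrative only, and the helper name and argument handling are not from this repository.

# Illustrative launcher equivalent to one eight-GPU block above; not repository code.
import os
import subprocess
import time

def launch_block(input_csv, video_root, output_dir, first_part, total_part,
                 num_gpus=8, stagger_s=20):
    procs = []
    for gpu in range(num_gpus):
        env = {**os.environ, "CUDA_VISIBLE_DEVICES": str(gpu)}
        cmd = [
            "python", "get_caption.py",
            "--input_csv", input_csv,
            "--input_video_root", video_root,
            "--output_csv_path", output_dir,
            "--num_workers", "8",
            "--part", str(first_part + gpu),
            "--total_part", str(total_part),
        ]
        procs.append(subprocess.Popen(cmd, env=env))
        time.sleep(stagger_s)  # same 20 s stagger as the shell scripts
    # Unlike the shell blocks, wait for every process, not just the last one.
    for p in procs:
        p.wait()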
dataset_code/sekai/preprocess/3.sh ADDED
@@ -0,0 +1,282 @@
1
+ bash install.sh
2
+
3
+ # python cut_video.py \
4
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
5
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193 \
6
+ # --frames-per-segment 193 \
7
+ # --max-workers 32 \
8
+ # --cur-part 3 \
9
+ # --total-part 6 \
10
+
11
+
12
+ # python cut_video.py \
13
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
14
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386 \
15
+ # --frames-per-segment 386 \
16
+ # --max-workers 32 \
17
+ # --cur-part 3 \
18
+ # --total-part 6 \
19
+
20
+ export PYTHONMULTIPROCESSING_START_METHOD=fork
21
+ export VLLM_WORKER_MULTIPROC_METHOD=spawn
22
+
23
+ # python get_caption.py
24
+
25
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
26
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
27
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
28
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
29
+ --num_workers 8 \
30
+ --part 16 \
31
+ --total_part 32 &
32
+ sleep 20
33
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
34
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
35
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
36
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
37
+ --num_workers 8 \
38
+ --part 17 \
39
+ --total_part 32 &
40
+ sleep 20
41
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
42
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
43
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
44
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
45
+ --num_workers 8 \
46
+ --part 18 \
47
+ --total_part 32 &
48
+ sleep 20
49
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
50
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
51
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
52
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
53
+ --num_workers 8 \
54
+ --part 19 \
55
+ --total_part 32 &
56
+ sleep 20
57
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
58
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
59
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
60
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
61
+ --num_workers 8 \
62
+ --part 20 \
63
+ --total_part 32 &
64
+ sleep 20
65
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
66
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
67
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
68
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
69
+ --num_workers 8 \
70
+ --part 21 \
71
+ --total_part 32 &
72
+ sleep 20
73
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
74
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
75
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
76
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
77
+ --num_workers 8 \
78
+ --part 22 \
79
+ --total_part 32 &
80
+ sleep 20
81
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
82
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
83
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
84
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
85
+ --num_workers 8 \
86
+ --part 23 \
87
+ --total_part 32
88
+
89
+
90
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
91
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
92
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
93
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
94
+ --num_workers 8 \
95
+ --part 16 \
96
+ --total_part 32 &
97
+ sleep 20
98
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
99
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
100
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
101
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
102
+ --num_workers 8 \
103
+ --part 17 \
104
+ --total_part 32 &
105
+ sleep 20
106
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
107
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
108
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
109
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
110
+ --num_workers 8 \
111
+ --part 18 \
112
+ --total_part 32 &
113
+ sleep 20
114
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
115
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
116
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
117
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
118
+ --num_workers 8 \
119
+ --part 19 \
120
+ --total_part 32 &
121
+ sleep 20
122
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
123
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
124
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
125
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
126
+ --num_workers 8 \
127
+ --part 20 \
128
+ --total_part 32 &
129
+ sleep 20
130
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
131
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
132
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
133
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
134
+ --num_workers 8 \
135
+ --part 21 \
136
+ --total_part 32 &
137
+ sleep 20
138
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
139
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
140
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
141
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
142
+ --num_workers 8 \
143
+ --part 22 \
144
+ --total_part 32 &
145
+ sleep 20
146
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
147
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
148
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
149
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
150
+ --num_workers 8 \
151
+ --part 23 \
152
+ --total_part 32
153
+
154
+
155
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
156
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
157
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
158
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
159
+ --num_workers 8 \
160
+ --part 16 \
161
+ --total_part 32 &
162
+ sleep 20
163
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
164
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
165
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
166
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
167
+ --num_workers 8 \
168
+ --part 17 \
169
+ --total_part 32 &
170
+ sleep 20
171
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
172
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
173
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
174
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
175
+ --num_workers 8 \
176
+ --part 18 \
177
+ --total_part 32 &
178
+ sleep 20
179
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
180
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
181
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
182
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
183
+ --num_workers 8 \
184
+ --part 19 \
185
+ --total_part 32 &
186
+ sleep 20
187
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
188
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
189
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
190
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
191
+ --num_workers 8 \
192
+ --part 20 \
193
+ --total_part 32 &
194
+ sleep 20
195
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
196
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
197
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
198
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
199
+ --num_workers 8 \
200
+ --part 21 \
201
+ --total_part 32 &
202
+ sleep 20
203
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
204
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
205
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
206
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
207
+ --num_workers 8 \
208
+ --part 22 \
209
+ --total_part 32 &
210
+ sleep 20
211
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
212
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
213
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
214
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
215
+ --num_workers 8 \
216
+ --part 23 \
217
+ --total_part 32
218
+
219
+
220
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
221
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
222
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
223
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
224
+ --num_workers 8 \
225
+ --part 16 \
226
+ --total_part 32 &
227
+ sleep 20
228
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
229
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
230
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
231
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
232
+ --num_workers 8 \
233
+ --part 17 \
234
+ --total_part 32 &
235
+ sleep 20
236
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
237
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
238
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
239
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
240
+ --num_workers 8 \
241
+ --part 18 \
242
+ --total_part 32 &
243
+ sleep 20
244
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
245
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
246
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
247
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
248
+ --num_workers 8 \
249
+ --part 19 \
250
+ --total_part 32 &
251
+ sleep 20
252
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
253
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
254
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
255
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
256
+ --num_workers 8 \
257
+ --part 20 \
258
+ --total_part 32 &
259
+ sleep 20
260
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
261
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
262
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
263
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
264
+ --num_workers 8 \
265
+ --part 21 \
266
+ --total_part 32 &
267
+ sleep 20
268
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
269
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
270
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
271
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
272
+ --num_workers 8 \
273
+ --part 22 \
274
+ --total_part 32 &
275
+ sleep 20
276
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
277
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
278
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
279
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
280
+ --num_workers 8 \
281
+ --part 23 \
282
+ --total_part 32
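The commented cut_video.py calls at the top of each script document the earlier preprocessing step: every source video is cut into consecutive clips of a fixed frame count (193 or 386 frames), and the video list is sharded across machines with --cur-part / --total-part. cut_video.py itself is not shown in this section, so the sketch below only illustrates boundary and shard computation under assumed rules (drop the trailing partial segment; round-robin sharding across machines).

# Hypothetical sketch of the segmentation implied by the cut_video.py arguments;
# the drop-partial-segment and round-robin rules are assumptions, not repository code.
def segment_boundaries(total_frames: int, frames_per_segment: int):
    """(start, end) frame ranges for full-length segments only."""
    return [
        (s, s + frames_per_segment)
        for s in range(0, total_frames - frames_per_segment + 1, frames_per_segment)
    ]

def shard(items, cur_part: int, total_part: int):
    """Round-robin split of the video list across machines."""
    return [x for i, x in enumerate(items) if i % total_part == cur_part]

if __name__ == "__main__":
    print(segment_boundaries(1000, 386))  # [(0, 386), (386, 772)]
    print(shard(["a.mp4", "b.mp4", "c.mp4", "d.mp4"], cur_part=1, total_part=2))  # ['b.mp4', 'd.mp4']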
dataset_code/sekai/preprocess/4.sh ADDED
@@ -0,0 +1,282 @@
1
+ bash install.sh
2
+
3
+ # python cut_video.py \
4
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
5
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193 \
6
+ # --frames-per-segment 193 \
7
+ # --max-workers 32 \
8
+ # --cur-part 4 \
9
+ # --total-part 6 \
10
+
11
+
12
+ # python cut_video.py \
13
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
14
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386 \
15
+ # --frames-per-segment 386 \
16
+ # --max-workers 32 \
17
+ # --cur-part 4 \
18
+ # --total-part 6 \
19
+
20
+ export PYTHONMULTIPROCESSING_START_METHOD=fork
21
+ export VLLM_WORKER_MULTIPROC_METHOD=spawn
22
+
23
+ # python get_caption.py
24
+
25
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
26
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
27
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
28
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
29
+ --num_workers 8 \
30
+ --part 24 \
31
+ --total_part 32 &
32
+ sleep 20
33
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
34
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
35
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
36
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
37
+ --num_workers 8 \
38
+ --part 25 \
39
+ --total_part 32 &
40
+ sleep 20
41
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
42
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
43
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
44
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
45
+ --num_workers 8 \
46
+ --part 26 \
47
+ --total_part 32 &
48
+ sleep 20
49
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
50
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
51
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
52
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
53
+ --num_workers 8 \
54
+ --part 27 \
55
+ --total_part 32 &
56
+ sleep 20
57
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
58
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
59
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
60
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
61
+ --num_workers 8 \
62
+ --part 28 \
63
+ --total_part 32 &
64
+ sleep 20
65
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
66
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
67
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
68
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
69
+ --num_workers 8 \
70
+ --part 29 \
71
+ --total_part 32 &
72
+ sleep 20
73
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
74
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
75
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
76
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
77
+ --num_workers 8 \
78
+ --part 30 \
79
+ --total_part 32 &
80
+ sleep 20
81
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
82
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
83
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
84
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
85
+ --num_workers 8 \
86
+ --part 31 \
87
+ --total_part 32
88
+
89
+
90
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
91
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
92
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
93
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
94
+ --num_workers 8 \
95
+ --part 24 \
96
+ --total_part 32 &
97
+ sleep 20
98
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
99
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
100
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
101
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
102
+ --num_workers 8 \
103
+ --part 25 \
104
+ --total_part 32 &
105
+ sleep 20
106
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
107
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
108
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
109
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
110
+ --num_workers 8 \
111
+ --part 26 \
112
+ --total_part 32 &
113
+ sleep 20
114
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
115
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
116
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
117
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
118
+ --num_workers 8 \
119
+ --part 27 \
120
+ --total_part 32 &
121
+ sleep 20
122
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
123
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
124
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
125
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
126
+ --num_workers 8 \
127
+ --part 28 \
128
+ --total_part 32 &
129
+ sleep 20
130
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
131
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
132
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
133
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
134
+ --num_workers 8 \
135
+ --part 29 \
136
+ --total_part 32 &
137
+ sleep 20
138
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
139
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
140
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
141
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
142
+ --num_workers 8 \
143
+ --part 30 \
144
+ --total_part 32 &
145
+ sleep 20
146
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
147
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
148
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
149
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
150
+ --num_workers 8 \
151
+ --part 31 \
152
+ --total_part 32
153
+
154
+
155
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
156
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
157
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
158
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
159
+ --num_workers 8 \
160
+ --part 24 \
161
+ --total_part 32 &
162
+ sleep 20
163
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
164
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
165
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
166
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
167
+ --num_workers 8 \
168
+ --part 25 \
169
+ --total_part 32 &
170
+ sleep 20
171
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
172
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
173
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
174
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
175
+ --num_workers 8 \
176
+ --part 26 \
177
+ --total_part 32 &
178
+ sleep 20
179
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
180
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
181
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
182
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
183
+ --num_workers 8 \
184
+ --part 27 \
185
+ --total_part 32 &
186
+ sleep 20
187
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
188
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
189
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
190
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
191
+ --num_workers 8 \
192
+ --part 28 \
193
+ --total_part 32 &
194
+ sleep 20
195
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
196
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
197
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
198
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
199
+ --num_workers 8 \
200
+ --part 29 \
201
+ --total_part 32 &
202
+ sleep 20
203
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
204
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
205
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
206
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
207
+ --num_workers 8 \
208
+ --part 30 \
209
+ --total_part 32 &
210
+ sleep 20
211
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
212
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
213
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
214
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
215
+ --num_workers 8 \
216
+ --part 31 \
217
+ --total_part 32
218
+
219
+
220
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
221
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
222
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
223
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
224
+ --num_workers 8 \
225
+ --part 24 \
226
+ --total_part 32 &
227
+ sleep 20
228
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
229
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
230
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
231
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
232
+ --num_workers 8 \
233
+ --part 25 \
234
+ --total_part 32 &
235
+ sleep 20
236
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
237
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
238
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
239
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
240
+ --num_workers 8 \
241
+ --part 26 \
242
+ --total_part 32 &
243
+ sleep 20
244
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
245
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
246
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
247
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
248
+ --num_workers 8 \
249
+ --part 27 \
250
+ --total_part 32 &
251
+ sleep 20
252
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
253
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
254
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
255
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
256
+ --num_workers 8 \
257
+ --part 28 \
258
+ --total_part 32 &
259
+ sleep 20
260
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
261
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
262
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
263
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
264
+ --num_workers 8 \
265
+ --part 29 \
266
+ --total_part 32 &
267
+ sleep 20
268
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
269
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
270
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
271
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
272
+ --num_workers 8 \
273
+ --part 30 \
274
+ --total_part 32 &
275
+ sleep 20
276
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
277
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
278
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
279
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
280
+ --num_workers 8 \
281
+ --part 31 \
282
+ --total_part 32
dataset_code/sekai/preprocess/5.sh ADDED
@@ -0,0 +1,282 @@
1
+ bash install.sh
2
+
3
+ # python cut_video.py \
4
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
5
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193 \
6
+ # --frames-per-segment 193 \
7
+ # --max-workers 32 \
8
+ # --cur-part 5 \
9
+ # --total-part 6 \
10
+
11
+
12
+ # python cut_video.py \
13
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
14
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386 \
15
+ # --frames-per-segment 386 \
16
+ # --max-workers 32 \
17
+ # --cur-part 5 \
18
+ # --total-part 6 \
19
+
20
+ export PYTHONMULTIPROCESSING_START_METHOD=fork
21
+ export VLLM_WORKER_MULTIPROC_METHOD=spawn
22
+
23
+ # python get_caption.py
24
+
25
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
26
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
27
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
28
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
29
+ --num_workers 8 \
30
+ --part 32 \
31
+ --total_part 48 &
32
+ sleep 20
33
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
34
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
35
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
36
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
37
+ --num_workers 8 \
38
+ --part 33 \
39
+ --total_part 48 &
40
+ sleep 20
41
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
42
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
43
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
44
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
45
+ --num_workers 8 \
46
+ --part 34 \
47
+ --total_part 48 &
48
+ sleep 20
49
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
50
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
51
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
52
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
53
+ --num_workers 8 \
54
+ --part 35 \
55
+ --total_part 48 &
56
+ sleep 20
57
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
58
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
59
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
60
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
61
+ --num_workers 8 \
62
+ --part 36 \
63
+ --total_part 48 &
64
+ sleep 20
65
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
66
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
67
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
68
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
69
+ --num_workers 8 \
70
+ --part 37 \
71
+ --total_part 48 &
72
+ sleep 20
73
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
74
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
75
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
76
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
77
+ --num_workers 8 \
78
+ --part 38 \
79
+ --total_part 48 &
80
+ sleep 20
81
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
82
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
83
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
84
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
85
+ --num_workers 8 \
86
+ --part 39 \
87
+ --total_part 48
88
+
89
+
90
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
91
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
92
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
93
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
94
+ --num_workers 8 \
95
+ --part 32 \
96
+ --total_part 48 &
97
+ sleep 20
98
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
99
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
100
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
101
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
102
+ --num_workers 8 \
103
+ --part 33 \
104
+ --total_part 48 &
105
+ sleep 20
106
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
107
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
108
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
109
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
110
+ --num_workers 8 \
111
+ --part 34 \
112
+ --total_part 48 &
113
+ sleep 20
114
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
115
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
116
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
117
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
118
+ --num_workers 8 \
119
+ --part 35 \
120
+ --total_part 48 &
121
+ sleep 20
122
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
123
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
124
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
125
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
126
+ --num_workers 8 \
127
+ --part 36 \
128
+ --total_part 48 &
129
+ sleep 20
130
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
131
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
132
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
133
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
134
+ --num_workers 8 \
135
+ --part 37 \
136
+ --total_part 48 &
137
+ sleep 20
138
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
139
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
140
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
141
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
142
+ --num_workers 8 \
143
+ --part 38 \
144
+ --total_part 48 &
145
+ sleep 20
146
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
147
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
148
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
149
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
150
+ --num_workers 8 \
151
+ --part 39 \
152
+ --total_part 48
153
+
154
+
155
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
156
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
157
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
158
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
159
+ --num_workers 8 \
160
+ --part 32 \
161
+ --total_part 48 &
162
+ sleep 20
163
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
164
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
165
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
166
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
167
+ --num_workers 8 \
168
+ --part 33 \
169
+ --total_part 48 &
170
+ sleep 20
171
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
172
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
173
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
174
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
175
+ --num_workers 8 \
176
+ --part 34 \
177
+ --total_part 48 &
178
+ sleep 20
179
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
180
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
181
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
182
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
183
+ --num_workers 8 \
184
+ --part 35 \
185
+ --total_part 48 &
186
+ sleep 20
187
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
188
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
189
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
190
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
191
+ --num_workers 8 \
192
+ --part 36 \
193
+ --total_part 48 &
194
+ sleep 20
195
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
196
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
197
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
198
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
199
+ --num_workers 8 \
200
+ --part 37 \
201
+ --total_part 48 &
202
+ sleep 20
203
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
204
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
205
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
206
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
207
+ --num_workers 8 \
208
+ --part 38 \
209
+ --total_part 48 &
210
+ sleep 20
211
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
212
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
213
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
214
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
215
+ --num_workers 8 \
216
+ --part 39 \
217
+ --total_part 48
218
+
219
+
220
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
221
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
222
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
223
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
224
+ --num_workers 8 \
225
+ --part 32 \
226
+ --total_part 48 &
227
+ sleep 20
228
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
229
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
230
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
231
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
232
+ --num_workers 8 \
233
+ --part 33 \
234
+ --total_part 48 &
235
+ sleep 20
236
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
237
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
238
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
239
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
240
+ --num_workers 8 \
241
+ --part 34 \
242
+ --total_part 48 &
243
+ sleep 20
244
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
245
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
246
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
247
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
248
+ --num_workers 8 \
249
+ --part 35 \
250
+ --total_part 48 &
251
+ sleep 20
252
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
253
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
254
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
255
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
256
+ --num_workers 8 \
257
+ --part 36 \
258
+ --total_part 48 &
259
+ sleep 20
260
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
261
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
262
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
263
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
264
+ --num_workers 8 \
265
+ --part 37 \
266
+ --total_part 48 &
267
+ sleep 20
268
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
269
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
270
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
271
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
272
+ --num_workers 8 \
273
+ --part 38 \
274
+ --total_part 48 &
275
+ sleep 20
276
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
277
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
278
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
279
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
280
+ --num_workers 8 \
281
+ --part 39 \
282
+ --total_part 48
dataset_code/sekai/preprocess/6.sh ADDED
@@ -0,0 +1,282 @@
1
+ bash install.sh
2
+
3
+ # python cut_video.py \
4
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
5
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193 \
6
+ # --frames-per-segment 193 \
7
+ # --max-workers 32 \
8
+ # --cur-part 6 \
9
+ # --total-part 6 \
10
+
11
+
12
+ # python cut_video.py \
13
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
14
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386 \
15
+ # --frames-per-segment 386 \
16
+ # --max-workers 32 \
17
+ # --cur-part 6 \
18
+ # --total-part 6 \
19
+
20
+ export PYTHONMULTIPROCESSING_START_METHOD=fork
21
+ export VLLM_WORKER_MULTIPROC_METHOD=spawn
22
+
23
+ # python get_caption.py
24
+
25
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
26
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
27
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
28
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
29
+ --num_workers 8 \
30
+ --part 40 \
31
+ --total_part 48 &
32
+ sleep 20
33
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
34
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
35
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
36
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
37
+ --num_workers 8 \
38
+ --part 41 \
39
+ --total_part 48 &
40
+ sleep 20
41
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
42
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
43
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
44
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
45
+ --num_workers 8 \
46
+ --part 42 \
47
+ --total_part 48 &
48
+ sleep 20
49
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
50
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
51
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
52
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
53
+ --num_workers 8 \
54
+ --part 43 \
55
+ --total_part 48 &
56
+ sleep 20
57
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
58
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
59
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
60
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
61
+ --num_workers 8 \
62
+ --part 44 \
63
+ --total_part 48 &
64
+ sleep 20
65
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
66
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
67
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
68
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
69
+ --num_workers 8 \
70
+ --part 45 \
71
+ --total_part 48 &
72
+ sleep 20
73
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
74
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
75
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
76
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
77
+ --num_workers 8 \
78
+ --part 46 \
79
+ --total_part 48 &
80
+ sleep 20
81
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
82
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv" \
83
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193" \
84
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-193" \
85
+ --num_workers 8 \
86
+ --part 47 \
87
+ --total_part 48
88
+
89
+
90
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
91
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
92
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
93
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
94
+ --num_workers 8 \
95
+ --part 40 \
96
+ --total_part 48 &
97
+ sleep 20
98
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
99
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
100
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
101
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
102
+ --num_workers 8 \
103
+ --part 41 \
104
+ --total_part 48 &
105
+ sleep 20
106
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
107
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
108
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
109
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
110
+ --num_workers 8 \
111
+ --part 42 \
112
+ --total_part 48 &
113
+ sleep 20
114
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
115
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
116
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
117
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
118
+ --num_workers 8 \
119
+ --part 43 \
120
+ --total_part 48 &
121
+ sleep 20
122
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
123
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
124
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
125
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
126
+ --num_workers 8 \
127
+ --part 44 \
128
+ --total_part 48 &
129
+ sleep 20
130
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
131
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
132
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
133
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
134
+ --num_workers 8 \
135
+ --part 45 \
136
+ --total_part 48 &
137
+ sleep 20
138
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
139
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
140
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
141
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
142
+ --num_workers 8 \
143
+ --part 46 \
144
+ --total_part 48 &
145
+ sleep 20
146
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
147
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-193.csv" \
148
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193" \
149
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-193" \
150
+ --num_workers 8 \
151
+ --part 47 \
152
+ --total_part 48
153
+
154
+
155
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
156
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
157
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
158
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
159
+ --num_workers 8 \
160
+ --part 40 \
161
+ --total_part 48 &
162
+ sleep 20
163
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
164
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
165
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
166
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
167
+ --num_workers 8 \
168
+ --part 41 \
169
+ --total_part 48 &
170
+ sleep 20
171
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
172
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
173
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
174
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
175
+ --num_workers 8 \
176
+ --part 42 \
177
+ --total_part 48 &
178
+ sleep 20
179
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
180
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
181
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
182
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
183
+ --num_workers 8 \
184
+ --part 43 \
185
+ --total_part 48 &
186
+ sleep 20
187
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
188
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
189
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
190
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
191
+ --num_workers 8 \
192
+ --part 44 \
193
+ --total_part 48 &
194
+ sleep 20
195
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
196
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
197
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
198
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
199
+ --num_workers 8 \
200
+ --part 45 \
201
+ --total_part 48 &
202
+ sleep 20
203
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
204
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
205
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
206
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
207
+ --num_workers 8 \
208
+ --part 46 \
209
+ --total_part 48 &
210
+ sleep 20
211
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
212
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
213
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
214
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
215
+ --num_workers 8 \
216
+ --part 47 \
217
+ --total_part 48
218
+
219
+
220
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
221
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
222
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
223
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
224
+ --num_workers 8 \
225
+ --part 40 \
226
+ --total_part 48 &
227
+ sleep 20
228
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
229
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
230
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
231
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
232
+ --num_workers 8 \
233
+ --part 41 \
234
+ --total_part 48 &
235
+ sleep 20
236
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
237
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
238
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
239
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
240
+ --num_workers 8 \
241
+ --part 42 \
242
+ --total_part 48 &
243
+ sleep 20
244
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
245
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
246
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
247
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
248
+ --num_workers 8 \
249
+ --part 43 \
250
+ --total_part 48 &
251
+ sleep 20
252
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
253
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
254
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
255
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
256
+ --num_workers 8 \
257
+ --part 44 \
258
+ --total_part 48 &
259
+ sleep 20
260
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
261
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
262
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
263
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
264
+ --num_workers 8 \
265
+ --part 45 \
266
+ --total_part 48 &
267
+ sleep 20
268
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
269
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
270
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
271
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
272
+ --num_workers 8 \
273
+ --part 46 \
274
+ --total_part 48 &
275
+ sleep 20
276
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
277
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
278
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
279
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
280
+ --num_workers 8 \
281
+ --part 47 \
282
+ --total_part 48
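As a rough illustration (all values below are made up), the --part/--total_part flags in the invocations above select a strided shard of the CSV rows; get_caption.py does this with video_data[args.part::args.total_part], so each GPU works on a disjoint subset:

rows = list(range(10))          # stand-in for the CSV rows
part, total_part = 1, 4         # hypothetical shard index and shard count
shard = rows[part::total_part]  # -> [1, 5, 9]
print(shard)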
dataset_code/sekai/preprocess/add_config.py ADDED
@@ -0,0 +1,221 @@
1
+ import pandas as pd
2
+ import cv2
3
+ import os
4
+ from pathlib import Path
5
+ from concurrent.futures import ThreadPoolExecutor, as_completed
6
+ from threading import Lock
7
+ import time
8
+
9
+ class VideoProcessor:
10
+ def __init__(self, max_workers=4):
11
+ self.max_workers = max_workers
12
+ self.progress_lock = Lock()
13
+ self.processed_count = 0
14
+ self.total_count = 0
15
+
16
+ def get_video_properties(self, video_path):
17
+ """
18
+ Get basic video properties: frame count, height, width, and frame rate.
19
+
20
+ Args:
21
+ video_path (str): path to the video file
22
+
23
+ Returns:
24
+ tuple: (num_frame, height, width, fps), or (None, None, None, None) if reading fails
25
+ """
26
+ try:
27
+ # open the video file
28
+ cap = cv2.VideoCapture(video_path)
29
+
30
+ if not cap.isOpened():
31
+ return None, None, None, None
32
+
33
+ # read the video properties
34
+ filename = os.path.splitext(os.path.basename(video_path))[0]
35
+ parts = filename.split('_')
36
+ num_frame = int(parts[-1]) - int(parts[-2])
37
+
38
+ # num_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
39
+ width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
40
+ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
41
+ fps = cap.get(cv2.CAP_PROP_FPS)
42
+
43
+ # release the capture object
44
+ cap.release()
45
+
46
+ return num_frame, height, width, fps
47
+
48
+ except Exception as e:
49
+ print(f"Error reading video {video_path}: {str(e)}")
50
+ return None, None, None, None
51
+
52
+ def process_single_video(self, args):
53
+ """
54
+ Process a single video file.
55
+
56
+ Args:
57
+ args: (idx, video_file, video_dir)
58
+
59
+ Returns:
60
+ tuple: (idx, num_frame, height, width, fps, success, message)
61
+ """
62
+ idx, video_file, video_dir = args
63
+ video_path = os.path.join(video_dir, video_file)
64
+
65
+ # check that the video file exists
66
+ if not os.path.exists(video_path):
67
+ message = f"Video file does not exist: {video_path}"
68
+ return idx, None, None, None, None, False, message
69
+
70
+ # read the video properties
71
+ num_frame, height, width, fps = self.get_video_properties(video_path)
72
+
73
+ # update progress
74
+ with self.progress_lock:
75
+ self.processed_count += 1
76
+ progress = (self.processed_count / self.total_count) * 100
77
+
78
+ if height is not None:
79
+ message = f"[{self.processed_count}/{self.total_count}] ({progress:.1f}%) {video_file} → {num_frame}, {width}x{height}, {fps:.2f}fps"
80
+ success = True
81
+ fps = round(fps, 2)
82
+ else:
83
+ message = f"[{self.processed_count}/{self.total_count}] ({progress:.1f}%) {video_file} → failed to read properties"
84
+ success = False
85
+
86
+ print(message)
87
+
88
+ return idx, num_frame, height, width, fps, success, message
89
+
90
+ def process_video_csv(self, csv_path, video_dir="./", output_csv_path=None, max_workers=None):
91
+ """
92
+ Process the CSV with multiple threads, adding num_frame, height, width and fps columns.
93
+
94
+ Args:
95
+ csv_path (str): input CSV file path
96
+ video_dir (str): directory containing the video files
97
+ output_csv_path (str): output CSV path; if None, the input file is overwritten
98
+ max_workers (int): maximum number of threads; if None, the value from __init__ is used
99
+ """
100
+ if max_workers is None:
101
+ max_workers = self.max_workers
102
+
103
+ try:
104
+ # read the CSV file
105
+ df = pd.read_csv(csv_path)
106
+ self.total_count = len(df)
107
+ self.processed_count = 0
108
+
109
+ print(f"CSV loaded successfully, {len(df)} rows in total")
110
+ print(f"Processing with {max_workers} threads...")
111
+
112
+ # initialize the new columns
113
+ df['num_frame'] = None
114
+ df['height'] = None
115
+ df['width'] = None
116
+ df['fps'] = None
117
+
118
+ # build the task list
119
+ tasks = [(idx, row['videoFile'], video_dir) for idx, row in df.iterrows()]
120
+
121
+ # record the start time
122
+ start_time = time.time()
123
+
124
+ # run the tasks in a thread pool
125
+ with ThreadPoolExecutor(max_workers=max_workers) as executor:
126
+ # submit all tasks
127
+ future_to_task = {executor.submit(self.process_single_video, task): task for task in tasks}
128
+
129
+ # handle tasks as they complete
130
+ for future in as_completed(future_to_task):
131
+ idx, num_frame, height, width, fps, success, message = future.result()
132
+
133
+ # update the DataFrame
134
+ if success and height is not None:
135
+ df.at[idx, 'num_frame'] = num_frame
136
+ df.at[idx, 'height'] = height
137
+ df.at[idx, 'width'] = width
138
+ df.at[idx, 'fps'] = fps
139
+
140
+ # compute the processing time
141
+ end_time = time.time()
142
+ processing_time = end_time - start_time
143
+
144
+ # save the results
145
+ if output_csv_path is None:
146
+ output_csv_path = csv_path
147
+
148
+ df.to_csv(output_csv_path, index=False)
149
+
150
+ # show summary statistics
151
+ valid_videos = df['height'].notna().sum()
152
+ print(f"\n{'='*60}")
153
+ print(f"Processing finished!")
154
+ print(f"Total processing time: {processing_time:.2f}s")
155
+ print(f"Average per video: {processing_time/len(df):.2f}s")
156
+ print(f"Videos processed successfully: {valid_videos}/{len(df)}")
157
+ print(f"Results saved to: {output_csv_path}")
158
+ print(f"{'='*60}")
159
+
160
+ return df
161
+
162
+ except Exception as e:
163
+ print(f"Error during processing: {str(e)}")
164
+ return None
165
+
166
+ # convenience function
167
+ def process_video_csv_multithread(csv_path, video_dir="./", output_csv_path=None, max_workers=4):
168
+ """
169
+ Convenience wrapper for multithreaded video processing.
170
+
171
+ Args:
172
+ csv_path (str): input CSV file path
173
+ video_dir (str): directory containing the video files
174
+ output_csv_path (str): output CSV file path
175
+ max_workers (int): maximum number of threads
176
+ """
177
+ processor = VideoProcessor(max_workers=max_workers)
178
+ return processor.process_video_csv(csv_path, video_dir, output_csv_path, max_workers)
179
+
180
+ # usage example
181
+ if __name__ == "__main__":
182
+ # configuration
183
+ # base_names = ["sekai-real-walking-hq-193", "sekai-game-walking-193", "sekai-real-walking-hq-386", "sekai-game-walking-386"]
184
+ # base_names = ["sekai-real-walking-hq-193"]
185
+ # base_names = ["sekai-game-walking-193"]
186
+ # base_names = ["sekai-real-walking-hq-386"]
187
+ base_names = ["sekai-game-walking-386"]
188
+
189
+ for base_name in base_names:
190
+ csv_file_path = f"/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/{base_name}.csv"
191
+ video_directory = f"/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/{base_name}"
192
+ output_file_path = f"/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/{base_name}_updated.csv"
193
+ thread_count = 32
194
+
195
+ # Option 1: use the convenience function
196
+ result_df = process_video_csv_multithread(
197
+ csv_path=csv_file_path,
198
+ video_dir=video_directory,
199
+ output_csv_path=output_file_path,
200
+ max_workers=thread_count
201
+ )
202
+
203
+ # Option 2: use the class directly (more flexible)
204
+ """
205
+ processor = VideoProcessor(max_workers=thread_count)
206
+ result_df = processor.process_video_csv(
207
+ csv_path=csv_file_path,
208
+ video_dir=video_directory,
209
+ output_csv_path=output_file_path
210
+ )
211
+ """
212
+
213
+ # preview the first few rows
214
+ if result_df is not None:
215
+ print("\nPreview of the processed data:")
216
+ print(result_df[['videoFile', 'num_frame', 'height', 'width', 'fps']].head())
217
+
218
+ # show some statistics
219
+ print(f"\nVideo resolution statistics:")
220
+ resolution_stats = result_df.groupby(['width', 'height']).size().reset_index(name='count')
221
+ print(resolution_stats.head(10))
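A small, self-contained sketch of how num_frame is recovered above (the filename here is hypothetical): the start and end frame indices are encoded as the last two underscore-separated fields of the segment name, so the count comes from the name rather than from cv2.CAP_PROP_FRAME_COUNT:

import os

video_path = "clip_abc_0000193_0000386.mp4"  # hypothetical name: <video_id>_<start:07d>_<end:07d>.mp4
stem = os.path.splitext(os.path.basename(video_path))[0]
parts = stem.split("_")
num_frame = int(parts[-1]) - int(parts[-2])  # 386 - 193 = 193
print(num_frame)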
dataset_code/sekai/preprocess/cut_video.py ADDED
@@ -0,0 +1,292 @@
1
+ import argparse
2
+ import subprocess
3
+ import os
4
+ import glob
5
+ from tqdm import tqdm
6
+ from concurrent.futures import ThreadPoolExecutor, as_completed
7
+ import threading
8
+
9
+ def extract_video_info_split(input_file):
10
+ filename = os.path.splitext(os.path.basename(input_file))[0]
11
+ parts = filename.split('_')
12
+
13
+ start_frame = int(parts[-2])
14
+ end_frame = int(parts[-1])
15
+
16
+ # video_id = filename.replace(f"_{parts[-1]}", "").replace(f"_{parts[-2]}", "")
17
+ video_id = filename
18
+
19
+ return video_id, start_frame, end_frame
20
+
21
+ # if len(parts) == 3:
22
+ # video_id = parts[0]
23
+ # start_frame = int(parts[1])
24
+ # end_frame = int(parts[2])
25
+ # return video_id, start_frame, end_frame
26
+ # else:
27
+ # raise ValueError(f"Unexpected filename format: expected 3 parts, got {len(parts)}: {filename}")
28
+
29
+ def check_segments_exist(video_id, start_frame, total_frames, output_dir, frames_per_segment):
30
+ """
31
+ Check whether every segment of this video already exists.
32
+
33
+ Returns:
34
+ bool: True if all segments exist, False if any are missing
35
+ list: filenames of the missing segments
36
+ """
37
+ missing_segments = []
38
+ current_frame = start_frame
39
+
40
+ while current_frame < start_frame + total_frames:
41
+ segment_end_frame = min(current_frame + frames_per_segment - 1, start_frame + total_frames - 1)
42
+ output_filename = f"{video_id}_{current_frame:07d}_{(segment_end_frame+1):07d}.mp4"
43
+ output_file = os.path.join(output_dir, output_filename)
44
+
45
+ if not os.path.exists(output_file):
46
+ missing_segments.append(output_filename)
47
+
48
+ current_frame = segment_end_frame + 1
49
+
50
+ return len(missing_segments) == 0, missing_segments
51
+
52
+ def process_single_video(input_file, output_dir, frames_per_segment=386, skip_existing=True):
53
+ try:
54
+ video_id, start_frame, original_end_frame = extract_video_info_split(input_file)
55
+
56
+ # get the frame rate
57
+ result = subprocess.run([
58
+ 'ffprobe', '-v', 'quiet', '-select_streams', 'v:0',
59
+ '-show_entries', 'stream=r_frame_rate', '-of', 'csv=p=0', input_file
60
+ ], capture_output=True, text=True)
61
+
62
+ frame_rate_str = result.stdout.strip()
63
+ if frame_rate_str and '/' in frame_rate_str:
64
+ frame_rate = eval(frame_rate_str)
65
+ else:
66
+ frame_rate = 30.0
67
+
68
+ # get the total frame count
69
+ result = subprocess.run([
70
+ 'ffprobe', '-v', 'quiet', '-select_streams', 'v:0',
71
+ '-show_entries', 'stream=nb_frames', '-of', 'csv=p=0', input_file
72
+ ], capture_output=True, text=True)
73
+
74
+ total_frames = int(result.stdout.strip())
75
+
76
+ # check whether the segment files already exist
77
+ if skip_existing:
78
+ all_exist, missing_segments = check_segments_exist(
79
+ video_id, start_frame, total_frames, output_dir, frames_per_segment
80
+ )
81
+
82
+ thread_id = threading.current_thread().name
83
+
84
+ if all_exist:
85
+ # work out how many segments there should be
86
+ expected_segments = 0
87
+ current_frame = start_frame
88
+ while current_frame < start_frame + total_frames:
89
+ expected_segments += 1
90
+ segment_end_frame = min(current_frame + frames_per_segment - 1, start_frame + total_frames - 1)
91
+ current_frame = segment_end_frame + 1
92
+
93
+ print(f"[{thread_id}] Skipping file: {os.path.basename(input_file)} - all {expected_segments} segments already exist")
94
+ return True, expected_segments, os.path.basename(input_file), True # the last value marks whether the file was skipped
95
+ else:
96
+ print(f"[{thread_id}] Processing file: {os.path.basename(input_file)} - {len(missing_segments)} segments missing")
97
+
98
+ current_frame = start_frame
99
+
100
+ # thread-safe logging
101
+ thread_id = threading.current_thread().name
102
+ if not skip_existing or not all_exist:
103
+ print(f"[{thread_id}] Original clip: frame {start_frame} to frame {original_end_frame}")
104
+ print(f"[{thread_id}] Total frames in file: {total_frames}")
105
+
106
+ segment_index = 0
107
+ processed_segments = 0
108
+
109
+ while current_frame < start_frame + total_frames:
110
+ segment_end_frame = min(current_frame + frames_per_segment - 1, start_frame + total_frames - 1)
111
+
112
+ output_filename = f"{video_id}_{current_frame:07d}_{(segment_end_frame + 1):07d}.mp4"
113
+ output_file = os.path.join(output_dir, output_filename)
114
+
115
+ if skip_existing and os.path.exists(output_file):
116
+ print(f"[{thread_id}] Skipping segment: {output_filename} (already exists)")
117
+ pass
118
+ else:
119
+ start_time = (current_frame - start_frame) / frame_rate
120
+ # compute directly from the frame count here to keep the segment length exact
121
+ actual_frames = segment_end_frame - current_frame + 1
122
+ duration = actual_frames / frame_rate
123
+
124
+ # use a more precise FFmpeg command
125
+ # subprocess.run([
126
+ # 'ffmpeg', '-ss', str(start_time), '-i', input_file,
127
+ # '-frames:v', str(actual_frames),
128
+ # '-c:v', 'libx264', '-crf', '0', # lossless encoding
129
+ # '-preset', 'ultrafast', # fast encoding
130
+ # '-c:a', 'copy',
131
+ # output_file, '-y'
132
+ # ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
133
+ subprocess.run([
134
+ 'ffmpeg', '-ss', str(start_time), '-i', input_file,
135
+ '-t', str(duration),
136
+ '-c', 'copy', # stream copy, no re-encoding
137
+ output_file, '-y'
138
+ ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
139
+
140
+ print(f"[{thread_id}] Generated segment: {output_filename} ({actual_frames} frames)")
141
+ processed_segments += 1
142
+
143
+ current_frame = segment_end_frame + 1
144
+ segment_index += 1
145
+
146
+ if not skip_existing or not all_exist:
147
+ if processed_segments > 0:
148
+ print(f"[{thread_id}] Done: {os.path.basename(input_file)} - {processed_segments} new segments generated")
149
+ else:
150
+ print(f"[{thread_id}] Done: {os.path.basename(input_file)} - all segments already existed")
151
+
152
+ return True, segment_index, os.path.basename(input_file), False
153
+
154
+ except Exception as e:
155
+ thread_id = threading.current_thread().name
156
+ print(f"[{thread_id}] Error while processing {input_file}: {str(e)}")
157
+ return False, 0, os.path.basename(input_file), False
158
+
159
+ def batch_process_videos(input_folder, output_dir, frames_per_segment=386, max_workers=4,
160
+ skip_existing=True, cur_part=None, total_part=None):
161
+ """
162
+ Batch-process videos using multiple threads.
163
+
164
+ Args:
165
+ input_folder: input folder path
166
+ output_dir: output directory
167
+ frames_per_segment: number of frames per segment
168
+ max_workers: maximum number of threads, typically 1-2x the number of CPU cores
169
+ skip_existing: whether to skip files whose segments already exist
170
+ cur_part: which part to process (1-based)
171
+ total_part: total number of parts to split the file list into
172
+ """
173
+ # create the output directory
174
+ os.makedirs(output_dir, exist_ok=True)
175
+
176
+ # find all mp4 files
177
+ video_files = glob.glob(os.path.join(input_folder, "*.mp4"))
178
+
179
+ if not video_files:
180
+ print(f"No mp4 files found in {input_folder}")
181
+ return
182
+
183
+ # sort the video files so the split is deterministic
184
+ video_files.sort()
185
+
186
+ # if split parameters are given, take only this part of the video list
187
+ if cur_part is not None and total_part is not None:
188
+ if not (1 <= cur_part <= total_part):
189
+ raise ValueError(f"cur_part ({cur_part}) must be between 1 and {total_part}")
190
+
191
+ total_videos = len(video_files)
192
+ videos_per_part = total_videos // total_part
193
+ remainder = total_videos % total_part
194
+
195
+ # compute the start and end indices for this part
196
+ if cur_part <= remainder:
197
+ # the first `remainder` parts each get one extra file
198
+ start_idx = (cur_part - 1) * (videos_per_part + 1)
199
+ end_idx = start_idx + videos_per_part + 1
200
+ else:
201
+ # later parts get the standard number of files
202
+ start_idx = remainder * (videos_per_part + 1) + (cur_part - remainder - 1) * videos_per_part
203
+ end_idx = start_idx + videos_per_part
204
+
205
+ video_files = video_files[start_idx:end_idx]
206
+
207
+ print(f"Split mode: part {cur_part} of {total_part}")
208
+ print(f"Total videos before splitting: {total_videos}")
209
+ print(f"This part handles {len(video_files)} videos (indices {start_idx} to {end_idx-1})")
210
+
211
+ if not video_files:
212
+ print("No video files to process in this part")
213
+ return
214
+
215
+ print(f"Found {len(video_files)} video files")
216
+ print(f"Output directory: {output_dir}")
217
+ print(f"Processing with {max_workers} threads")
218
+ print(f"Skip existing files: {'yes' if skip_existing else 'no'}")
219
+
220
+ total_segments = 0
221
+ success_count = 0
222
+ skipped_count = 0
223
+ failed_files = []
224
+
225
+ # process with a ThreadPoolExecutor
226
+ with ThreadPoolExecutor(max_workers=max_workers) as executor:
227
+ # submit all tasks
228
+ future_to_file = {
229
+ executor.submit(process_single_video, video_file, output_dir, frames_per_segment, skip_existing): video_file
230
+ for video_file in video_files
231
+ }
232
+
233
+ # show a tqdm progress bar
234
+ progress_desc = "Progress"
235
+ if cur_part is not None and total_part is not None:
236
+ progress_desc = f"Progress ({cur_part}/{total_part})"
237
+
238
+ with tqdm(total=len(video_files), desc=progress_desc) as pbar:
239
+ for future in as_completed(future_to_file):
240
+ video_file = future_to_file[future]
241
+ try:
242
+ success, segments, filename, was_skipped = future.result()
243
+ if success:
244
+ success_count += 1
245
+ total_segments += segments
246
+ if was_skipped:
247
+ skipped_count += 1
248
+ else:
249
+ failed_files.append(filename)
250
+ except Exception as exc:
251
+ print(f'Exception while processing video file {video_file}: {exc}')
252
+ failed_files.append(os.path.basename(video_file))
253
+ finally:
254
+ pbar.update(1)
255
+
256
+ print(f"\nBatch processing finished!")
257
+ if cur_part is not None and total_part is not None:
258
+ print(f"Results for part ({cur_part}/{total_part}):")
259
+ print(f"Processed successfully: {success_count}/{len(video_files)} videos")
260
+ print(f"Skipped: {skipped_count} videos (all segments already existed)")
261
+ print(f"Actually processed: {success_count - skipped_count} videos")
262
+ print(f"Total segments generated: {total_segments}")
263
+
264
+ if failed_files:
265
+ print(f"Files that failed to process: {failed_files}")
266
+
267
+ def main():
268
+ parser = argparse.ArgumentParser(description='Cut video files into fixed-length frame segments in batch')
269
+
270
+ parser.add_argument('--input_folder', type=str, default="./", help='input folder path')
271
+ parser.add_argument('--output_dir', type=str, default="./dummy_segments_33", help='output directory path')
272
+ parser.add_argument('--frames-per-segment', type=int, default=193, help='number of frames per segment')
273
+ parser.add_argument('--max-workers', type=int, default=8, help='number of threads')
274
+ parser.add_argument('--skip-existing', action='store_true', default=True, help='skip files whose segments already exist')
275
+ parser.add_argument('--no-skip-existing', dest='skip_existing', action='store_false', help='force reprocessing')
276
+ parser.add_argument('--cur-part', type=int, default=1, help='which part to process')
277
+ parser.add_argument('--total-part', type=int, default=1, help='total number of parts')
278
+
279
+ args = parser.parse_args()
280
+
281
+ batch_process_videos(
282
+ input_folder=args.input_folder,
283
+ output_dir=args.output_dir,
284
+ frames_per_segment=args.frames_per_segment,
285
+ max_workers=args.max_workers,
286
+ skip_existing=args.skip_existing,
287
+ cur_part=args.cur_part,
288
+ total_part=args.total_part
289
+ )
290
+
291
+ if __name__ == "__main__":
292
+ main()
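For reference, a minimal sketch (with made-up sizes) of the cur_part/total_part arithmetic used in batch_process_videos above: the remainder is spread over the first parts, so 10 files split into 3 parts gives slices of 4, 3 and 3:

def part_bounds(total_videos, cur_part, total_part):
    # same index arithmetic as batch_process_videos
    per_part, remainder = divmod(total_videos, total_part)
    if cur_part <= remainder:
        start = (cur_part - 1) * (per_part + 1)
        end = start + per_part + 1
    else:
        start = remainder * (per_part + 1) + (cur_part - remainder - 1) * per_part
        end = start + per_part
    return start, end

print([part_bounds(10, p, 3) for p in (1, 2, 3)])  # [(0, 4), (4, 7), (7, 10)]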
dataset_code/sekai/preprocess/get_caption.py ADDED
@@ -0,0 +1,281 @@
1
+ from vllm import LLM, SamplingParams
2
+
3
+ import numpy as np
4
+ import pandas as pd
5
+ import torch
6
+ from torch.utils.data import DataLoader, Dataset
7
+
8
+ import argparse
9
+ import os
10
+ from typing import Tuple
11
+
12
+ import qwen_vl_utils
13
+ from qwen_vl_utils import process_vision_info
14
+ from tqdm import tqdm
15
+ from transformers import AutoProcessor
16
+
17
+ from video_reader import PyVideoReader
18
+
19
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
20
+
21
+ input_prompt = (
22
+ "Please generate a comprehensive caption for the following video, describing various aspects, including but not limited to: "
23
+ "1. The main theme and setting of the image (such as location, time of day, weather conditions, etc.) "
24
+ "2. Key objects and their characteristics (such as color, shape, size, etc.) "
25
+ "3. Relationships and interactions between objects (such as positioning, actions, etc.) "
26
+ "4. Any people present and their emotions or activities (such as expressions, postures, etc.) "
27
+ "5. Background and environmental details (such as architecture, natural scenery, etc.) "
28
+ "6. Motion of the Subject: The movement of people or objects in the video. Use verbs that describe movement. "
29
+ "7. Camera motion control: zoom in, zoom out, push in, pull out, pan right, pan left, truck right, truck left, tilt up, tilt down, pedestal up, pedestal down, arc shot, tracking shot, static shot, and handheld shot. "
30
+ 'Do not describe imagined content. Only describe what can be determined from the video. Avoid listing things. Do not use abstract concepts (love, hate, justice, infinity, joy) as subjects. Use concrete nouns (human, cup, dog, planet, headphones) for more accurate results. Use verbs to describe the movement and changes of the subject or people. Write your prompts in plain, conversational language. Start your description directly with the main subject, typically a noun. Without "\n", subheading and title. '
31
+ "For guidance on the expected output format and content length, refer to the provided examples:"
32
+ "The video begins with the viewer moving forward along a rocky path surrounded by dense greenery under a clear blue sky. The camera smoothly pans to reveal a signpost on the left, indicating a trailhead, before continuing along the uneven terrain dotted with shrubs and small trees. As the journey progresses, the path ascends slightly, leading to a set of wooden steps that navigate through the lush vegetation. The camera angle shifts subtly to capture the ascent, highlighting the natural textures of the rocks and foliage. Upon reaching the top, the scene opens up to a breathtaking view of Castle Rock Beach, with the vast ocean stretching out to the horizon and a prominent rock formation standing tall against the backdrop of the sea. The camera then pans back to the trail, showing more steps and the surrounding forested area, emphasizing the serene and untouched beauty of the location. The sunlight bathes the entire landscape in warm hues, casting sharp shadows and enhancing the vivid greens and earthy tones of the environment. The video concludes with the camera moving steadily along the trail, capturing the intricate details of the natural surroundings and the tranquil atmosphere of this remote coastal setting. "
33
+ "Attention: #######. Please describe the content of the video and the changes that occur, in chronological order:"
34
+ )
35
+
36
+ def _read_video_decord_cus(
37
+ ele: dict,
38
+ ) -> Tuple[torch.Tensor, float]:
39
+ vr = PyVideoReader(ele["video"], threads=0)
40
+ # crop video
41
+ # s_x, e_x, s_y, e_y = ele["crop"]
42
+ # sample video
43
+ # total_frames = ele["video_end"] - ele["video_start"]
44
+ # _, video_fps = len(vr), vr.get_avg_fps()
45
+ total_frames, video_fps = len(vr), vr.get_fps()
46
+ nframes = 32
47
+ # nframes = qwen_vl_utils.vision_process.smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
48
+ idx = np.linspace(0, total_frames - 1, nframes).round().astype(int).tolist()
49
+ # idx = [i + ele["video_start"] for i in idx]
50
+ video = vr.decode()[idx]
51
+ # video = vr.get_batch(idx).asnumpy()
52
+ video = torch.tensor(video).permute(0, 3, 1, 2) # Convert to TCHW format
53
+ # video = video[:, :, s_y:e_y, s_x:e_x]
54
+ sample_fps = nframes / max(total_frames, 1e-6) * video_fps
55
+ vr = None
56
+ del vr
57
+ return video, sample_fps
58
+
59
+
60
+ qwen_vl_utils.vision_process.VIDEO_READER_BACKENDS = {
61
+ "decord": _read_video_decord_cus,
62
+ }
63
+
64
+
65
+ class CaptionData(Dataset):
66
+ def __init__(self, video_data, input_video_root, output_json_folder, processor):
67
+ super().__init__()
68
+ self.input_video_root = input_video_root
69
+ self.output_json_folder = output_json_folder
70
+
71
+ vid_paths = [i["path"] for i in video_data]
72
+ video_keys = [i["video_key"] for i in video_data]
73
+ cameraFiles = [i["cameraFile"] for i in video_data]
74
+ locations = [i["location"] for i in video_data]
75
+ scenes = [i["scene"] for i in video_data]
76
+ crowdDensitys = [i["crowdDensity"] for i in video_data]
77
+ weathers = [i["weather"] for i in video_data]
78
+ timeOfDays = [i["timeOfDay"] for i in video_data]
79
+ save_paths = [
80
+ os.path.join(output_json_folder, (i["video_key"] + ".csv"))
81
+ for i in video_data
82
+ ]
83
+ print("part x origin num", len(save_paths))
84
+ self.paths = [
85
+ [save_path, vid_path, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay]
86
+ for save_path, vid_path, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay in zip(
87
+ save_paths, vid_paths, video_keys, cameraFiles, locations, scenes, crowdDensitys, weathers, timeOfDays
88
+ )
89
+ ]
90
+ print("part x need to process num", len(self.paths))
91
+
92
+ self.processor = processor
93
+
94
+ def __len__(self):
95
+ return len(self.paths)
96
+
97
+ def load_video(self, path, location, scene, crowdDensity, weather, timeOfDay):
98
+ useful_message = f"here is some auxiliary information about the video, the location is {location}, the scene is {scene}, the crowdDensity is {crowdDensity}, the weather is {weather}, the timeOfDay is {timeOfDay}."
99
+ messages = [
100
+ {
101
+ "role": "user",
102
+ "content": [
103
+ {
104
+ "type": "video",
105
+ "video": path,
106
+ # "total_pixels": 20480 * 28 * 28,
107
+ "min_pixels": 16 * 28 * 28,
108
+ # "max_pixels": 512 * 512,
109
+ "fps": 1.0,
110
+ # "video_start": cut[0],
111
+ # "video_end": cut[1],
112
+ # "crop": crop,
113
+ },
114
+ {"type": "text", "text": input_prompt.replace("#######", useful_message)},
115
+ ],
116
+ }
117
+ ]
118
+ # Preparation for inference
119
+ text = self.processor.apply_chat_template(
120
+ messages, tokenize=False, add_generation_prompt=True
121
+ )
122
+ image_inputs, video_inputs = process_vision_info(messages)
123
+
124
+ mm_data = {}
125
+ if image_inputs is not None:
126
+ mm_data["image"] = image_inputs
127
+ if video_inputs is not None:
128
+ mm_data["video"] = video_inputs
129
+
130
+ inputs = {
131
+ "prompt": text,
132
+ "multi_modal_data": mm_data,
133
+ }
134
+
135
+ return inputs
136
+
137
+ def wrapper(self, index):
138
+ save_path, video_path, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay = self.paths[index]
139
+ inputs = [self.load_video(video_path, location, scene, crowdDensity, weather, timeOfDay)]
140
+ return save_path, inputs, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay
141
+
142
+ def __getitem__(self, index):
143
+ try:
144
+ save_path, inputs, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay = self.wrapper(index)
145
+ return save_path, inputs, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay
146
+ except Exception as e:
147
+ print("error", e)
148
+ return False, False, False, False, False, False, False, False, False
149
+
150
+
151
+ def collate_fn(batch):
152
+ save_paths, inputs, video_keys, cameraFiles, locations, scenes, crowdDensitys, weathers, timeOfDays = zip(*batch)
153
+ inputs = inputs[0]
154
+ if not inputs:
155
+ return False, False, False, False, False, False, False, False, False
156
+ return save_paths, inputs, video_keys, cameraFiles, locations, scenes, crowdDensitys, weathers, timeOfDays
157
+
158
+
159
+ def parse_args():
160
+ parser = argparse.ArgumentParser()
161
+ parser.add_argument(
162
+ "--model_id_or_path",
163
+ type=str,
164
+ default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Qwen/Qwen2.5-VL-7B-Instruct/",
165
+ )
166
+ parser.add_argument("--batch_size", type=int, default=1)
167
+ parser.add_argument(
168
+ "--input_csv",
169
+ type=str,
170
+ default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/test.csv",
171
+ )
172
+ parser.add_argument(
173
+ "--input_video_root", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193"
174
+ )
175
+ parser.add_argument(
176
+ "--output_csv_path",
177
+ type=str,
178
+ default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/test-193",
179
+ )
180
+ parser.add_argument("--num_workers", type=int, default=0)
181
+ parser.add_argument("--part", type=int, default=0)
182
+ parser.add_argument("--total_part", type=int, default=1)
183
+ args = parser.parse_args()
184
+ return args
185
+
186
+
187
+ def main(args, llm):
188
+ assert args.batch_size == 1
189
+
190
+ model_id_or_path = args.model_id_or_path
191
+ processor = AutoProcessor.from_pretrained(model_id_or_path)
192
+
193
+ # Read and preprocess the input CSV
194
+ df = pd.read_csv(args.input_csv)
195
+ keep_columns = ['videoFile', 'cameraFile', 'location', 'scene', 'crowdDensity', 'weather', 'timeOfDay']
196
+ df = df[keep_columns].copy()
197
+
198
+ # Build paths and keys in batch
199
+ video_files = df['videoFile'].values
200
+ paths = np.array([os.path.join(args.input_video_root, f) for f in video_files])
201
+ video_keys = np.array([os.path.splitext(os.path.basename(f))[0] for f in video_files])
202
+
203
+ # Add new columns
204
+ df['path'] = paths
205
+ df['video_key'] = video_keys
206
+
207
+ # Convert to a list of dicts
208
+ video_data = df.to_dict('records')
209
+ print(f"Built {len(video_data)} video data items in total")
210
+ if len(video_data) == 0:
211
+ print("Finish: no data needs to be processed!")
212
+ return
213
+
214
+ video_data = video_data[args.part :: args.total_part]
215
+ data = CaptionData(
216
+ video_data, args.input_video_root, args.output_csv_path, processor
217
+ )
218
+ loader = DataLoader(
219
+ data,
220
+ batch_size=args.batch_size,
221
+ num_workers=args.num_workers,
222
+ pin_memory=False,
223
+ prefetch_factor=2 if args.num_workers > 0 else None,
224
+ shuffle=False,
225
+ drop_last=False,
226
+ collate_fn=collate_fn,
227
+ )
228
+
229
+ sampling_params = SamplingParams(
230
+ temperature=0.1,
231
+ top_p=0.001,
232
+ # top_k=1,
233
+ repetition_penalty=1.05,
234
+ max_tokens=512,
235
+ )
236
+
237
+ for save_paths, frames, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay in tqdm(loader):
238
+ if not save_paths:
239
+ print(f"{save_paths} is broken")
240
+ continue
241
+ if os.path.exists(save_paths[0]):
242
+ print(f"{save_paths} already exists")
243
+ continue
244
+ if len(save_paths[0]) > 255:
245
+ print("Name too long, skipping :", save_paths[0])
246
+ continue
247
+
248
+ folder, filename = os.path.split(save_paths[0])
249
+ os.makedirs(folder, exist_ok=True)
250
+
251
+ try:
252
+ results = []
253
+ for inputs in frames:
254
+ with torch.inference_mode():
255
+ outputs = llm.generate([inputs], sampling_params=sampling_params)
256
+ generated_text = outputs[0].outputs[0].text
257
+ results.append(generated_text)
258
+
259
+ df = pd.DataFrame({'videoFile': f"{video_key[0]}.mp4", 'cameraFile': cameraFile[0], 'caption': results[0].replace('\n', ' ').replace('\r', ' '), 'location': location[0], 'scene': scene[0], 'crowdDensity': crowdDensity[0], 'weather': weather[0], 'timeOfDay': timeOfDay[0]}, index=[0])
260
+ output_path = save_paths[0]
261
+ df.to_csv(f"{output_path}", index=False)
262
+
263
+ except Exception as e:
264
+ print(f"Error processing: {e}")
265
+
266
+ print("Done")
267
+
268
+
269
+ if __name__ == "__main__":
270
+ # os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
271
+ args = parse_args()
272
+
273
+ args.model_id_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Qwen/Qwen2.5-VL-7B-Instruct/"
274
+ llm = LLM(
275
+ args.model_id_or_path,
276
+ # max_model_len=32768 if process_vision_info is None else 4096,
277
+ # tensor_parallel_size=2,
278
+ # distributed_executor_backend="mp",
279
+ gpu_memory_utilization=0.95
280
+ )
281
+ main(args, llm)
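Note on the captioning script above: work is sharded across GPU processes with a strided slice (video_data[args.part::args.total_part]), and clips whose output CSV already exists are skipped, so interrupted runs can be resumed. A minimal standalone sketch of that sharding idea (illustrative only, not part of the uploaded files; shard/items are hypothetical names):

def shard(items, part, total_part):
    # Each process takes every total_part-th item starting at its own offset,
    # so the shards are disjoint and together cover the full list exactly once.
    return items[part::total_part]

items = list(range(10))
covered = sorted(x for p in range(4) for x in shard(items, p, 4))
assert covered == items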
dataset_code/sekai/preprocess/get_caption_keye.py ADDED
@@ -0,0 +1,326 @@
1
+ from vllm import LLM, SamplingParams
2
+
3
+ import numpy as np
4
+ import pandas as pd
5
+ import torch
6
+ from torch.utils.data import DataLoader, Dataset
7
+
8
+ import argparse
9
+ import os
10
+ from typing import Tuple
11
+
12
+ import keye_vl_utils
13
+ from keye_vl_utils import process_vision_info
14
+ from tqdm import tqdm
15
+ from transformers import AutoProcessor
16
+
17
+ from video_reader import PyVideoReader
18
+
19
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
20
+
21
+ input_prompt = (
22
+ "Please generate a comprehensive caption for the following video, describing various aspects, including but not limited to: "
23
+ "1. The main theme and setting of the image (such as location, time of day, weather conditions, etc.) "
24
+ "2. Key objects and their characteristics (such as color, shape, size, etc.) "
25
+ "3. Relationships and interactions between objects (such as positioning, actions, etc.) "
26
+ "4. Any people present and their emotions or activities (such as expressions, postures, etc.) "
27
+ "5. Background and environmental details (such as architecture, natural scenery, etc.) "
28
+ "6. Motion of the Subject: The movement of people or objects in the video. Use verbs that describe movement. "
29
+ "7. Camera motion control: zoom in, zoom out, push in, pull out, pan right, pan left, truck right, truck left, tilt up, tilt down, pedestal up, pedestal down, arc shot, tracking shot, static shot, and handheld shot. "
30
+ 'Do not describe imagined content. Only describe what can be determined from the video. Avoid listing things. Do not use abstract concepts (love, hate, justice, infinity, joy) as subjects. Use concrete nouns (human, cup, dog, planet, headphones) for more accurate results. Use verbs to describe the movement and changes of the subject or people. Write your prompts in plain, conversational language. Start your description directly with the main subject, typically a noun. Without "\n", subheading and title. '
31
+ "For guidance on the expected output format and content length, refer to the provided examples:"
32
+ "The video begins with the viewer moving forward along a rocky path surrounded by dense greenery under a clear blue sky. The camera smoothly pans to reveal a signpost on the left, indicating a trailhead, before continuing along the uneven terrain dotted with shrubs and small trees. As the journey progresses, the path ascends slightly, leading to a set of wooden steps that navigate through the lush vegetation. The camera angle shifts subtly to capture the ascent, highlighting the natural textures of the rocks and foliage. Upon reaching the top, the scene opens up to a breathtaking view of Castle Rock Beach, with the vast ocean stretching out to the horizon and a prominent rock formation standing tall against the backdrop of the sea. The camera then pans back to the trail, showing more steps and the surrounding forested area, emphasizing the serene and untouched beauty of the location. The sunlight bathes the entire landscape in warm hues, casting sharp shadows and enhancing the vivid greens and earthy tones of the environment. The video concludes with the camera moving steadily along the trail, capturing the intricate details of the natural surroundings and the tranquil atmosphere of this remote coastal setting. "
33
+ "Attention: #######. Please describe the content of the video and the changes that occur, in chronological order:"
34
+ )
35
+
36
+ # def _read_video_decord_cus(
37
+ # ele: dict,
38
+ # ) -> Tuple[torch.Tensor, float]:
39
+ # vr = PyVideoReader(ele["video"], threads=0)
40
+ # # crop video
41
+ # # s_x, e_x, s_y, e_y = ele["crop"]
42
+ # # sample video
43
+ # # total_frames = ele["video_end"] - ele["video_start"]
44
+ # # _, video_fps = len(vr), vr.get_avg_fps()
45
+ # total_frames, video_fps = len(vr), vr.get_fps()
46
+ # nframes = 32
47
+ # # nframes = keye_vl_utils.vision_process.smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
48
+ # idx = np.linspace(0, total_frames - 1, nframes).round().astype(int).tolist()
49
+ # # idx = [i + ele["video_start"] for i in idx]
50
+ # video = vr.decode()[idx]
51
+ # # video = vr.get_batch(idx).asnumpy()
52
+ # video = torch.tensor(video).permute(0, 3, 1, 2) # Convert to TCHW format
53
+ # # video = video[:, :, s_y:e_y, s_x:e_x]
54
+ # sample_fps = nframes / max(total_frames, 1e-6) * video_fps
55
+ # vr = None
56
+ # del vr
57
+ # return video, sample_fps
58
+
59
+ def _read_video_decord_cus(
60
+ ele: dict,
61
+ ) -> torch.Tensor:
62
+ """read video using decord.VideoReader
63
+
64
+ Args:
65
+ ele (dict): a dict contains the configuration of video.
66
+ support keys:
67
+ - video: the path of video. support "file://", "http://", "https://" and local path.
68
+ - video_start: the start time of video.
69
+ - video_end: the end time of video.
70
+ Returns:
71
+ tuple: frames tensor with shape (T, C, H, W), per-frame timestamps, and frame types.
72
+ """
73
+ import io as py_io
+ import logging
+ import time
+
+ import decord
+
+ logger = logging.getLogger(__name__)
74
+ st = time.time()
75
+ if isinstance(ele["video"], bytes):
76
+ video_path = ""
77
+ fp = py_io.BytesIO(ele["video"])
78
+ vr = decord.VideoReader(fp)
79
+ else:
80
+ video_path = ele["video"]
81
+ vr = decord.VideoReader(video_path)
82
+ # TODO: support start_pts and end_pts
83
+ if 'video_start' in ele or 'video_end' in ele:
84
+ raise NotImplementedError("not support start_pts and end_pts in decord for now.")
85
+ nframes, video_fps = len(vr), vr.get_avg_fps()
86
+ # timestamp start from 0.0
87
+ timestamps = torch.FloatTensor([(1 / video_fps) * i for i in range(nframes)])
88
+
89
+ # final_nframes = smart_nframes(ele, total_frames=nframes, video_fps=video_fps)
90
+ # indices = torch.linspace(0, nframes - 1, final_nframes).round().long()
91
+
92
+ final_nframes = 32
93
+ idx = np.linspace(0, nframes - 1, final_nframes).round().astype(int).tolist()
94
+
95
+ frames = vr.get_batch(idx).asnumpy()
96
+ frames = torch.tensor(frames).permute(0, 3, 1, 2)
97
+ logger.debug(f"Decord: {video_path=}, {nframes=}, {video_fps=}, time={time.time() - st:.3f}s")
98
+ timestamps = timestamps[idx]
99
+
100
+ ##### extract key frames start ######
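+ # NOTE: extract_slow_fast_frames and MIN_FRAME_SIMILARITY are not defined in this file; they are assumed to be provided by keye_vl_utils.vision_process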
101
+ threshold = ele.get("min_frame_similarity", MIN_FRAME_SIMILARITY)
102
+ frame_types = extract_slow_fast_frames(frames, threshold)
103
+ ##### extract key frames end ######
104
+ logger.debug(f"Read video: {video_path=}, {nframes=}, {video_fps=}, time={time.time() - st:.3f}s")
105
+
106
+ return frames, timestamps, frame_types
107
+
108
+ keye_vl_utils.vision_process.VIDEO_READER_BACKENDS = {
109
+ "decord": _read_video_decord_cus,
110
+ }
111
+
112
+
113
+ class CaptionData(Dataset):
114
+ def __init__(self, video_data, input_video_root, output_json_folder, processor):
115
+ super().__init__()
116
+ self.input_video_root = input_video_root
117
+ self.output_json_folder = output_json_folder
118
+
119
+ vid_paths = [i["path"] for i in video_data]
120
+ video_keys = [i["video_key"] for i in video_data]
121
+ cameraFiles = [i["cameraFile"] for i in video_data]
122
+ locations = [i["location"] for i in video_data]
123
+ scenes = [i["scene"] for i in video_data]
124
+ crowdDensitys = [i["crowdDensity"] for i in video_data]
125
+ weathers = [i["weather"] for i in video_data]
126
+ timeOfDays = [i["timeOfDay"] for i in video_data]
127
+ save_paths = [
128
+ os.path.join(output_json_folder, (i["video_key"] + ".csv"))
129
+ for i in video_data
130
+ ]
131
+ print("part x origin num", len(save_paths))
132
+ self.paths = [
133
+ [save_path, vid_path, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay]
134
+ for save_path, vid_path, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay in zip(
135
+ save_paths, vid_paths, video_keys, cameraFiles, locations, scenes, crowdDensitys, weathers, timeOfDays
136
+ )
137
+ ]
138
+ print("part x need to process num", len(self.paths))
139
+
140
+ self.processor = processor
141
+
142
+ def __len__(self):
143
+ return len(self.paths)
144
+
145
+ def load_video(self, path, location, scene, crowdDensity, weather, timeOfDay):
146
+ useful_message = f"here is some auxiliary information about the video, the location is {location}, the scene is {scene}, the crowdDensity is {crowdDensity}, the weather is {weather}, the timeOfDay is {timeOfDay}."
147
+ messages = [
148
+ {
149
+ "role": "user",
150
+ "content": [
151
+ {
152
+ "type": "video",
153
+ "video": path,
154
+ # "total_pixels": 20480 * 28 * 28,
155
+ # "min_pixels": 16 * 28 * 28,
156
+ # "max_pixels": 512 * 512,
157
+ # "fps": 1.0,
158
+ # "video_start": cut[0],
159
+ # "video_end": cut[1],
160
+ # "crop": crop,
161
+ },
162
+ {"type": "text", "text": input_prompt.replace("#######", useful_message)},
163
+ ],
164
+ }
165
+ ]
166
+ # Preparation for inference
167
+ text = self.processor.apply_chat_template(
168
+ messages, tokenize=False, add_generation_prompt=True
169
+ )
170
+ image_inputs, video_inputs, video_kwargs = process_vision_info(messages)
171
+
172
+ mm_data = {}
173
+ if image_inputs is not None:
174
+ mm_data["image"] = image_inputs
175
+ if video_inputs is not None:
176
+ mm_data["video"] = video_inputs
177
+
178
+ inputs = {
179
+ "prompt": text,
180
+ "multi_modal_data": mm_data,
181
+ # FPS will be returned in video_kwargs
182
+ "mm_processor_kwargs": video_kwargs,
183
+ }
184
+
185
+ return inputs
186
+
187
+ def wrapper(self, index):
188
+ save_path, video_path, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay = self.paths[index]
189
+ inputs = [self.load_video(video_path, location, scene, crowdDensity, weather, timeOfDay)]
190
+ return save_path, inputs, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay
191
+
192
+ def __getitem__(self, index):
193
+ try:
194
+ save_path, inputs, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay = self.wrapper(index)
195
+ return save_path, inputs, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay
196
+ except Exception as e:
197
+ print("error", e)
198
+ return False, False, False, False, False, False, False, False, False
199
+
200
+
201
+ def collate_fn(batch):
202
+ save_paths, inputs, video_keys, cameraFiles, locations, scenes, crowdDensitys, weathers, timeOfDays = zip(*batch)
203
+ inputs = inputs[0]
204
+ if not inputs:
205
+ return False, False, False, False, False, False, False, False, False
206
+ return save_paths, inputs, video_keys, cameraFiles, locations, scenes, crowdDensitys, weathers, timeOfDays
207
+
208
+
209
+ def parse_args():
210
+ parser = argparse.ArgumentParser()
211
+ parser.add_argument(
212
+ "--model_id_or_path",
213
+ type=str,
214
+ default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Qwen/Qwen2.5-VL-7B-Instruct/",
215
+ )
216
+ parser.add_argument("--batch_size", type=int, default=1)
217
+ parser.add_argument(
218
+ "--input_csv",
219
+ type=str,
220
+ default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/test.csv",
221
+ )
222
+ parser.add_argument(
223
+ "--input_video_root", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193"
224
+ )
225
+ parser.add_argument(
226
+ "--output_csv_path",
227
+ type=str,
228
+ default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/test-193",
229
+ )
230
+ parser.add_argument("--num_workers", type=int, default=0)
231
+ parser.add_argument("--part", type=int, default=0)
232
+ parser.add_argument("--total_part", type=int, default=1)
233
+ args = parser.parse_args()
234
+ return args
235
+
236
+
237
+ def main(args, llm, sampling_params):
238
+ assert args.batch_size == 1
239
+
240
+ model_id_or_path = args.model_id_or_path
241
+ processor = AutoProcessor.from_pretrained(model_id_or_path, trust_remote_code=True)
242
+
243
+ # Read and preprocess the input CSV
244
+ df = pd.read_csv(args.input_csv)
245
+ keep_columns = ['videoFile', 'cameraFile', 'location', 'scene', 'crowdDensity', 'weather', 'timeOfDay']
246
+ df = df[keep_columns].copy()
247
+
248
+ # Build paths and keys in batch
249
+ video_files = df['videoFile'].values
250
+ paths = np.array([os.path.join(args.input_video_root, f) for f in video_files])
251
+ video_keys = np.array([os.path.splitext(os.path.basename(f))[0] for f in video_files])
252
+
253
+ # Add new columns
254
+ df['path'] = paths
255
+ df['video_key'] = video_keys
256
+
257
+ # Convert to a list of dicts
258
+ video_data = df.to_dict('records')
259
+ print(f"Built {len(video_data)} video data items in total")
260
+
261
+ video_data = video_data[args.part :: args.total_part]
262
+ data = CaptionData(
263
+ video_data, args.input_video_root, args.output_csv_path, processor
264
+ )
265
+ loader = DataLoader(
266
+ data,
267
+ batch_size=args.batch_size,
268
+ num_workers=args.num_workers,
269
+ pin_memory=False,
270
+ prefetch_factor=2 if args.num_workers > 0 else None,
271
+ shuffle=False,
272
+ drop_last=False,
273
+ collate_fn=collate_fn,
274
+ )
275
+
276
+ for save_paths, frames, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay in tqdm(loader):
277
+ if not save_paths:
278
+ print(f"{save_paths} is broken")
279
+ continue
280
+ if os.path.exists(save_paths[0]):
281
+ print(f"{save_paths} already exists")
282
+ continue
283
+ if len(save_paths[0]) > 255:
284
+ print("Name too long, skipping :", save_paths[0])
285
+ continue
286
+
287
+ folder, filename = os.path.split(save_paths[0])
288
+ os.makedirs(folder, exist_ok=True)
289
+
290
+ try:
291
+ results = []
292
+ for inputs in frames:
293
+ with torch.inference_mode():
294
+ outputs = llm.generate([inputs], sampling_params=sampling_params)
295
+ generated_text = outputs[0].outputs[0].text
296
+ results.append(generated_text)
297
+
298
+ df = pd.DataFrame({'videoFile': f"{video_key[0]}.mp4", 'cameraFile': cameraFile[0], 'caption': results[0].replace('\n', ' ').replace('\r', ' '), 'location': location[0], 'scene': scene[0], 'crowdDensity': crowdDensity[0], 'weather': weather[0], 'timeOfDay': timeOfDay[0]}, index=[0])
299
+ output_path = save_paths[0]
300
+ df.to_csv(f"{output_path}", index=False)
301
+
302
+ except Exception as e:
303
+ print(f"Error processing: {e}")
304
+
305
+ print("Done")
306
+
307
+
308
+ if __name__ == "__main__":
309
+ # os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
310
+ args = parse_args()
311
+
312
+ args.model_id_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Kwai-Keye/Keye-VL-1_5-8B"
313
+ llm = LLM(
314
+ args.model_id_or_path,
315
+ # max_model_len=32768 if process_vision_info is None else 4096,
316
+ # tensor_parallel_size=2,
317
+ # distributed_executor_backend="mp",
318
+ gpu_memory_utilization=0.95,
319
+ trust_remote_code=True,
320
+ )
321
+
322
+ sampling_params = SamplingParams(
323
+ temperature=0.3,
324
+ max_tokens=512,
325
+ )
326
+ main(args, llm, sampling_params)
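Note: get_caption_keye.py swaps the decord backend in keye_vl_utils.vision_process for a custom reader that decodes a fixed number of uniformly spaced frames. A minimal sketch of the uniform index sampling it relies on (illustrative only; total_frames and nframes are example values):

import numpy as np

total_frames, nframes = 580, 32  # example values
# Evenly spaced indices from the first to the last frame, rounded to integers.
idx = np.linspace(0, total_frames - 1, nframes).round().astype(int).tolist()
assert len(idx) == nframes and idx[0] == 0 and idx[-1] == total_frames - 1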
dataset_code/sekai/preprocess/get_temp_input_csv.py ADDED
@@ -0,0 +1,173 @@
1
+ import os
2
+ import argparse
3
+ import pandas as pd
4
+ from tqdm import tqdm
5
+
6
+ from concurrent.futures import ThreadPoolExecutor, as_completed
7
+ import threading
8
+
9
+ # Thread locks to protect shared resources
10
+ video_data_lock = threading.Lock()
11
+ matched_count_lock = threading.Lock()
12
+
13
+ def process_video_file(video_file, args, csv_video_mapping):
14
+ """Process a single video file."""
15
+ video_path = os.path.join(args.input_video_root, video_file)
16
+ video_filename = os.path.splitext(video_file)[0]
17
+
18
+ matched_row = None
19
+ for csv_prefix, row in csv_video_mapping.items():
20
+ if video_filename.startswith(csv_prefix):
21
+ matched_row = row
22
+ break
23
+
24
+ result = None
25
+ if matched_row is not None:
26
+ final_csv_path = os.path.join(args.output_csv_path, (video_filename + ".csv"))
27
+
28
+ if os.path.exists(final_csv_path):
29
+ # Check whether the CSV file is corrupted
30
+ try:
31
+ import pandas as pd
32
+ # Try reading the CSV file to verify its integrity
33
+ pd.read_csv(final_csv_path)
34
+ return None # file exists and is valid, no need to reprocess
35
+ except (pd.errors.EmptyDataError, pd.errors.ParserError, UnicodeDecodeError, FileNotFoundError) as e:
36
+ # CSV file is corrupted, delete it
37
+ print(f"Warning: CSV file {final_csv_path} is corrupted ({e}). Deleting and will recreate.")
38
+ os.remove(final_csv_path)
39
+
40
+ result = {
41
+ 'videoFile': video_filename + ".mp4",
42
+ 'cameraFile': matched_row['cameraFile'],
43
+ 'location': matched_row['location'],
44
+ 'scene': matched_row['scene'],
45
+ 'crowdDensity': matched_row['crowdDensity'],
46
+ 'weather': matched_row['weather'],
47
+ 'timeOfDay': matched_row['timeOfDay'],
48
+ }
49
+ else:
50
+ print(f"Warning: No CSV record found for video file: {video_file}")
51
+
52
+ return result
53
+
54
+ # Main multithreaded processing
55
+ def process_videos_multithreaded(video_files, args, csv_video_mapping, max_workers=4):
56
+ video_data = []
57
+ matched_count = 0
58
+
59
+ with ThreadPoolExecutor(max_workers=max_workers) as executor:
60
+ # Submit all tasks
61
+ future_to_video = {
62
+ executor.submit(process_video_file, video_file, args, csv_video_mapping): video_file
63
+ for video_file in video_files
64
+ }
65
+
66
+ # Handle completed tasks
67
+ for future in tqdm(as_completed(future_to_video), total=len(video_files), desc="Processing videos"):
68
+ video_file = future_to_video[future]
69
+ try:
70
+ result = future.result()
71
+ if result is not None:
72
+ with video_data_lock:
73
+ video_data.append(result)
74
+ with matched_count_lock:
75
+ matched_count += 1
76
+ except Exception as exc:
77
+ print(f'Video {video_file} generated an exception: {exc}')
78
+
79
+ return video_data, matched_count
80
+
81
+ def parse_args():
82
+ parser = argparse.ArgumentParser()
83
+ parser.add_argument(
84
+ "--input_csv",
85
+ type=str,
86
+ default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking_updated.csv",
87
+ )
88
+ parser.add_argument(
89
+ "--input_video_root", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386"
90
+ )
91
+ parser.add_argument(
92
+ "--output_csv_path",
93
+ type=str,
94
+ default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386",
95
+ )
96
+ parser.add_argument(
97
+ "--output_csv_file",
98
+ type=str,
99
+ default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv",
100
+ )
101
+ parser.add_argument("--num_workers", type=int, default=16)
102
+ args = parser.parse_args()
103
+ return args
104
+
105
+ if __name__ == "__main__":
106
+ args = parse_args()
107
+
108
+ # Read the CSV file
109
+ df = pd.read_csv(args.input_csv)
110
+
111
+ # Keep only the required columns and drop the rest
112
+ keep_columns = ['videoFile', 'cameraFile', 'caption', 'location', 'scene', 'crowdDensity', 'weather', 'timeOfDay']
113
+ df = df[keep_columns].copy()
114
+
115
+ # Map video file-name prefixes from the CSV to their records
116
+ csv_video_mapping = {}
117
+ for idx, row in df.iterrows():
118
+ video_prefix = os.path.splitext(os.path.basename(row['videoFile']))[0]
119
+ csv_video_mapping[video_prefix] = row
120
+
121
+ # Collect all video files in the video folder
122
+ video_files = []
123
+ for file in os.listdir(args.input_video_root):
124
+ if file.lower().endswith(('.mp4', '.avi', '.mov', '.mkv', '.wmv', '.flv')): # add more video formats here if needed
125
+ video_files.append(file)
126
+
127
+ # # Prepare video data
128
+ # video_data = []
129
+ # matched_count = 0
130
+
131
+ # for video_file in tqdm(video_files):
132
+ # video_path = os.path.join(args.input_video_root, video_file)
133
+ # video_filename = os.path.splitext(video_file)[0]
134
+
135
+ # matched_row = None
136
+ # for csv_prefix, row in csv_video_mapping.items():
137
+ # if video_filename.startswith(csv_prefix):
138
+ # matched_row = row
139
+ # break
140
+
141
+ # if matched_row is not None:
142
+ # final_csv_path = os.path.join(args.output_csv_path, (video_filename + ".csv"))
143
+ # if not os.path.exists(final_csv_path):
144
+ # video_data.append({
145
+ # "video_key": video_filename,
146
+ # 'videoFile': video_filename + ".mp4",
147
+ # 'cameraFile': matched_row['cameraFile'],
148
+ # 'location': matched_row['location'],
149
+ # 'scene': matched_row['scene'],
150
+ # 'crowdDensity': matched_row['crowdDensity'],
151
+ # 'weather': matched_row['weather'],
152
+ # 'timeOfDay': matched_row['timeOfDay'],
153
+ # })
154
+ # matched_count += 1
155
+ # else:
156
+ # print(f"Warning: No CSV record found for video file: {video_file}")
157
+ video_data, matched_count = process_videos_multithreaded(video_files, args, csv_video_mapping, max_workers=args.num_workers)
158
+
159
+ print(f"Successfully matched {matched_count} videos with CSV records")
160
+ print(f"Total video data to process: {len(video_data)}")
161
+
162
+ if video_data:
163
+ output_df = pd.DataFrame(video_data)
164
+ output_csv_file = args.output_csv_file
165
+ output_df.to_csv(output_csv_file, index=False)
166
+ print(f"Video data saved to: {output_csv_file}")
167
+ print(f"Saved {len(video_data)} video records")
168
+ else:
169
+ output_df = pd.DataFrame()
170
+ output_csv_file = args.output_csv_file
171
+ output_df.to_csv(output_csv_file, index=False)
172
+ print(f"Empty video data saved to: {output_csv_file}")
173
+ print("No video data to save - created empty CSV file")
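Note: get_temp_input_csv.py links each cut segment back to its source CSV row by testing whether the segment file name starts with the source video's stem. A minimal sketch of that prefix match (illustrative only; the mapping and file names below are made up):

import os

# Hypothetical mapping from source-video stems to their CSV rows.
csv_video_mapping = {"walk_0001": {"location": "Tokyo"}}
segment_file = "walk_0001_000000_000193.mp4"  # a cut segment of the source video
stem = os.path.splitext(segment_file)[0]
matched_row = next((row for prefix, row in csv_video_mapping.items() if stem.startswith(prefix)), None)
print(matched_row)  # {'location': 'Tokyo'}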
dataset_code/sekai/preprocess/install.sh ADDED
@@ -0,0 +1,15 @@
1
+ # pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cu124
2
+ # pip install flashinfer-python==0.2.2.post1 -i https://flashinfer.ai/whl/cu124/torch2.6
3
+ # pip install vllm==0.8.4 qwen_vl_utils keye_vl_utils opencv-python-headless==4.11.0.86 numpy==1.26.4 video-reader-rs
4
+ # sudo pip install flash-attn==2.7.4.post1 --no-build-isolation
5
+
6
+ # cp -r /mnt/bn/yufan-dev-my/ysh/Codes/dummy_dataloader/decord_temp/flash-attention /opt/tiger
7
+ # cd /opt/tiger/flash-attention/hopper
8
+ # pip install ninja==1.11.1.3
9
+ # sudo python setup.py install
10
+
11
+ # pip install torch==2.8.0 torchvision==0.23.0 torchaudio==2.8.0 --index-url https://download.pytorch.org/whl/cu126
12
+ # pip install flashinfer-python==0.2.2.post1 -i https://flashinfer.ai/whl/cu124/torch2.6
13
+ # pip install qwen_vl_utils keye_vl_utils opencv-python-headless==4.11.0.86 numpy==1.26.4 video-reader-rs
14
+ # pip install flash-attn==2.8.3 --no-build-isolation
15
+ # pip install git+https://github.com/vllm-project/vllm.git
dataset_code/sekai/preprocess/kill.sh ADDED
@@ -0,0 +1,8 @@
1
+ pkill -9 -f 1.sh
2
+ pkill -9 -f 2.sh
3
+ pkill -9 -f 3.sh
4
+ pkill -9 -f 4.sh
5
+ pkill -9 -f 5.sh
6
+ pkill -9 -f 6.sh
7
+ pkill -9 -f get_caption.py
8
+ pkill -f "multiprocessing.spawn"
dataset_code/sekai/preprocess/merge_csv.py ADDED
@@ -0,0 +1,217 @@
1
+ import pandas as pd
2
+ import os
3
+ import glob
4
+ from pathlib import Path
5
+ from concurrent.futures import ThreadPoolExecutor, as_completed
6
+ from tqdm import tqdm
7
+ import threading
8
+ from functools import partial
9
+
10
+ def read_single_csv(file_path, expected_columns=None):
11
+ """
12
+ Helper to read a single CSV file
13
+
14
+ Args:
15
+ file_path (str): path to the CSV file
16
+ expected_columns (list): expected list of column names
17
+
18
+ Returns:
19
+ tuple: (DataFrame or None, file name, error message or None)
20
+ """
21
+ try:
22
+ df = pd.read_csv(file_path)
23
+
24
+ # Check that the columns are consistent
25
+ if expected_columns and df.columns.tolist() != expected_columns:
26
+ return None, os.path.basename(file_path), f"inconsistent column structure"
27
+
28
+ return df, os.path.basename(file_path), None
29
+
30
+ except Exception as e:
31
+ return None, os.path.basename(file_path), str(e)
32
+
33
+ def merge_single_row_csvs(folder_path, output_file='merged_data.csv', max_workers=None):
34
+ """
35
+ Merge all single-row CSV files in a folder into one large CSV file using multiple threads
36
+
37
+ Args:
38
+ folder_path (str): folder containing the CSV files
39
+ output_file (str): output file name
40
+ max_workers (int): maximum number of threads, None uses the system default
41
+ """
42
+ # Collect all CSV files in the folder
43
+ csv_files = glob.glob(os.path.join(folder_path, "*.csv"))
44
+
45
+ if not csv_files:
46
+ print("No CSV files found in the folder")
47
+ return
48
+
49
+ print(f"Found {len(csv_files)} CSV files")
50
+
51
+ # Read the first file to get the column names
52
+ try:
53
+ first_df = pd.read_csv(csv_files[0])
54
+ expected_columns = first_df.columns.tolist()
55
+ print(f"Expected column structure: {expected_columns}")
56
+ except Exception as e:
57
+ print(f"Cannot read the first file: {str(e)}")
58
+ return
59
+
60
+ # Lists to store all data
61
+ all_data = []
62
+ failed_files = []
63
+
64
+ # Create a partial function with the expected_columns argument preset
65
+ read_csv_partial = partial(read_single_csv, expected_columns=expected_columns)
66
+
67
+ # Use a ThreadPoolExecutor for multithreaded processing
68
+ print("Starting multithreaded file reading...")
69
+
70
+ with ThreadPoolExecutor(max_workers=max_workers) as executor:
71
+ # Submit all tasks
72
+ future_to_file = {executor.submit(read_csv_partial, file_path): file_path
73
+ for file_path in csv_files}
74
+
75
+ # Show progress with tqdm and collect results
76
+ with tqdm(total=len(csv_files), desc="Reading CSV files") as pbar:
77
+ for future in as_completed(future_to_file):
78
+ df, filename, error = future.result()
79
+
80
+ if df is not None:
81
+ all_data.append(df)
82
+ else:
83
+ failed_files.append((filename, error))
84
+
85
+ pbar.update(1)
86
+ # Show success/failure counts in the progress bar
87
+ pbar.set_postfix({
88
+ 'ok': len(all_data),
89
+ 'failed': len(failed_files)
90
+ })
91
+
92
+ # Report the processing results
93
+ print(f"\nProcessing complete:")
94
+ print(f"Successfully read: {len(all_data)} files")
95
+ print(f"Failed: {len(failed_files)} files")
96
+
97
+ if failed_files:
98
+ print("\nFailed files:")
99
+ for filename, error in failed_files[:10]: # only show the first 10 errors
100
+ print(f" {filename}: {error}")
101
+ if len(failed_files) > 10:
102
+ print(f" ... and {len(failed_files) - 10} more failed files")
103
+
104
+ if not all_data:
105
+ print("No data was read successfully")
106
+ return
107
+
108
+ # Merge all data
109
+ print("\nMerging data...")
110
+ with tqdm(desc="Merging data") as pbar:
111
+ merged_df = pd.concat(all_data, ignore_index=True)
112
+ pbar.update(1)
113
+
114
+ # Save the merged data
115
+ print("Saving file...")
116
+ with tqdm(desc="Saving file") as pbar:
117
+ merged_df.to_csv(output_file, index=False)
118
+ pbar.update(1)
119
+
120
+ print(f"\n✅ Merge complete!")
121
+ print(f"{len(merged_df)} rows of data saved to {output_file}")
122
+
123
+ # Show a data overview
124
+ print(f"\n📊 Data overview:")
125
+ print(f"Total rows: {len(merged_df):,}")
126
+ print(f"Total columns: {len(merged_df.columns)}")
127
+ print(f"File size: {os.path.getsize(output_file) / 1024 / 1024:.2f} MB")
128
+ print(f"Columns: {list(merged_df.columns)}")
129
+
130
+ # Show the first few rows
131
+ print(f"\n📝 Data preview:")
132
+ print(merged_df.head())
133
+
134
+ def merge_with_batch_processing(folder_path, output_file='merged_data.csv',
135
+ batch_size=1000, max_workers=None):
136
+ """
137
+ Merge a large number of CSV files in batches to reduce memory usage
138
+
139
+ Args:
140
+ folder_path (str): folder containing the CSV files
141
+ output_file (str): output file name
142
+ batch_size (int): number of files processed per batch
143
+ max_workers (int): maximum number of threads
144
+ """
145
+ csv_files = glob.glob(os.path.join(folder_path, "*.csv"))
146
+
147
+ if not csv_files:
148
+ print("No CSV files found in the folder")
149
+ return
150
+
151
+ print(f"Found {len(csv_files)} CSV files, processing in batches")
152
+
153
+ # Read the first file to get the column names
154
+ try:
155
+ first_df = pd.read_csv(csv_files[0])
156
+ expected_columns = first_df.columns.tolist()
157
+ except Exception as e:
158
+ print(f"Cannot read the first file: {str(e)}")
159
+ return
160
+
161
+ # Process files in batches
162
+ total_rows = 0
163
+ is_first_batch = True
164
+
165
+ with tqdm(total=len(csv_files), desc="Overall progress") as main_pbar:
166
+ for i in range(0, len(csv_files), batch_size):
167
+ batch_files = csv_files[i:i + batch_size]
168
+ batch_data = []
169
+
170
+ # Process the current batch
171
+ read_csv_partial = partial(read_single_csv, expected_columns=expected_columns)
172
+
173
+ with ThreadPoolExecutor(max_workers=max_workers) as executor:
174
+ future_to_file = {executor.submit(read_csv_partial, file_path): file_path
175
+ for file_path in batch_files}
176
+
177
+ for future in as_completed(future_to_file):
178
+ df, filename, error = future.result()
179
+ if df is not None:
180
+ batch_data.append(df)
181
+ main_pbar.update(1)
182
+
183
+ # Merge the current batch
184
+ if batch_data:
185
+ batch_df = pd.concat(batch_data, ignore_index=True)
186
+
187
+ # Save to file (append mode)
188
+ mode = 'w' if is_first_batch else 'a'
189
+ header = is_first_batch
190
+ batch_df.to_csv(output_file, mode=mode, header=header, index=False)
191
+
192
+ total_rows += len(batch_df)
193
+ is_first_batch = False
194
+
195
+ print(f"\nBatch {i//batch_size + 1} done, added {len(batch_df)} rows")
196
+
197
+ print(f"\n✅ All batches processed! {total_rows} rows in total saved to {output_file}")
198
+
199
+ # Usage example
200
+ if __name__ == "__main__":
201
+ folder_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386"
202
+ output_file = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386.csv"
203
+
204
+ # Option 1: standard multithreaded merge (recommended for medium-sized datasets)
205
+ merge_single_row_csvs(
206
+ folder_path=folder_path,
207
+ output_file=output_file,
208
+ max_workers=8 # adjust according to your CPU core count
209
+ )
210
+
211
+ # Option 2: batched merge (recommended for large datasets, saves memory)
212
+ # merge_with_batch_processing(
213
+ # folder_path=folder_path,
214
+ # output_file=output_file,
215
+ # batch_size=1000,
216
+ # max_workers=8
217
+ # )
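Note: merge_with_batch_processing bounds memory by writing each batch with pandas' append mode and emitting the header only once. A minimal sketch of that pattern (illustrative only; batches is a stand-in for the per-batch DataFrames):

import pandas as pd

batches = [pd.DataFrame({"a": [1]}), pd.DataFrame({"a": [2]})]  # stand-in for per-batch data
first = True
for batch_df in batches:
    # Write the header only for the first batch, then append without it.
    batch_df.to_csv("merged.csv", mode="w" if first else "a", header=first, index=False)
    first = False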
dataset_code/sekai/preprocess/temp.py ADDED
@@ -0,0 +1,25 @@
1
+ import pandas as pd
2
+ import numpy as np
3
+ import os
4
+
5
+ input_csv = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-193.csv"
6
+ input_video_root = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193"
7
+
8
+ # Read and preprocess the input CSV
9
+ df = pd.read_csv(input_csv)
10
+ keep_columns = ['videoFile', 'cameraFile', 'location', 'scene', 'crowdDensity', 'weather', 'timeOfDay']
11
+ df = df[keep_columns].copy()
12
+
13
+ # Build paths and keys in batch
14
+ video_files = df['videoFile'].values
15
+ paths = np.array([os.path.join(input_video_root, f) for f in video_files])
16
+ video_keys = np.array([os.path.splitext(os.path.basename(f))[0] for f in video_files])
17
+
18
+ # Add new columns
19
+ df['path'] = paths
20
+ df['video_key'] = video_keys
21
+
22
+ # Convert to a list of dicts
23
+ video_data = df.to_dict('records')
24
+
25
+ import pdb;pdb.set_trace()
dataset_code/sekai/preprocess/temp.sh ADDED
@@ -0,0 +1,155 @@
1
+ pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cu124
2
+ pip install vllm==0.8.4 qwen_vl_utils
3
+ pip install flashinfer-python==0.2.2.post1 -i https://flashinfer.ai/whl/cu124/torch2.6
4
+ pip install opencv-python-headless==4.11.0.86 numpy==1.26.4 video-reader-rs
5
+
6
+ # python cut_video.py \
7
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
8
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386 \
9
+ # --frames-per-segment 193 \
10
+ # --max-workers 32 \
11
+ # --cur-part 6 \
12
+ # --total-part 6 \
13
+
14
+
15
+ # python cut_video.py \
16
+ # --input_folder /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq \
17
+ # --output_dir /mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386 \
18
+ # --frames-per-segment 386 \
19
+ # --max-workers 32 \
20
+ # --cur-part 6 \
21
+ # --total-part 6 \
22
+
23
+ export PYTHONMULTIPROCESSING_START_METHOD=fork
24
+ export VLLM_WORKER_MULTIPROC_METHOD=spawn
25
+
26
+ python warm_up_model.py
27
+
28
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
29
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
30
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
31
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
32
+ --num_workers 8 \
33
+ --part 40 \
34
+ --total_part 48 &
35
+ sleep 20
36
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
37
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
38
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
39
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
40
+ --num_workers 8 \
41
+ --part 41 \
42
+ --total_part 48 &
43
+ sleep 20
44
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
45
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
46
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
47
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
48
+ --num_workers 8 \
49
+ --part 42 \
50
+ --total_part 48 &
51
+ sleep 20
52
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
53
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
54
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
55
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
56
+ --num_workers 8 \
57
+ --part 43 \
58
+ --total_part 48 &
59
+ sleep 20
60
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
61
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
62
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
63
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
64
+ --num_workers 8 \
65
+ --part 44 \
66
+ --total_part 48 &
67
+ sleep 20
68
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
69
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
70
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
71
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
72
+ --num_workers 8 \
73
+ --part 45 \
74
+ --total_part 48 &
75
+ sleep 20
76
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
77
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
78
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
79
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
80
+ --num_workers 8 \
81
+ --part 46 \
82
+ --total_part 48 &
83
+ sleep 20
84
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
85
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-real-walking-hq-386.csv" \
86
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-386" \
87
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-real-walking-hq-386" \
88
+ --num_workers 8 \
89
+ --part 47 \
90
+ --total_part 48
91
+
92
+
93
+ CUDA_VISIBLE_DEVICES=0 python get_caption.py \
94
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
95
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
96
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
97
+ --num_workers 8 \
98
+ --part 40 \
99
+ --total_part 48 &
100
+ sleep 20
101
+ CUDA_VISIBLE_DEVICES=1 python get_caption.py \
102
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
103
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
104
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
105
+ --num_workers 8 \
106
+ --part 41 \
107
+ --total_part 48 &
108
+ sleep 20
109
+ CUDA_VISIBLE_DEVICES=2 python get_caption.py \
110
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
111
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
112
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
113
+ --num_workers 8 \
114
+ --part 42 \
115
+ --total_part 48 &
116
+ sleep 20
117
+ CUDA_VISIBLE_DEVICES=3 python get_caption.py \
118
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
119
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
120
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
121
+ --num_workers 8 \
122
+ --part 43 \
123
+ --total_part 48 &
124
+ sleep 20
125
+ CUDA_VISIBLE_DEVICES=4 python get_caption.py \
126
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
127
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
128
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
129
+ --num_workers 8 \
130
+ --part 44 \
131
+ --total_part 48 &
132
+ sleep 20
133
+ CUDA_VISIBLE_DEVICES=5 python get_caption.py \
134
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
135
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
136
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
137
+ --num_workers 8 \
138
+ --part 45 \
139
+ --total_part 48 &
140
+ sleep 20
141
+ CUDA_VISIBLE_DEVICES=6 python get_caption.py \
142
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
143
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
144
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
145
+ --num_workers 8 \
146
+ --part 46 \
147
+ --total_part 48 &
148
+ sleep 20
149
+ CUDA_VISIBLE_DEVICES=7 python get_caption.py \
150
+ --input_csv "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv" \
151
+ --input_video_root "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386" \
152
+ --output_csv_path "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386" \
153
+ --num_workers 8 \
154
+ --part 47 \
155
+ --total_part 48
dataset_code/sft_sftnews/offload/app.py ADDED
@@ -0,0 +1,32 @@
1
+ from dataset_tool import CollectionDataset, collate_fn_map
2
+ from omegaconf import OmegaConf
3
+ from torch.utils.data import DataLoader
4
+
5
+ import torch
6
+ import numpy as np
7
+ import matplotlib.pyplot as plt
8
+ from matplotlib.animation import FuncAnimation
9
+ from IPython.display import HTML, display
10
+ from IPython.display import clear_output # used to clear previous outputs
11
+
12
+ from torch.utils.data import Subset
13
+
14
+ configs = OmegaConf.load("512_collection_config_vae1011_aligned_full_dump.yaml")
15
+ dataset = CollectionDataset.create_dataset_function(configs['train_data'],
16
+ configs['train_data_weights'],
17
+ **configs['data']['params'])
18
+
19
+ dataloader = DataLoader(
20
+ dataset,
21
+ batch_size=2,
22
+ num_workers=0,
23
+ collate_fn=collate_fn_map,
24
+ pin_memory=True,
25
+ # prefetch_factor=2,
26
+ # persistent_workers=True,
27
+ )
28
+
29
+ print(len(dataloader))
30
+
31
+ for idx, batch in enumerate(dataloader):
32
+ print(batch["videos"].shape)
dataset_code/sft_sftnews/offload/example_run.sh ADDED
@@ -0,0 +1,153 @@
1
+ set -x
2
+ sudo apt-get update && sudo apt-get install -y libgl1-mesa-glx
3
+ bash ./config/shell_scripts/cogvideo_i2v/train_wan_prepare.sh
4
+ git --no-pager log --decorate=short --pretty=oneline -n5
5
+
6
+ export OMNISTORE_LOAD_STRICT_MODE=0
7
+ export OMNISTORE_LOGGING_LEVEL=ERROR
8
+ #################################################################
9
+ ## Torch
10
+ #################################################################
11
+ export TOKENIZERS_PARALLELISM=false
12
+ export TORCH_LOGS="+dynamo,recompiles,graph_breaks"
13
+ export TORCHDYNAMO_VERBOSE=1
14
+ export TORCH_NCCL_ENABLE_MONITORING=1
15
+ export PYTORCH_CUDA_ALLOC_CONF="expandable_segments:True,garbage_collection_threshold:0.9"
16
+ #################################################################
17
+
18
+
19
+ #################################################################
20
+ ## NCCL
21
+ #################################################################
22
+ export NCCL_IB_GID_INDEX=3
23
+ export NCCL_IB_HCA=$ARNOLD_RDMA_DEVICE
24
+ export NCCL_SOCKET_IFNAME=eth0
25
+ export NCCL_SOCKET_TIMEOUT=3600000
26
+
27
+ export NCCL_DEBUG=WARN # disable the verbose NCCL logs
28
+ export NCCL_P2P_DISABLE=0
29
+ export NCCL_IB_DISABLE=0 # was 1
30
+ export NCCL_SHM_DISABLE=0 # was 1
31
+ export NCCL_P2P_LEVEL=NVL
32
+
33
+ export NCCL_PXN_DISABLE=0
34
+ export NCCL_NET_GDR_LEVEL=2
35
+ export NCCL_IB_QPS_PER_CONNECTION=4
36
+ export NCCL_IB_TC=160
37
+ export NCCL_IB_TIMEOUT=22
38
+ #################################################################
39
+
40
+ #################################################################
41
+ ## WANDB
42
+ #################################################################
43
+ export WANDB__SERVICE_WAIT=6000
44
+ export WANDB_MODE=online
45
+ export WANDB_DISABLE_SERVICE=True
46
+ #################################################################
47
+
48
+ #################################################################
49
+ ## DIST
50
+ #################################################################
51
+ MASTER_ADDR=$ARNOLD_WORKER_0_HOST
52
+ ports=(`echo $METIS_WORKER_0_PORT | tr ',' ' '`)
53
+ MASTER_PORT=${ports[0]}
54
+ NNODES=$ARNOLD_WORKER_NUM
55
+ NODE_RANK=$ARNOLD_ID
56
+ GPUS_PER_NODE=$ARNOLD_WORKER_GPU
57
+ # GPUS_PER_NODE=1
58
+ # NNODES=1
59
+ # NODE_RANK=0
60
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
61
+
62
+ DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
63
+ if [ ! -z $RDZV_BACKEND ]; then
64
+ DISTRIBUTED_ARGS="${DISTRIBUTED_ARGS} --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_id 9863 --rdzv_backend c10d"
65
+ export NCCL_SHM_DISABLE=1
66
+ fi
67
+
68
+ region=$RUNTIME_IDC_NAME
69
+ if [ $region == 'maliva' ]; then
70
+ hdfs_prefix=hdfs://harunava/home/byte_icaip_nebudata
71
+ export ARNOLD_BASE_DIR=hdfs://harunava
72
+ else
73
+ hdfs_prefix=hdfs://harunasg/home/byte_icaip_nebudata_sg
74
+ export RUNTIME_IDC_NAME=my2
75
+ export ARNOLD_BASE_DIR=hdfs://harunasg
76
+ fi
77
+
78
+ echo -e "\033[31mDISTRIBUTED_ARGS: ${DISTRIBUTED_ARGS}\033[0m"
79
+ echo -e "\033[31mPERSISTENCE_PATH: ${hdfs_prefix}\033[0m"
80
+
81
+ #################################################################
82
+
83
+ #################################################################
84
+ ## Training
85
+ #################################################################
86
+ learning_rate="1e-5"
87
+ lr_schedule="cosine_with_restarts"
88
+ optimizer="adamw"
89
+ steps="2000000"
90
+ version="v0.4"
91
+ DATASET_CONFIG="config/dataset_config/512_collection_config_vae1011_aligned_full_dump.yaml"
92
+
93
+ CKPT="/mnt/bn/icvg/users/yangxiao.0/Wan-AI/Wan2.1-I2V-14B-720P-patchsize1"
94
+ # CKPT="./models/Wan2.1-I2V-14B-720P"
95
+ output_dir="hdfs://harunasg/home/byte_icvg_aigc_cp/user/video/dali/dit_ckpt/i2v_wan_imageonly_lime_official_rl_1e-5_rm_with_1st_frame_round_4_2fps_rm_0812_color_VQ_MQ_MPS_0_cc_0814"
96
+ #output_dir="hdfs://harunasg/home/byte_icaip_nebudata_sg/fuwen/results/wan"
97
+ logging_dir="/mnt/bn/icvg/users/xinwei.huang/video_refl_new/log"
98
+ #logging_dir="./results/wan"
99
+ #################################################################
100
+
101
+ #TODO: prefetching
102
+ export WANDB_PROJECT=dc_ae_dit
103
+ export EXP_NAME=refl_2e-5_no_flowmatching_overall_fps6_rm_with_1st_frame_round_3_2fps_0812_RM_color_VQ_MQ_MPS_0_cc_loss
104
+ python3 -m torch.distributed.launch $DISTRIBUTED_ARGS ./training/train_wan_i2v_dc_ae.py \
105
+ --dataset_config $DATASET_CONFIG \
106
+ --frame_buckets 49 \
107
+ --dataloader_num_workers 1 \
108
+ --prefetch_factor 2 \
109
+ --pin_memory \
110
+ --seed 42 \
111
+ --mixed_precision bf16 \
112
+ --output_dir $output_dir \
113
+ --train_batch_size 1 \
114
+ --max_train_steps $steps \
115
+ --checkpointing_steps 50 \
116
+ --gradient_accumulation_steps 1 \
117
+ --learning_rate $learning_rate \
118
+ --lr_scheduler $lr_schedule \
119
+ --lr_warmup_steps 1 \
120
+ --lr_num_cycles 1 \
121
+ --optimizer $optimizer \
122
+ --beta1 0.9 \
123
+ --beta2 0.95 \
124
+ --weight_decay 0.001 \
125
+ --max_grad_norm 1.0 \
126
+ --allow_tf32 \
127
+ --report_to wandb \
128
+ --nccl_timeout 1800 \
129
+ --resume_from_checkpoint latest \
130
+ --wandb_project ${WANDB_PROJECT} \
131
+ --wandb_name ${EXP_NAME} \
132
+ --pretrained_model_name_or_path $CKPT \
133
+ --use_robust_loss \
134
+ --drop_first_frame_condition_threshold 0.00 \
135
+ --drop_last_frame_condition_threshold 0.0 \
136
+ --logging_dir $logging_dir \
137
+ --video_logging_interval 1000000 \
138
+ --scalar_logging_interval 1 \
139
+ --tp_size 8 \
140
+ --gradient_checkpointing \
141
+ --ema \
142
+ --ema_decay 0.99 \
143
+ --ema_interval 1 \
144
+ --sampling_steps 30 \
145
+ --max_turn_step 29 \
146
+ --min_turn_step 6 \
147
+ --optimizing_objective "VQ, MQ" \
148
+ --selected_frames 0 12 24 36 48 60 \
149
+ --half_input \
150
+ --use_cfg \
151
+ --rm_model_path "/mnt/bn/icvg/users/xinwei.huang/VideoAlign/rm_output_0801_first_color" \
152
+ --transformer_model_path "/mnt/bn/icvg/users/xinwei.huang/video_models/rm0806_round3_mps0.13000.pth/model.pt" \
153
+ --frame_reward_loss_weight 0
dataset_code/sft_sftnews/offload/install.sh ADDED
@@ -0,0 +1,119 @@
1
+ sudo apt update
2
+ sudo apt install -y libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev gfortran htop screen
3
+ sudo apt-get update
4
+ sudo apt-get install -y build-essential python3-dev python3-setuptools make cmake
5
+ sudo apt-get install -y ffmpeg libavcodec-dev libavfilter-dev libavformat-dev libavutil-dev libssl-dev screen
6
+
7
+ # install the dependencies
8
+ pip install -r requirements.txt
9
+ pip install --upgrade diffusers transformers accelerate deepspeed nvitop
10
+ pip install git+https://github.com/huggingface/diffusers
11
+
12
+ # ## for AIP dataset
13
+ git clone git@code.byted.org:us-cv/mininova.git /tmp/mininova
14
+ pip install /tmp/mininova/py_pkg/byted/nebudata/
15
+ pip install /tmp/mininova/py_pkg/byted/aipcommon/
16
+
17
+ # for decord
18
+ # git clone -b v0.3 https://github.com/dmlc/dlpack.git
19
+ cd /mnt/bn/yufan-dev-my/ysh/Codes/dummy_dataloader/decord_temp/dlpack
20
+ mkdir build
21
+ cd build
22
+ cmake .. -DUSE_CUDA=0 -DCMAKE_BUILD_TYPE=Release
23
+ make
24
+ sudo make install
25
+
26
+ cd /mnt/bn/yufan-dev-my/ysh/Codes/dummy_dataloader/decord_temp/dmlc-core
27
+ mkdir build
28
+ cd build
29
+ cmake .. -DUSE_CUDA=0 -DCMAKE_BUILD_TYPE=Release
30
+ make
31
+ sudo make install
32
+
33
+ cd /mnt/bn/yufan-dev-my/ysh/Codes/dummy_dataloader/decord_temp/decord
34
+ mkdir build
35
+ cd build
36
+ cmake .. -DUSE_CUDA=0 -DCMAKE_BUILD_TYPE=Release
37
+ make
38
+ cd ../python
39
+ pwd=$PWD
40
+ echo "PYTHONPATH=$PYTHONPATH:$pwd" >> ~/.bashrc
41
+ source ~/.bashrc
42
+ sudo python3 setup.py install --user
43
+
44
+ # for flash-attn
45
+ pip install flash-attn==2.7.4.post1 --no-build-isolation
46
+ cp -r /mnt/bn/yufan-dev-my/ysh/Codes/dummy_dataloader/decord_temp/flash-attention /opt/tiger
47
+ cd /opt/tiger/flash-attention/hopper
48
+ pip install ninja==1.11.1.3
49
+ sudo python setup.py install
50
+
51
+ # for github
52
+ # git remote set-url origin https://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@github.com/SHYuanBest/fp_train.git
53
+ git config --global user.name SHYuanBest
54
+ git config --global user.email shyuan-cs@hotmail.com
55
+
56
+ pip uninstall torchao -y
57
+
58
+
59
+ # sudo apt update
60
+ # sudo apt install -y libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libswresample-dev gfortran htop screen
61
+ # sudo apt-get update
62
+ # sudo apt-get install -y build-essential python3-dev python3-setuptools make cmake
63
+ # sudo apt-get install -y ffmpeg libavcodec-dev libavfilter-dev libavformat-dev libavutil-dev libssl-dev screen
64
+
65
+ # # install the dependencies
66
+ # pip install -r requirements.txt
67
+ # pip install --upgrade diffusers transformers accelerate deepspeed nvitop
68
+ # pip install git+https://github.com/huggingface/diffusers
69
+
70
+ # # ## for AIP dataset
71
+ # git clone git@code.byted.org:us-cv/mininova.git /tmp/mininova
72
+ # pip install /tmp/mininova/py_pkg/byted/nebudata/
73
+ # pip install /tmp/mininova/py_pkg/byted/aipcommon/
74
+
75
+ # # for decord
76
+ # # git clone -b v0.3 https://github.com/dmlc/dlpack.git
77
+ # cd /mnt/bn/yufan-dev-my/ysh/Codes/dummy_dataloader/decord_temp/dlpack
78
+ # mkdir build
79
+ # cd build
80
+ # cmake .. -DUSE_CUDA=0 -DCMAKE_BUILD_TYPE=Release
81
+ # make
82
+ # sudo make install
83
+
84
+ # cd /mnt/bn/yufan-dev-my/ysh/Codes/dummy_dataloader/decord_temp/dmlc-core
85
+ # mkdir build
86
+ # cd build
87
+ # cmake .. -DUSE_CUDA=0 -DCMAKE_BUILD_TYPE=Release
88
+ # make
89
+ # sudo make install
90
+
91
+ # cd /mnt/bn/yufan-dev-my/ysh/Codes/dummy_dataloader/decord_temp/decord
92
+ # mkdir build
93
+ # cd build
94
+ # cmake .. -DUSE_CUDA=0 -DCMAKE_BUILD_TYPE=Release
95
+ # make
96
+ # cd ../python
97
+ # pwd=$PWD
98
+ # echo "PYTHONPATH=$PYTHONPATH:$pwd" >> ~/.bashrc
99
+ # source ~/.bashrc
100
+ # sudo python3 setup.py install --user
101
+
102
+ # # for flash-attn
103
+ # pip install torch==2.8.0 torchvision==0.23.0 torchaudio==2.8.0 --index-url https://download.pytorch.org/whl/cu126
104
+ # pip install flashinfer-python==0.3.1 vllm==0.10.1.1 qwen_vl_utils keye_vl_utils opencv-python-headless==4.11.0.86 numpy==1.26.4 video-reader-rs
105
+ # cd /mnt/bn/yufan-dev-my/ysh/Codes/dummy_dataloader/decord_temp/flash-attention-new
106
+ # sudo python setup.py install
107
+
108
+ # cp -r /mnt/bn/yufan-dev-my/ysh/Codes/dummy_dataloader/decord_temp/flash-attention /opt/tiger
109
+ # cd /opt/tiger/flash-attention/hopper
110
+ # pip install ninja==1.11.1.3
111
+ # sudo python setup.py install
112
+
113
+ # # for github
114
+ # # git remote set-url origin https://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@github.com/SHYuanBest/fp_train.git
115
+ # git config --global user.name SHYuanBest
116
+ # git config --global user.email shyuan-cs@hotmail.com
117
+
118
+ # pip uninstall torchao
119
+ # pip uninstall pynvml
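A quick way to confirm the from-source decord build above actually took effect (not part of the committed script; the clip path is a placeholder pointing at any small local video):

    # check_decord.py -- sanity-check the locally built decord on CPU
    import decord
    from decord import VideoReader, cpu

    print("decord:", decord.__version__)
    vr = VideoReader("/path/to/small_clip.mp4", ctx=cpu(0))  # placeholder path
    print("frames:", len(vr), "frame shape:", tuple(vr[0].shape))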
dataset_code/sft_sftnews/offload/kill.sh ADDED
@@ -0,0 +1,11 @@
1
+ pkill -9 -f run_hv_save_videos.sh
2
+ pkill -9 -f run_hv.sh
3
+ pkill -9 -f run_hv_0.sh
4
+ pkill -9 -f run_hv_1.sh
5
+ pkill -9 -f run_hv_2.sh
6
+ pkill -9 -f run_hv_3.sh
7
+ pkill -9 -f run_hv_4.sh
8
+ pkill -9 -f run_hv_5.sh
9
+
10
+ pkill -9 -f offoload_features_hv_save_videos.py
11
+ pkill -9 -f offoload_features_hv.py
dataset_code/sft_sftnews/offload/offoload_features_backup.py ADDED
@@ -0,0 +1,185 @@
1
+ import os
2
+ from tqdm import tqdm
3
+ from diffusers import AutoencoderKLHunyuanVideo
4
+ from transformers import (
5
+ CLIPTextModel,
6
+ CLIPTokenizer,
7
+ LlamaModel,
8
+ LlamaTokenizerFast,
9
+ SiglipImageProcessor,
10
+ SiglipVisionModel,
11
+ )
12
+ from diffusers.video_processor import VideoProcessor
13
+ from diffusers.utils import export_to_video, load_image
14
+
15
+ from dataset_tool import CollectionDataset, collate_fn_map
16
+ from omegaconf import OmegaConf
17
+ from torch.utils.data import DataLoader
18
+
19
+ import torch
20
+ import torch.distributed as dist
21
+ import torch.nn as nn
22
+ from torch.nn.parallel import DistributedDataParallel as DDP
23
+ import torchvision.transforms as transforms
24
+ import numpy as np
25
+ import matplotlib.pyplot as plt
26
+ from matplotlib.animation import FuncAnimation
27
+ from IPython.display import HTML, display
28
+ from IPython.display import clear_output # used to clear previous outputs
29
+
30
+ from accelerate import Accelerator, DistributedType
31
+ from accelerate.logging import get_logger
32
+ from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
33
+
34
+ from utils_framepack import encode_image, encode_prompt
35
+
36
+ def main(rank, world_size):
37
+ weight_dtype = torch.bfloat16
38
+ batch_size = 2
39
+ dataloader_num_workers = 0
40
+ output_latent_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents"
41
+ pretrained_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo"
42
+ siglip_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl"
43
+ os.makedirs(output_latent_folder, exist_ok=True)
44
+
45
+ device = "cuda"
46
+
47
+ # Load the tokenizers
48
+ tokenizer_one = LlamaTokenizerFast.from_pretrained(
49
+ pretrained_model_name_or_path,
50
+ subfolder="tokenizer",
51
+ )
52
+ tokenizer_two = CLIPTokenizer.from_pretrained(
53
+ pretrained_model_name_or_path,
54
+ subfolder="tokenizer_2",
55
+ )
56
+ feature_extractor = SiglipImageProcessor.from_pretrained(
57
+ siglip_model_name_or_path,
58
+ subfolder="feature_extractor",
59
+
60
+ )
61
+
62
+ vae = AutoencoderKLHunyuanVideo.from_pretrained(
63
+ pretrained_model_name_or_path,
64
+ subfolder="vae",
65
+ torch_dtype=torch.float32,
66
+ )
67
+ vae_scale_factor_spatial = vae.spatial_compression_ratio
68
+ video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)
69
+
70
+ text_encoder_one = LlamaModel.from_pretrained(
71
+ pretrained_model_name_or_path,
72
+ subfolder="text_encoder",
73
+ torch_dtype=weight_dtype,
74
+ )
75
+ text_encoder_two = CLIPTextModel.from_pretrained(
76
+ pretrained_model_name_or_path,
77
+ subfolder="text_encoder_2",
78
+ torch_dtype=weight_dtype,
79
+ )
80
+ image_encoder = SiglipVisionModel.from_pretrained(
81
+ siglip_model_name_or_path,
82
+ subfolder="image_encoder",
83
+ torch_dtype=weight_dtype,
84
+ )
85
+
86
+ vae.requires_grad_(False)
87
+ text_encoder_one.requires_grad_(False)
88
+ text_encoder_two.requires_grad_(False)
89
+ image_encoder.requires_grad_(False)
90
+ vae.eval()
91
+ text_encoder_one.eval()
92
+ text_encoder_two.eval()
93
+ image_encoder.eval()
94
+
95
+ vae = vae.to(device)
96
+ text_encoder_one = text_encoder_one.to(device)
97
+ text_encoder_two = text_encoder_two.to(device)
98
+ image_encoder = image_encoder.to(device)
99
+
100
+ configs = OmegaConf.load("512_collection_config_vae1011_aligned_full_dump.yaml")
101
+ dataset = CollectionDataset.create_dataset_function(configs['train_data'],
102
+ configs['train_data_weights'],
103
+ **configs['data']['params'])
104
+ dataloader = DataLoader(
105
+ dataset,
106
+ shuffle=False,
107
+ batch_size=batch_size,
108
+ num_workers=dataloader_num_workers,
109
+ collate_fn=collate_fn_map,
110
+ pin_memory=True,
111
+ prefetch_factor=2 if dataloader_num_workers != 0 else None,
112
+ persistent_workers=True if dataloader_num_workers != 0 else False,
113
+ )
114
+
115
+ for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader), desc="Processing batches"):
116
+ exis_flag = True
117
+ num_frames = batch["video_metadata"]["num_frames"]
118
+ for uttid, num_frame in zip(batch["uttid"], num_frames):
119
+ output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}.pt")
120
+ if not os.path.exists(output_path):
121
+ exis_flag = False
122
+ break
123
+ if exis_flag:
124
+ print("skipping!")
125
+ continue
126
+
127
+ with torch.no_grad():
128
+ # Get Vae feature
129
+ pixel_values = batch["videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
130
+ vae_latents = vae.encode(pixel_values).latent_dist.sample()
131
+ vae_latents = vae_latents * vae.config.scaling_factor
132
+
133
+ # Encode prompts
134
+ prompts = batch["prompts"]
135
+ prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = encode_prompt(
136
+ tokenizer=tokenizer_one,
137
+ text_encoder=text_encoder_one,
138
+ tokenizer_2=tokenizer_two,
139
+ text_encoder_2=text_encoder_two,
140
+ prompt=prompts,
141
+ device=device,
142
+ )
143
+
144
+ # Prepare images
145
+ image_tensor = batch["first_frames_images"]
146
+ images = [transforms.ToPILImage()(x.to(torch.uint8)) for x in image_tensor]
147
+ image = video_processor.preprocess(image=images, height=batch["videos"].shape[-2], width=batch["videos"].shape[-1])
148
+ image_embeds = encode_image(
149
+ feature_extractor,
150
+ image_encoder,
151
+ image,
152
+ device=device,
153
+ dtype=weight_dtype,
154
+ )
155
+
156
+ for uttid, cur_vae_latent, cur_prompt_embed, cur_pooled_prompt_embed, cur_prompt_attention_mask, cur_image_embed in zip(batch["uttid"], vae_latents, prompt_embeds, pooled_prompt_embeds, prompt_attention_mask, image_embeds):
157
+ output_path = os.path.join(output_latent_folder, f"{uttid}_{pixel_values.shape[2]}.pt")
158
+ torch.save(
159
+ {
160
+ "vae_latent": cur_vae_latent.cpu().detach(),
161
+ "prompt_embed": cur_prompt_embed.cpu().detach(),
162
+ "pooled_prompt_embeds": cur_pooled_prompt_embed.cpu().detach(),
163
+ "prompt_attention_mask": cur_prompt_attention_mask.cpu().detach(),
164
+ "image_embeds": cur_image_embed.cpu().detach(),
165
+ },
166
+ output_path
167
+ )
168
+ print(f"save to: {output_path}")
169
+
170
+ def setup_distributed_env():
171
+ dist.init_process_group(backend="nccl")
172
+ torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
173
+
174
+ def cleanup_distributed_env():
175
+ dist.destroy_process_group()
176
+
177
+ if __name__ == "__main__":
178
+ setup_distributed_env()
179
+
180
+ global_rank = dist.get_rank()
181
+ local_rank = int(os.environ["LOCAL_RANK"])
182
+ device = torch.cuda.current_device()
183
+ world_size = dist.get_world_size()
184
+
185
+ main(world_size=world_size, rank=device)
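For reference, a minimal sketch (not part of the repository) of how a consumer could read back one of the .pt files this script writes; the path is a placeholder, and the keys match the dict passed to torch.save above:

    import torch

    # Each offloaded file stores CPU tensors for a single uttid.
    sample = torch.load("/path/to/fp_offload_latents/<uttid>_121.pt", map_location="cpu")  # placeholder path
    vae_latent = sample["vae_latent"]          # scaled HunyuanVideo VAE latents
    prompt_embed = sample["prompt_embed"]      # LLaMA text-encoder hidden states
    pooled = sample["pooled_prompt_embeds"]    # CLIP pooled text embedding
    attn_mask = sample["prompt_attention_mask"]
    image_embeds = sample["image_embeds"]      # SigLIP features of the first frame
    print({k: tuple(v.shape) for k, v in sample.items()})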
dataset_code/sft_sftnews/offload/offoload_features_hv.py ADDED
@@ -0,0 +1,352 @@
1
+ import json
2
+ import argparse
3
+ import os
4
+ from tqdm import tqdm
5
+ from diffusers import AutoencoderKLHunyuanVideo
6
+ from transformers import (
7
+ CLIPTextModel,
8
+ CLIPTokenizer,
9
+ LlamaModel,
10
+ LlamaTokenizerFast,
11
+ SiglipImageProcessor,
12
+ SiglipVisionModel,
13
+ )
14
+ from diffusers.video_processor import VideoProcessor
15
+ from diffusers.utils import export_to_video, load_image
16
+
17
+ from dataset_tool import CollectionDataset, collate_fn_map
18
+ from omegaconf import OmegaConf
19
+ from torch.utils.data import DataLoader
20
+
21
+ import torch
22
+ import torch.distributed as dist
23
+ import torch.nn as nn
24
+ from torch.nn.parallel import DistributedDataParallel as DDP
25
+ from torch.utils.data.distributed import DistributedSampler
26
+ from torch.utils.data import Subset
27
+ import torchvision.transforms as transforms
28
+ import numpy as np
29
+ import matplotlib.pyplot as plt
30
+ from matplotlib.animation import FuncAnimation
31
+ from IPython.display import HTML, display
32
+ from IPython.display import clear_output # used to clear previous outputs
33
+
34
+ from accelerate import Accelerator, DistributedType
35
+ from accelerate.logging import get_logger
36
+ from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
37
+ from diffusers.training_utils import free_memory
38
+
39
+ from utils_framepack import encode_image, encode_prompt
40
+
41
+ def setup_distributed_env():
42
+ dist.init_process_group(backend="nccl")
43
+ torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
44
+
45
+ def cleanup_distributed_env():
46
+ dist.destroy_process_group()
47
+
48
+ def main(rank, world_size, global_rank, batch_size, dataloader_num_workers, config_path, output_latent_folder, pretrained_model_name_or_path, siglip_model_name_or_path):
49
+ weight_dtype = torch.bfloat16
50
+ # batch_size = 2
51
+ # dataloader_num_workers = 8
52
+ # config_path = "512_collection_config_vae1011_aligned_full_dump.yaml"
53
+ # output_latent_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents"
54
+ # pretrained_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo"
55
+ # siglip_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl"
56
+
57
+ base_folder = output_latent_folder
58
+ device = rank
59
+
60
+ # Load the tokenizers
61
+ # tokenizer_one = LlamaTokenizerFast.from_pretrained(
62
+ # pretrained_model_name_or_path,
63
+ # subfolder="tokenizer",
64
+ # )
65
+ # tokenizer_two = CLIPTokenizer.from_pretrained(
66
+ # pretrained_model_name_or_path,
67
+ # subfolder="tokenizer_2",
68
+ # )
69
+ # feature_extractor = SiglipImageProcessor.from_pretrained(
70
+ # siglip_model_name_or_path,
71
+ # subfolder="feature_extractor",
72
+
73
+ # )
74
+
75
+ # vae = AutoencoderKLHunyuanVideo.from_pretrained(
76
+ # pretrained_model_name_or_path,
77
+ # subfolder="vae",
78
+ # torch_dtype=torch.float32,
79
+ # )
80
+ # vae_scale_factor_spatial = vae.spatial_compression_ratio
81
+ # video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)
82
+
83
+ # text_encoder_one = LlamaModel.from_pretrained(
84
+ # pretrained_model_name_or_path,
85
+ # subfolder="text_encoder",
86
+ # torch_dtype=weight_dtype,
87
+ # )
88
+ # text_encoder_two = CLIPTextModel.from_pretrained(
89
+ # pretrained_model_name_or_path,
90
+ # subfolder="text_encoder_2",
91
+ # torch_dtype=weight_dtype,
92
+ # )
93
+ # image_encoder = SiglipVisionModel.from_pretrained(
94
+ # siglip_model_name_or_path,
95
+ # subfolder="image_encoder",
96
+ # torch_dtype=weight_dtype,
97
+ # )
98
+
99
+ # vae.requires_grad_(False)
100
+ # text_encoder_one.requires_grad_(False)
101
+ # text_encoder_two.requires_grad_(False)
102
+ # image_encoder.requires_grad_(False)
103
+ # vae.eval()
104
+ # text_encoder_one.eval()
105
+ # text_encoder_two.eval()
106
+ # image_encoder.eval()
107
+
108
+ # vae = vae.to(device)
109
+ # text_encoder_one = text_encoder_one.to(device)
110
+ # text_encoder_two = text_encoder_two.to(device)
111
+ # image_encoder = image_encoder.to(device)
112
+
113
+ dist.barrier()
114
+ configs = OmegaConf.load(config_path)
115
+ dataset = CollectionDataset.create_dataset_function(configs['train_data'],
116
+ configs['train_data_weights'],
117
+ **configs['data']['params'])
118
+ print(len(dataset))
119
+
120
+ sampler = DistributedSampler(dataset, rank=rank, num_replicas=world_size,)
121
+ dataloader = DataLoader(
122
+ dataset,
123
+ sampler=sampler,  # hand the DistributedSampler to the loader so each rank only processes its own shard
+ shuffle=False,
124
+ batch_size=batch_size,
125
+ collate_fn=collate_fn_map,
126
+ num_workers=dataloader_num_workers,
127
+ pin_memory=False,
128
+ prefetch_factor=2 if dataloader_num_workers != 0 else None,
129
+ persistent_workers=False,
130
+ )
131
+
132
+ sampler.set_epoch(0)
133
+ if global_rank == 0:
134
+ pbar = tqdm(total=len(dataloader), desc="Processing")
135
+ dist.barrier()
136
+ for idx, batch in enumerate(dataloader):
137
+ dist.barrier()
138
+ free_memory()
139
+
140
+ output_json = {
141
+ "uttid": batch["uttid"][0],
142
+ "topk_avg_motion_scores_t": batch["topk_avg_motion_scores_t"].item(),
143
+ }
144
+
145
+ if batch["topk_avg_motion_scores_t"].item() >= 400:
146
+ base_path="/mnt/bn/yufan-dev-my/ysh/Datasets/sft_sftnews_videos/new_metadata/high_motion"
147
+ else:
148
+ base_path="/mnt/bn/yufan-dev-my/ysh/Datasets/sft_sftnews_videos/new_metadata/low_motion"
149
+
150
+ os.makedirs(base_path, exist_ok=True)
151
+
152
+ output_path = os.path.join(base_path, f"{batch['uttid'][0]}.json")
153
+
154
+ if os.path.exists(output_path):
155
+ print(f"skipping: {output_path}")
156
+ continue
157
+
158
+ with open(output_path, 'w',) as f:
159
+ json.dump(output_json, f, indent=2)
160
+ print(f"save json to {output_path}")
161
+
162
+ batch = None
163
+ output_json = None
164
+ del batch
165
+ del output_json
166
+ free_memory()
167
+
168
+ # valid_indices = []
169
+ # valid_uttids = []
170
+ # valid_num_frames = []
171
+ # valid_heights = []
172
+ # valid_widths = []
173
+ # valid_videos = []
174
+ # valid_prompts = []
175
+ # valid_first_frames_images = []
176
+ # valid_stride_videos = []
177
+
178
+ # for i, (uttid, num_frame, height, width, topk_avg_motion_scores_t) in enumerate(zip(batch["uttid"], batch["video_metadata"]["num_frames"], batch["video_metadata"]["height"], batch["video_metadata"]["width"], batch["topk_avg_motion_scores_t"])):
179
+ # if topk_avg_motion_scores_t != -1:
180
+ # output_latent_folder = os.path.join(base_folder, "latents/high_motion")
181
+ # else:
182
+ # output_latent_folder = os.path.join(base_folder, "latents/low_motion")
183
+
184
+ # os.makedirs(output_latent_folder, exist_ok=True)
185
+ # output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
186
+ # if not os.path.exists(output_path):
187
+ # valid_indices.append(i)
188
+ # valid_uttids.append(uttid)
189
+ # valid_num_frames.append(num_frame)
190
+ # valid_heights.append(height)
191
+ # valid_widths.append(width)
192
+ # valid_videos.append(batch["videos"][i])
193
+ # valid_prompts.append(batch["prompts"][i])
194
+ # valid_first_frames_images.append(batch["first_frames_images"][i])
195
+ # valid_stride_videos.append(batch["stride_videos"][i])
196
+ # else:
197
+ # print(f"skipping {uttid}")
198
+
199
+ # if not valid_indices:
200
+ # print("skipping entire batch!")
201
+ # continue
202
+
203
+ # batch = None
204
+ # del batch
205
+ # free_memory()
206
+
207
+ # batch = {
208
+ # "uttid": valid_uttids,
209
+ # "video_metadata": {
210
+ # "num_frames": valid_num_frames,
211
+ # "height": valid_heights,
212
+ # "width": valid_widths
213
+ # },
214
+ # "videos": torch.stack(valid_videos),
215
+ # "prompts": valid_prompts,
216
+ # "first_frames_images": torch.stack(valid_first_frames_images),
217
+ # "stride_videos": torch.stack(valid_stride_videos),
218
+ # }
219
+
220
+ # if len(batch["uttid"]) == 0:
221
+ # print("All samples in this batch are already processed, skipping!")
222
+ # continue
223
+
224
+ # with torch.no_grad():
225
+ # # Get Vae feature 1
226
+ # pixel_values = batch["videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
227
+ # vae_latents = vae.encode(pixel_values).latent_dist.sample()
228
+ # vae_latents = vae_latents * vae.config.scaling_factor
229
+
230
+ # # Get Vae feature 2
231
+ # pixel_values_2 = batch["stride_videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
232
+ # vae_latents_2 = vae.encode(pixel_values_2).latent_dist.sample()
233
+ # vae_latents_2 = vae_latents_2 * vae.config.scaling_factor
234
+
235
+ # # Encode prompts
236
+ # prompts = batch["prompts"]
237
+ # prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = encode_prompt(
238
+ # tokenizer=tokenizer_one,
239
+ # text_encoder=text_encoder_one,
240
+ # tokenizer_2=tokenizer_two,
241
+ # text_encoder_2=text_encoder_two,
242
+ # prompt=prompts,
243
+ # device=device,
244
+ # )
245
+
246
+ # # Prepare images
247
+ # image_tensor = batch["first_frames_images"]
248
+ # images = [transforms.ToPILImage()(x.to(torch.uint8)) for x in image_tensor]
249
+ # image = video_processor.preprocess(image=images, height=batch["videos"].shape[-2], width=batch["videos"].shape[-1])
250
+ # image_embeds = encode_image(
251
+ # feature_extractor,
252
+ # image_encoder,
253
+ # image,
254
+ # device=device,
255
+ # dtype=weight_dtype,
256
+ # )
257
+
258
+ # for uttid, num_frame, height, width, cur_vae_latent, cur_prompt_embed, cur_pooled_prompt_embed, cur_prompt_attention_mask, cur_image_embed, cur_vae_latents_2 in zip(batch["uttid"], batch["video_metadata"]["num_frames"], batch["video_metadata"]["height"], batch["video_metadata"]["width"], vae_latents, prompt_embeds, pooled_prompt_embeds, prompt_attention_mask, image_embeds, vae_latents_2):
259
+ # output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
260
+ # temp_to_save = {
261
+ # "vae_latent": cur_vae_latent.cpu().detach(),
262
+ # "prompt_embed": cur_prompt_embed.cpu().detach(),
263
+ # "pooled_prompt_embeds": cur_pooled_prompt_embed.cpu().detach(),
264
+ # "prompt_attention_mask": cur_prompt_attention_mask.cpu().detach(),
265
+ # "image_embeds": cur_image_embed.cpu().detach(),
266
+ # "vae_latents_2": cur_vae_latents_2.cpu().detach(),
267
+ # }
268
+ # torch.save(
269
+ # temp_to_save,
270
+ # output_path
271
+ # )
272
+ # print(f"save latent to: {output_path}")
273
+
274
+ if global_rank == 0:
275
+ pbar.update(1)
276
+ pbar.set_postfix({"batch": idx})
277
+
278
+
279
+ pixel_values = None
280
+ pixel_values_2 = None
281
+ prompts = None
282
+ image_tensor = None
283
+ images = None
284
+ vae_latents = None
285
+ vae_latents_2 = None
286
+ image_embeds = None
287
+ prompt_embeds = None
288
+ pooled_prompt_embeds = None
289
+ prompt_attention_mask = None
290
+ batch = None
291
+ valid_indices = None
292
+ valid_uttids = None
293
+ valid_num_frames = None
294
+ valid_heights = None
295
+ valid_widths = None
296
+ valid_videos = None
297
+ valid_prompts = None
298
+ valid_first_frames_images = None
299
+ valid_stride_videos = None
300
+ temp_to_save = None
301
+
302
+ del pixel_values
303
+ del pixel_values_2
304
+ del prompts
305
+ del image_tensor
306
+ del images
307
+ del vae_latents
308
+ del vae_latents_2
309
+ del image_embeds
310
+ del batch
311
+ del valid_indices
312
+ del valid_uttids
313
+ del valid_num_frames
314
+ del valid_heights
315
+ del valid_widths
316
+ del valid_videos
317
+ del valid_prompts
318
+ del valid_first_frames_images
319
+ del valid_stride_videos
320
+ del temp_to_save
321
+
322
+ free_memory()
323
+
324
+ if __name__ == "__main__":
325
+ parser = argparse.ArgumentParser(description="Script for running model training and data processing.")
326
+ parser.add_argument("--batch_size", type=int, default=1, help="Batch size for processing")
327
+ parser.add_argument("--dataloader_num_workers", type=int, default=0, help="Number of workers for data loading")
328
+ parser.add_argument("--config_path", type=str, default="part1.yaml", help="Path to the config file")
329
+ parser.add_argument("--output_latent_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Datasets/sft_sftnews_videos", help="Folder to store output latents")
330
+ parser.add_argument("--pretrained_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo", help="Pretrained model path")
331
+ parser.add_argument("--siglip_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl", help="Siglip model path")
332
+ args = parser.parse_args()
333
+
334
+
335
+ setup_distributed_env()
336
+
337
+ global_rank = dist.get_rank()
338
+ local_rank = int(os.environ["LOCAL_RANK"])
339
+ device = torch.cuda.current_device()
340
+ world_size = dist.get_world_size()
341
+
342
+ main(
343
+ world_size=world_size,
344
+ rank=device,
345
+ global_rank=global_rank,
346
+ batch_size=args.batch_size,
347
+ dataloader_num_workers=args.dataloader_num_workers,
348
+ config_path=args.config_path,
349
+ output_latent_folder=args.output_latent_folder,
350
+ pretrained_model_name_or_path=args.pretrained_model_name_or_path,
351
+ siglip_model_name_or_path=args.siglip_model_name_or_path
352
+ )
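The active code path above only writes one small JSON per sample (uttid plus motion score), bucketed by the hard-coded threshold of 400. A hedged sketch of gathering those files back into a single list afterwards (directory layout assumed to match the paths used in the script):

    import os, json, glob

    meta_root = "/mnt/bn/yufan-dev-my/ysh/Datasets/sft_sftnews_videos/new_metadata"
    records = []
    for split in ("high_motion", "low_motion"):
        for path in glob.glob(os.path.join(meta_root, split, "*.json")):
            with open(path) as f:
                rec = json.load(f)
            rec["motion_split"] = split
            records.append(rec)

    print(len(records), "samples collected")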
dataset_code/sft_sftnews/offload/offoload_features_hv_save_videos.py ADDED
@@ -0,0 +1,255 @@
1
+ import argparse
2
+ import os
3
+ from tqdm import tqdm
4
+ from diffusers import AutoencoderKLHunyuanVideo
5
+ from transformers import (
6
+ CLIPTextModel,
7
+ CLIPTokenizer,
8
+ LlamaModel,
9
+ LlamaTokenizerFast,
10
+ SiglipImageProcessor,
11
+ SiglipVisionModel,
12
+ )
13
+ from diffusers.video_processor import VideoProcessor
14
+ from diffusers.utils import export_to_video, load_image
15
+
16
+ from dataset_tool import CollectionDataset, collate_fn_map
17
+ from omegaconf import OmegaConf
18
+ from torch.utils.data import DataLoader
19
+
20
+ import torch
21
+ import torch.distributed as dist
22
+ import torch.nn as nn
23
+ from torch.nn.parallel import DistributedDataParallel as DDP
24
+ from torch.utils.data.distributed import DistributedSampler
25
+ from torch.utils.data import Subset
26
+ import torchvision.transforms as transforms
27
+ import numpy as np
28
+ import matplotlib.pyplot as plt
29
+ from matplotlib.animation import FuncAnimation
30
+ from IPython.display import HTML, display
31
+ from IPython.display import clear_output # used to clear previous outputs
32
+
33
+ from accelerate import Accelerator, DistributedType
34
+ from accelerate.logging import get_logger
35
+ from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
36
+ from diffusers.training_utils import free_memory
37
+
38
+ from utils_framepack import encode_image, encode_prompt
39
+
40
+ def setup_distributed_env():
41
+ dist.init_process_group(backend="nccl")
42
+ torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
43
+
44
+ def cleanup_distributed_env():
45
+ dist.destroy_process_group()
46
+
47
+ def main(rank, world_size, global_rank, batch_size, dataloader_num_workers, config_path, output_latent_folder, pretrained_model_name_or_path, siglip_model_name_or_path):
48
+ weight_dtype = torch.bfloat16
49
+ # batch_size = 2
50
+ # dataloader_num_workers = 8
51
+ # config_path = "512_collection_config_vae1011_aligned_full_dump.yaml"
52
+ # output_latent_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents"
53
+ # pretrained_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo"
54
+ # siglip_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl"
55
+ os.makedirs(output_latent_folder, exist_ok=True)
56
+
57
+ device = rank
58
+
59
+ # # Load the tokenizers
60
+ # tokenizer_one = LlamaTokenizerFast.from_pretrained(
61
+ # pretrained_model_name_or_path,
62
+ # subfolder="tokenizer",
63
+ # )
64
+ # tokenizer_two = CLIPTokenizer.from_pretrained(
65
+ # pretrained_model_name_or_path,
66
+ # subfolder="tokenizer_2",
67
+ # )
68
+ # feature_extractor = SiglipImageProcessor.from_pretrained(
69
+ # siglip_model_name_or_path,
70
+ # subfolder="feature_extractor",
71
+
72
+ # )
73
+
74
+ # vae = AutoencoderKLHunyuanVideo.from_pretrained(
75
+ # pretrained_model_name_or_path,
76
+ # subfolder="vae",
77
+ # torch_dtype=torch.float32,
78
+ # )
79
+ # vae_scale_factor_spatial = vae.spatial_compression_ratio
80
+ # video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)
81
+
82
+ # text_encoder_one = LlamaModel.from_pretrained(
83
+ # pretrained_model_name_or_path,
84
+ # subfolder="text_encoder",
85
+ # torch_dtype=weight_dtype,
86
+ # )
87
+ # text_encoder_two = CLIPTextModel.from_pretrained(
88
+ # pretrained_model_name_or_path,
89
+ # subfolder="text_encoder_2",
90
+ # torch_dtype=weight_dtype,
91
+ # )
92
+ # image_encoder = SiglipVisionModel.from_pretrained(
93
+ # siglip_model_name_or_path,
94
+ # subfolder="image_encoder",
95
+ # torch_dtype=weight_dtype,
96
+ # )
97
+
98
+ # vae.requires_grad_(False)
99
+ # text_encoder_one.requires_grad_(False)
100
+ # text_encoder_two.requires_grad_(False)
101
+ # image_encoder.requires_grad_(False)
102
+ # vae.eval()
103
+ # text_encoder_one.eval()
104
+ # text_encoder_two.eval()
105
+ # image_encoder.eval()
106
+
107
+ # vae = vae.to(device)
108
+ # text_encoder_one = text_encoder_one.to(device)
109
+ # text_encoder_two = text_encoder_two.to(device)
110
+ # image_encoder = image_encoder.to(device)
111
+
112
+ dist.barrier()
113
+ configs = OmegaConf.load(config_path)
114
+ dataset = CollectionDataset.create_dataset_function(configs['train_data'],
115
+ configs['train_data_weights'],
116
+ **configs['data']['params'])
117
+ print(len(dataset))
118
+
119
+ sampler = DistributedSampler(dataset, rank=rank, num_replicas=world_size,)
120
+ dataloader = DataLoader(
121
+ dataset,
122
+ sampler=sampler,  # hand the DistributedSampler to the loader so each rank only processes its own shard
+ shuffle=False,
123
+ batch_size=batch_size,
124
+ collate_fn=collate_fn_map,
125
+ num_workers=dataloader_num_workers,
126
+ pin_memory=False,
127
+ prefetch_factor=2 if dataloader_num_workers != 0 else None,
128
+ persistent_workers=False,
129
+ )
130
+
131
+ sampler.set_epoch(0)
132
+ if global_rank == 0:
133
+ pbar = tqdm(total=len(dataloader), desc="Processing")
134
+ dist.barrier()
135
+ for idx, batch in enumerate(dataloader):
136
+ valid_indices = []
137
+ valid_uttids = []
138
+ valid_num_frames = []
139
+ valid_heights = []
140
+ valid_widths = []
141
+ valid_videos = []
142
+ valid_prompts = []
143
+ valid_first_frames_images = []
144
+
145
+ # for i, (uttid, num_frame, height, width) in enumerate(zip(batch["uttid"], batch["video_metadata"]["num_frames"], batch["video_metadata"]["height"], batch["video_metadata"]["width"])):
146
+ # output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
147
+ # if not os.path.exists(output_path):
148
+ # valid_indices.append(i)
149
+ # valid_uttids.append(uttid)
150
+ # valid_num_frames.append(num_frame)
151
+ # valid_heights.append(height)
152
+ # valid_widths.append(width)
153
+ # valid_videos.append(batch["videos"][i])
154
+ # valid_prompts.append(batch["prompts"][i])
155
+ # valid_first_frames_images.append(batch["first_frames_images"][i])
156
+ # else:
157
+ # print(f"skipping {uttid}")
158
+
159
+ # if not valid_indices:
160
+ # print("skipping entire batch!")
161
+ # continue
162
+
163
+ # batch = {
164
+ # "uttid": valid_uttids,
165
+ # "video_metadata": {
166
+ # "num_frames": valid_num_frames,
167
+ # "height": valid_heights,
168
+ # "width": valid_widths
169
+ # },
170
+ # "videos": torch.stack(valid_videos),
171
+ # "prompts": valid_prompts,
172
+ # "first_frames_images": torch.stack(valid_first_frames_images)
173
+ # }
174
+
175
+ # if len(batch["uttid"]) == 0:
176
+ # print("All samples in this batch are already processed, skipping!")
177
+ # continue
178
+
179
+ # with torch.no_grad():
180
+ # # Get Vae feature
181
+ # pixel_values = batch["videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
182
+ # vae_latents = vae.encode(pixel_values).latent_dist.sample()
183
+ # vae_latents = vae_latents * vae.config.scaling_factor
184
+
185
+ # # Encode prompts
186
+ # prompts = batch["prompts"]
187
+ # prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = encode_prompt(
188
+ # tokenizer=tokenizer_one,
189
+ # text_encoder=text_encoder_one,
190
+ # tokenizer_2=tokenizer_two,
191
+ # text_encoder_2=text_encoder_two,
192
+ # prompt=prompts,
193
+ # device=device,
194
+ # )
195
+
196
+ # # Prepare images
197
+ # image_tensor = batch["first_frames_images"]
198
+ # images = [transforms.ToPILImage()(x.to(torch.uint8)) for x in image_tensor]
199
+ # image = video_processor.preprocess(image=images, height=batch["videos"].shape[-2], width=batch["videos"].shape[-1])
200
+ # image_embeds = encode_image(
201
+ # feature_extractor,
202
+ # image_encoder,
203
+ # image,
204
+ # device=device,
205
+ # dtype=weight_dtype,
206
+ # )
207
+
208
+ # for uttid, num_frame, height, width, cur_vae_latent, cur_prompt_embed, cur_pooled_prompt_embed, cur_prompt_attention_mask, cur_image_embed in zip(batch["uttid"], batch["video_metadata"]["num_frames"], batch["video_metadata"]["height"], batch["video_metadata"]["width"], vae_latents, prompt_embeds, pooled_prompt_embeds, prompt_attention_mask, image_embeds):
209
+ # output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
210
+ # torch.save(
211
+ # {
212
+ # "vae_latent": cur_vae_latent.cpu().detach(),
213
+ # "prompt_embed": cur_prompt_embed.cpu().detach(),
214
+ # "pooled_prompt_embeds": cur_pooled_prompt_embed.cpu().detach(),
215
+ # "prompt_attention_mask": cur_prompt_attention_mask.cpu().detach(),
216
+ # "image_embeds": cur_image_embed.cpu().detach(),
217
+ # },
218
+ # output_path
219
+ # )
220
+ # print(f"save to: {output_path}")
221
+
222
+ if global_rank == 0:
223
+ pbar.update(1)
224
+ pbar.set_postfix({"batch": idx})
225
+ free_memory()
226
+
227
+ if __name__ == "__main__":
228
+ parser = argparse.ArgumentParser(description="Script for running model training and data processing.")
229
+ parser.add_argument("--batch_size", type=int, default=1, help="Batch size for processing")
230
+ parser.add_argument("--dataloader_num_workers", type=int, default=12, help="Number of workers for data loading")
231
+ parser.add_argument("--config_path", type=str, default="part1.yaml", help="Path to the config file")
232
+ parser.add_argument("--output_latent_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents", help="Folder to store output latents")
233
+ parser.add_argument("--pretrained_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo", help="Pretrained model path")
234
+ parser.add_argument("--siglip_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl", help="Siglip model path")
235
+ args = parser.parse_args()
236
+
237
+
238
+ setup_distributed_env()
239
+
240
+ global_rank = dist.get_rank()
241
+ local_rank = int(os.environ["LOCAL_RANK"])
242
+ device = torch.cuda.current_device()
243
+ world_size = dist.get_world_size()
244
+
245
+ main(
246
+ world_size=world_size,
247
+ rank=device,
248
+ global_rank=global_rank,
249
+ batch_size=args.batch_size,
250
+ dataloader_num_workers=args.dataloader_num_workers,
251
+ config_path=args.config_path,
252
+ output_latent_folder=args.output_latent_folder,
253
+ pretrained_model_name_or_path=args.pretrained_model_name_or_path,
254
+ siglip_model_name_or_path=args.siglip_model_name_or_path
255
+ )
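Both offload scripts build a DistributedSampler and rely on torchrun-style LOCAL_RANK; a small single-process illustration (toy dataset, made-up rank values, not from the repo) of how that sampler splits indices across ranks:

    import torch
    from torch.utils.data import TensorDataset
    from torch.utils.data.distributed import DistributedSampler

    dataset = TensorDataset(torch.arange(10))
    for rank in range(3):
        # num_replicas/rank are given explicitly, so no process group is needed here
        sampler = DistributedSampler(dataset, num_replicas=3, rank=rank, shuffle=False)
        print(rank, list(iter(sampler)))  # every 3rd index; the tail wraps around so shards stay equal length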
dataset_code/sft_sftnews/offload/offoload_features_wan.py ADDED
@@ -0,0 +1,417 @@
1
+ import math
2
+ import html
3
+ import ftfy
4
+ import regex as re
5
+ import random
6
+ from typing import Any, Dict, List, Optional, Tuple, Union
7
+ import argparse
8
+ import os
9
+ from tqdm import tqdm
10
+ from diffusers import AutoencoderKLWan
11
+ from transformers import (
12
+ AutoTokenizer,
13
+ CLIPImageProcessor,
14
+ CLIPVisionModel,
15
+ UMT5EncoderModel,
16
+ SiglipImageProcessor,
17
+ SiglipVisionModel
18
+ )
19
+ from diffusers.video_processor import VideoProcessor
20
+ from diffusers.utils import export_to_video, load_image
21
+
22
+ from dataset_tool import CollectionDataset, collate_fn_map
23
+ from omegaconf import OmegaConf
24
+ from torch.utils.data import DataLoader
25
+
26
+ import torch
27
+ import torch.distributed as dist
28
+ import torch.nn as nn
29
+ from torch.nn.parallel import DistributedDataParallel as DDP
30
+ from torch.utils.data.distributed import DistributedSampler
31
+ from torch.utils.data import Subset
32
+ import torchvision.transforms as transforms
33
+ import numpy as np
34
+ import matplotlib.pyplot as plt
35
+ from matplotlib.animation import FuncAnimation
36
+ from IPython.display import HTML, display
37
+ from IPython.display import clear_output # used to clear previous outputs
38
+
39
+ from accelerate import Accelerator, DistributedType
40
+ from accelerate.logging import get_logger
41
+ from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
42
+ from diffusers.training_utils import free_memory
43
+
44
+ from utils_framepack import encode_image
45
+
46
+ def encode_image_1(
47
+ image_processor,
48
+ image_encoder,
49
+ image,
50
+ device: Optional[torch.device] = "cuda",
51
+ ):
52
+ device = device
53
+ image = image_processor(images=image, return_tensors="pt").to(device)
54
+ image_embeds = image_encoder(**image, output_hidden_states=True)
55
+ return image_embeds.hidden_states[-2]
56
+
57
+ def basic_clean(text):
58
+ text = ftfy.fix_text(text)
59
+ text = html.unescape(html.unescape(text))
60
+ return text.strip()
61
+
62
+
63
+ def whitespace_clean(text):
64
+ text = re.sub(r"\s+", " ", text)
65
+ text = text.strip()
66
+ return text
67
+
68
+
69
+ def prompt_clean(text):
70
+ text = whitespace_clean(basic_clean(text))
71
+ return text
72
+
73
+
74
+ def _get_t5_prompt_embeds(
75
+ tokenizer,
76
+ text_encoder,
77
+ prompt: Union[str, List[str]] = None,
78
+ num_videos_per_prompt: int = 1,
79
+ max_sequence_length: int = 512,
80
+ caption_dropout_p: float = 0.0,
81
+ device: Optional[torch.device] = "cuda",
82
+ dtype: Optional[torch.dtype] = torch.bfloat16,
83
+ ):
84
+ device = device
85
+ dtype = dtype
86
+
87
+ prompt = [prompt] if isinstance(prompt, str) else prompt
88
+ prompt = [prompt_clean(u) for u in prompt]
89
+ batch_size = len(prompt)
90
+
91
+ text_inputs = tokenizer(
92
+ prompt,
93
+ padding="max_length",
94
+ max_length=max_sequence_length,
95
+ truncation=True,
96
+ add_special_tokens=True,
97
+ return_attention_mask=True,
98
+ return_tensors="pt",
99
+ )
100
+ text_input_ids, mask = text_inputs.input_ids, text_inputs.attention_mask
101
+
102
+ prompt_embeds = text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state
103
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
104
+
105
+ if random.random() < caption_dropout_p:
106
+ prompt_embeds.fill_(0)
107
+ mask.fill_(False)
108
+ seq_lens = mask.gt(0).sum(dim=1).long()
109
+
110
+ prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)]
111
+ prompt_embeds = torch.stack([
112
+ torch.cat([u,
113
+ u.new_zeros(max_sequence_length - u.size(0), u.size(1))])
114
+ for u in prompt_embeds
115
+ ],
116
+ dim=0)
117
+
118
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
119
+ _, seq_len, _ = prompt_embeds.shape
120
+ prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
121
+ prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt,
122
+ seq_len, -1)
123
+
124
+ return prompt_embeds
125
+
126
+
127
+ # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline.encode_prompt
128
+ def encode_prompt(
129
+ tokenizer,
130
+ text_encoder,
131
+ prompt: Union[str, List[str]],
132
+ num_videos_per_prompt: int = 1,
133
+ prompt_embeds: Optional[torch.Tensor] = None,
134
+ max_sequence_length: int = 512,
135
+ caption_dropout_p: float = 0.0,
136
+ device: Optional[torch.device] = "cuda",
137
+ dtype: Optional[torch.dtype] = torch.bfloat16,
138
+ ):
139
+ device = device
140
+
141
+ prompt = [prompt] if isinstance(prompt, str) else prompt
142
+ if prompt is not None:
143
+ batch_size = len(prompt)
144
+ else:
145
+ batch_size = prompt_embeds.shape[0]
146
+
147
+ if prompt_embeds is None:
148
+ prompt_embeds = _get_t5_prompt_embeds(
149
+ tokenizer,
150
+ text_encoder,
151
+ prompt=prompt,
152
+ num_videos_per_prompt=num_videos_per_prompt,
153
+ max_sequence_length=max_sequence_length,
154
+ caption_dropout_p=caption_dropout_p,
155
+ device=device,
156
+ dtype=dtype,
157
+ )
158
+
159
+ return prompt_embeds
160
+
161
+ def setup_distributed_env():
162
+ dist.init_process_group(backend="nccl")
163
+ torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
164
+
165
+ def cleanup_distributed_env():
166
+ dist.destroy_process_group()
167
+
168
+ def main(rank, world_size, global_rank, batch_size, dataloader_num_workers, config_path, output_latent_folder, pretrained_model_name_or_path, siglip_model_name_or_path):
169
+ weight_dtype = torch.bfloat16
170
+ # batch_size = 2
171
+ # dataloader_num_workers = 8
172
+ # config_path = "512_collection_config_vae1011_aligned_full_dump.yaml"
173
+ # output_latent_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents"
174
+ # pretrained_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo"
175
+ # siglip_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl"
176
+ os.makedirs(output_latent_folder, exist_ok=True)
177
+
178
+ device = rank
179
+
180
+ # load tokenizers
181
+ tokenizer = AutoTokenizer.from_pretrained(
182
+ args.pretrained_model_name_or_path,
183
+ subfolder="tokenizer",
184
+ )
185
+ clip_image_processor = CLIPImageProcessor.from_pretrained(
186
+ args.pretrained_model_name_or_path,
187
+ subfolder="image_processor",
188
+ )
189
+ feature_extractor = SiglipImageProcessor.from_pretrained(
190
+ siglip_model_name_or_path,
191
+ subfolder="feature_extractor",
192
+ )
193
+
194
+ # load encoders
195
+ text_encoder = UMT5EncoderModel.from_pretrained(
196
+ args.pretrained_model_name_or_path,
197
+ subfolder="text_encoder",
198
+ torch_dtype=torch.float16,
199
+ )
200
+ clip_image_encoder = CLIPVisionModel.from_pretrained(
201
+ args.pretrained_model_name_or_path,
202
+ subfolder="image_encoder",
203
+ torch_dtype=torch.float16,
204
+ )
205
+ image_encoder = SiglipVisionModel.from_pretrained(
206
+ siglip_model_name_or_path,
207
+ subfolder="image_encoder",
208
+ torch_dtype=weight_dtype,
209
+ )
210
+
211
+
212
+ vae = AutoencoderKLWan.from_pretrained(
213
+ pretrained_model_name_or_path,
214
+ subfolder="vae",
215
+ torch_dtype=torch.float32,
216
+ )
217
+ vae_scale_factor_spatial = vae.spatial_compression_ratio
218
+ video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)
219
+
220
+ vae.requires_grad_(False)
221
+ text_encoder.requires_grad_(False)
222
+ clip_image_encoder.requires_grad_(False)
223
+ image_encoder.requires_grad_(False)
224
+ vae.eval()
225
+ text_encoder.eval()
226
+ clip_image_encoder.eval()
227
+ image_encoder.eval()
228
+
229
+ vae = vae.to(device)
230
+ text_encoder = text_encoder.to(device)
231
+ image_encoder = image_encoder.to(device)
232
+ clip_image_encoder = clip_image_encoder.to(device)
233
+
234
+ dist.barrier()
235
+ configs = OmegaConf.load(config_path)
236
+ dataset = CollectionDataset.create_dataset_function(configs['train_data'],
237
+ configs['train_data_weights'],
238
+ **configs['data']['params'])
239
+ print(len(dataset))
240
+
241
+ if global_rank == 0:
242
+ pbar = tqdm(total=len(dataset) // world_size, desc="Processing")
243
+ dist.barrier()
244
+
245
+ # dataloader = DataLoader(
246
+ # dataset,
247
+ # shuffle=False,
248
+ # batch_size=batch_size,
249
+ # collate_fn=collate_fn_map,
250
+ # num_workers=dataloader_num_workers,
251
+ # pin_memory=True,
252
+ # prefetch_factor=2 if dataloader_num_workers != 0 else None,
253
+ # persistent_workers=True if dataloader_num_workers != 0 else False,
254
+ # )
255
+
256
+ # def distributed_iterate_dataloader(dataloader, world_size, rank):
257
+ # sample_count = 0
258
+ # for idx, batch in enumerate(dataloader):
259
+ # if sample_count % world_size == rank:
260
+ # # No need to call collate_fn_map again as it's already done by DataLoader
261
+ # yield batch # Yield the batch directly
262
+ # sample_count += 1
263
+
264
+ # for idx, batch in enumerate(distributed_iterate_dataloader(dataloader, dist.get_world_size(), dist.get_rank())):
265
+
266
+
267
+ def distributed_iterate_dataset(dataset, world_size, rank):
268
+ iterator = iter(dataset)
269
+ sample_count = 0
270
+
271
+ while True:
272
+ try:
273
+ batch = next(iterator)
274
+
275
+ if sample_count % world_size == rank:
276
+ processed_batch = collate_fn_map(batch)
277
+ yield processed_batch
278
+
279
+ sample_count += 1
280
+
281
+ except StopIteration:
282
+ break
283
+
284
+ for idx, batch in enumerate(distributed_iterate_dataset(dataset, dist.get_world_size(), dist.get_rank())):
285
+ valid_indices = []
286
+ valid_uttids = []
287
+ valid_num_frames = []
288
+ valid_heights = []
289
+ valid_widths = []
290
+ valid_videos = []
291
+ valid_prompts = []
292
+ valid_first_frames_images = []
293
+
294
+ for i, (uttid, num_frame, height, width) in enumerate(zip(batch["uttid"], batch["video_metadata"]["num_frames"], batch["video_metadata"]["height"], batch["video_metadata"]["width"])):
295
+ output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
296
+ if not os.path.exists(output_path):
297
+ valid_indices.append(i)
298
+ valid_uttids.append(uttid)
299
+ valid_num_frames.append(num_frame)
300
+ valid_heights.append(height)
301
+ valid_widths.append(width)
302
+ valid_videos.append(batch["videos"][i])
303
+ valid_prompts.append(batch["prompts"][i])
304
+ valid_first_frames_images.append(batch["first_frames_images"][i])
305
+ else:
306
+ print(f"skipping {uttid}")
307
+
308
+ if not valid_indices:
309
+ print("skipping entire batch!")
310
+ continue
311
+
312
+ batch = {
313
+ "uttid": valid_uttids,
314
+ "video_metadata": {
315
+ "num_frames": valid_num_frames,
316
+ "height": valid_heights,
317
+ "width": valid_widths
318
+ },
319
+ "videos": torch.stack(valid_videos),
320
+ "prompts": valid_prompts,
321
+ "first_frames_images": torch.stack(valid_first_frames_images)
322
+ }
323
+
324
+ if len(batch["uttid"]) == 0:
325
+ print("All samples in this batch are already processed, skipping!")
326
+ continue
327
+
328
+ with torch.no_grad():
329
+ # Get Vae feature
330
+ latents_mean = torch.tensor(
331
+ vae.config.latents_mean).view(
332
+ 1, vae.config.z_dim, 1, 1,
333
+ 1).to(vae.device, vae.dtype)
334
+ latents_std = 1.0 / torch.tensor(
335
+ vae.config.latents_std).view(
336
+ 1, vae.config.z_dim, 1, 1, 1).to(
337
+ vae.device, vae.dtype)
338
+ pixel_values = batch["videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
339
+ vae_latents = vae.encode(pixel_values).latent_dist.sample()
340
+ vae_latents = (vae_latents - latents_mean) * latents_std
341
+
342
+ # Encode prompts
343
+ prompts = batch["prompts"]
344
+ prompt_embeds = encode_prompt(
345
+ tokenizer=tokenizer,
346
+ text_encoder=text_encoder,
347
+ prompt=prompts,
348
+ device=device,
349
+ )
350
+
351
+ # Prepare images
352
+ image_tensor = batch["first_frames_images"]
353
+ images = [transforms.ToPILImage()(x.to(torch.uint8)) for x in image_tensor]
354
+
355
+ clip_image_embeds = encode_image_1(
356
+ image_processor=clip_image_processor,
357
+ image_encoder=clip_image_encoder,
358
+ image=images,
359
+ device=device
360
+ )
361
+
362
+ image = video_processor.preprocess(image=images, height=batch["videos"].shape[-2], width=batch["videos"].shape[-1])
363
+ image_embeds = encode_image(
364
+ feature_extractor,
365
+ image_encoder,
366
+ image,
367
+ device=device,
368
+ dtype=weight_dtype,
369
+ )
370
+
371
+ for uttid, num_frame, height, width, cur_vae_latent, cur_prompt_embed, cur_clip_image_embed, cur_image_embed in zip(batch["uttid"], batch["video_metadata"]["num_frames"], batch["video_metadata"]["height"], batch["video_metadata"]["width"], vae_latents, prompt_embeds, clip_image_embeds, image_embeds):
372
+ output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
373
+ torch.save(
374
+ {
375
+ "vae_latent": cur_vae_latent.cpu().detach(),
376
+ "prompt_embed": cur_prompt_embed.cpu().detach(),
377
+ "clip_image_embeds": cur_clip_image_embed.cpu().detach(),
378
+ "image_embeds": cur_image_embed.cpu().detach(),
379
+ },
380
+ output_path
381
+ )
382
+ print(f"save to: {output_path}")
383
+
384
+ if global_rank == 0:
385
+ pbar.update(1)
386
+ pbar.set_postfix({"batch": idx})
387
+ free_memory()
388
+
389
+ if __name__ == "__main__":
390
+ parser = argparse.ArgumentParser(description="Script for running model training and data processing.")
391
+ parser.add_argument("--batch_size", type=int, default=1, help="Batch size for processing")
392
+ parser.add_argument("--dataloader_num_workers", type=int, default=8, help="Number of workers for data loading")
393
+ parser.add_argument("--config_path", type=str, default="part1.yaml", help="Path to the config file")
394
+ parser.add_argument("--output_latent_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents_wan", help="Folder to store output latents")
395
+ parser.add_argument("--pretrained_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Wan-AI/Wan2.1-I2V-14B-720P-Diffusers/", help="Pretrained model path")
396
+ parser.add_argument("--siglip_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl", help="Siglip model path")
397
+ args = parser.parse_args()
398
+
399
+
400
+ setup_distributed_env()
401
+
402
+ global_rank = dist.get_rank()
403
+ local_rank = int(os.environ["LOCAL_RANK"])
404
+ device = torch.cuda.current_device()
405
+ world_size = dist.get_world_size()
406
+
407
+ main(
408
+ world_size=world_size,
409
+ rank=device,
410
+ global_rank=global_rank,
411
+ batch_size=args.batch_size,
412
+ dataloader_num_workers=args.dataloader_num_workers,
413
+ config_path=args.config_path,
414
+ output_latent_folder=args.output_latent_folder,
415
+ pretrained_model_name_or_path=args.pretrained_model_name_or_path,
416
+ siglip_model_name_or_path=args.siglip_model_name_or_path
417
+ )
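A small self-contained illustration (values are made up) of the trim-and-re-pad step inside _get_t5_prompt_embeds above: positions beyond the attention mask are dropped, then zero-padded back to max_sequence_length, so the padded tail ends up exactly zero:

    import torch

    max_sequence_length = 8
    prompt_embeds = torch.randn(2, max_sequence_length, 4)   # [batch, seq, dim]
    mask = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 0, 0, 0]])

    seq_lens = mask.gt(0).sum(dim=1).long()                   # tensor([3, 5])
    trimmed = [u[:v] for u, v in zip(prompt_embeds, seq_lens)]
    padded = torch.stack(
        [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in trimmed],
        dim=0,
    )
    assert padded.shape == prompt_embeds.shape
    assert padded[0, 3:].abs().sum() == 0                     # padded tail is zeroed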
dataset_code/sft_sftnews/offload/part0.yaml ADDED
@@ -0,0 +1,101 @@
1
+ # vae1011-98022219
2
+ # train_data: ['albatross_2_dump', 'budgie_1_dump', 'budgie_2_dump', 'canary_2_dump', 'condor_2_dump', 'falcon_dump', 'filmsupply_highres_dump', 'guillemot_2_dump', 'guillemot_4_dump', 'gull_2_dump', 'harrier_2_dump', 'hdvg_dump', 'hornbill_2_dump', 'hummingbird_2_dump', 'kingfisher_dump', 'lovebird_dump', 'macaw_2_dump', 'movie_2_dump', 'panda70m_dump', 'partridge_2_dump', 'partridge_4_dump', 'petrel_2_dump', 'pigeon_2_dump', 'puffin_2_dump', 'swallow_2_dump', 'vimeo_2_dump', 'vimeo_4_dump', 'warbler_2_dump', 'wren_2_dump']
3
+ # train_data_weights: [68859, 1192, 15856, 203755, 1384503, 78671, 26307, 343789, 514339, 152912, 1762929, 6288112, 594676, 34082, 16263, 49979, 62714, 447823, 19018149, 7013003, 16887569, 3790563, 584691, 477319, 10022018, 9587751, 8486291, 7210, 10100894]
4
+
5
+
6
+ #high quality data-30372699
7
+ # train_data: ['Istock_sports_videos','abaka_short','malayan','movie_v0_1','nexdata','rf123_1080p']
8
+ # train_data_weights: [3431837,742656,5699324,14771089,1502775,4225018]
9
+
10
+ #high quality data : vae1011 = 1:1
11
+ # train_data: ['Istock_sports_videos','abaka_short','malayan','movie_v0_1','nexdata','rf123_1080p', 'albatross_2_dump', 'budgie_1_dump', 'budgie_2_dump', 'canary_2_dump', 'condor_2_dump', 'falcon_dump', 'filmsupply_highres_dump', 'guillemot_2_dump', 'guillemot_4_dump', 'gull_2_dump', 'harrier_2_dump', 'hdvg_dump', 'hornbill_2_dump', 'hummingbird_2_dump', 'kingfisher_dump', 'lovebird_dump', 'macaw_2_dump', 'movie_2_dump', 'panda70m_dump', 'partridge_2_dump', 'partridge_4_dump', 'petrel_2_dump', 'pigeon_2_dump', 'puffin_2_dump', 'swallow_2_dump', 'vimeo_2_dump', 'vimeo_4_dump', 'warbler_2_dump', 'wren_2_dump']
12
+ # train_data_weights: [3431837,742656,5699324,14771089,1502775,4225018, 22953, 397, 5285, 67918, 461501, 26223, 8769, 114596, 171446, 50970, 587643, 2096037, 198225, 11360, 5421, 16659, 20904, 149274, 6339383, 2337667, 5629189, 1263521, 194897, 159106, 3340672, 3195917, 2828763, 2403, 3366964]
13
+
14
+ # train_data: ['flow_test']
15
+ # train_data_weights: [1]
16
+ # train_data: ['sft','sft_hq']
17
+ # train_data_weights: [1,10]
18
+ # train_data: ['eval']
19
+ # train_data_weights: [1]
20
+ train_data: ['sft_new','sft_new_1']
21
+ train_data_weights: [536463, 135600]
22
+ # train_data: ['sft']
23
+ # train_data_weights: [1]
24
+
25
+ data:
26
+ params:
27
+ batch_size: 1 # the real batch size
28
+ image_batch_size: 16 # real image batch size
29
+ enable_bucket: True
30
+ dataset_collections: # list all available datasets
31
+ sft_new:
32
+ target: dataset_tool.SeedV1Dataset
33
+ path: "hdfs://harunasg/home/byte_icvg_aigc_cp/user/video/temp/19900101/v2_en_new/2025-02-13-05-39-30/data"
34
+ resolution: 512
35
+ aspect_ratios:
36
+ "320p-2.4": [768, 320] # 245760
37
+ "384p-2.0": [768, 384] # 294912
38
+ "512p-1.6": [640, 384] # 245760
39
+ "512p-1.5": [768, 512] # 393216
40
+ "448p-1.29": [576, 448] # 258048
41
+ "512p-1.0": [512, 512] # 262144
42
+ "448p-0.78": [448, 576] # 258048
43
+ "512p-0.67": [512, 768] # 393216
44
+ "512p-0.6": [384, 640] # 245760
45
+ "384p-0.5": [384, 768] # 294912
46
+ "320p-0.42": [320, 768] # 245760
47
+ ratio_strategy: closest
48
+ params:
49
+ sample_size: -1 # set to -1 to keep the original resolution
50
+ fps: 24
51
+ num_parallel_files: 1
52
+ video_frame_sampler:
53
+ type: 'adaptive_advanced'
54
+ strategies:
55
+ - stride: 1
56
+ stride_prob: 1.0
57
+ frame_lengths: [ 121 ]
58
+ frame_lengths_prob: 'harmonic'
59
+ clip: 'simple'
60
+ text_sampler:
61
+ type: 'frequency'
62
+ frequency:
63
+ recaption_7B_: 1.0
64
+ origin_title: 0.0
65
+ part_idx: 0
66
+
67
+ sft_new_1:
68
+ target: dataset_tool.SeedV1Dataset
69
+ path: "hdfs://harunasg/home/byte_icvg_aigc_cp/user/video/temp/19900101/v2_en/2025-02-13-05-39-30/data"
70
+ resolution: 512
71
+ aspect_ratios:
72
+ "320p-2.4": [768, 320]
73
+ "384p-2.0": [768, 384]
74
+ "512p-1.6": [640, 384]
75
+ "512p-1.5": [768, 512]
76
+ "448p-1.29": [576, 448]
77
+ "512p-1.0": [512, 512]
78
+ "448p-0.78": [448, 576]
79
+ "512p-0.67": [512, 768]
80
+ "512p-0.6": [384, 640]
81
+ "384p-0.5": [384, 768]
82
+ "320p-0.42": [320, 768]
83
+ ratio_strategy: closest
84
+ params:
85
+ sample_size: -1 # set to -1 to keep the original resolution
86
+ fps: 24
87
+ num_parallel_files: 1
88
+ video_frame_sampler:
89
+ type: 'adaptive_advanced'
90
+ strategies:
91
+ - stride: 1
92
+ stride_prob: 1.0
93
+ frame_lengths: [ 121 ]
94
+ frame_lengths_prob: 'harmonic'
95
+ clip: 'simple'
96
+ text_sampler:
97
+ type: 'frequency'
98
+ frequency:
99
+ recaption_7B_: 1.0
100
+ origin_title: 0.0
101
+ part_idx: 0
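An illustrative sketch (not the dataset_tool implementation) of how a "closest" ratio_strategy could map a raw clip onto one of the aspect_ratios buckets listed above:

    aspect_ratios = {
        "320p-2.4": (768, 320), "384p-2.0": (768, 384), "512p-1.6": (640, 384),
        "512p-1.5": (768, 512), "448p-1.29": (576, 448), "512p-1.0": (512, 512),
        "448p-0.78": (448, 576), "512p-0.67": (512, 768), "512p-0.6": (384, 640),
        "384p-0.5": (384, 768), "320p-0.42": (320, 768),
    }

    def closest_bucket(width: int, height: int) -> str:
        # pick the bucket whose width/height ratio is nearest to the clip's ratio
        ratio = width / height
        return min(aspect_ratios, key=lambda k: abs(aspect_ratios[k][0] / aspect_ratios[k][1] - ratio))

    print(closest_bucket(1920, 1080))  # -> "512p-1.6" (1.78 is closest to 640/384 ≈ 1.67)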
dataset_code/sft_sftnews/offload/part1.yaml ADDED
@@ -0,0 +1,101 @@
1
+ # vae1011-98022219
2
+ # train_data: ['albatross_2_dump', 'budgie_1_dump', 'budgie_2_dump', 'canary_2_dump', 'condor_2_dump', 'falcon_dump', 'filmsupply_highres_dump', 'guillemot_2_dump', 'guillemot_4_dump', 'gull_2_dump', 'harrier_2_dump', 'hdvg_dump', 'hornbill_2_dump', 'hummingbird_2_dump', 'kingfisher_dump', 'lovebird_dump', 'macaw_2_dump', 'movie_2_dump', 'panda70m_dump', 'partridge_2_dump', 'partridge_4_dump', 'petrel_2_dump', 'pigeon_2_dump', 'puffin_2_dump', 'swallow_2_dump', 'vimeo_2_dump', 'vimeo_4_dump', 'warbler_2_dump', 'wren_2_dump']
3
+ # train_data_weights: [68859, 1192, 15856, 203755, 1384503, 78671, 26307, 343789, 514339, 152912, 1762929, 6288112, 594676, 34082, 16263, 49979, 62714, 447823, 19018149, 7013003, 16887569, 3790563, 584691, 477319, 10022018, 9587751, 8486291, 7210, 10100894]
4
+
5
+
6
+ #high quality data-30372699
7
+ # train_data: ['Istock_sports_videos','abaka_short','malayan','movie_v0_1','nexdata','rf123_1080p']
8
+ # train_data_weights: [3431837,742656,5699324,14771089,1502775,4225018]
9
+
10
+ #high quality data : vae1011 = 1:1
11
+ # train_data: ['Istock_sports_videos','abaka_short','malayan','movie_v0_1','nexdata','rf123_1080p', 'albatross_2_dump', 'budgie_1_dump', 'budgie_2_dump', 'canary_2_dump', 'condor_2_dump', 'falcon_dump', 'filmsupply_highres_dump', 'guillemot_2_dump', 'guillemot_4_dump', 'gull_2_dump', 'harrier_2_dump', 'hdvg_dump', 'hornbill_2_dump', 'hummingbird_2_dump', 'kingfisher_dump', 'lovebird_dump', 'macaw_2_dump', 'movie_2_dump', 'panda70m_dump', 'partridge_2_dump', 'partridge_4_dump', 'petrel_2_dump', 'pigeon_2_dump', 'puffin_2_dump', 'swallow_2_dump', 'vimeo_2_dump', 'vimeo_4_dump', 'warbler_2_dump', 'wren_2_dump']
12
+ # train_data_weights: [3431837,742656,5699324,14771089,1502775,4225018, 22953, 397, 5285, 67918, 461501, 26223, 8769, 114596, 171446, 50970, 587643, 2096037, 198225, 11360, 5421, 16659, 20904, 149274, 6339383, 2337667, 5629189, 1263521, 194897, 159106, 3340672, 3195917, 2828763, 2403, 3366964]
13
+
14
+ # train_data: ['flow_test']
15
+ # train_data_weights: [1]
16
+ # train_data: ['sft','sft_hq']
17
+ # train_data_weights: [1,10]
18
+ # train_data: ['eval']
19
+ # train_data_weights: [1]
20
+ train_data: ['sft_new','sft_new_1']
21
+ train_data_weights: [536463, 135600]
22
+ # train_data: ['sft']
23
+ # train_data_weights: [1]
24
+
25
+ data:
26
+ params:
27
+ batch_size: 1 # the real batch size
28
+ image_batch_size: 16 # real image batch size
29
+ enable_bucket: True
30
+ dataset_collections: # list all available datasets
31
+ sft_new:
32
+ target: dataset_tool.SeedV1Dataset
33
+ path: "hdfs://harunasg/home/byte_icvg_aigc_cp/user/video/temp/19900101/v2_en_new/2025-02-13-05-39-30/data"
34
+ resolution: 512
35
+ aspect_ratios:
36
+ "320p-2.4": [768, 320]
37
+ "384p-2.0": [768, 384]
38
+ "512p-1.6": [640, 384]
39
+ "512p-1.5": [768, 512]
40
+ "448p-1.29": [576, 448]
41
+ "512p-1.0": [512, 512]
42
+ "448p-0.78": [448, 576]
43
+ "512p-0.67": [512, 768]
44
+ "512p-0.6": [384, 640]
45
+ "384p-0.5": [384, 768]
46
+ "320p-0.42": [320, 768]
47
+ ratio_strategy: closest
48
+ params:
49
+ sample_size: -1 # set to -1 to keep the original resolution
50
+ fps: 24
51
+ num_parallel_files: 1
52
+ video_frame_sampler:
53
+ type: 'adaptive_advanced'
54
+ strategies:
55
+ - stride: 1
56
+ stride_prob: 1.0
57
+ frame_lengths: [ 121 ]
58
+ frame_lengths_prob: 'harmonic'
59
+ clip: 'simple'
60
+ text_sampler:
61
+ type: 'frequency'
62
+ frequency:
63
+ recaption_7B_: 1.0
64
+ origin_title: 0.0
65
+ part_idx: 1
66
+
67
+ sft_new_1:
68
+ target: dataset_tool.SeedV1Dataset
69
+ path: "hdfs://harunasg/home/byte_icvg_aigc_cp/user/video/temp/19900101/v2_en/2025-02-13-05-39-30/data"
70
+ resolution: 512
71
+ aspect_ratios:
72
+ "320p-2.4": [768, 320]
73
+ "384p-2.0": [768, 384]
74
+ "512p-1.6": [640, 384]
75
+ "512p-1.5": [768, 512]
76
+ "448p-1.29": [576, 448]
77
+ "512p-1.0": [512, 512]
78
+ "448p-0.78": [448, 576]
79
+ "512p-0.67": [512, 768]
80
+ "512p-0.6": [384, 640]
81
+ "384p-0.5": [384, 768]
82
+ "320p-0.42": [320, 768]
83
+ ratio_strategy: closest
84
+ params:
85
+ sample_size: -1 # set to -1 to keep the original resolution
86
+ fps: 24
87
+ num_parallel_files: 1
88
+ video_frame_sampler:
89
+ type: 'adaptive_advanced'
90
+ strategies:
91
+ - stride: 1
92
+ stride_prob: 1.0
93
+ frame_lengths: [ 121 ]
94
+ frame_lengths_prob: 'harmonic'
95
+ clip: 'simple'
96
+ text_sampler:
97
+ type: 'frequency'
98
+ frequency:
99
+ recaption_7B_: 1.0
100
+ origin_title: 0.0
101
+ part_idx: 1
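Note: part2.yaml through part5.yaml below repeat this configuration verbatim; the only field that differs between the part files is part_idx, which selects the shard of the dataset each offload job processes. A minimal sketch of how such per-part configs could be regenerated from one template (the template file name and the 0-5 range are assumptions, not part of this commit):

import re

with open("part_template.yaml") as f:  # hypothetical shared template
    template = f.read()

for idx in range(6):  # part0.yaml ... part5.yaml
    # Rewrite every part_idx field; everything else stays identical.
    with open(f"part{idx}.yaml", "w") as f:
        f.write(re.sub(r"part_idx: \d+", f"part_idx: {idx}", template))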
dataset_code/sft_sftnews/offload/part2.yaml ADDED
@@ -0,0 +1,101 @@
1
+ # vae1011-98022219
2
+ # train_data: ['albatross_2_dump', 'budgie_1_dump', 'budgie_2_dump', 'canary_2_dump', 'condor_2_dump', 'falcon_dump', 'filmsupply_highres_dump', 'guillemot_2_dump', 'guillemot_4_dump', 'gull_2_dump', 'harrier_2_dump', 'hdvg_dump', 'hornbill_2_dump', 'hummingbird_2_dump', 'kingfisher_dump', 'lovebird_dump', 'macaw_2_dump', 'movie_2_dump', 'panda70m_dump', 'partridge_2_dump', 'partridge_4_dump', 'petrel_2_dump', 'pigeon_2_dump', 'puffin_2_dump', 'swallow_2_dump', 'vimeo_2_dump', 'vimeo_4_dump', 'warbler_2_dump', 'wren_2_dump']
3
+ # train_data_weights: [68859, 1192, 15856, 203755, 1384503, 78671, 26307, 343789, 514339, 152912, 1762929, 6288112, 594676, 34082, 16263, 49979, 62714, 447823, 19018149, 7013003, 16887569, 3790563, 584691, 477319, 10022018, 9587751, 8486291, 7210, 10100894]
4
+
5
+
6
+ #high quality data-30372699
7
+ # train_data: ['Istock_sports_videos','abaka_short','malayan','movie_v0_1','nexdata','rf123_1080p']
8
+ # train_data_weights: [3431837,742656,5699324,14771089,1502775,4225018]
9
+
10
+ #high quality data : vae1011 = 1:1
11
+ # train_data: ['Istock_sports_videos','abaka_short','malayan','movie_v0_1','nexdata','rf123_1080p', 'albatross_2_dump', 'budgie_1_dump', 'budgie_2_dump', 'canary_2_dump', 'condor_2_dump', 'falcon_dump', 'filmsupply_highres_dump', 'guillemot_2_dump', 'guillemot_4_dump', 'gull_2_dump', 'harrier_2_dump', 'hdvg_dump', 'hornbill_2_dump', 'hummingbird_2_dump', 'kingfisher_dump', 'lovebird_dump', 'macaw_2_dump', 'movie_2_dump', 'panda70m_dump', 'partridge_2_dump', 'partridge_4_dump', 'petrel_2_dump', 'pigeon_2_dump', 'puffin_2_dump', 'swallow_2_dump', 'vimeo_2_dump', 'vimeo_4_dump', 'warbler_2_dump', 'wren_2_dump']
12
+ # train_data_weights: [3431837,742656,5699324,14771089,1502775,4225018, 22953, 397, 5285, 67918, 461501, 26223, 8769, 114596, 171446, 50970, 587643, 2096037, 198225, 11360, 5421, 16659, 20904, 149274, 6339383, 2337667, 5629189, 1263521, 194897, 159106, 3340672, 3195917, 2828763, 2403, 3366964]
13
+
14
+ # train_data: ['flow_test']
15
+ # train_data_weights: [1]
16
+ # train_data: ['sft','sft_hq']
17
+ # train_data_weights: [1,10]
18
+ # train_data: ['eval']
19
+ # train_data_weights: [1]
20
+ train_data: ['sft_new','sft_new_1']
21
+ train_data_weights: [536463, 135600]
22
+ # train_data: ['sft']
23
+ # train_data_weights: [1]
24
+
25
+ data:
26
+ params:
27
+ batch_size: 1 # the real batch size
28
+ image_batch_size: 16 # real image batch size
29
+ enable_bucket: True
30
+ dataset_collections: # list all available datasets
31
+ sft_new:
32
+ target: dataset_tool.SeedV1Dataset
33
+ path: "hdfs://harunasg/home/byte_icvg_aigc_cp/user/video/temp/19900101/v2_en_new/2025-02-13-05-39-30/data"
34
+ resolution: 512
35
+ aspect_ratios:
36
+ "320p-2.4": [768, 320]
37
+ "384p-2.0": [768, 384]
38
+ "512p-1.6": [640, 384]
39
+ "512p-1.5": [768, 512]
40
+ "448p-1.29": [576, 448]
41
+ "512p-1.0": [512, 512]
42
+ "448p-0.78": [448, 576]
43
+ "512p-0.67": [512, 768]
44
+ "512p-0.6": [384, 640]
45
+ "384p-0.5": [384, 768]
46
+ "320p-0.42": [320, 768]
47
+ ratio_strategy: closest
48
+ params:
49
+ sample_size: -1 # set to -1 to keep the original resolution
50
+ fps: 24
51
+ num_parallel_files: 1
52
+ video_frame_sampler:
53
+ type: 'adaptive_advanced'
54
+ strategies:
55
+ - stride: 1
56
+ stride_prob: 1.0
57
+ frame_lengths: [ 121 ]
58
+ frame_lengths_prob: 'harmonic'
59
+ clip: 'simple'
60
+ text_sampler:
61
+ type: 'frequency'
62
+ frequency:
63
+ recaption_7B_: 1.0
64
+ origin_title: 0.0
65
+ part_idx: 2
66
+
67
+ sft_new_1:
68
+ target: dataset_tool.SeedV1Dataset
69
+ path: "hdfs://harunasg/home/byte_icvg_aigc_cp/user/video/temp/19900101/v2_en/2025-02-13-05-39-30/data"
70
+ resolution: 512
71
+ aspect_ratios:
72
+ "320p-2.4": [768, 320]
73
+ "384p-2.0": [768, 384]
74
+ "512p-1.6": [640, 384]
75
+ "512p-1.5": [768, 512]
76
+ "448p-1.29": [576, 448]
77
+ "512p-1.0": [512, 512]
78
+ "448p-0.78": [448, 576]
79
+ "512p-0.67": [512, 768]
80
+ "512p-0.6": [384, 640]
81
+ "384p-0.5": [384, 768]
82
+ "320p-0.42": [320, 768]
83
+ ratio_strategy: closest
84
+ params:
85
+ sample_size: -1 # set to -1 to keep the original resolution
86
+ fps: 24
87
+ num_parallel_files: 1
88
+ video_frame_sampler:
89
+ type: 'adaptive_advanced'
90
+ strategies:
91
+ - stride: 1
92
+ stride_prob: 1.0
93
+ frame_lengths: [ 121 ]
94
+ frame_lengths_prob: 'harmonic'
95
+ clip: 'simple'
96
+ text_sampler:
97
+ type: 'frequency'
98
+ frequency:
99
+ recaption_7B_: 1.0
100
+ origin_title: 0.0
101
+ part_idx: 2
dataset_code/sft_sftnews/offload/part3.yaml ADDED
@@ -0,0 +1,101 @@
1
+ # vae1011-98022219
2
+ # train_data: ['albatross_2_dump', 'budgie_1_dump', 'budgie_2_dump', 'canary_2_dump', 'condor_2_dump', 'falcon_dump', 'filmsupply_highres_dump', 'guillemot_2_dump', 'guillemot_4_dump', 'gull_2_dump', 'harrier_2_dump', 'hdvg_dump', 'hornbill_2_dump', 'hummingbird_2_dump', 'kingfisher_dump', 'lovebird_dump', 'macaw_2_dump', 'movie_2_dump', 'panda70m_dump', 'partridge_2_dump', 'partridge_4_dump', 'petrel_2_dump', 'pigeon_2_dump', 'puffin_2_dump', 'swallow_2_dump', 'vimeo_2_dump', 'vimeo_4_dump', 'warbler_2_dump', 'wren_2_dump']
3
+ # train_data_weights: [68859, 1192, 15856, 203755, 1384503, 78671, 26307, 343789, 514339, 152912, 1762929, 6288112, 594676, 34082, 16263, 49979, 62714, 447823, 19018149, 7013003, 16887569, 3790563, 584691, 477319, 10022018, 9587751, 8486291, 7210, 10100894]
4
+
5
+
6
+ #high quality data-30372699
7
+ # train_data: ['Istock_sports_videos','abaka_short','malayan','movie_v0_1','nexdata','rf123_1080p']
8
+ # train_data_weights: [3431837,742656,5699324,14771089,1502775,4225018]
9
+
10
+ #high quality data : vae1011 = 1:1
11
+ # train_data: ['Istock_sports_videos','abaka_short','malayan','movie_v0_1','nexdata','rf123_1080p', 'albatross_2_dump', 'budgie_1_dump', 'budgie_2_dump', 'canary_2_dump', 'condor_2_dump', 'falcon_dump', 'filmsupply_highres_dump', 'guillemot_2_dump', 'guillemot_4_dump', 'gull_2_dump', 'harrier_2_dump', 'hdvg_dump', 'hornbill_2_dump', 'hummingbird_2_dump', 'kingfisher_dump', 'lovebird_dump', 'macaw_2_dump', 'movie_2_dump', 'panda70m_dump', 'partridge_2_dump', 'partridge_4_dump', 'petrel_2_dump', 'pigeon_2_dump', 'puffin_2_dump', 'swallow_2_dump', 'vimeo_2_dump', 'vimeo_4_dump', 'warbler_2_dump', 'wren_2_dump']
12
+ # train_data_weights: [3431837,742656,5699324,14771089,1502775,4225018, 22953, 397, 5285, 67918, 461501, 26223, 8769, 114596, 171446, 50970, 587643, 2096037, 198225, 11360, 5421, 16659, 20904, 149274, 6339383, 2337667, 5629189, 1263521, 194897, 159106, 3340672, 3195917, 2828763, 2403, 3366964]
13
+
14
+ # train_data: ['flow_test']
15
+ # train_data_weights: [1]
16
+ # train_data: ['sft','sft_hq']
17
+ # train_data_weights: [1,10]
18
+ # train_data: ['eval']
19
+ # train_data_weights: [1]
20
+ train_data: ['sft_new','sft_new_1']
21
+ train_data_weights: [536463, 135600]
22
+ # train_data: ['sft']
23
+ # train_data_weights: [1]
24
+
25
+ data:
26
+ params:
27
+ batch_size: 1 # the real batch size
28
+ image_batch_size: 16 # real image batch size
29
+ enable_bucket: True
30
+ dataset_collections: # list all available datasets
31
+ sft_new:
32
+ target: dataset_tool.SeedV1Dataset
33
+ path: "hdfs://harunasg/home/byte_icvg_aigc_cp/user/video/temp/19900101/v2_en_new/2025-02-13-05-39-30/data"
34
+ resolution: 512
35
+ aspect_ratios:
36
+ "320p-2.4": [768, 320]
37
+ "384p-2.0": [768, 384]
38
+ "512p-1.6": [640, 384]
39
+ "512p-1.5": [768, 512]
40
+ "448p-1.29": [576, 448]
41
+ "512p-1.0": [512, 512]
42
+ "448p-0.78": [448, 576]
43
+ "512p-0.67": [512, 768]
44
+ "512p-0.6": [384, 640]
45
+ "384p-0.5": [384, 768]
46
+ "320p-0.42": [320, 768]
47
+ ratio_strategy: closest
48
+ params:
49
+ sample_size: -1 # set to -1 to keep the original resolution
50
+ fps: 24
51
+ num_parallel_files: 1
52
+ video_frame_sampler:
53
+ type: 'adaptive_advanced'
54
+ strategies:
55
+ - stride: 1
56
+ stride_prob: 1.0
57
+ frame_lengths: [ 121 ]
58
+ frame_lengths_prob: 'harmonic'
59
+ clip: 'simple'
60
+ text_sampler:
61
+ type: 'frequency'
62
+ frequency:
63
+ recaption_7B_: 1.0
64
+ origin_title: 0.0
65
+ part_idx: 3
66
+
67
+ sft_new_1:
68
+ target: dataset_tool.SeedV1Dataset
69
+ path: "hdfs://harunasg/home/byte_icvg_aigc_cp/user/video/temp/19900101/v2_en/2025-02-13-05-39-30/data"
70
+ resolution: 512
71
+ aspect_ratios:
72
+ "320p-2.4": [768, 320]
73
+ "384p-2.0": [768, 384]
74
+ "512p-1.6": [640, 384]
75
+ "512p-1.5": [768, 512]
76
+ "448p-1.29": [576, 448]
77
+ "512p-1.0": [512, 512]
78
+ "448p-0.78": [448, 576]
79
+ "512p-0.67": [512, 768]
80
+ "512p-0.6": [384, 640]
81
+ "384p-0.5": [384, 768]
82
+ "320p-0.42": [320, 768]
83
+ ratio_strategy: closest
84
+ params:
85
+ sample_size: -1 # set to -1 to keep the original resolution
86
+ fps: 24
87
+ num_parallel_files: 1
88
+ video_frame_sampler:
89
+ type: 'adaptive_advanced'
90
+ strategies:
91
+ - stride: 1
92
+ stride_prob: 1.0
93
+ frame_lengths: [ 121 ]
94
+ frame_lengths_prob: 'harmonic'
95
+ clip: 'simple'
96
+ text_sampler:
97
+ type: 'frequency'
98
+ frequency:
99
+ recaption_7B_: 1.0
100
+ origin_title: 0.0
101
+ part_idx: 3
dataset_code/sft_sftnews/offload/part4.yaml ADDED
@@ -0,0 +1,101 @@
1
+ # vae1011-98022219
2
+ # train_data: ['albatross_2_dump', 'budgie_1_dump', 'budgie_2_dump', 'canary_2_dump', 'condor_2_dump', 'falcon_dump', 'filmsupply_highres_dump', 'guillemot_2_dump', 'guillemot_4_dump', 'gull_2_dump', 'harrier_2_dump', 'hdvg_dump', 'hornbill_2_dump', 'hummingbird_2_dump', 'kingfisher_dump', 'lovebird_dump', 'macaw_2_dump', 'movie_2_dump', 'panda70m_dump', 'partridge_2_dump', 'partridge_4_dump', 'petrel_2_dump', 'pigeon_2_dump', 'puffin_2_dump', 'swallow_2_dump', 'vimeo_2_dump', 'vimeo_4_dump', 'warbler_2_dump', 'wren_2_dump']
3
+ # train_data_weights: [68859, 1192, 15856, 203755, 1384503, 78671, 26307, 343789, 514339, 152912, 1762929, 6288112, 594676, 34082, 16263, 49979, 62714, 447823, 19018149, 7013003, 16887569, 3790563, 584691, 477319, 10022018, 9587751, 8486291, 7210, 10100894]
4
+
5
+
6
+ #high quality data-30372699
7
+ # train_data: ['Istock_sports_videos','abaka_short','malayan','movie_v0_1','nexdata','rf123_1080p']
8
+ # train_data_weights: [3431837,742656,5699324,14771089,1502775,4225018]
9
+
10
+ #high quality data : vae1011 = 1:1
11
+ # train_data: ['Istock_sports_videos','abaka_short','malayan','movie_v0_1','nexdata','rf123_1080p', 'albatross_2_dump', 'budgie_1_dump', 'budgie_2_dump', 'canary_2_dump', 'condor_2_dump', 'falcon_dump', 'filmsupply_highres_dump', 'guillemot_2_dump', 'guillemot_4_dump', 'gull_2_dump', 'harrier_2_dump', 'hdvg_dump', 'hornbill_2_dump', 'hummingbird_2_dump', 'kingfisher_dump', 'lovebird_dump', 'macaw_2_dump', 'movie_2_dump', 'panda70m_dump', 'partridge_2_dump', 'partridge_4_dump', 'petrel_2_dump', 'pigeon_2_dump', 'puffin_2_dump', 'swallow_2_dump', 'vimeo_2_dump', 'vimeo_4_dump', 'warbler_2_dump', 'wren_2_dump']
12
+ # train_data_weights: [3431837,742656,5699324,14771089,1502775,4225018, 22953, 397, 5285, 67918, 461501, 26223, 8769, 114596, 171446, 50970, 587643, 2096037, 198225, 11360, 5421, 16659, 20904, 149274, 6339383, 2337667, 5629189, 1263521, 194897, 159106, 3340672, 3195917, 2828763, 2403, 3366964]
13
+
14
+ # train_data: ['flow_test']
15
+ # train_data_weights: [1]
16
+ # train_data: ['sft','sft_hq']
17
+ # train_data_weights: [1,10]
18
+ # train_data: ['eval']
19
+ # train_data_weights: [1]
20
+ train_data: ['sft_new','sft_new_1']
21
+ train_data_weights: [536463, 135600]
22
+ # train_data: ['sft']
23
+ # train_data_weights: [1]
24
+
25
+ data:
26
+ params:
27
+ batch_size: 1 # the real batch size
28
+ image_batch_size: 16 # real image batch size
29
+ enable_bucket: True
30
+ dataset_collections: # list all available datasets
31
+ sft_new:
32
+ target: dataset_tool.SeedV1Dataset
33
+ path: "hdfs://harunasg/home/byte_icvg_aigc_cp/user/video/temp/19900101/v2_en_new/2025-02-13-05-39-30/data"
34
+ resolution: 512
35
+ aspect_ratios:
36
+ "320p-2.4": [768, 320]
37
+ "384p-2.0": [768, 384]
38
+ "512p-1.6": [640, 384]
39
+ "512p-1.5": [768, 512]
40
+ "448p-1.29": [576, 448]
41
+ "512p-1.0": [512, 512]
42
+ "448p-0.78": [448, 576]
43
+ "512p-0.67": [512, 768]
44
+ "512p-0.6": [384, 640]
45
+ "384p-0.5": [384, 768]
46
+ "320p-0.42": [320, 768]
47
+ ratio_strategy: closest
48
+ params:
49
+ sample_size: -1 # set to -1 to keep the original resolution
50
+ fps: 24
51
+ num_parallel_files: 1
52
+ video_frame_sampler:
53
+ type: 'adaptive_advanced'
54
+ strategies:
55
+ - stride: 1
56
+ stride_prob: 1.0
57
+ frame_lengths: [ 121 ]
58
+ frame_lengths_prob: 'harmonic'
59
+ clip: 'simple'
60
+ text_sampler:
61
+ type: 'frequency'
62
+ frequency:
63
+ recaption_7B_: 1.0
64
+ origin_title: 0.0
65
+ part_idx: 4
66
+
67
+ sft_new_1:
68
+ target: dataset_tool.SeedV1Dataset
69
+ path: "hdfs://harunasg/home/byte_icvg_aigc_cp/user/video/temp/19900101/v2_en/2025-02-13-05-39-30/data"
70
+ resolution: 512
71
+ aspect_ratios:
72
+ "320p-2.4": [768, 320]
73
+ "384p-2.0": [768, 384]
74
+ "512p-1.6": [640, 384]
75
+ "512p-1.5": [768, 512]
76
+ "448p-1.29": [576, 448]
77
+ "512p-1.0": [512, 512]
78
+ "448p-0.78": [448, 576]
79
+ "512p-0.67": [512, 768]
80
+ "512p-0.6": [384, 640]
81
+ "384p-0.5": [384, 768]
82
+ "320p-0.42": [320, 768]
83
+ ratio_strategy: closest
84
+ params:
85
+ sample_size: -1 # set to -1 to keep the original resolution
86
+ fps: 24
87
+ num_parallel_files: 1
88
+ video_frame_sampler:
89
+ type: 'adaptive_advanced'
90
+ strategies:
91
+ - stride: 1
92
+ stride_prob: 1.0
93
+ frame_lengths: [ 121 ]
94
+ frame_lengths_prob: 'harmonic'
95
+ clip: 'simple'
96
+ text_sampler:
97
+ type: 'frequency'
98
+ frequency:
99
+ recaption_7B_: 1.0
100
+ origin_title: 0.0
101
+ part_idx: 4
dataset_code/sft_sftnews/offload/part5.yaml ADDED
@@ -0,0 +1,101 @@
1
+ # vae1011-98022219
2
+ # train_data: ['albatross_2_dump', 'budgie_1_dump', 'budgie_2_dump', 'canary_2_dump', 'condor_2_dump', 'falcon_dump', 'filmsupply_highres_dump', 'guillemot_2_dump', 'guillemot_4_dump', 'gull_2_dump', 'harrier_2_dump', 'hdvg_dump', 'hornbill_2_dump', 'hummingbird_2_dump', 'kingfisher_dump', 'lovebird_dump', 'macaw_2_dump', 'movie_2_dump', 'panda70m_dump', 'partridge_2_dump', 'partridge_4_dump', 'petrel_2_dump', 'pigeon_2_dump', 'puffin_2_dump', 'swallow_2_dump', 'vimeo_2_dump', 'vimeo_4_dump', 'warbler_2_dump', 'wren_2_dump']
3
+ # train_data_weights: [68859, 1192, 15856, 203755, 1384503, 78671, 26307, 343789, 514339, 152912, 1762929, 6288112, 594676, 34082, 16263, 49979, 62714, 447823, 19018149, 7013003, 16887569, 3790563, 584691, 477319, 10022018, 9587751, 8486291, 7210, 10100894]
4
+
5
+
6
+ #high quality data-30372699
7
+ # train_data: ['Istock_sports_videos','abaka_short','malayan','movie_v0_1','nexdata','rf123_1080p']
8
+ # train_data_weights: [3431837,742656,5699324,14771089,1502775,4225018]
9
+
10
+ #high quality data : vae1011 = 1:1
11
+ # train_data: ['Istock_sports_videos','abaka_short','malayan','movie_v0_1','nexdata','rf123_1080p', 'albatross_2_dump', 'budgie_1_dump', 'budgie_2_dump', 'canary_2_dump', 'condor_2_dump', 'falcon_dump', 'filmsupply_highres_dump', 'guillemot_2_dump', 'guillemot_4_dump', 'gull_2_dump', 'harrier_2_dump', 'hdvg_dump', 'hornbill_2_dump', 'hummingbird_2_dump', 'kingfisher_dump', 'lovebird_dump', 'macaw_2_dump', 'movie_2_dump', 'panda70m_dump', 'partridge_2_dump', 'partridge_4_dump', 'petrel_2_dump', 'pigeon_2_dump', 'puffin_2_dump', 'swallow_2_dump', 'vimeo_2_dump', 'vimeo_4_dump', 'warbler_2_dump', 'wren_2_dump']
12
+ # train_data_weights: [3431837,742656,5699324,14771089,1502775,4225018, 22953, 397, 5285, 67918, 461501, 26223, 8769, 114596, 171446, 50970, 587643, 2096037, 198225, 11360, 5421, 16659, 20904, 149274, 6339383, 2337667, 5629189, 1263521, 194897, 159106, 3340672, 3195917, 2828763, 2403, 3366964]
13
+
14
+ # train_data: ['flow_test']
15
+ # train_data_weights: [1]
16
+ # train_data: ['sft','sft_hq']
17
+ # train_data_weights: [1,10]
18
+ # train_data: ['eval']
19
+ # train_data_weights: [1]
20
+ train_data: ['sft_new','sft_new_1']
21
+ train_data_weights: [536463, 135600]
22
+ # train_data: ['sft']
23
+ # train_data_weights: [1]
24
+
25
+ data:
26
+ params:
27
+ batch_size: 1 # the real batch size
28
+ image_batch_size: 16 # real image batch size
29
+ enable_bucket: True
30
+ dataset_collections: # list all available datasets
31
+ sft_new:
32
+ target: dataset_tool.SeedV1Dataset
33
+ path: "hdfs://harunasg/home/byte_icvg_aigc_cp/user/video/temp/19900101/v2_en_new/2025-02-13-05-39-30/data"
34
+ resolution: 512
35
+ aspect_ratios:
36
+ "320p-2.4": [768, 320]
37
+ "384p-2.0": [768, 384]
38
+ "512p-1.6": [640, 384]
39
+ "512p-1.5": [768, 512]
40
+ "448p-1.29": [576, 448]
41
+ "512p-1.0": [512, 512]
42
+ "448p-0.78": [448, 576]
43
+ "512p-0.67": [512, 768]
44
+ "512p-0.6": [384, 640]
45
+ "384p-0.5": [384, 768]
46
+ "320p-0.42": [320, 768]
47
+ ratio_strategy: closest
48
+ params:
49
+ sample_size: -1 # set to -1 to keep the original resolution
50
+ fps: 24
51
+ num_parallel_files: 1
52
+ video_frame_sampler:
53
+ type: 'adaptive_advanced'
54
+ strategies:
55
+ - stride: 1
56
+ stride_prob: 1.0
57
+ frame_lengths: [ 121 ]
58
+ frame_lengths_prob: 'harmonic'
59
+ clip: 'simple'
60
+ text_sampler:
61
+ type: 'frequency'
62
+ frequency:
63
+ recaption_7B_: 1.0
64
+ origin_title: 0.0
65
+ part_idx: 5
66
+
67
+ sft_new_1:
68
+ target: dataset_tool.SeedV1Dataset
69
+ path: "hdfs://harunasg/home/byte_icvg_aigc_cp/user/video/temp/19900101/v2_en/2025-02-13-05-39-30/data"
70
+ resolution: 512
71
+ aspect_ratios:
72
+ "320p-2.4": [768, 320]
73
+ "384p-2.0": [768, 384]
74
+ "512p-1.6": [640, 384]
75
+ "512p-1.5": [768, 512]
76
+ "448p-1.29": [576, 448]
77
+ "512p-1.0": [512, 512]
78
+ "448p-0.78": [448, 576]
79
+ "512p-0.67": [512, 768]
80
+ "512p-0.6": [384, 640]
81
+ "384p-0.5": [384, 768]
82
+ "320p-0.42": [320, 768]
83
+ ratio_strategy: closest
84
+ params:
85
+ sample_size: -1 # set to -1 to keep the original resolution
86
+ fps: 24
87
+ num_parallel_files: 1
88
+ video_frame_sampler:
89
+ type: 'adaptive_advanced'
90
+ strategies:
91
+ - stride: 1
92
+ stride_prob: 1.0
93
+ frame_lengths: [ 121 ]
94
+ frame_lengths_prob: 'harmonic'
95
+ clip: 'simple'
96
+ text_sampler:
97
+ type: 'frequency'
98
+ frequency:
99
+ recaption_7B_: 1.0
100
+ origin_title: 0.0
101
+ part_idx: 5
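Across all of the part files, ratio_strategy: closest pairs each clip with the aspect-ratio bucket nearest to its own width/height ratio. A small sketch of that selection (the "closest" semantics are assumed from the config, and only a subset of the bucket table is reproduced):

# Pick the bucket whose aspect ratio is nearest to the source clip's W/H.
aspect_ratios = {
    "320p-2.4": (768, 320),
    "512p-1.5": (768, 512),
    "512p-1.0": (512, 512),
    "512p-0.67": (512, 768),
    "320p-0.42": (320, 768),
}

def closest_bucket(width, height):
    ratio = width / height
    return min(aspect_ratios.items(), key=lambda kv: abs(kv[1][0] / kv[1][1] - ratio))

print(closest_bucket(1920, 1080))  # a 16:9 clip falls into the 768x512 bucket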
dataset_code/test.sh ADDED
@@ -0,0 +1,16 @@
1
+ #!/bin/bash
2
+ input_file="/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/e6d91e1b-e366-5fa9-aef4-9769eb0bf631.mp4"
3
+ output_file="output_trimmed.mp4"
4
+
5
+ # Get the total frame count
6
+ total_frames=$(ffprobe -v quiet -select_streams v:0 -count_frames -show_entries stream=nb_frames -of csv=p=0 "$input_file")
7
+
8
+ # Compute how many frames to keep
9
+ keep_frames=$((total_frames - 19))
10
+
11
+ # Run the trim
12
+ ffmpeg -i "$input_file" -vf "select='lt(n,$keep_frames)'" -vsync 0 -c:a copy "$output_file"
13
+
14
+ echo "处理完成: $output_file"
15
+ echo "原始帧数: $total_frames"
16
+ echo "保留帧数: $keep_frames"
dataset_code/vae_decode_hv.py ADDED
@@ -0,0 +1,92 @@
1
+ import os
2
+
3
+ os.environ["HF_ENABLE_PARALLEL_LOADING"] = "yes"
4
+
5
+ import torch
6
+ from diffusers import AutoencoderKLHunyuanVideo
7
+ from diffusers.video_processor import VideoProcessor
8
+ from diffusers.utils import export_to_video
9
+
10
+ device = "cuda"
11
+ pretrained_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo"
12
+ vae = AutoencoderKLHunyuanVideo.from_pretrained(
13
+ pretrained_model_name_or_path,
14
+ subfolder="vae",
15
+ torch_dtype=torch.float32,
16
+ ).to(device)
17
+ vae.eval()
18
+ vae.requires_grad_(False)
19
+ vae.enable_tiling()
20
+
21
+ vae_scale_factor_spatial = vae.spatial_compression_ratio
22
+ video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)
23
+
24
+ latents = torch.load('/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-drone/latents_stride1/9F82nRgRthI_0046499_0046799_281_384_640.pt', map_location='cpu', weights_only=False)
25
+ vae_latents = latents['vae_latent'] / vae.config.scaling_factor
26
+ # vae_latents = vae_latents.to(device=device, dtype=vae.dtype)[:, :9, :, :]
27
+
28
+ video = vae.decode(vae_latents.unsqueeze(0).to(vae.device), return_dict=False)[0]
29
+ video = video_processor.postprocess_video(video, output_type="pil")
30
+ export_to_video(video[0], "output_fp_hv_33.mp4", fps=30)
31
+
32
+ # video[0][0].save("1_0.png")
33
+ # video[0][-1].save("2_0.png")
34
+
35
+ # first_vae_latents = latents['vae_latent'][:, 0, :, :].unsqueeze(1) / vae.config.scaling_factor
36
+ # first_vae_latents = first_vae_latents.to(device=device, dtype=vae.dtype)
37
+ # first_image = vae.decode(first_vae_latents.unsqueeze(0), return_dict=False)[0]
38
+ # first_image = video_processor.postprocess_video(first_image, output_type="pil")[0][0]
39
+ # first_image.save("1_1.png")
40
+
41
+ # last_vae_latents = latents['vae_latent'][:, -1, :, :].unsqueeze(1) / vae.config.scaling_factor
42
+ # last_vae_latents = last_vae_latents.to(device=device, dtype=vae.dtype)
43
+ # last_image = vae.decode(last_vae_latents.unsqueeze(0), return_dict=False)[0]
44
+ # last_image = video_processor.postprocess_video(last_image, output_type="pil")[0][0]
45
+ # last_image.save("2_1.png")
46
+
47
+ # print(f"Max memory: {torch.cuda.max_memory_allocated() / 1024**3:.3f} GB")
48
+
49
+ # import sys
50
+ # sys.path.append("/mnt/bn/yufan-dev-my/ysh/Codes/Efficient/fp_train/dance_forcing/utils")
51
+
52
+ # from utils_framepack import get_framepack_input_i2v
53
+
54
+ # (
55
+ # model_input, # torch.Size([2, 16, 9, 60, 104])
56
+ # indices_latents, # torch.Size([2, 9])
57
+ # latents_clean, # torch.Size([2, 16, 2, 60, 104])
58
+ # indices_clean_latents, # torch.Size([2, 2])
59
+ # latents_history_2x, # torch.Size([2, 16, 2, 60, 104])
60
+ # indices_latents_history_2x, # torch.Size([2, 2])
61
+ # latents_history_4x, # torch.Size([2, 16, 16, 60, 104])
62
+ # indices_latents_history_4x, # torch.Size([2, 16])
63
+ # section_to_video_idx,
64
+ # ) = get_framepack_input_i2v(
65
+ # vae_latents=latents['vae_latent'].unsqueeze(0),
66
+ # latent_window_size=9,
67
+ # vanilla_sampling=True,
68
+ # is_local_flf2v=True,
69
+ # dtype=torch.bfloat16,
70
+ # )
71
+
72
+ # vae_latents_1 = torch.cat([model_input[0:1], model_input[-1:]], dim = 2)
73
+ # vae_latents_1 = vae_latents_1.to(vae.device, dtype=vae.dtype) / vae.config.scaling_factor
74
+ # video = vae.decode(vae_latents_1, return_dict=False)[0]
75
+ # video = video_processor.postprocess_video(video, output_type="pil")
76
+ # export_to_video(video[0], "output_fp_f1_test_1.mp4", fps=30)
77
+
78
+
79
+ # def remove_front_padding(tensor, dim=1):
80
+ # non_zero_indices = torch.any(tensor != 0, dim=tuple(i for i in range(tensor.ndim) if i != dim))
81
+ # first_non_zero = torch.argmax(non_zero_indices.float())
82
+ # slices = [slice(None)] * tensor.ndim
83
+ # slices[dim] = slice(first_non_zero.item(), None)
84
+ # return tensor[tuple(slices)]
85
+
86
+ # vae_latents_1 = remove_front_padding(torch.cat([latents_history_4x[-1:], latents_history_2x[-1:], latents_clean[-1:][:, :, 0:1,], model_input[-1:], latents_clean[-1:][:, :, 1:,]], dim = 2), dim = 2)
87
+ # vae_latents_1 = vae_latents_1.to(vae.device, dtype=vae.dtype) / vae.config.scaling_factor
88
+ # video = vae.decode(vae_latents_1, return_dict=False)[0]
89
+ # video = video_processor.postprocess_video(video, output_type="pil")
90
+ # export_to_video(video[0], "output_fp_f1_test_2.mp4", fps=30)
91
+
92
+ # import pdb;pdb.set_trace()
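For sanity-checking decoded outputs, a minimal sketch (assuming the latent tensor is stored as [C, T, H, W], as the script above treats it) that predicts the decoded clip size from the latent shape, using the HunyuanVideo VAE's 4x temporal and 8x spatial compression:

# Predict the decoded clip size from the latent shape.
_, t, h, w = latents['vae_latent'].shape
print(f"expected clip: {(t - 1) * 4 + 1} frames at {w * 8}x{h * 8}")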
dataset_code/vae_decode_hv_batch.py ADDED
@@ -0,0 +1,118 @@
1
+ import os
2
+ import glob
3
+ import torch
4
+ import torch.multiprocessing as mp
5
+ from diffusers import AutoencoderKLHunyuanVideo
6
+ from diffusers.video_processor import VideoProcessor
7
+ from diffusers.utils import export_to_video
8
+ from concurrent.futures import ProcessPoolExecutor
9
+ import time
10
+
11
+ os.environ["HF_ENABLE_PARALLEL_LOADING"] = "yes"
12
+
13
+ def process_files_on_gpu(gpu_id, file_list, pretrained_model_path, output_folder):
14
+ """在指定GPU上处理文件列表"""
15
+ device = f"cuda:{gpu_id}"
16
+
17
+ # Initialize the VAE model
18
+ vae = AutoencoderKLHunyuanVideo.from_pretrained(
19
+ pretrained_model_path,
20
+ subfolder="vae",
21
+ torch_dtype=torch.float32,
22
+ ).to(device)
23
+ vae.eval()
24
+ vae.requires_grad_(False)
25
+ vae.enable_tiling()
26
+
27
+ vae_scale_factor_spatial = vae.spatial_compression_ratio
28
+ video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)
29
+
30
+ for i, pt_file in enumerate(file_list):
31
+ try:
32
+ print(f"GPU {gpu_id} - 正在处理 ({i+1}/{len(file_list)}): {os.path.basename(pt_file)}")
33
+
34
+ # Load the latents
35
+ latents = torch.load(pt_file, map_location='cpu', weights_only=False)
36
+ vae_latents = latents['vae_latent'] / vae.config.scaling_factor
37
+ vae_latents = vae_latents.to(device=device, dtype=vae.dtype)
38
+
39
+ # Decode the video
40
+ video = vae.decode(vae_latents.unsqueeze(0), return_dict=False)[0]
41
+ video = video_processor.postprocess_video(video, output_type="pil")
42
+
43
+ # Build the output file name
44
+ base_name = os.path.splitext(os.path.basename(pt_file))[0]
45
+ output_path = os.path.join(output_folder, f"{base_name}.mp4")
46
+
47
+ # Export the video
48
+ export_to_video(video[0], output_path, fps=30)
49
+ print(f"GPU {gpu_id} - 成功保存: {output_path}")
50
+
51
+ # Free GPU memory
52
+ del latents, vae_latents, video
53
+ torch.cuda.empty_cache()
54
+
55
+ except Exception as e:
56
+ print(f"GPU {gpu_id} - 处理文件 {pt_file} 时出错: {str(e)}")
57
+ continue
58
+
59
+ print(f"GPU {gpu_id} - 完成所有分配的文件处理!")
60
+
61
+ def main():
62
+ # Set paths
63
+ pretrained_model_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo"
64
+ input_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/dummy_fp_offload_latents"
65
+ output_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/dummy_fp_offload_latents/decoded_videos"
66
+
67
+ # Create the output folder
68
+ os.makedirs(output_folder, exist_ok=True)
69
+
70
+ # Collect all .pt files
71
+ pt_files = glob.glob(os.path.join(input_folder, "*.pt"))
72
+ print(f"找到 {len(pt_files)} 个.pt文件")
73
+
74
+ if len(pt_files) == 0:
75
+ print("没有找到.pt文件!")
76
+ return
77
+
78
+ # Check how many GPUs are available
79
+ num_gpus = min(8, torch.cuda.device_count())
80
+ print(f"使用 {num_gpus} 个GPU进行并行处理")
81
+
82
+ # Distribute the files across GPUs
83
+ files_per_gpu = len(pt_files) // num_gpus
84
+ file_chunks = []
85
+
86
+ for i in range(num_gpus):
87
+ start_idx = i * files_per_gpu
88
+ if i == num_gpus - 1: # the last GPU takes all remaining files
89
+ end_idx = len(pt_files)
90
+ else:
91
+ end_idx = (i + 1) * files_per_gpu
92
+
93
+ file_chunks.append(pt_files[start_idx:end_idx])
94
+ print(f"GPU {i} 将处理 {len(file_chunks[i])} 个文件")
95
+
96
+ # Process in parallel, one worker process per GPU
97
+ start_time = time.time()
98
+
99
+ processes = []
100
+ for gpu_id in range(num_gpus):
101
+ if len(file_chunks[gpu_id]) > 0: # only spawn a process for GPUs that have files
102
+ p = mp.Process(
103
+ target=process_files_on_gpu,
104
+ args=(gpu_id, file_chunks[gpu_id], pretrained_model_path, output_folder)
105
+ )
106
+ p.start()
107
+ processes.append(p)
108
+
109
+ # Wait for all processes to finish
110
+ for p in processes:
111
+ p.join()
112
+
113
+ end_time = time.time()
114
+ print(f"\n所有文件处理完成!总耗时: {end_time - start_time:.2f} 秒")
115
+
116
+ if __name__ == "__main__":
117
+ mp.set_start_method('spawn', force=True) # ensure multiprocessing compatibility
118
+ main()
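The chunking above hands each GPU one contiguous slice of the file list. If the .pt files vary a lot in length, a round-robin split (a sketch, intended as a drop-in replacement for the loop that builds file_chunks) tends to balance the per-GPU workload better:

# Round-robin assignment: GPU g takes files g, g+num_gpus, g+2*num_gpus, ...
file_chunks = [pt_files[gpu_id::num_gpus] for gpu_id in range(num_gpus)]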
dataset_code/vae_decode_wan.py ADDED
@@ -0,0 +1,32 @@
1
+ import os
2
+
3
+ os.environ["HF_ENABLE_PARALLEL_LOADING"] = "yes"
4
+
5
+ import torch
6
+ from diffusers import AutoencoderKLWan
7
+ from diffusers.video_processor import VideoProcessor
8
+ from diffusers.utils import export_to_video
9
+
10
+ device = "cuda"
11
+ pretrained_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Wan-AI/Wan2.1-I2V-14B-720P-Diffusers/"
12
+ vae = AutoencoderKLWan.from_pretrained(
13
+ pretrained_model_name_or_path,
14
+ subfolder="vae",
15
+ torch_dtype=torch.float32,
16
+ ).to(device)
17
+ vae.eval()
18
+ vae.requires_grad_(False)
19
+ vae.enable_tiling()
20
+
21
+ vae_scale_factor_spatial = vae.spatial_compression_ratio
22
+ video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)
23
+
24
+ latents = torch.load('/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents_wan/6ad434bc-df9b-40be-9632-c8f9508f1ccc_121_768_384.pt', map_location='cpu', weights_only=False)
25
+ latents_mean = torch.tensor(vae.config.latents_mean).view(1, vae.config.z_dim, 1, 1, 1)
26
+ latents_std = 1.0 / torch.tensor(vae.config.latents_std).view(1, vae.config.z_dim, 1, 1, 1)
27
+ vae_latents = latents['vae_latent'] / latents_std + latents_mean
28
+ vae_latents = vae_latents.to(device=device, dtype=vae.dtype)
29
+
30
+ video = vae.decode(vae_latents, return_dict=False)[0]
31
+ video = video_processor.postprocess_video(video, output_type="pil")
32
+ export_to_video(video[0], "output_wan.mp4", fps=30)