# Preprocessing helper: build filtered CSVs listing samples whose latent
# files have not yet been computed (SpatialVID-HQ pipeline).
import os
import pandas as pd
import argparse
from tqdm import tqdm
def extract_uttid_from_row(row):
    """Build the utterance id (uttid) for one CSV row.

    The uttid is the row's ``id`` joined with its ``start_frame`` and
    ``end_frame`` values, underscore-separated.
    """
    return "_".join(str(row[key]) for key in ("id", "start_frame", "end_frame"))
def create_filtered_csv(csv_file, output_latent_folder, output_csv_file):
    """Create a filtered CSV containing only the samples still to be processed.

    A sample counts as "already processed" when a ``.pt`` latent file whose
    name encodes its uttid exists in *output_latent_folder*.  Matching uses
    the uttid only; no other metadata is consulted.

    Args:
        csv_file: Path of the original CSV. Must contain ``id``,
            ``start_frame`` and ``end_frame`` columns.
        output_latent_folder: Folder holding already-computed latents.
            Filenames are expected to be ``<uttid>_<m1>_<m2>_<m3>.pt`` —
            the uttid followed by exactly three metadata fields.
        output_csv_file: Destination path for the filtered CSV.

    Returns:
        int: number of rows written to the filtered CSV.
    """
    # Read the original CSV.
    df = pd.read_csv(csv_file)
    print(f"Original dataset size: {len(df)}")

    # Collect the uttids of latent files that already exist on disk.
    existing_files = set()
    if os.path.exists(output_latent_folder):
        for filename in os.listdir(output_latent_folder):
            if filename.endswith('.pt'):
                parts = filename[:-3].split('_')
                # Need at least the uttid plus the 3 trailing metadata fields.
                if len(parts) >= 4:
                    # Everything before the last 3 fields is the uttid
                    # (the uttid itself may contain underscores).
                    uttid = '_'.join(parts[:-3])
                    existing_files.add(uttid)
    print(f"Found {len(existing_files)} existing latent files")

    # Derive each row's uttid and keep only rows without an existing latent.
    df_uttids = df.apply(extract_uttid_from_row, axis=1)
    mask = ~df_uttids.isin(existing_files)
    filtered_df = df[mask]

    # BUGFIX: os.makedirs('') raises FileNotFoundError when output_csv_file
    # is a bare filename with no directory component, so guard the dirname.
    out_dir = os.path.dirname(output_csv_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    filtered_df.to_csv(output_csv_file, index=False)

    print(f"Filtered dataset size: {len(filtered_df)}")
    print(f"Filtered CSV saved to: {output_csv_file}")
    return len(filtered_df)
def create_all_filtered_csvs():
    """Generate a filtered CSV for every configured dataset."""
    csv_root = "/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final/"
    latent_root = "/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final"

    # (relative CSV path, relative latent folder) pairs to process.
    dataset_pairs = zip(
        ["data/SpatialVID_HQ_step2.csv"],
        ["latents_stride1"],
    )

    for rel_csv, rel_latent in dataset_pairs:
        source_csv = os.path.join(csv_root, rel_csv)
        latent_dir = os.path.join(latent_root, rel_latent)
        # Write the output next to the source CSV, with a "_filtered" suffix.
        target_csv = os.path.join(csv_root, rel_csv.replace('.csv', '_filtered.csv'))

        print(f"\nProcessing: {rel_csv}")
        sample_count = create_filtered_csv(
            csv_file=source_csv,
            output_latent_folder=latent_dir,
            output_csv_file=target_csv,
        )
        print(f"Created filtered CSV: {target_csv} with {sample_count} samples")
def main():
    """CLI entry point: build filtered CSVs for all configured datasets.

    The previous revision carried a large body of commented-out single-file
    handling code; it has been removed in favor of the batch path, which is
    the only mode the script has actually executed.
    """
    parser = argparse.ArgumentParser(description="Create filtered CSV for processing")
    # NOTE: --batch is accepted for backward compatibility, but batch mode is
    # always run regardless of the flag — matching the original behavior.
    parser.add_argument("--batch", action="store_true", help="Process all datasets in batch")
    parser.parse_args()
    create_all_filtered_csvs()


if __name__ == "__main__":
    main()