import argparse
import os

import pandas as pd


def extract_uttid_from_row(row):
    """Extract the uttid from a CSV row by combining id, start_frame, and end_frame."""
    uttid = f"{row['id']}_{row['start_frame']}_{row['end_frame']}"
    return uttid
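
# For illustration only (hypothetical values, not taken from the dataset): a row
# with id="scene_0001", start_frame=120 and end_frame=360 yields the uttid
# "scene_0001_120_360", which is the key the filtering below matches against.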


def create_filtered_csv(csv_file, output_latent_folder, output_csv_file):
    """Create a filtered CSV file containing only the samples that still need processing.

    Matching relies solely on the uttid; no other metadata is used.
    """
    df = pd.read_csv(csv_file)
    print(f"Original dataset size: {len(df)}")

    # Collect the uttids of latent files that already exist on disk.
    existing_files = set()
    if os.path.exists(output_latent_folder):
        for filename in os.listdir(output_latent_folder):
            if filename.endswith('.pt'):
                # Strip the '.pt' extension, then drop the last three
                # underscore-separated fields of the stem to recover the uttid
                # (the stem is assumed to carry three extra suffix fields).
                parts = filename[:-3].split('_')
                if len(parts) >= 4:
                    uttid_parts = parts[:-3]
                    uttid = '_'.join(uttid_parts)
                    existing_files.add(uttid)

    print(f"Found {len(existing_files)} existing latent files")

    # Keep only the rows whose uttid has no corresponding latent file yet.
    df_uttids = df.apply(extract_uttid_from_row, axis=1)
    mask = ~df_uttids.isin(existing_files)
    filtered_df = df[mask]

    os.makedirs(os.path.dirname(output_csv_file), exist_ok=True)
    filtered_df.to_csv(output_csv_file, index=False)

    print(f"Filtered dataset size: {len(filtered_df)}")
    print(f"Filtered CSV saved to: {output_csv_file}")

    return len(filtered_df)
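
# Minimal standalone usage sketch. The paths below are hypothetical placeholders,
# not the project's real locations:
#
#   remaining = create_filtered_csv(
#       csv_file="/data/example/SpatialVID_HQ_step2.csv",
#       output_latent_folder="/data/example/latents_stride1",
#       output_csv_file="/data/example/SpatialVID_HQ_step2_filtered.csv",
#   )
#   print(f"{remaining} samples still need latents")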


def create_all_filtered_csvs():
    """Create filtered CSV files for all datasets."""
    base_csv_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final/"
    base_output_latent_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final"

    csv_paths = [
        "data/SpatialVID_HQ_step2.csv",
    ]
    output_latent_paths = [
        "latents_stride1",
    ]

    for csv_path, output_latent_path in zip(csv_paths, output_latent_paths):
        original_csv = os.path.join(base_csv_path, csv_path)
        output_latent_folder = os.path.join(base_output_latent_path, output_latent_path)

        # The filtered CSV is written next to the original, with a "_filtered" suffix.
        filtered_csv_name = csv_path.replace('.csv', '_filtered.csv')
        filtered_csv_path = os.path.join(base_csv_path, filtered_csv_name)

        print(f"\nProcessing: {csv_path}")

        filtered_count = create_filtered_csv(
            csv_file=original_csv,
            output_latent_folder=output_latent_folder,
            output_csv_file=filtered_csv_path,
        )

        print(f"Created filtered CSV: {filtered_csv_path} with {filtered_count} samples")


def main():
    parser = argparse.ArgumentParser(description="Create filtered CSV for processing")
    parser.add_argument("--batch", action="store_true", help="Process all datasets in batch")
    args = parser.parse_args()
    # Batch processing is currently the only mode, so it runs regardless of --batch.
    create_all_filtered_csvs()
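
# Example invocation (the script filename here is hypothetical):
#
#   python create_filtered_csv.py --batch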


if __name__ == "__main__":
    main()