diff --git a/.gitattributes b/.gitattributes
index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..c32722d368cd815d184afd25a892409b586567b7 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -57,3 +57,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sd3_text_encoder.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sd_unet.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_unet.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/svd_unet.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/hunyuan_video/tokenizer_2/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/kolors/tokenizer/vocab.txt filter=lfs diff=lfs merge=lfs -text
+exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.1-T2V-1.3B/google/umt5-xxl/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/google/umt5-xxl/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/wav2vec2-large-xlsr-53-english/language_model/lm.binary filter=lfs diff=lfs merge=lfs -text
+exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/hv_train_network.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
+exp_code/1_benchmark/pa_vdm/outputs/wandb/wandb/offline-run-20250718_161437-mq58lm4k/run-mq58lm4k.wandb filter=lfs diff=lfs merge=lfs -text
+exp_code/1_benchmark/pa_vdm/outputs/wandb/wandb/offline-run-20250721_133358-bb8mn98e/run-bb8mn98e.wandb filter=lfs diff=lfs merge=lfs -text
diff --git a/exp_code/1_benchmark/ALG/assets/boat.png b/exp_code/1_benchmark/ALG/assets/boat.png
new file mode 100644
index 0000000000000000000000000000000000000000..2b2461fb1f2a65386efb06530dc7c376cee8c9d3
--- /dev/null
+++ b/exp_code/1_benchmark/ALG/assets/boat.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f272f82ace0ccca3af656eda32a86c67d64fc139b0bdf2179bc0edfa7480d85e
+size 609165
diff --git a/exp_code/1_benchmark/ALG/assets/city.png b/exp_code/1_benchmark/ALG/assets/city.png
new file mode 100644
index 0000000000000000000000000000000000000000..52df01ca6f4c3cd035b2954ab5463039aeaf88b9
--- /dev/null
+++ b/exp_code/1_benchmark/ALG/assets/city.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:228f393789fe1fb361b67091133901c28d31e6e114fac2ec824e9521348d5f9c
+size 531659
diff --git a/exp_code/1_benchmark/ALG/assets/helicopter.png b/exp_code/1_benchmark/ALG/assets/helicopter.png
new file mode 100644
index 0000000000000000000000000000000000000000..af1bdbae4fca0455e9a0e71d286caf878e8b814a
--- /dev/null
+++ b/exp_code/1_benchmark/ALG/assets/helicopter.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8aec534b5e8e87b3640da8db22b15158c5c4f510b2501030279d81d9d47eb6a2
+size 421517
diff --git a/exp_code/1_benchmark/ALG/assets/snowboard.png b/exp_code/1_benchmark/ALG/assets/snowboard.png
new file mode 100644
index 0000000000000000000000000000000000000000..94daa202287bc5480214b1bc8953caadacf89bbb
--- /dev/null
+++ b/exp_code/1_benchmark/ALG/assets/snowboard.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1 +oid sha256:d6646c91fb26981cc46c9265b1674f939792acd2e0cb7b767d516d76b39ac1dd +size 343141 diff --git a/exp_code/1_benchmark/ALG/assets/tennis.png b/exp_code/1_benchmark/ALG/assets/tennis.png new file mode 100644 index 0000000000000000000000000000000000000000..34490d084fc682d89e86ee9f81a8b3fcf1717b78 --- /dev/null +++ b/exp_code/1_benchmark/ALG/assets/tennis.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f56a10a93e58e37891c653e7e0dc1fbfded643f5a8113eb2bac6db2c8803f51 +size 432618 diff --git a/exp_code/1_benchmark/ALG/city_alg.mp4 b/exp_code/1_benchmark/ALG/city_alg.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..311fd1222e3e2716aa47dc6fae9e3e978c07b484 --- /dev/null +++ b/exp_code/1_benchmark/ALG/city_alg.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eeac14e4a69cc0de55c7143b622450be80f2ee63cab0b637bdbead19db26fdcd +size 304382 diff --git a/exp_code/1_benchmark/DiffSynth-Studio/.github/workflows/logo.gif b/exp_code/1_benchmark/DiffSynth-Studio/.github/workflows/logo.gif new file mode 100644 index 0000000000000000000000000000000000000000..ef5717efc17bbb2018a37a530a9d7a09e86277d9 --- /dev/null +++ b/exp_code/1_benchmark/DiffSynth-Studio/.github/workflows/logo.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36a7627b7f0f0a508ec64aba72e5d95d38dfe7958bd8cf42d2a63f6ac2641529 +size 149067 diff --git a/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sd3_text_encoder.cpython-311.pyc b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sd3_text_encoder.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd532f5e2d13c8be18631493065922ace0a1f833 --- /dev/null +++ b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sd3_text_encoder.cpython-311.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a5a5052cdef651e24a6a2a4241599f3f2a962452138beba97b2b6dd01b56d0f +size 115615 diff --git a/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sd_unet.cpython-311.pyc b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sd_unet.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9d333b471bb231b52b2f5d4cd8df14d42ea17e6 --- /dev/null +++ b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sd_unet.cpython-311.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71abc382b1eb34758070c9aa51006951a206d31cd268895ab98d0aefea585723 +size 117023 diff --git a/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_unet.cpython-311.pyc b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_unet.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be8d532e38a69b2e9fa0c6f5193d53e77e573e0f --- /dev/null +++ b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/sdxl_unet.cpython-311.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25870ec9d9bbb066b338d371ee503cc298aaf5c33194aa0fd05ab5a698617a5d +size 262732 diff --git a/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/svd_unet.cpython-311.pyc b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/svd_unet.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4d6c6fbc50a309da8260062e35f86b18db8c774 --- /dev/null +++ 
b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/models/__pycache__/svd_unet.cpython-311.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0658a31994594dbbb536a0fb90a436457532d93fc6a20bf8bbac5d3f275d4580 +size 232307 diff --git a/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/cog/tokenizer/spiece.model b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/cog/tokenizer/spiece.model new file mode 100644 index 0000000000000000000000000000000000000000..317a5ccbde45300f5d1d970d4d449af2108b147e --- /dev/null +++ b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/cog/tokenizer/spiece.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86 +size 791656 diff --git a/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/flux/tokenizer_2/spiece.model b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/flux/tokenizer_2/spiece.model new file mode 100644 index 0000000000000000000000000000000000000000..317a5ccbde45300f5d1d970d4d449af2108b147e --- /dev/null +++ b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/flux/tokenizer_2/spiece.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86 +size 791656 diff --git a/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/spiece.model b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/spiece.model new file mode 100644 index 0000000000000000000000000000000000000000..e417801865fd66bd40f9d45d46b6d0d0c2aa36b6 --- /dev/null +++ b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/spiece.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6 +size 4309802 diff --git a/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/hunyuan_video/tokenizer_2/tokenizer.json b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/hunyuan_video/tokenizer_2/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..b1099b4b107cba42aa962f7f40693409b3d6add2 --- /dev/null +++ b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/hunyuan_video/tokenizer_2/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2c593db4aa75b17a42c1f74d7cc38e257eaeed222e6a52674c65544165dcbaa +size 17210098 diff --git a/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/kolors/tokenizer/tokenizer.model b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/kolors/tokenizer/tokenizer.model new file mode 100644 index 0000000000000000000000000000000000000000..8a8007697b7cc3d3868dcffbbebf8c1f2bd690ba --- /dev/null +++ b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/kolors/tokenizer/tokenizer.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2 +size 1018370 diff --git a/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/kolors/tokenizer/vocab.txt b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/kolors/tokenizer/vocab.txt new file mode 100644 index 0000000000000000000000000000000000000000..8a8007697b7cc3d3868dcffbbebf8c1f2bd690ba --- /dev/null +++ 
b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/kolors/tokenizer/vocab.txt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2 +size 1018370 diff --git a/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_3/spiece.model b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_3/spiece.model new file mode 100644 index 0000000000000000000000000000000000000000..317a5ccbde45300f5d1d970d4d449af2108b147e --- /dev/null +++ b/exp_code/1_benchmark/DiffSynth-Studio/diffsynth/tokenizer_configs/stable_diffusion_3/tokenizer_3/spiece.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86 +size 791656 diff --git a/exp_code/1_benchmark/Flux-dev/flux-dev_160.png b/exp_code/1_benchmark/Flux-dev/flux-dev_160.png new file mode 100644 index 0000000000000000000000000000000000000000..303fc8619a2b8696b3818edbf77287d9f71b56d9 --- /dev/null +++ b/exp_code/1_benchmark/Flux-dev/flux-dev_160.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e0fd9b9e99dea166a0ed93b7240fdb337fe714a3d04b3bac08f72b173fd30e3 +size 23123 diff --git a/exp_code/1_benchmark/Flux-dev/flux-dev_320.png b/exp_code/1_benchmark/Flux-dev/flux-dev_320.png new file mode 100644 index 0000000000000000000000000000000000000000..7663ddb773b8dd11db6f4e02591569b5969a9ce2 --- /dev/null +++ b/exp_code/1_benchmark/Flux-dev/flux-dev_320.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9176da40513b207264791a52b1635febf512f5cb4c9ce0e0b6c95d3cdf81df1 +size 79830 diff --git a/exp_code/1_benchmark/Flux-dev/flux-dev_640.png b/exp_code/1_benchmark/Flux-dev/flux-dev_640.png new file mode 100644 index 0000000000000000000000000000000000000000..f21eb09c48620e271ac3967a01578f9de8759504 --- /dev/null +++ b/exp_code/1_benchmark/Flux-dev/flux-dev_640.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e33d5a7be182757ecab47e0bc36b50035cdddd9857a6dfc26cb412d1e0cf2c29 +size 251686 diff --git a/exp_code/1_benchmark/FramePack_diffusers/flf2v_input_first_frame.png b/exp_code/1_benchmark/FramePack_diffusers/flf2v_input_first_frame.png new file mode 100644 index 0000000000000000000000000000000000000000..9e1a27331c48d6e69d78bf6d12c0a19a18a793d5 --- /dev/null +++ b/exp_code/1_benchmark/FramePack_diffusers/flf2v_input_first_frame.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc27f36ae244c3366d6d342c5fc78a959bfeddc7c6ddfb62fc255c18cdac977f +size 1782362 diff --git a/exp_code/1_benchmark/FramePack_diffusers/flf2v_input_last_frame.png b/exp_code/1_benchmark/FramePack_diffusers/flf2v_input_last_frame.png new file mode 100644 index 0000000000000000000000000000000000000000..3f8b26ae73f272a50cfd5893f5176722a1dbd615 --- /dev/null +++ b/exp_code/1_benchmark/FramePack_diffusers/flf2v_input_last_frame.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:981d904fda073c87225089971b4bbff6f15d1f2511a111903989f391c526820f +size 1460632 diff --git a/exp_code/1_benchmark/FramePack_diffusers/output.mp4 b/exp_code/1_benchmark/FramePack_diffusers/output.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..85cf96578a22f774a4e8ddf42ed2a46fa08650e7 --- /dev/null +++ b/exp_code/1_benchmark/FramePack_diffusers/output.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:0c648e5104b73221b6070fabbeef31e0f1b42a28585c82faaa1548a6ede86193 +size 325685 diff --git a/exp_code/1_benchmark/FramePack_diffusers/penguin.png b/exp_code/1_benchmark/FramePack_diffusers/penguin.png new file mode 100644 index 0000000000000000000000000000000000000000..e990427e365726ba81bc3b62530ead4c41f31a71 --- /dev/null +++ b/exp_code/1_benchmark/FramePack_diffusers/penguin.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f416333e578b2238bd81a5bfcdd48d03f0f6ea04e4c532caa7ff4b656f08025b +size 613127 diff --git a/exp_code/1_benchmark/Pyramid-Flow/assets/motivation.jpg b/exp_code/1_benchmark/Pyramid-Flow/assets/motivation.jpg new file mode 100644 index 0000000000000000000000000000000000000000..675ea76cfb6d953b072101f79adb533272c18a63 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/assets/motivation.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcfbf38e61d3586dd4a30da3f4ab12c303be6fbbf415724228a19f5996338f97 +size 141345 diff --git a/exp_code/1_benchmark/Pyramid-Flow/assets/the_great_wall.jpg b/exp_code/1_benchmark/Pyramid-Flow/assets/the_great_wall.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46cde605e8f6bdd2465b86953abff818312deaa1 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/assets/the_great_wall.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:476a71026838e063c1151f138b38b31fd2ee5e230b7f62fd596acc5ce1cc4a48 +size 351344 diff --git a/exp_code/1_benchmark/Pyramid-Flow/assets/user_study.jpg b/exp_code/1_benchmark/Pyramid-Flow/assets/user_study.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2d677e730e5561c96e589f770f1eb8004de90195 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/assets/user_study.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98d737ae85b989ee9ffd210f4b3fa421080785f521c3b21398f00d609ecdef48 +size 131591 diff --git a/exp_code/1_benchmark/Pyramid-Flow/assets/vbench.jpg b/exp_code/1_benchmark/Pyramid-Flow/assets/vbench.jpg new file mode 100644 index 0000000000000000000000000000000000000000..483c3fb6f62329ffc3854b22c90acdcdc38e19b4 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/assets/vbench.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40ce9235033300759aaa92533eea3ab04ae940b5e0fdb3f65f2904aa9217dcdb +size 230216 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752302868.0289106/events.out.tfevents.1752302868.dsw-222941-6d784d977c-6pnsc.822589.1 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752302868.0289106/events.out.tfevents.1752302868.dsw-222941-6d784d977c-6pnsc.822589.1 new file mode 100644 index 0000000000000000000000000000000000000000..22b5c419ccd725fcb153446d7cec62461810eef6 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752302868.0289106/events.out.tfevents.1752302868.dsw-222941-6d784d977c-6pnsc.822589.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e770358c0601baf65a77fa28a228a125c32e0d500ed8be15a369e862d5110e0 +size 3642 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752302960.6678133/events.out.tfevents.1752302960.dsw-222941-6d784d977c-6pnsc.828258.1 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752302960.6678133/events.out.tfevents.1752302960.dsw-222941-6d784d977c-6pnsc.828258.1 new file mode 100644 index 0000000000000000000000000000000000000000..ee606234bdeb390c18484e84c98a3004e322b7d9 --- 
/dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752302960.6678133/events.out.tfevents.1752302960.dsw-222941-6d784d977c-6pnsc.828258.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ad01292807b360d98acab3bff510c9eef6201928b6ee36bb2206fa47cf40a6a +size 3642 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303059.8008823/events.out.tfevents.1752303059.dsw-222941-6d784d977c-6pnsc.833899.1 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303059.8008823/events.out.tfevents.1752303059.dsw-222941-6d784d977c-6pnsc.833899.1 new file mode 100644 index 0000000000000000000000000000000000000000..af05e0a7f1e6e71e5fc477035353fbe60c049aeb --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303059.8008823/events.out.tfevents.1752303059.dsw-222941-6d784d977c-6pnsc.833899.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c12de5542ad487743c4e9d57876b34036e64d5e670a2ec77a920bc1199dd6abb +size 3642 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303272.9302385/events.out.tfevents.1752303272.dsw-222941-6d784d977c-6pnsc.850586.1 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303272.9302385/events.out.tfevents.1752303272.dsw-222941-6d784d977c-6pnsc.850586.1 new file mode 100644 index 0000000000000000000000000000000000000000..7cfb54641d9b0e243bd414e86d8579ca76d564eb --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303272.9302385/events.out.tfevents.1752303272.dsw-222941-6d784d977c-6pnsc.850586.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c0c0700efbd4219c23bf2a7c4bb5711498d91b25732e8430fbe6cee5c3ff1d4 +size 3642 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303415.7278402/events.out.tfevents.1752303415.dsw-222941-6d784d977c-6pnsc.856217.1 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303415.7278402/events.out.tfevents.1752303415.dsw-222941-6d784d977c-6pnsc.856217.1 new file mode 100644 index 0000000000000000000000000000000000000000..1f8874c54c1e4311130573b2f0adbfe8bb791e61 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303415.7278402/events.out.tfevents.1752303415.dsw-222941-6d784d977c-6pnsc.856217.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2489523c4f35c6713dd886b6233901d790b7b08d511f9aa6a5e93d08f31ff276 +size 3642 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303771.4686987/events.out.tfevents.1752303771.dsw-222941-6d784d977c-6pnsc.884225.1 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303771.4686987/events.out.tfevents.1752303771.dsw-222941-6d784d977c-6pnsc.884225.1 new file mode 100644 index 0000000000000000000000000000000000000000..aad8c68ce767e25a085d1a380d1be2efe821e7da --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303771.4686987/events.out.tfevents.1752303771.dsw-222941-6d784d977c-6pnsc.884225.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:698c3279d4545bf9997e71facf17f60e100bcf9951bea3d37076e6bb2ca79837 +size 3642 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303843.0167975/events.out.tfevents.1752303843.dsw-222941-6d784d977c-6pnsc.890043.1 
b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303843.0167975/events.out.tfevents.1752303843.dsw-222941-6d784d977c-6pnsc.890043.1 new file mode 100644 index 0000000000000000000000000000000000000000..6d071d9a55ec96023ac5fcdfb7fa3cd0ffd62e5e --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752303843.0167975/events.out.tfevents.1752303843.dsw-222941-6d784d977c-6pnsc.890043.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4345f8fac87ff8fd68ced907d8589ad395af4a84edaef5fea5c649b61249b44 +size 3642 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752304010.1700637/events.out.tfevents.1752304010.dsw-222941-6d784d977c-6pnsc.896592.1 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752304010.1700637/events.out.tfevents.1752304010.dsw-222941-6d784d977c-6pnsc.896592.1 new file mode 100644 index 0000000000000000000000000000000000000000..432b3b2f7bbb18238edc4b2afa072f33c4a5ec20 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752304010.1700637/events.out.tfevents.1752304010.dsw-222941-6d784d977c-6pnsc.896592.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc0464f14abf14f0d8de6d81bbef39e72569bc2db90f586b4eac8151178780b9 +size 3642 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752304288.3765576/events.out.tfevents.1752304288.dsw-222941-6d784d977c-6pnsc.916677.1 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752304288.3765576/events.out.tfevents.1752304288.dsw-222941-6d784d977c-6pnsc.916677.1 new file mode 100644 index 0000000000000000000000000000000000000000..554f08c12297c6335e330216f217a50ac1297bdb --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752304288.3765576/events.out.tfevents.1752304288.dsw-222941-6d784d977c-6pnsc.916677.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7620d328f916586c7e1c5f356d7e9801a254cc321fdd482ab12f9cedd2da6060 +size 3642 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752305122.8399298/events.out.tfevents.1752305122.dsw-222941-6d784d977c-6pnsc.918240.1 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752305122.8399298/events.out.tfevents.1752305122.dsw-222941-6d784d977c-6pnsc.918240.1 new file mode 100644 index 0000000000000000000000000000000000000000..03c09b59db9132432788a167f3ba9506bcb2d5a2 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752305122.8399298/events.out.tfevents.1752305122.dsw-222941-6d784d977c-6pnsc.918240.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3d6dc6c8f3407cd8e662ca0b28badd2630a501f8d3f39bad7476bec366f1af3 +size 3642 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752308532.94589/events.out.tfevents.1752308532.dsw-222941-6d784d977c-6pnsc.920437.1 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752308532.94589/events.out.tfevents.1752308532.dsw-222941-6d784d977c-6pnsc.920437.1 new file mode 100644 index 0000000000000000000000000000000000000000..5c06430a6e8b53174401f81d335286b4b4ce42fa --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752308532.94589/events.out.tfevents.1752308532.dsw-222941-6d784d977c-6pnsc.920437.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e630e55066c2ffadfcceb44ff033564e237fb16096a47cc047ba7dcb7489d10 +size 3642 diff --git 
a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752309552.6219614/events.out.tfevents.1752309552.dsw-222941-6d784d977c-6pnsc.921366.1 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752309552.6219614/events.out.tfevents.1752309552.dsw-222941-6d784d977c-6pnsc.921366.1 new file mode 100644 index 0000000000000000000000000000000000000000..c79950289d48cd7e4d84ca3304231e4469fbb3c3 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/1752309552.6219614/events.out.tfevents.1752309552.dsw-222941-6d784d977c-6pnsc.921366.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60c0414fe51fa3bd0c49fdea4bc9fdccb6ae7ea42f9158dc855b1e91100e7f1b +size 3642 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752302867.dsw-222941-6d784d977c-6pnsc.822589.0 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752302867.dsw-222941-6d784d977c-6pnsc.822589.0 new file mode 100644 index 0000000000000000000000000000000000000000..891c60261cf14789c22332877392899359a6ac8e --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752302867.dsw-222941-6d784d977c-6pnsc.822589.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b9ab63b20cdd9d40126546c0a813b6929f22607ba3ec6c99f8b2c7318fd3504 +size 88 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752302960.dsw-222941-6d784d977c-6pnsc.828258.0 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752302960.dsw-222941-6d784d977c-6pnsc.828258.0 new file mode 100644 index 0000000000000000000000000000000000000000..be563e20516f57f9e4ed3476ccff95ae8fe47274 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752302960.dsw-222941-6d784d977c-6pnsc.828258.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:535ebe031dddb0533e7f99d4929f88a81f729c1063d656c41f3b475e62a49a1e +size 88 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303059.dsw-222941-6d784d977c-6pnsc.833899.0 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303059.dsw-222941-6d784d977c-6pnsc.833899.0 new file mode 100644 index 0000000000000000000000000000000000000000..1c76732b389b4483b809f2f41d9713562fece88d --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303059.dsw-222941-6d784d977c-6pnsc.833899.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfaeb8dd7111c76fc8fa234aa8f0990842d96a8c1507325b5b95c2ede29b3622 +size 88 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303272.dsw-222941-6d784d977c-6pnsc.850586.0 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303272.dsw-222941-6d784d977c-6pnsc.850586.0 new file mode 100644 index 0000000000000000000000000000000000000000..6b947e8e6ead62c819f1a2f3614bb9a5bac4c456 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303272.dsw-222941-6d784d977c-6pnsc.850586.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a5460b13e3468adde13fd9458b4fc4f67b8fb403800d306790f3d7ef7404adf +size 88 diff --git 
a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303415.dsw-222941-6d784d977c-6pnsc.856217.0 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303415.dsw-222941-6d784d977c-6pnsc.856217.0 new file mode 100644 index 0000000000000000000000000000000000000000..9e1e84778059e2550c48a8013cda76f78c34b1f3 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303415.dsw-222941-6d784d977c-6pnsc.856217.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d00862680364d45b78d8e6075c04c70be8b76f1b673ad5a0ff3403d974d9ab10 +size 2296 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303771.dsw-222941-6d784d977c-6pnsc.884225.0 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303771.dsw-222941-6d784d977c-6pnsc.884225.0 new file mode 100644 index 0000000000000000000000000000000000000000..23994c84faf191551778519479441ee8953148a1 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303771.dsw-222941-6d784d977c-6pnsc.884225.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b4cb12eaccc2fdfb4f4a680c3f43ba703d6d09146431cbd71d9126c671d427c +size 88 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303842.dsw-222941-6d784d977c-6pnsc.890043.0 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303842.dsw-222941-6d784d977c-6pnsc.890043.0 new file mode 100644 index 0000000000000000000000000000000000000000..9e47f081a4d7a9ad7ffe28d0904adfe9322ca8d7 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752303842.dsw-222941-6d784d977c-6pnsc.890043.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cc3e404c1015b53ab3612c896ca895f9fa3552eae6be652ad7d01d445053f1b +size 88 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752304010.dsw-222941-6d784d977c-6pnsc.896592.0 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752304010.dsw-222941-6d784d977c-6pnsc.896592.0 new file mode 100644 index 0000000000000000000000000000000000000000..df3f1cbb29fec761a605a0027120361f1c1f0cf6 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752304010.dsw-222941-6d784d977c-6pnsc.896592.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87310395156d20a7d05bf6edf51caff44fcf78319a4e85576686d10aa8bf6817 +size 3064 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752304288.dsw-222941-6d784d977c-6pnsc.916677.0 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752304288.dsw-222941-6d784d977c-6pnsc.916677.0 new file mode 100644 index 0000000000000000000000000000000000000000..f84c362a6394a9a4364c2781faf025fce1bf470b --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752304288.dsw-222941-6d784d977c-6pnsc.916677.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad3fb4c8e87319ba713f6be93209df7bb8bd5d00e9601ca6e9023e21d734afb5 +size 88 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752305122.dsw-222941-6d784d977c-6pnsc.918240.0 
b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752305122.dsw-222941-6d784d977c-6pnsc.918240.0 new file mode 100644 index 0000000000000000000000000000000000000000..2486d39c9316bfc2cd109eb470795bef80de21af --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752305122.dsw-222941-6d784d977c-6pnsc.918240.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a66c69c3edd8118cd6d649dca2bef5c0dc642034dc3b26777595624bfc3ba35 +size 88 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752308532.dsw-222941-6d784d977c-6pnsc.920437.0 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752308532.dsw-222941-6d784d977c-6pnsc.920437.0 new file mode 100644 index 0000000000000000000000000000000000000000..140afba429c70d28c33991cd4e72674125a9d860 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752308532.dsw-222941-6d784d977c-6pnsc.920437.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:badc83f6ca3942911ab6cdb9b519d92cd3494733f0cce82e2181408b3d4d625f +size 88 diff --git a/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752309552.dsw-222941-6d784d977c-6pnsc.921366.0 b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752309552.dsw-222941-6d784d977c-6pnsc.921366.0 new file mode 100644 index 0000000000000000000000000000000000000000..b727c566d59ffa03591e1d144680316185d1ab37 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/output_dir/log/output_dir/events.out.tfevents.1752309552.dsw-222941-6d784d977c-6pnsc.921366.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec9aee7e08f059eeb3a5dc337cf53ceb5d4c7f251d2ff6f874595d9abbec92a8 +size 88 diff --git a/exp_code/1_benchmark/Pyramid-Flow/text_to_video_sample.mp4 b/exp_code/1_benchmark/Pyramid-Flow/text_to_video_sample.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..9816473d2de761d7d4029cf3c401a811a46bc188 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/text_to_video_sample.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69750f3396f42f645ce8e4c719da19da3b209abd7cc966c3aafc9dc1a4540cc9 +size 232167 diff --git a/exp_code/1_benchmark/Pyramid-Flow/text_to_video_sample_1.mp4 b/exp_code/1_benchmark/Pyramid-Flow/text_to_video_sample_1.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..162cf11db074a67b3283a0ded9880a148f6f1224 --- /dev/null +++ b/exp_code/1_benchmark/Pyramid-Flow/text_to_video_sample_1.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f284ed45cf3f79a86d9a12c7dfd13141fad2e9e64d384b6a823176e801aa0e02 +size 302255 diff --git a/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A beautifully detailed papercraft illustration of a vibrant coral reef teeming with colorful fish an-0.mp4 b/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A beautifully detailed papercraft illustration of a vibrant coral reef teeming with colorful fish an-0.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..88dd7471b4c07b6ae4b1f4acc9e2586bad4e43e8 --- /dev/null +++ b/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A beautifully detailed papercraft illustration of a vibrant coral reef teeming with colorful fish an-0.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:51c2a8c061ab995bdca98a08ffd4a2bedf615609cf2a93b6b6a394dbe4cfa772 +size 1490718 diff --git a/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A close-up 3D animated scene of a short, fluffy monster kneeling beside a melting red candle. The mo-0.mp4 b/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A close-up 3D animated scene of a short, fluffy monster kneeling beside a melting red candle. The mo-0.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..c17156cabf3733ba084e14576d9e680bdca9a402 --- /dev/null +++ b/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A close-up 3D animated scene of a short, fluffy monster kneeling beside a melting red candle. The mo-0.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd4d8280b0ba6d3612fe176c994d744ef04c0441865edc479bccfe2b804cf7f3 +size 424558 diff --git "a/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A drone view of waves crashing against the rugged cliffs along Big Sur\342\200\231s Garay Point beach. The cras-0.mp4" "b/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A drone view of waves crashing against the rugged cliffs along Big Sur\342\200\231s Garay Point beach. The cras-0.mp4" new file mode 100644 index 0000000000000000000000000000000000000000..624d9651f9674764119ab159fbb8ddae762a2593 --- /dev/null +++ "b/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A drone view of waves crashing against the rugged cliffs along Big Sur\342\200\231s Garay Point beach. The cras-0.mp4" @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee9b0f97b2c3ed978c6cda3639a59e79df82395a027e32c107bb8dae0633971d +size 1262901 diff --git a/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A movie trailer in a classic cinematic style, featuring the adventurous journey of a 30-year-old spa-0.mp4 b/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A movie trailer in a classic cinematic style, featuring the adventurous journey of a 30-year-old spa-0.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..1d45b268204da5746974106077394e35c8c54d9e --- /dev/null +++ b/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A movie trailer in a classic cinematic style, featuring the adventurous journey of a 30-year-old spa-0.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a174baf83e4e068cc5095e5709ec67edea7e1a09670460c13af2422a84eb0e44 +size 817870 diff --git a/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A stunning mid-afternoon landscape photograph with a low camera angle, showcasing several giant wool-0.mp4 b/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A stunning mid-afternoon landscape photograph with a low camera angle, showcasing several giant wool-0.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..952d77a67ef534ffce38fde033a66124e26cd9f3 --- /dev/null +++ b/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A stunning mid-afternoon landscape photograph with a low camera angle, showcasing several giant wool-0.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73179ea6d28b30980794a4b755f5eb9617090683f22e88baffdfb5b4c782763a +size 1002401 diff --git a/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A stylish woman strolls down a bustling Tokyo street, the warm glow of neon lights and animated city-0.mp4 b/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A stylish woman strolls down 
a bustling Tokyo street, the warm glow of neon lights and animated city-0.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..b13e25828b59efff01128dd5280cc6db2142f4e1 --- /dev/null +++ b/exp_code/1_benchmark/Self-Forcing/videos/self_forcing_dmd/A stylish woman strolls down a bustling Tokyo street, the warm glow of neon lights and animated city-0.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0caba2149971b14bd83b130e5b3d09a53a10b37fb34d775e2a5d9b05f1423bf +size 824671 diff --git a/exp_code/1_benchmark/SkyReels-V2/assets/logo2.png b/exp_code/1_benchmark/SkyReels-V2/assets/logo2.png new file mode 100644 index 0000000000000000000000000000000000000000..9a7c90cca973f345fece5d250e7faeec52d67c58 --- /dev/null +++ b/exp_code/1_benchmark/SkyReels-V2/assets/logo2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c3dcf6c144f8ed79b194ec902a39889f4e619537a35f5cff21b69b0e8f3dce2 +size 23490 diff --git a/exp_code/1_benchmark/SkyReels-V2/assets/main_pipeline.jpg b/exp_code/1_benchmark/SkyReels-V2/assets/main_pipeline.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e6fbca5400e03c115a8357a333f3472462fd3205 --- /dev/null +++ b/exp_code/1_benchmark/SkyReels-V2/assets/main_pipeline.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8fd982dd51a3edd0a1ce451b391526c1a51ea94ae68f5ed79380173dbcce7fb +size 183244 diff --git a/exp_code/1_benchmark/SkyReels-V2/skycaptioner_v1/examples/data/1.mp4 b/exp_code/1_benchmark/SkyReels-V2/skycaptioner_v1/examples/data/1.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..8dbcee6568d95d6664f35e20fcca7dda3006b9d7 --- /dev/null +++ b/exp_code/1_benchmark/SkyReels-V2/skycaptioner_v1/examples/data/1.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16becc00811e427d9e3f5da5a977239907ea3a6d45b5482b9bcfea2abc3c6b7f +size 4821469 diff --git a/exp_code/1_benchmark/SkyReels-V2/skycaptioner_v1/examples/data/2.mp4 b/exp_code/1_benchmark/SkyReels-V2/skycaptioner_v1/examples/data/2.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..0b8f73f4324da32c3231aa8e6d09713c9b993b42 --- /dev/null +++ b/exp_code/1_benchmark/SkyReels-V2/skycaptioner_v1/examples/data/2.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:109b954b7b1e36addef840554af53241415492e71972c87b7bfb03f77bf0d68a +size 1030652 diff --git a/exp_code/1_benchmark/SkyReels-V2/skycaptioner_v1/examples/data/3.mp4 b/exp_code/1_benchmark/SkyReels-V2/skycaptioner_v1/examples/data/3.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..8cd2be162a8470f919932bf95da5672df0faad16 --- /dev/null +++ b/exp_code/1_benchmark/SkyReels-V2/skycaptioner_v1/examples/data/3.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0fd544787b24265605cf16c954f495fe9d73680e0529f1438e47c02803a9c2bf +size 1661366 diff --git a/exp_code/1_benchmark/SkyReels-V2/skycaptioner_v1/examples/data/4.mp4 b/exp_code/1_benchmark/SkyReels-V2/skycaptioner_v1/examples/data/4.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..85d95205fbb43009a4eb776307e818fb9b286eae --- /dev/null +++ b/exp_code/1_benchmark/SkyReels-V2/skycaptioner_v1/examples/data/4.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5cc4038924aae963a54779d713f7f9680259ae81a4446fb2775cc6bb51d907c +size 2523332 diff --git a/exp_code/1_benchmark/Wan-S2V/data/example_video_dataset/wans2v/pose.mp4 
b/exp_code/1_benchmark/Wan-S2V/data/example_video_dataset/wans2v/pose.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..992e268e302220e3fd48d82f3cac660b4fd3fa4a --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/data/example_video_dataset/wans2v/pose.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:467b541d19625fdc42bf47f9d4db2d02cf0579f4e3a9233c543b2117dbed8a8e +size 2192979 diff --git a/exp_code/1_benchmark/Wan-S2V/data/example_video_dataset/wans2v/pose.png b/exp_code/1_benchmark/Wan-S2V/data/example_video_dataset/wans2v/pose.png new file mode 100644 index 0000000000000000000000000000000000000000..0862eafcd7d4b864322ab39c83759cc50d1d6223 --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/data/example_video_dataset/wans2v/pose.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53a5d9b435adaf15dd8ffcab1a833b61da4a63079200fb9cec33127ee10f733b +size 822774 diff --git a/exp_code/1_benchmark/Wan-S2V/data/example_video_dataset/wans2v/sing.MP3 b/exp_code/1_benchmark/Wan-S2V/data/example_video_dataset/wans2v/sing.MP3 new file mode 100644 index 0000000000000000000000000000000000000000..7625679a1da8f5d2d46eacec1940e341e1b94226 --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/data/example_video_dataset/wans2v/sing.MP3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:520217a826cd078a61ff1eac7a3f8dfa55ade170d07a977d86d9bcb049d7fa59 +size 300144 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.1-T2V-1.3B/._____temp/models_t5_umt5-xxl-enc-bf16.pth b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.1-T2V-1.3B/._____temp/models_t5_umt5-xxl-enc-bf16.pth new file mode 100644 index 0000000000000000000000000000000000000000..94edcb0a0a479235d9af6f9c66992f39b61856fc --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.1-T2V-1.3B/._____temp/models_t5_umt5-xxl-enc-bf16.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8ba9d3cf648cb488cef925fec240f2b0a215b2e74ff6e2fa2036e0812c85764 +size 385875968 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.1-T2V-1.3B/google/umt5-xxl/spiece.model b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.1-T2V-1.3B/google/umt5-xxl/spiece.model new file mode 100644 index 0000000000000000000000000000000000000000..2fe5f347e9f9367585589ae89e997dfbd5cf802c --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.1-T2V-1.3B/google/umt5-xxl/spiece.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3909a67b780650b35cf529ac782ad2b6b26e6d1f849d3fbb6a872905f452458 +size 4548313 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.1-T2V-1.3B/google/umt5-xxl/tokenizer.json b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.1-T2V-1.3B/google/umt5-xxl/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..7bd4ee3cf5633e49c0e06a3aac2a621559e6222a --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.1-T2V-1.3B/google/umt5-xxl/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e197b4d3dbd71da14b4eb255f4fa91c9c1f2068b20a2de2472967ca3d22602b +size 16837417 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/Wan2.1_VAE.pth b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/Wan2.1_VAE.pth new file mode 100644 index 0000000000000000000000000000000000000000..5897fba405232a6b07a947d6188d19a8e050ccfb --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/Wan2.1_VAE.pth @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38071ab59bd94681c686fa51d75a1968f64e470262043be31f7a094e442fd981 +size 507609880 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/471504690-b63bfa58-d5d7-4de6-a1a2-98970b06d9a7.mp4 b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/471504690-b63bfa58-d5d7-4de6-a1a2-98970b06d9a7.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..938a5b8c7b180b31031e248d28f86a220a9723f8 --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/471504690-b63bfa58-d5d7-4de6-a1a2-98970b06d9a7.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f33a16fd16b23b108fd585b7ca9463987543a5685b46272c1b534bd8fc4a14de +size 9193286 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/comp_effic.png b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/comp_effic.png new file mode 100644 index 0000000000000000000000000000000000000000..bdc26da30b575b80d78f5972a08811eca9a6c455 --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/comp_effic.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75ee012dcfb08365bec67a3ec7afc126fc2817f79b9f80e38711792d4770e32b +size 202156 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/logo.png b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..14cea40a9ec4c2aa8de3b46806b25d766980d909 --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96cddc0f667293436d0b9f92a299b6346b65b231d38ee49719a33d46c91fe1e3 +size 56322 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/moe_2.png b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/moe_2.png new file mode 100644 index 0000000000000000000000000000000000000000..c788c691cf96bbf9c0598cb440c23935848e8619 --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/moe_2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ea471ccb64349bd08bc9a78f336ae000e9ca3b40da9a652b8028b214a8c6093 +size 527914 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/moe_arch.png b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/moe_arch.png new file mode 100644 index 0000000000000000000000000000000000000000..65521ffd856fea389792460efb02090aa130f03d --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/moe_arch.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:709510625af90bad3f9509c8c9bb6dc779576cd7f80b9bc95d75fe33f2755079 +size 74900 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/performance.png b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/performance.png new file mode 100644 index 0000000000000000000000000000000000000000..ca558e3e33efc415814aeb3b103ae2e2b34ba233 --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/performance.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97ef99c13c8ae717a8a11c8d8ec927b69077c647cc6689755d08fc38e7fbb830 +size 306535 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/vae.png 
b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/vae.png new file mode 100644 index 0000000000000000000000000000000000000000..7d290425cf3f7ab20e2d80bece0d780259b24303 --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/assets/vae.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4aaea5e187f1c5908e15ade5bef24c9fb59882986bc3d2ad75f7fe820f3d772f +size 165486 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/diffusion_pytorch_model-00001-of-00004.safetensors b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/diffusion_pytorch_model-00001-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9df3c69fa8dd2c1dc5879fb7118f1770f5a39d32 --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/diffusion_pytorch_model-00001-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fb54febf10b729a6da7da222625d6ecaedde78becda01efbc13f6bebaeb6d43 +size 9968229352 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/diffusion_pytorch_model-00002-of-00004.safetensors b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/diffusion_pytorch_model-00002-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..cb96fdc871c163273d7c2ff02b2edd227a2e4e1b --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/diffusion_pytorch_model-00002-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:922928094d8ea9b075888d473b7f9bf80ec60852eb83934079141b161a186f01 +size 9891539248 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/diffusion_pytorch_model-00003-of-00004.safetensors b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/diffusion_pytorch_model-00003-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a26abca4f529cd4d2922c9c64ec4d7eee006cc2a --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/diffusion_pytorch_model-00003-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:017fab8d2845976c9555df841765255118552fbad3e6760629342b47c36c24fc +size 9956985634 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/diffusion_pytorch_model-00004-of-00004.safetensors b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/diffusion_pytorch_model-00004-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..7de54691de38d2edca6edf039c51558db62bab80 --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/diffusion_pytorch_model-00004-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c194909066b8d7ed91f2c214389aa7ec7f25c60e3dd10ba8f607daf4f5cc6c0 +size 2774887624 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/google/umt5-xxl/spiece.model b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/google/umt5-xxl/spiece.model new file mode 100644 index 0000000000000000000000000000000000000000..2fe5f347e9f9367585589ae89e997dfbd5cf802c --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/google/umt5-xxl/spiece.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3909a67b780650b35cf529ac782ad2b6b26e6d1f849d3fbb6a872905f452458 +size 4548313 diff --git 
a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/google/umt5-xxl/tokenizer.json b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/google/umt5-xxl/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..7bd4ee3cf5633e49c0e06a3aac2a621559e6222a --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/google/umt5-xxl/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e197b4d3dbd71da14b4eb255f4fa91c9c1f2068b20a2de2472967ca3d22602b +size 16837417 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/models_t5_umt5-xxl-enc-bf16.pth b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/models_t5_umt5-xxl-enc-bf16.pth new file mode 100644 index 0000000000000000000000000000000000000000..d5dad910304ab4b909a2c8a225a71840606e6de4 --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/models_t5_umt5-xxl-enc-bf16.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cace0da2b446bbbbc57d031ab6cf163a3d59b366da94e5afe36745b746fd81d +size 11361920418 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/wav2vec2-large-xlsr-53-english/flax_model.msgpack b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/wav2vec2-large-xlsr-53-english/flax_model.msgpack new file mode 100644 index 0000000000000000000000000000000000000000..553171bf13053bd2b9b0f1170e6e91855e41a74e --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/wav2vec2-large-xlsr-53-english/flax_model.msgpack @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d3842440388a575e19f19cdb05714afd7018392bd1a7e247c601530a653aa40 +size 1261905572 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/wav2vec2-large-xlsr-53-english/language_model/lm.binary b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/wav2vec2-large-xlsr-53-english/language_model/lm.binary new file mode 100644 index 0000000000000000000000000000000000000000..5e5770b89e4c231b0fe6c7e03f87c3f3044c10cb --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/wav2vec2-large-xlsr-53-english/language_model/lm.binary @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47e16abf6384ebd1b3395144330b60710dc43f3d16c4b2b4794071cd117230e5 +size 862913451 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/wav2vec2-large-xlsr-53-english/model.safetensors b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/wav2vec2-large-xlsr-53-english/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..95e0a44b4bdd8239f5ce17b8175664a51b246dcd --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/wav2vec2-large-xlsr-53-english/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6144f8464c6aaa220dd57c5a2ad4039b5710dcf8ee6e67057675f76597c19875 +size 1261942732 diff --git a/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/wav2vec2-large-xlsr-53-english/pytorch_model.bin b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/wav2vec2-large-xlsr-53-english/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..d2aabbf887ea847a9201e0eb2cdd8e2f9d088ef1 --- /dev/null +++ b/exp_code/1_benchmark/Wan-S2V/models/Wan-AI/Wan2.2-S2V-14B/wav2vec2-large-xlsr-53-english/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7b7688644eeefe1f5760bb4c4a61d085793a3740159fdbf19fd37c5d4f3729bf +size 1262069143 diff --git a/exp_code/1_benchmark/Wan/astronaut.jpg b/exp_code/1_benchmark/Wan/astronaut.jpg new file mode 100644 index 0000000000000000000000000000000000000000..71ee34661867b0ed05fdfff28980099d191378a6 --- /dev/null +++ b/exp_code/1_benchmark/Wan/astronaut.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1257de01d8440baea06865ea35ce557d885b6dda700281ba25202193c58285c +size 958045 diff --git a/exp_code/1_benchmark/Wan/output.mp4 b/exp_code/1_benchmark/Wan/output.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..602b32a6443f64a917e773aacd093896f9c8dc8f --- /dev/null +++ b/exp_code/1_benchmark/Wan/output.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7aaf66e095b9ce06098196aea9197846d2c612fe37c009dc7680765fea0a888a +size 339826 diff --git a/exp_code/1_benchmark/Wan/wan_i2v_input.JPG b/exp_code/1_benchmark/Wan/wan_i2v_input.JPG new file mode 100644 index 0000000000000000000000000000000000000000..8c7fabd943752179587eb717362db32ce1eb4800 --- /dev/null +++ b/exp_code/1_benchmark/Wan/wan_i2v_input.JPG @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:077e3d965090c9028c69c00931675f42e1acc815c6eb450ab291b3b72d211a8e +size 250628 diff --git a/exp_code/1_benchmark/Wan/yiyi_test_6_ti2v_5b_output.mp4 b/exp_code/1_benchmark/Wan/yiyi_test_6_ti2v_5b_output.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..5f6cdff3248e6aac491250ecd405b67c6ce78cf2 --- /dev/null +++ b/exp_code/1_benchmark/Wan/yiyi_test_6_ti2v_5b_output.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d67f34b28d5a87df001147e970a37b55660996f40837c8fd9095d27156c037a8 +size 506811 diff --git a/exp_code/1_benchmark/Wan2.2/assets/comp_effic.png b/exp_code/1_benchmark/Wan2.2/assets/comp_effic.png new file mode 100644 index 0000000000000000000000000000000000000000..bdc26da30b575b80d78f5972a08811eca9a6c455 --- /dev/null +++ b/exp_code/1_benchmark/Wan2.2/assets/comp_effic.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75ee012dcfb08365bec67a3ec7afc126fc2817f79b9f80e38711792d4770e32b +size 202156 diff --git a/exp_code/1_benchmark/Wan2.2/assets/logo.png b/exp_code/1_benchmark/Wan2.2/assets/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..14cea40a9ec4c2aa8de3b46806b25d766980d909 --- /dev/null +++ b/exp_code/1_benchmark/Wan2.2/assets/logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96cddc0f667293436d0b9f92a299b6346b65b231d38ee49719a33d46c91fe1e3 +size 56322 diff --git a/exp_code/1_benchmark/Wan2.2/assets/moe_2.png b/exp_code/1_benchmark/Wan2.2/assets/moe_2.png new file mode 100644 index 0000000000000000000000000000000000000000..c788c691cf96bbf9c0598cb440c23935848e8619 --- /dev/null +++ b/exp_code/1_benchmark/Wan2.2/assets/moe_2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ea471ccb64349bd08bc9a78f336ae000e9ca3b40da9a652b8028b214a8c6093 +size 527914 diff --git a/exp_code/1_benchmark/Wan2.2/assets/moe_arch.png b/exp_code/1_benchmark/Wan2.2/assets/moe_arch.png new file mode 100644 index 0000000000000000000000000000000000000000..65521ffd856fea389792460efb02090aa130f03d --- /dev/null +++ b/exp_code/1_benchmark/Wan2.2/assets/moe_arch.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:709510625af90bad3f9509c8c9bb6dc779576cd7f80b9bc95d75fe33f2755079 +size 74900 diff --git 
a/exp_code/1_benchmark/Wan2.2/assets/performance.png b/exp_code/1_benchmark/Wan2.2/assets/performance.png new file mode 100644 index 0000000000000000000000000000000000000000..ca558e3e33efc415814aeb3b103ae2e2b34ba233 --- /dev/null +++ b/exp_code/1_benchmark/Wan2.2/assets/performance.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97ef99c13c8ae717a8a11c8d8ec927b69077c647cc6689755d08fc38e7fbb830 +size 306535 diff --git a/exp_code/1_benchmark/Wan2.2/assets/vae.png b/exp_code/1_benchmark/Wan2.2/assets/vae.png new file mode 100644 index 0000000000000000000000000000000000000000..7d290425cf3f7ab20e2d80bece0d780259b24303 --- /dev/null +++ b/exp_code/1_benchmark/Wan2.2/assets/vae.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4aaea5e187f1c5908e15ade5bef24c9fb59882986bc3d2ad75f7fe820f3d772f +size 165486 diff --git a/exp_code/1_benchmark/Wan2.2/examples/Five Hundred Miles.MP3 b/exp_code/1_benchmark/Wan2.2/examples/Five Hundred Miles.MP3 new file mode 100644 index 0000000000000000000000000000000000000000..9eed5a199b949de42347ff395945f954c0810a5a --- /dev/null +++ b/exp_code/1_benchmark/Wan2.2/examples/Five Hundred Miles.MP3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d969412bfe4d5e4b328d3ff92c1307fc39f7988ceba66860b5c1a17e40502d6 +size 121043 diff --git a/exp_code/1_benchmark/Wan2.2/examples/Five Hundred Miles.png b/exp_code/1_benchmark/Wan2.2/examples/Five Hundred Miles.png new file mode 100644 index 0000000000000000000000000000000000000000..913ec0d13e9994794771286b84f92f40418ca4ec --- /dev/null +++ b/exp_code/1_benchmark/Wan2.2/examples/Five Hundred Miles.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5775ff7fbb162b937ec7ea2ff028cf4207d77a156720f9be822004b549ab4e98 +size 878185 diff --git a/exp_code/1_benchmark/Wan2.2/examples/i2v_input.JPG b/exp_code/1_benchmark/Wan2.2/examples/i2v_input.JPG new file mode 100644 index 0000000000000000000000000000000000000000..8c7fabd943752179587eb717362db32ce1eb4800 --- /dev/null +++ b/exp_code/1_benchmark/Wan2.2/examples/i2v_input.JPG @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:077e3d965090c9028c69c00931675f42e1acc815c6eb450ab291b3b72d211a8e +size 250628 diff --git a/exp_code/1_benchmark/Wan2.2/examples/pose.mp4 b/exp_code/1_benchmark/Wan2.2/examples/pose.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..992e268e302220e3fd48d82f3cac660b4fd3fa4a --- /dev/null +++ b/exp_code/1_benchmark/Wan2.2/examples/pose.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:467b541d19625fdc42bf47f9d4db2d02cf0579f4e3a9233c543b2117dbed8a8e +size 2192979 diff --git a/exp_code/1_benchmark/Wan2.2/examples/pose.png b/exp_code/1_benchmark/Wan2.2/examples/pose.png new file mode 100644 index 0000000000000000000000000000000000000000..0862eafcd7d4b864322ab39c83759cc50d1d6223 --- /dev/null +++ b/exp_code/1_benchmark/Wan2.2/examples/pose.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53a5d9b435adaf15dd8ffcab1a833b61da4a63079200fb9cec33127ee10f733b +size 822774 diff --git a/exp_code/1_benchmark/Wan2.2/examples/sing.MP3 b/exp_code/1_benchmark/Wan2.2/examples/sing.MP3 new file mode 100644 index 0000000000000000000000000000000000000000..7625679a1da8f5d2d46eacec1940e341e1b94226 --- /dev/null +++ b/exp_code/1_benchmark/Wan2.2/examples/sing.MP3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:520217a826cd078a61ff1eac7a3f8dfa55ade170d07a977d86d9bcb049d7fa59 +size 
300144 diff --git a/exp_code/1_benchmark/Wan2.2/examples/talk.wav b/exp_code/1_benchmark/Wan2.2/examples/talk.wav new file mode 100644 index 0000000000000000000000000000000000000000..e14dabfdc84cb68789ac76e932a713ae24277dbc --- /dev/null +++ b/exp_code/1_benchmark/Wan2.2/examples/talk.wav @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8b0b80ae25baaa402853f34b24f5ba64decd67bcf9a512640d1e8b1d040824f +size 884814 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/docs/source/en/imgs/access_request.png b/exp_code/1_benchmark/diffusers-WanS2V/docs/source/en/imgs/access_request.png new file mode 100644 index 0000000000000000000000000000000000000000..1a19908c64bd08dcba67f10375813d2821bf6f66 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/docs/source/en/imgs/access_request.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9688dabf75e180590251cd1f75d18966f9c94d5d6584bc7d0278b698c175c61f +size 104814 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/docs/source/en/imgs/diffusers_library.jpg b/exp_code/1_benchmark/diffusers-WanS2V/docs/source/en/imgs/diffusers_library.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c2f8c529a69d4e01f4601bfc435ae90b24659fca --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/docs/source/en/imgs/diffusers_library.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be2485d6656bec11b85f469b2bc04736a8de8270fa2f3779d9d40bfab3966950 +size 14061 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/examples/research_projects/gligen/generated-images-100000-00.png b/exp_code/1_benchmark/diffusers-WanS2V/examples/research_projects/gligen/generated-images-100000-00.png new file mode 100644 index 0000000000000000000000000000000000000000..9ac046ce4d065f409bf5ebfa210504c5df0868c0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/examples/research_projects/gligen/generated-images-100000-00.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55fd4e850dce2d501c2b6186039496a09f3a2a98711122473cf527b6504d8467 +size 1588149 diff --git a/exp_code/1_benchmark/musubi-tuner/docs/betas_for_sigma_rel.png b/exp_code/1_benchmark/musubi-tuner/docs/betas_for_sigma_rel.png new file mode 100644 index 0000000000000000000000000000000000000000..ca96d0c7f48d22529de74769cf52e6e91a59c889 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/docs/betas_for_sigma_rel.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77ec2f19a0ef7e6a677a30b8530b05eeeeb0fa9c33df723c38ede3532c478d21 +size 66031 diff --git a/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_ref.png b/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_ref.png new file mode 100644 index 0000000000000000000000000000000000000000..b3c97e632672364b360c6a26d2bc251dcf485dd8 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_ref.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5037f0a0cfb1a6b0a8d1f19fb462df75fb53384d0d9e654c359ca984fafa605 +size 583507 diff --git a/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_ref_mask.png b/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_ref_mask.png new file mode 100644 index 0000000000000000000000000000000000000000..f3017bc9c4ff921381ddfd4a3f4da3be194510c8 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_ref_mask.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69ebba82dd24ff20b0c60cbb0a6d32e879dee36a58c6ca35867862c3a5c407a4 +size 17351 diff 
--git a/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_result.png b/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_result.png new file mode 100644 index 0000000000000000000000000000000000000000..52c13c39df20aee7b405e5faa64104ea0c4c641f --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_result.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:223dacb98ac834a442ee124641a6b852b1cde3bc1f11939e78192fc8be2f7b49 +size 408282 diff --git a/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_start.png b/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_start.png new file mode 100644 index 0000000000000000000000000000000000000000..b31c37500db6083aa54b1c6b868cb8a56d928745 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_start.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:beee4a910402ef2798b00aa4d193b0b7186380ed24928a4d39acc8635d2cfdaf +size 1033975 diff --git a/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_start_mask.png b/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_start_mask.png new file mode 100644 index 0000000000000000000000000000000000000000..0a775dd6af68f4674dbf06fcc44b30210f445d13 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/docs/kisekaeichi_start_mask.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e81933caf5125439e46a34476d339912b981d868517e278d6f9fb6064b328c6 +size 14908 diff --git a/exp_code/1_benchmark/musubi-tuner/scripts/demo_output_dir/20250703-222235_1234_latent.safetensors b/exp_code/1_benchmark/musubi-tuner/scripts/demo_output_dir/20250703-222235_1234_latent.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5006489157044fb0f6a101396ac7fab91f6a4502 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/scripts/demo_output_dir/20250703-222235_1234_latent.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37963dedb13951fbda3e0cc90de07229a51f3e15b0214a7e3beb9e91ec8e9ec9 +size 14776712 diff --git a/exp_code/1_benchmark/musubi-tuner/scripts/demo_output_dir/20250703-222312_1234_.mp4 b/exp_code/1_benchmark/musubi-tuner/scripts/demo_output_dir/20250703-222312_1234_.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..f89d7a9d747ea75eebcf8ec365d63c6cfb178709 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/scripts/demo_output_dir/20250703-222312_1234_.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:580d6a1cd00488add75981b832877eea54ed685a43c315447a2f259bada6120a +size 2407142 diff --git a/exp_code/1_benchmark/musubi-tuner/src/a3c275fc2eb0a67168a7c58a6a9adb14.mp4 b/exp_code/1_benchmark/musubi-tuner/src/a3c275fc2eb0a67168a7c58a6a9adb14.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..f41577fb56368a7489b71b192f563a1169e2e334 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/a3c275fc2eb0a67168a7c58a6a9adb14.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67b422a72fc1df0e48792b5a20328ed2e62f0aa2420cbff8e89abb8bb1eaaa5b +size 230176 diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/hv_train_network.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/hv_train_network.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8dba0c45a516bf41834f01458651aa6a610784a9 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/hv_train_network.cpython-312.pyc @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:13b701e95aabab3d44560e203ffcd6081ffe38e483fe8643b18085a92c9a14f5 +size 125905 diff --git a/exp_code/1_benchmark/pa_vdm/outputs/pavdm_inference_0000.mp4 b/exp_code/1_benchmark/pa_vdm/outputs/pavdm_inference_0000.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..a675a99b96d1aba1b2c02d7ab2aba521b2082367 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/outputs/pavdm_inference_0000.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b93981fdc48cb9b751325240dc399b99a62c722e2f2039bc288490591ecb07fa +size 1055942 diff --git a/exp_code/1_benchmark/pa_vdm/outputs/wandb/wandb/offline-run-20250718_161437-mq58lm4k/run-mq58lm4k.wandb b/exp_code/1_benchmark/pa_vdm/outputs/wandb/wandb/offline-run-20250718_161437-mq58lm4k/run-mq58lm4k.wandb new file mode 100644 index 0000000000000000000000000000000000000000..bc5026057e800628fc4266d3aee693e8fd1a7c88 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/outputs/wandb/wandb/offline-run-20250718_161437-mq58lm4k/run-mq58lm4k.wandb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ebf2e0c94e04869a01bb9284b9b124d6623a39d41be4fba1416853a169f2306 +size 407557 diff --git a/exp_code/1_benchmark/pa_vdm/outputs/wandb/wandb/offline-run-20250721_133358-bb8mn98e/run-bb8mn98e.wandb b/exp_code/1_benchmark/pa_vdm/outputs/wandb/wandb/offline-run-20250721_133358-bb8mn98e/run-bb8mn98e.wandb new file mode 100644 index 0000000000000000000000000000000000000000..772f6293bd07657d328a82a5ea9f5bb3dc4b0a15 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/outputs/wandb/wandb/offline-run-20250721_133358-bb8mn98e/run-bb8mn98e.wandb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:524f3ed2d156d10459090f77e67736cc30d312c5c96c7718e543bebfe4209a94 +size 5701632 diff --git a/exp_code/1_benchmark/pa_vdm/pavdm/test_data/output.mp4 b/exp_code/1_benchmark/pa_vdm/pavdm/test_data/output.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..85cf96578a22f774a4e8ddf42ed2a46fa08650e7 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/pavdm/test_data/output.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c648e5104b73221b6070fabbeef31e0f1b42a28585c82faaa1548a6ede86193 +size 325685 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752752559.dsw-268426-57f8f9bbcb-pjknh.4011.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752752559.dsw-268426-57f8f9bbcb-pjknh.4011.0 new file mode 100644 index 0000000000000000000000000000000000000000..be32c5ce1dba19a363de27f7f264567668dfd69a --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752752559.dsw-268426-57f8f9bbcb-pjknh.4011.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a2fa1777f6477aecab6c45fec084cbe235106f2ff0026b50befd9ac2ab3feed +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752752629.dsw-268426-57f8f9bbcb-pjknh.4275.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752752629.dsw-268426-57f8f9bbcb-pjknh.4275.0 new file mode 100644 index 0000000000000000000000000000000000000000..742a9f26757c8d1e8ccf68c21c0b14636697afa3 --- /dev/null +++ 
b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752752629.dsw-268426-57f8f9bbcb-pjknh.4275.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25fea6dc10c34d951d47ff65236ae7832c4ca4c4c4220a8d592f08745450dc58 +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752752863.dsw-268426-57f8f9bbcb-pjknh.5009.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752752863.dsw-268426-57f8f9bbcb-pjknh.5009.0 new file mode 100644 index 0000000000000000000000000000000000000000..bd3e179701245bc8e45b05ed95525fd691918693 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752752863.dsw-268426-57f8f9bbcb-pjknh.5009.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:642bc51439a577eb51e427d12018b68a513f3255afbbbc0802fb01ca5ec10a8a +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752752983.dsw-268426-57f8f9bbcb-pjknh.5706.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752752983.dsw-268426-57f8f9bbcb-pjknh.5706.0 new file mode 100644 index 0000000000000000000000000000000000000000..e2b2292d15a2f10259c324cdcef7afd4cfc1783c --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752752983.dsw-268426-57f8f9bbcb-pjknh.5706.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1767f9b7db1a334f0bc1b61c34d0d9f16e22b476d6bab40152f2a67386f8077 +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752753076.dsw-268426-57f8f9bbcb-pjknh.6307.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752753076.dsw-268426-57f8f9bbcb-pjknh.6307.0 new file mode 100644 index 0000000000000000000000000000000000000000..fdee970bafa54e0791bf1d7fe216bd21f09ccaed --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752753076.dsw-268426-57f8f9bbcb-pjknh.6307.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45b01e1fe3772b4c53e18d7513c089259cef712ff461efa65754393c2aa9e237 +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752754327.dsw-268426-57f8f9bbcb-pjknh.7182.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752754327.dsw-268426-57f8f9bbcb-pjknh.7182.0 new file mode 100644 index 0000000000000000000000000000000000000000..8016d57584f957d9d3808ee4c799ce79604ba60a --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752754327.dsw-268426-57f8f9bbcb-pjknh.7182.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98811f3e41c7ffc11631f960000bfd6fa87dc24b379d884f4e507b4cfa67703f +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752754461.dsw-268426-57f8f9bbcb-pjknh.8021.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752754461.dsw-268426-57f8f9bbcb-pjknh.8021.0 new file mode 100644 index 
0000000000000000000000000000000000000000..b780411f78f4236a6f14458aaad4e9f89671e91d --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752754461.dsw-268426-57f8f9bbcb-pjknh.8021.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a64f8f0a143e4010c74b61209dee05fabedc17c7da0eeb81f6199fb71160610f +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752754734.dsw-268426-57f8f9bbcb-pjknh.10492.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752754734.dsw-268426-57f8f9bbcb-pjknh.10492.0 new file mode 100644 index 0000000000000000000000000000000000000000..4a57b69b457abee3b4111b13c8cc8f3251b6a811 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752754734.dsw-268426-57f8f9bbcb-pjknh.10492.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663d01aa79eef879d3520980194f657cd093ced30ef29e65fa99b72be54c00c0 +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752754961.dsw-268426-57f8f9bbcb-pjknh.13225.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752754961.dsw-268426-57f8f9bbcb-pjknh.13225.0 new file mode 100644 index 0000000000000000000000000000000000000000..f722c7a48a6bc06cbcaa229d718a60e650468bce --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752754961.dsw-268426-57f8f9bbcb-pjknh.13225.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4510f30707e5b0c952d50c756e2310b12a4e20095a52424abc717a65cd291cdd +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752755575.dsw-268426-57f8f9bbcb-pjknh.17259.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752755575.dsw-268426-57f8f9bbcb-pjknh.17259.0 new file mode 100644 index 0000000000000000000000000000000000000000..30f566e73a39c365a7764594cd587c118567c7c7 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752755575.dsw-268426-57f8f9bbcb-pjknh.17259.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33ef8b5b0bfddf1372fd9401f23e63c1d7f65b67f7e582b077df6732714ce8ad +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752755683.dsw-268426-57f8f9bbcb-pjknh.19121.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752755683.dsw-268426-57f8f9bbcb-pjknh.19121.0 new file mode 100644 index 0000000000000000000000000000000000000000..28139f6cba795417974a89cc4f4fa4783c4cf771 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752755683.dsw-268426-57f8f9bbcb-pjknh.19121.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f11dde545a14e8774e77a5ccb10d4fb5d9e3a7f29e85e388df5b053cd4b2ac2c +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752755854.dsw-268426-57f8f9bbcb-pjknh.21042.0 
b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752755854.dsw-268426-57f8f9bbcb-pjknh.21042.0 new file mode 100644 index 0000000000000000000000000000000000000000..c893a95d684a92574540cd5521fb0d7bb7aaa160 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752755854.dsw-268426-57f8f9bbcb-pjknh.21042.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90da555948374fa066547408c5e12dada49f13626685eee69f0ac9439020c27c +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752756085.dsw-268426-57f8f9bbcb-pjknh.22858.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752756085.dsw-268426-57f8f9bbcb-pjknh.22858.0 new file mode 100644 index 0000000000000000000000000000000000000000..fc25eaa3f6b71290dd9c9d8c15fd8113218b177a --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752756085.dsw-268426-57f8f9bbcb-pjknh.22858.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02db40f6879f508ef3fd441525ebe5d0c351d77351041d3a7d2433de1a075bcc +size 214 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752826230.dsw-222941-6d784d977c-6pnsc.1694581.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752826230.dsw-222941-6d784d977c-6pnsc.1694581.0 new file mode 100644 index 0000000000000000000000000000000000000000..b0dd2d7eacd5db1721b2d8c17cd53d90b83eaff7 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752826230.dsw-222941-6d784d977c-6pnsc.1694581.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dd13b0beaa79a5c2a1b48f8e34d1bde23c39878e5709467134c2f4837f67443 +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752826474.dsw-222941-6d784d977c-6pnsc.1699244.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752826474.dsw-222941-6d784d977c-6pnsc.1699244.0 new file mode 100644 index 0000000000000000000000000000000000000000..1949ab3d2bf04b0f20314945235dbf2532d7278d --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752826474.dsw-222941-6d784d977c-6pnsc.1699244.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d592d117bd3f8d617c1f7a8ca839c3350327156e5a6340170147e40224fb49e3 +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752910975.dsw-222941-6d784d977c-6pnsc.1788291.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752910975.dsw-222941-6d784d977c-6pnsc.1788291.0 new file mode 100644 index 0000000000000000000000000000000000000000..0f5b29e2d4467adfb0cc62dff03e148ac8dbfe4a --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752910975.dsw-222941-6d784d977c-6pnsc.1788291.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf0108012407cd428f1c5a6e71effd2dead26060decc0c1d78e5672efa83ee39 +size 88 diff --git 
a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752994645.dsw-222941-6d784d977c-6pnsc.1829349.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752994645.dsw-222941-6d784d977c-6pnsc.1829349.0 new file mode 100644 index 0000000000000000000000000000000000000000..c95ed3e35487df5fd6d10274d9c90a7b47da4361 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752994645.dsw-222941-6d784d977c-6pnsc.1829349.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d846107636d1d56ba549f267c1360764b08dd4f5acfa0d013489f0d87419330b +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752994740.dsw-222941-6d784d977c-6pnsc.1831962.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752994740.dsw-222941-6d784d977c-6pnsc.1831962.0 new file mode 100644 index 0000000000000000000000000000000000000000..9d053acf11391ef4252aa0c3c7f258e5a9bb9f35 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1752994740.dsw-222941-6d784d977c-6pnsc.1831962.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd1bb8f022d619232f3bd49fa773b00f8be447ff94e5b8937077f62fccf791c1 +size 130 diff --git a/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1753076035.dsw-222941-6d784d977c-6pnsc.1855056.0 b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1753076035.dsw-222941-6d784d977c-6pnsc.1855056.0 new file mode 100644 index 0000000000000000000000000000000000000000..3f75013fe12f7dbb41b0986eaf64fd44002724fe --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/temp_train_outputs000-STDiT3-XL-2-bn/tensorboard/events.out.tfevents.1753076035.dsw-222941-6d784d977c-6pnsc.1855056.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4ce506921a8a3cae97ff1d4942895b9db3dd78f2afd0fe59c8de2090a8a98a4 +size 88 diff --git a/exp_code/1_benchmark/pa_vdm/tools/datasets/analyze.py b/exp_code/1_benchmark/pa_vdm/tools/datasets/analyze.py new file mode 100644 index 0000000000000000000000000000000000000000..7151689a4d309e5516f1a461fe4bec47dbff97e2 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/datasets/analyze.py @@ -0,0 +1,96 @@ +import argparse +import os + +import matplotlib.pyplot as plt +import pandas as pd + + +def read_file(input_path): + if input_path.endswith(".csv"): + return pd.read_csv(input_path) + elif input_path.endswith(".parquet"): + return pd.read_parquet(input_path) + else: + raise NotImplementedError(f"Unsupported file format: {input_path}") + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("input", type=str, help="Path to the input dataset") + parser.add_argument("--save-img", type=str, default="samples/infos/", help="Path to save the image") + return parser.parse_args() + + +def plot_data(data, column, bins, name): + plt.clf() + data.hist(column=column, bins=bins) + os.makedirs(os.path.dirname(name), exist_ok=True) + plt.savefig(name) + print(f"Saved {name}") + + +def plot_categorical_data(data, column, name): + plt.clf() + data[column].value_counts().plot(kind="bar") + os.makedirs(os.path.dirname(name), exist_ok=True) + plt.savefig(name) + print(f"Saved {name}") + + +COLUMNS = { + "num_frames": 100, + 
"resolution": 100, + "text_len": 100, + "aes": 100, + "match": 100, + "flow": 100, + "cmotion": None, +} + + +def main(args): + data = read_file(args.input) + + # === Image Data Info === + image_index = data["num_frames"] == 1 + if image_index.sum() > 0: + print("=== Image Data Info ===") + img_data = data[image_index] + print(f"Number of images: {len(img_data)}") + print(img_data.head()) + print(img_data.describe()) + if args.save_img: + for column in COLUMNS: + if column in img_data.columns and column not in ["num_frames", "cmotion"]: + if COLUMNS[column] is None: + plot_categorical_data(img_data, column, os.path.join(args.save_img, f"image_{column}.png")) + else: + plot_data(img_data, column, COLUMNS[column], os.path.join(args.save_img, f"image_{column}.png")) + + # === Video Data Info === + if not image_index.all(): + print("=== Video Data Info ===") + video_data = data[~image_index] + print(f"Number of videos: {len(video_data)}") + if "num_frames" in video_data.columns: + total_num_frames = video_data["num_frames"].sum() + print(f"Number of frames: {total_num_frames}") + DEFAULT_FPS = 30 + total_hours = total_num_frames / DEFAULT_FPS / 3600 + print(f"Total hours (30 FPS): {int(total_hours)}") + print(video_data.head()) + print(video_data.describe()) + if args.save_img: + for column in COLUMNS: + if column in video_data.columns: + if COLUMNS[column] is None: + plot_categorical_data(video_data, column, os.path.join(args.save_img, f"video_{column}.png")) + else: + plot_data( + video_data, column, COLUMNS[column], os.path.join(args.save_img, f"video_{column}.png") + ) + + +if __name__ == "__main__": + args = parse_args() + main(args) diff --git a/exp_code/1_benchmark/pa_vdm/tools/datasets/convert.py b/exp_code/1_benchmark/pa_vdm/tools/datasets/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..fad128f2fb448fa0c70886b20778c30c2a0b5fc1 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/datasets/convert.py @@ -0,0 +1,143 @@ +import argparse +import os +import time + +import pandas as pd +from torchvision.datasets import ImageNet + +IMG_EXTENSIONS = (".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp") +VID_EXTENSIONS = (".mp4", ".avi", ".mov", ".mkv", ".m2ts") + + +def scan_recursively(root): + num = 0 + for entry in os.scandir(root): + if entry.is_file(): + yield entry + elif entry.is_dir(): + num += 1 + if num % 100 == 0: + print(f"Scanned {num} directories.") + yield from scan_recursively(entry.path) + + +def get_filelist(file_path, exts=None): + filelist = [] + time_start = time.time() + + # == OS Walk == + # for home, dirs, files in os.walk(file_path): + # for filename in files: + # ext = os.path.splitext(filename)[-1].lower() + # if exts is None or ext in exts: + # filelist.append(os.path.join(home, filename)) + + # == Scandir == + obj = scan_recursively(file_path) + for entry in obj: + if entry.is_file(): + ext = os.path.splitext(entry.name)[-1].lower() + if exts is None or ext in exts: + filelist.append(entry.path) + + time_end = time.time() + print(f"Scanned {len(filelist)} files in {time_end - time_start:.2f} seconds.") + return filelist + + +def split_by_capital(name): + # BoxingPunchingBag -> Boxing Punching Bag + new_name = "" + for i in range(len(name)): + if name[i].isupper() and i != 0: + new_name += " " + new_name += name[i] + return new_name + + +def process_imagenet(root, split): + root = os.path.expanduser(root) + data = ImageNet(root, split=split) + samples = [(path, data.classes[label][0]) for path, label in 
data.samples] + output = f"imagenet_{split}.csv" + + df = pd.DataFrame(samples, columns=["path", "text"]) + df.to_csv(output, index=False) + print(f"Saved {len(samples)} samples to {output}.") + + +def process_ucf101(root, split): + root = os.path.expanduser(root) + video_lists = get_filelist(os.path.join(root, split)) + classes = [x.split("/")[-2] for x in video_lists] + classes = [split_by_capital(x) for x in classes] + samples = list(zip(video_lists, classes)) + output = f"ucf101_{split}.csv" + + df = pd.DataFrame(samples, columns=["path", "text"]) + df.to_csv(output, index=False) + print(f"Saved {len(samples)} samples to {output}.") + + +def process_vidprom(root, info): + root = os.path.expanduser(root) + video_lists = get_filelist(root) + video_set = set(video_lists) + # read info csv + infos = pd.read_csv(info) + abs_path = infos["uuid"].apply(lambda x: os.path.join(root, f"pika-{x}.mp4")) + is_exist = abs_path.apply(lambda x: x in video_set) + df = pd.DataFrame(dict(path=abs_path[is_exist], text=infos["prompt"][is_exist])) + df.to_csv("vidprom.csv", index=False) + print(f"Saved {len(df)} samples to vidprom.csv.") + + +def process_general_images(root, output): + root = os.path.expanduser(root) + if not os.path.exists(root): + return + path_list = get_filelist(root, IMG_EXTENSIONS) + fname_list = [os.path.splitext(os.path.basename(x))[0] for x in path_list] + df = pd.DataFrame(dict(id=fname_list, path=path_list)) + + os.makedirs(os.path.dirname(output), exist_ok=True) + df.to_csv(output, index=False) + print(f"Saved {len(df)} samples to {output}.") + + +def process_general_videos(root, output): + root = os.path.expanduser(root) + if not os.path.exists(root): + return + path_list = get_filelist(root, VID_EXTENSIONS) + path_list = list(set(path_list)) # remove duplicates + fname_list = [os.path.splitext(os.path.basename(x))[0] for x in path_list] + relpath_list = [os.path.relpath(x, root) for x in path_list] + df = pd.DataFrame(dict(path=path_list, id=fname_list, relpath=relpath_list)) + + os.makedirs(os.path.dirname(output), exist_ok=True) + df.to_csv(output, index=False) + print(f"Saved {len(df)} samples to {output}.") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("dataset", type=str, choices=["imagenet", "ucf101", "vidprom", "image", "video"]) + parser.add_argument("root", type=str) + parser.add_argument("--split", type=str, default="train") + parser.add_argument("--info", type=str, default=None) + parser.add_argument("--output", type=str, default=None, required=True, help="Output path") + args = parser.parse_args() + + if args.dataset == "imagenet": + process_imagenet(args.root, args.split) + elif args.dataset == "ucf101": + process_ucf101(args.root, args.split) + elif args.dataset == "vidprom": + process_vidprom(args.root, args.info) + elif args.dataset == "image": + process_general_images(args.root, args.output) + elif args.dataset == "video": + process_general_videos(args.root, args.output) + else: + raise ValueError("Invalid dataset") diff --git a/exp_code/1_benchmark/pa_vdm/tools/datasets/datautil.py b/exp_code/1_benchmark/pa_vdm/tools/datasets/datautil.py new file mode 100644 index 0000000000000000000000000000000000000000..f972dfd642cd7de96b164176b6939db210df9d87 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/datasets/datautil.py @@ -0,0 +1,878 @@ +import argparse +import html +import json +import os +import random +import re +from functools import partial +from glob import glob + +import cv2 +import numpy as np +import 
pandas as pd +from PIL import Image +from tqdm import tqdm + +from opensora.datasets.read_video import read_video + +from .utils import IMG_EXTENSIONS + +tqdm.pandas() + +try: + from pandarallel import pandarallel + + PANDA_USE_PARALLEL = True +except ImportError: + PANDA_USE_PARALLEL = False + + +def apply(df, func, **kwargs): + if PANDA_USE_PARALLEL: + return df.parallel_apply(func, **kwargs) + return df.progress_apply(func, **kwargs) + + +TRAIN_COLUMNS = ["path", "text", "num_frames", "fps", "height", "width", "aspect_ratio", "resolution", "text_len"] + +# ====================================================== +# --info +# ====================================================== + + +def get_video_length(cap, method="header"): + assert method in ["header", "set"] + if method == "header": + length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + else: + cap.set(cv2.CAP_PROP_POS_AVI_RATIO, 1) + length = int(cap.get(cv2.CAP_PROP_POS_FRAMES)) + return length + + +def get_info_old(path): + try: + ext = os.path.splitext(path)[1].lower() + if ext in IMG_EXTENSIONS: + im = cv2.imread(path) + if im is None: + return 0, 0, 0, np.nan, np.nan, np.nan + height, width = im.shape[:2] + num_frames, fps = 1, np.nan + else: + cap = cv2.VideoCapture(path) + num_frames, height, width, fps = ( + get_video_length(cap, method="header"), + int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), + int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), + float(cap.get(cv2.CAP_PROP_FPS)), + ) + hw = height * width + aspect_ratio = height / width if width > 0 else np.nan + return num_frames, height, width, aspect_ratio, fps, hw + except: + return 0, 0, 0, np.nan, np.nan, np.nan + + +def get_info(path): + try: + ext = os.path.splitext(path)[1].lower() + if ext in IMG_EXTENSIONS: + return get_image_info(path) + else: + return get_video_info(path) + except: + return 0, 0, 0, np.nan, np.nan, np.nan + + +def get_image_info(path, backend="pillow"): + if backend == "pillow": + try: + with open(path, "rb") as f: + img = Image.open(f) + img = img.convert("RGB") + width, height = img.size + num_frames, fps = 1, np.nan + hw = height * width + aspect_ratio = height / width if width > 0 else np.nan + return num_frames, height, width, aspect_ratio, fps, hw + except: + return 0, 0, 0, np.nan, np.nan, np.nan + elif backend == "cv2": + try: + im = cv2.imread(path) + if im is None: + return 0, 0, 0, np.nan, np.nan, np.nan + height, width = im.shape[:2] + num_frames, fps = 1, np.nan + hw = height * width + aspect_ratio = height / width if width > 0 else np.nan + return num_frames, height, width, aspect_ratio, fps, hw + except: + return 0, 0, 0, np.nan, np.nan, np.nan + else: + raise ValueError + + +def get_video_info(path, backend="torchvision"): + if backend == "torchvision": + try: + vframes, infos = read_video(path) + num_frames, height, width = vframes.shape[0], vframes.shape[2], vframes.shape[3] + if "video_fps" in infos: + fps = infos["video_fps"] + else: + fps = np.nan + hw = height * width + aspect_ratio = height / width if width > 0 else np.nan + return num_frames, height, width, aspect_ratio, fps, hw + except: + return 0, 0, 0, np.nan, np.nan, np.nan + elif backend == "cv2": + try: + cap = cv2.VideoCapture(path) + num_frames, height, width, fps = ( + get_video_length(cap, method="header"), + int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), + int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), + float(cap.get(cv2.CAP_PROP_FPS)), + ) + hw = height * width + aspect_ratio = height / width if width > 0 else np.nan + return num_frames, height, width, aspect_ratio, fps, hw + except: + 
return 0, 0, 0, np.nan, np.nan, np.nan + else: + raise ValueError + + +# ====================================================== +# --refine-llm-caption +# ====================================================== + +LLAVA_PREFIX = [ + "The video shows", + "The video captures", + "The video features", + "The video depicts", + "The video presents", + "The video features", + "The video is ", + "In the video,", + "The image shows", + "The image captures", + "The image features", + "The image depicts", + "The image presents", + "The image features", + "The image is ", + "The image portrays", + "In the image,", +] + + +def remove_caption_prefix(caption): + for prefix in LLAVA_PREFIX: + if caption.startswith(prefix) or caption.startswith(prefix.lower()): + caption = caption[len(prefix) :].strip() + if caption[0].islower(): + caption = caption[0].upper() + caption[1:] + return caption + return caption + + +# ====================================================== +# --merge-cmotion +# ====================================================== + +CMOTION_TEXT = { + "static": "static", + "pan_right": "pan right", + "pan_left": "pan left", + "zoom_in": "zoom in", + "zoom_out": "zoom out", + "tilt_up": "tilt up", + "tilt_down": "tilt down", + # "pan/tilt": "The camera is panning.", + # "dynamic": "The camera is moving.", + # "unknown": None, +} +CMOTION_PROBS = { + # hard-coded probabilities + "static": 1.0, + "zoom_in": 1.0, + "zoom_out": 1.0, + "pan_left": 1.0, + "pan_right": 1.0, + "tilt_up": 1.0, + "tilt_down": 1.0, + # "dynamic": 1.0, + # "unknown": 0.0, + # "pan/tilt": 1.0, +} + + +def merge_cmotion(caption, cmotion): + text = CMOTION_TEXT[cmotion] + prob = CMOTION_PROBS[cmotion] + if text is not None and random.random() < prob: + caption = f"{caption} Camera motion: {text}." 
+ return caption + + +# ====================================================== +# --lang +# ====================================================== + + +def build_lang_detector(lang_to_detect): + from lingua import Language, LanguageDetectorBuilder + + lang_dict = dict(en=Language.ENGLISH) + assert lang_to_detect in lang_dict + valid_lang = lang_dict[lang_to_detect] + detector = LanguageDetectorBuilder.from_all_spoken_languages().with_low_accuracy_mode().build() + + def detect_lang(caption): + confidence_values = detector.compute_language_confidence_values(caption) + confidence = [x.language for x in confidence_values[:5]] + if valid_lang not in confidence: + return False + return True + + return detect_lang + + +# ====================================================== +# --clean-caption +# ====================================================== + + +def basic_clean(text): + import ftfy + + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +BAD_PUNCT_REGEX = re.compile( + r"[" + "#®•©™&@·º½¾¿¡§~" + "\)" + "\(" + "\]" + "\[" + "\}" + "\{" + "\|" + "\\" + "\/" + "\*" + r"]{1,}" +) # noqa + + +def clean_caption(caption): + import urllib.parse as ul + + from bs4 import BeautifulSoup + + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("<person>", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @ + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0—31EF CJK Strokes + # 31F0—31FF Katakana Phonetic Extensions + # 3200—32FF Enclosed CJK Letters and Months + # 3300—33FF CJK Compatibility + # 3400—4DBF CJK Unified Ideographs Extension A + # 4DC0—4DFF Yijing Hexagram Symbols + # 4E00—9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # все виды тире / all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # кавычки к одному стандарту / standardize quotes + caption = re.sub(r"[`´«»“”¨]", '"', caption) + caption = re.sub(r"[‘’]", "'", caption) + + # &quot; + caption = re.sub(r"&quot;?", "", caption) + # &amp + caption = re.sub(r"&amp", "", caption) + + # ip addresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.."
+ caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(BAD_PUNCT_REGEX, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = basic_clean(caption) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + +def text_preprocessing(text, use_text_preprocessing: bool = True): + if use_text_preprocessing: + # The exact text cleaning as was in the training stage: + text = clean_caption(text) + text = clean_caption(text) + return text + else: + return text.lower().strip() + + +# ====================================================== +# load caption +# ====================================================== + + +def load_caption(path, ext): + try: + assert ext in ["json"] + json_path = path.split(".")[0] + ".json" + with open(json_path, "r") as f: + data = json.load(f) + caption = data["caption"] + return caption + except: + return "" + + +# ====================================================== +# --clean-caption +# ====================================================== + +DROP_SCORE_PROB = 0.2 + + +def score_to_text(data): + text = data["text"] + scores = [] + # aesthetic + if "aes" in data: + aes = data["aes"] + if random.random() > DROP_SCORE_PROB: + score_text = f"aesthetic score: {aes:.1f}" + scores.append(score_text) + if "flow" in data: + flow = data["flow"] + if random.random() > DROP_SCORE_PROB: + score_text = f"motion score: {flow:.1f}" + scores.append(score_text) + if len(scores) > 0: + text = f"{text} [{', '.join(scores)}]" + return text + + +# ====================================================== +# read & write +# ====================================================== + + +def read_file(input_path): + if input_path.endswith(".csv"): + return pd.read_csv(input_path) + elif input_path.endswith(".parquet"): + return pd.read_parquet(input_path) + else: + raise NotImplementedError(f"Unsupported file format: {input_path}") + + +def save_file(data, output_path): + output_dir = os.path.dirname(output_path) + if not os.path.exists(output_dir) and output_dir != 
"": + os.makedirs(output_dir) + if output_path.endswith(".csv"): + return data.to_csv(output_path, index=False) + elif output_path.endswith(".parquet"): + return data.to_parquet(output_path, index=False) + else: + raise NotImplementedError(f"Unsupported file format: {output_path}") + + +def read_data(input_paths): + data = [] + input_name = "" + input_list = [] + for input_path in input_paths: + input_list.extend(glob(input_path)) + print("Input files:", input_list) + for i, input_path in enumerate(input_list): + if not os.path.exists(input_path): + continue + data.append(read_file(input_path)) + input_name += os.path.basename(input_path).split(".")[0] + if i != len(input_list) - 1: + input_name += "+" + print(f"Loaded {len(data[-1])} samples from '{input_path}'.") + if len(data) == 0: + print(f"No samples to process. Exit.") + exit() + data = pd.concat(data, ignore_index=True, sort=False) + print(f"Total number of samples: {len(data)}") + return data, input_name + + +# ====================================================== +# main +# ====================================================== +# To add a new method, register it in the main, parse_args, and get_output_path functions, and update the doc at /tools/datasets/README.md#documentation + + +def main(args): + # reading data + data, input_name = read_data(args.input) + + # make difference + if args.difference is not None: + data_diff = pd.read_csv(args.difference) + print(f"Difference csv contains {len(data_diff)} samples.") + data = data[~data["path"].isin(data_diff["path"])] + input_name += f"-{os.path.basename(args.difference).split('.')[0]}" + print(f"Filtered number of samples: {len(data)}.") + + # make intersection + if args.intersection is not None: + data_new = pd.read_csv(args.intersection) + print(f"Intersection csv contains {len(data_new)} samples.") + cols_to_use = data_new.columns.difference(data.columns) + + col_on = "path" + # if 'id' in data.columns and 'id' in data_new.columns: + # col_on = 'id' + cols_to_use = cols_to_use.insert(0, col_on) + data = pd.merge(data, data_new[cols_to_use], on=col_on, how="inner") + print(f"Intersection number of samples: {len(data)}.") + + # get output path + output_path = get_output_path(args, input_name) + + # preparation + if args.lang is not None: + detect_lang = build_lang_detector(args.lang) + if args.count_num_token == "t5": + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained("DeepFloyd/t5-v1_1-xxl") + + # IO-related + if args.load_caption is not None: + assert "path" in data.columns + data["text"] = apply(data["path"], load_caption, ext=args.load_caption) + if args.info: + info = apply(data["path"], get_info) + ( + data["num_frames"], + data["height"], + data["width"], + data["aspect_ratio"], + data["fps"], + data["resolution"], + ) = zip(*info) + if args.video_info: + info = apply(data["path"], get_video_info) + ( + data["num_frames"], + data["height"], + data["width"], + data["aspect_ratio"], + data["fps"], + data["resolution"], + ) = zip(*info) + if args.ext: + assert "path" in data.columns + data = data[apply(data["path"], os.path.exists)] + + # filtering + if args.remove_url: + assert "text" in data.columns + data = data[~data["text"].str.contains(r"(?Phttps?://[^\s]+)", regex=True)] + if args.lang is not None: + assert "text" in data.columns + data = data[data["text"].progress_apply(detect_lang)] # cannot parallelize + if args.remove_empty_path: + assert "path" in data.columns + data = data[data["path"].str.len() > 0] + data = 
data[~data["path"].isna()] + if args.remove_empty_caption: + assert "text" in data.columns + data = data[data["text"].str.len() > 0] + data = data[~data["text"].isna()] + if args.remove_path_duplication: + assert "path" in data.columns + data = data.drop_duplicates(subset=["path"]) + if args.path_subset: + data = data[data["path"].str.contains(args.path_subset)] + + # processing + if args.relpath is not None: + data["path"] = apply(data["path"], lambda x: os.path.relpath(x, args.relpath)) + if args.abspath is not None: + data["path"] = apply(data["path"], lambda x: os.path.join(args.abspath, x)) + if args.path_to_id: + data["id"] = apply(data["path"], lambda x: os.path.splitext(os.path.basename(x))[0]) + if args.merge_cmotion: + data["text"] = apply(data, lambda x: merge_cmotion(x["text"], x["cmotion"]), axis=1) + if args.refine_llm_caption: + assert "text" in data.columns + data["text"] = apply(data["text"], remove_caption_prefix) + if args.append_text is not None: + assert "text" in data.columns + data["text"] = data["text"] + args.append_text + if args.score_to_text: + data["text"] = apply(data, score_to_text, axis=1) + if args.clean_caption: + assert "text" in data.columns + data["text"] = apply( + data["text"], + partial(text_preprocessing, use_text_preprocessing=True), + ) + if args.count_num_token is not None: + assert "text" in data.columns + data["text_len"] = apply(data["text"], lambda x: len(tokenizer(x)["input_ids"])) + if args.update_text is not None: + data_new = pd.read_csv(args.update_text) + num_updated = data.path.isin(data_new.path).sum() + print(f"Number of updated samples: {num_updated}.") + data = data.set_index("path") + data_new = data_new[["path", "text"]].set_index("path") + data.update(data_new) + data = data.reset_index() + + # sort + if args.sort is not None: + data = data.sort_values(by=args.sort, ascending=False) + if args.sort_ascending is not None: + data = data.sort_values(by=args.sort_ascending, ascending=True) + + # filtering + if args.filesize: + assert "path" in data.columns + data["filesize"] = apply(data["path"], lambda x: os.stat(x).st_size / 1024 / 1024) + if args.fsmax is not None: + assert "filesize" in data.columns + data = data[data["filesize"] <= args.fsmax] + if args.remove_empty_caption: + assert "text" in data.columns + data = data[data["text"].str.len() > 0] + data = data[~data["text"].isna()] + if args.fmin is not None: + assert "num_frames" in data.columns + data = data[data["num_frames"] >= args.fmin] + if args.fmax is not None: + assert "num_frames" in data.columns + data = data[data["num_frames"] <= args.fmax] + if args.fpsmax is not None: + assert "fps" in data.columns + data = data[(data["fps"] <= args.fpsmax) | np.isnan(data["fps"])] + if args.hwmax is not None: + if "resolution" not in data.columns: + height = data["height"] + width = data["width"] + data["resolution"] = height * width + data = data[data["resolution"] <= args.hwmax] + if args.aesmin is not None: + assert "aes" in data.columns + data = data[data["aes"] >= args.aesmin] + if args.matchmin is not None: + assert "match" in data.columns + data = data[data["match"] >= args.matchmin] + if args.flowmin is not None: + assert "flow" in data.columns + data = data[data["flow"] >= args.flowmin] + if args.remove_text_duplication: + data = data.drop_duplicates(subset=["text"], keep="first") + if args.img_only: + data = data[data["path"].str.lower().str.endswith(IMG_EXTENSIONS)] + if args.vid_only: + data = data[~data["path"].str.lower().str.endswith(IMG_EXTENSIONS)] + + # process 
data + if args.shuffle: + data = data.sample(frac=1).reset_index(drop=True) # shuffle + if args.head is not None: + data = data.head(args.head) + + # train columns + if args.train_column: + all_columns = data.columns + columns_to_drop = all_columns.difference(TRAIN_COLUMNS) + data = data.drop(columns=columns_to_drop) + + print(f"Filtered number of samples: {len(data)}.") + + # shard data + if args.shard is not None: + sharded_data = np.array_split(data, args.shard) + for i in range(args.shard): + output_path_part = output_path.split(".") + output_path_s = ".".join(output_path_part[:-1]) + f"_{i}." + output_path_part[-1] + save_file(sharded_data[i], output_path_s) + print(f"Saved {len(sharded_data[i])} samples to {output_path_s}.") + else: + save_file(data, output_path) + print(f"Saved {len(data)} samples to {output_path}.") + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("input", type=str, nargs="+", help="path to the input dataset") + parser.add_argument("--output", type=str, default=None, help="output path") + parser.add_argument("--format", type=str, default="csv", help="output format", choices=["csv", "parquet"]) + parser.add_argument("--disable-parallel", action="store_true", help="disable parallel processing") + parser.add_argument("--num-workers", type=int, default=None, help="number of workers") + parser.add_argument("--seed", type=int, default=42, help="random seed") + + # special case + parser.add_argument("--shard", type=int, default=None, help="shard the dataset") + parser.add_argument("--sort", type=str, default=None, help="sort by column") + parser.add_argument("--sort-ascending", type=str, default=None, help="sort by column (ascending order)") + parser.add_argument("--difference", type=str, default=None, help="get difference from the dataset") + parser.add_argument( + "--intersection", type=str, default=None, help="keep the paths in csv from the dataset and merge columns" + ) + parser.add_argument("--train-column", action="store_true", help="only keep the train column") + + # IO-related + parser.add_argument("--info", action="store_true", help="get the basic information of each video and image") + parser.add_argument("--video-info", action="store_true", help="get the basic information of each video") + parser.add_argument("--ext", action="store_true", help="check if the file exists") + parser.add_argument( + "--load-caption", type=str, default=None, choices=["json", "txt"], help="load the caption from json or txt" + ) + + # path processing + parser.add_argument("--relpath", type=str, default=None, help="modify the path to relative path by root given") + parser.add_argument("--abspath", type=str, default=None, help="modify the path to absolute path by root given") + parser.add_argument("--path-to-id", action="store_true", help="add id based on path") + parser.add_argument( + "--path-subset", type=str, default=None, help="extract a subset data containing the given `path-subset` value" + ) + parser.add_argument( + "--remove-empty-path", + action="store_true", + help="remove rows with empty path", # caused by transform, cannot read path + ) + + # caption filtering + parser.add_argument( + "--remove-empty-caption", + action="store_true", + help="remove rows with empty caption", + ) + parser.add_argument("--remove-url", action="store_true", help="remove rows with url in caption") + parser.add_argument("--lang", type=str, default=None, help="remove rows with other language") + parser.add_argument("--remove-path-duplication", action="store_true", 
help="remove rows with duplicated path") + parser.add_argument("--remove-text-duplication", action="store_true", help="remove rows with duplicated caption") + + # caption processing + parser.add_argument("--refine-llm-caption", action="store_true", help="modify the caption generated by LLM") + parser.add_argument( + "--clean-caption", action="store_true", help="modify the caption according to T5 pipeline to suit training" + ) + parser.add_argument("--merge-cmotion", action="store_true", help="merge the camera motion to the caption") + parser.add_argument( + "--count-num-token", type=str, choices=["t5"], default=None, help="Count the number of tokens in the caption" + ) + parser.add_argument("--append-text", type=str, default=None, help="append text to the caption") + parser.add_argument("--score-to-text", action="store_true", help="convert score to text") + parser.add_argument("--update-text", type=str, default=None, help="update the text with the given text") + + # score filtering + parser.add_argument("--filesize", action="store_true", help="get the filesize of each video and image in MB") + parser.add_argument("--fsmax", type=int, default=None, help="filter the dataset by maximum filesize") + parser.add_argument("--fmin", type=int, default=None, help="filter the dataset by minimum number of frames") + parser.add_argument("--fmax", type=int, default=None, help="filter the dataset by maximum number of frames") + parser.add_argument("--hwmax", type=int, default=None, help="filter the dataset by maximum resolution") + parser.add_argument("--aesmin", type=float, default=None, help="filter the dataset by minimum aes score") + parser.add_argument("--matchmin", type=float, default=None, help="filter the dataset by minimum match score") + parser.add_argument("--flowmin", type=float, default=None, help="filter the dataset by minimum flow score") + parser.add_argument("--fpsmax", type=float, default=None, help="filter the dataset by maximum fps") + parser.add_argument("--img-only", action="store_true", help="only keep the image data") + parser.add_argument("--vid-only", action="store_true", help="only keep the video data") + + # data processing + parser.add_argument("--shuffle", default=False, action="store_true", help="shuffle the dataset") + parser.add_argument("--head", type=int, default=None, help="return the first n rows of data") + + return parser.parse_args() + + +def get_output_path(args, input_name): + if args.output is not None: + return args.output + name = input_name + dir_path = os.path.dirname(args.input[0]) + + # sort + if args.sort is not None: + assert args.sort_ascending is None + name += "_sort" + if args.sort_ascending is not None: + assert args.sort is None + name += "_sort" + + # IO-related + # for IO-related, the function must be wrapped in try-except + if args.info: + name += "_info" + if args.video_info: + name += "_vinfo" + if args.ext: + name += "_ext" + if args.load_caption: + name += f"_load{args.load_caption}" + + # path processing + if args.relpath is not None: + name += "_relpath" + if args.abspath is not None: + name += "_abspath" + if args.remove_empty_path: + name += "_noemptypath" + + # caption filtering + if args.remove_empty_caption: + name += "_noempty" + if args.remove_url: + name += "_nourl" + if args.lang is not None: + name += f"_{args.lang}" + if args.remove_path_duplication: + name += "_noduppath" + if args.remove_text_duplication: + name += "_noduptext" + if args.path_subset: + name += "_subset" + + # caption processing + if args.refine_llm_caption: + 
name += "_llm" + if args.clean_caption: + name += "_clean" + if args.merge_cmotion: + name += "_cmcaption" + if args.count_num_token: + name += "_ntoken" + if args.append_text is not None: + name += "_appendtext" + if args.score_to_text: + name += "_score2text" + if args.update_text is not None: + name += "_update" + + # score filtering + if args.filesize: + name += "_filesize" + if args.fsmax is not None: + name += f"_fsmax{args.fsmax}" + if args.fmin is not None: + name += f"_fmin{args.fmin}" + if args.fmax is not None: + name += f"_fmax{args.fmax}" + if args.fpsmax is not None: + name += f"_fpsmax{args.fpsmax}" + if args.hwmax is not None: + name += f"_hwmax{args.hwmax}" + if args.aesmin is not None: + name += f"_aesmin{args.aesmin}" + if args.matchmin is not None: + name += f"_matchmin{args.matchmin}" + if args.flowmin is not None: + name += f"_flowmin{args.flowmin}" + if args.img_only: + name += "_img" + if args.vid_only: + name += "_vid" + + # processing + if args.shuffle: + name += f"_shuffled_seed{args.seed}" + if args.head is not None: + name += f"_first_{args.head}_data" + + output_path = os.path.join(dir_path, f"{name}.{args.format}") + return output_path + + +if __name__ == "__main__": + args = parse_args() + if args.disable_parallel: + PANDA_USE_PARALLEL = False + if PANDA_USE_PARALLEL: + if args.num_workers is not None: + pandarallel.initialize(nb_workers=args.num_workers, progress_bar=True) + else: + pandarallel.initialize(progress_bar=True) + if args.seed is not None: + random.seed(args.seed) + np.random.seed(args.seed) + main(args) diff --git a/exp_code/1_benchmark/pa_vdm/tools/datasets/filter_panda10m.py b/exp_code/1_benchmark/pa_vdm/tools/datasets/filter_panda10m.py new file mode 100644 index 0000000000000000000000000000000000000000..86a9f1922069fea0ece6e121e7359d38b1d6ce30 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/datasets/filter_panda10m.py @@ -0,0 +1,262 @@ +# TODO: remove this file before releasing + +import argparse +import html +import os +import re + +import pandas as pd +from tqdm import tqdm + +tqdm.pandas() + +try: + from pandarallel import pandarallel + + pandarallel.initialize(progress_bar=True) + pandas_has_parallel = True +except ImportError: + pandas_has_parallel = False + + +def apply(df, func, **kwargs): + if pandas_has_parallel: + return df.parallel_apply(func, **kwargs) + return df.progress_apply(func, **kwargs) + + +def basic_clean(text): + import ftfy + + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +BAD_PUNCT_REGEX = re.compile( + r"[" + "#®•©™&@·º½¾¿¡§~" + "\)" + "\(" + "\]" + "\[" + "\}" + "\{" + "\|" + "\\" + "\/" + "\*" + r"]{1,}" +) # noqa + + +def clean_caption(caption): + import urllib.parse as ul + + from bs4 import BeautifulSoup + + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @ + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0—31EF CJK Strokes + # 31F0—31FF Katakana Phonetic Extensions + # 3200—32FF Enclosed CJK Letters and Months + # 
3300—33FF CJK Compatibility + # 3400—4DBF CJK Unified Ideographs Extension A + # 4DC0—4DFF Yijing Hexagram Symbols + # 4E00—9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # все виды тире / all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # кавычки к одному стандарту / bring quotes to one standard + caption = re.sub(r"[`´«»“”¨]", '"', caption) + caption = re.sub(r"[‘’]", "'", caption) + + # &quot; + caption = re.sub(r"&quot;?", "", caption) + # &amp + caption = re.sub(r"&amp", "", caption) + + # ip addresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." + caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(BAD_PUNCT_REGEX, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = basic_clean(caption) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a...
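+ # the substitutions above strip alphanumeric IDs and spam-like phrases (shipping, download, page numbers) typical of web-scraped captions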
+ + caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + +def get_10m_set(): + meta_path_10m = "/mnt/hdd/data/Panda-70M/raw/meta/train/panda70m_training_10m.csv" + meta_10m = pd.read_csv(meta_path_10m) + + def process_single_caption(row): + text_list = eval(row["caption"]) + clean_list = [clean_caption(x) for x in text_list] + return str(clean_list) + + ret = apply(meta_10m, process_single_caption, axis=1) + # ret = meta_10m.progress_apply(process_single_caption, axis=1) + print("==> text processed.") + + text_list = [] + for x in ret: + text_list += eval(x) + # text_set = text_set.union(set(eval(x))) + text_set = set(text_list) + # meta_10m['caption_new'] = ret + # meta_10m.to_csv('/mnt/hdd/data/Panda-70M/raw/meta/train/panda70m_training_10m_new-cap.csv') + + # video_id_set = set(meta_10m['videoID']) + # id2t = {} + # for idx, row in tqdm(meta_10m.iterrows(), total=len(meta_10m)): + # video_id = row['videoID'] + # text_list = eval(row['caption']) + # id2t[video_id] = set(text_list) + + print(f"==> Loaded meta_10m from '{meta_path_10m}'") + return text_set + + +def filter_panda10m_text(meta_path, text_set): + def process_single_row(row): + # path = row['path'] + t = row["text"] + # fname = os.path.basename(path) + # video_id = fname[:fname.rindex('_')] + if t not in text_set: + return False + return True + + meta = pd.read_csv(meta_path) + ret = apply(meta, process_single_row, axis=1) + # ret = meta.progress_apply(process_single_row, axis=1) + + meta = meta[ret] + wo_ext, ext = os.path.splitext(meta_path) + out_path = f"{wo_ext}_filter-10m{ext}" + meta.to_csv(out_path, index=False) + print(f"New meta (shape={meta.shape}) saved to '{out_path}'.") + + +def filter_panda10m_timestamp(meta_path): + meta_path_10m = "/mnt/hdd/data/Panda-70M/raw/meta/train/panda70m_training_10m.csv" + meta_10m = pd.read_csv(meta_path_10m) + + id2t = {} + for idx, row in tqdm(meta_10m.iterrows(), total=len(meta_10m)): + video_id = row["videoID"] + timestamp = eval(row["timestamp"]) + timestamp = [str(tuple(x)) for x in timestamp] + id2t[video_id] = timestamp + + # video_id_set_10m = set(meta_10m['videoID']) + print(f"==> Loaded meta_10m from '{meta_path_10m}'") + + def process_single_row(row): + path = row["path"] + t = row["timestamp"] + fname = os.path.basename(path) + video_id = fname[: fname.rindex("_")] + if video_id not in id2t: + return False + if t not in id2t[video_id]: + return False + return True + # return video_id in video_id_set_10m + + meta = pd.read_csv(meta_path) + ret = apply(meta, process_single_row, axis=1) + + meta = meta[ret] + wo_ext, ext = os.path.splitext(meta_path) + out_path = f"{wo_ext}_filter-10m{ext}" + meta.to_csv(out_path, index=False) + print(f"New meta (shape={meta.shape}) saved to '{out_path}'.") + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--meta_path", type=str, nargs="+") + parser.add_argument("--num_workers", default=5, type=int) + + args = parser.parse_args() + return args + + +if __name__ == "__main__": + args = parse_args() + + text_set = get_10m_set() + for x in args.meta_path: + filter_panda10m_text(x, text_set) diff --git 
a/exp_code/1_benchmark/pa_vdm/tools/datasets/split.py b/exp_code/1_benchmark/pa_vdm/tools/datasets/split.py new file mode 100644 index 0000000000000000000000000000000000000000..4e312b2bd55adcbe834e8897affd7e3099fd2d42 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/datasets/split.py @@ -0,0 +1,72 @@ +import argparse +from typing import List + +import pandas as pd +from mmengine.config import Config + +from opensora.datasets.bucket import Bucket + + +def split_by_bucket( + bucket: Bucket, + input_files: List[str], + output_path: str, + limit: int, + frame_interval: int, +): + print(f"Split {len(input_files)} files into {len(bucket)} buckets") + total_limit = len(bucket) * limit + bucket_cnt = {} + # get all bucket id + for hw_id, d in bucket.ar_criteria.items(): + for t_id, v in d.items(): + for ar_id in v.keys(): + bucket_id = (hw_id, t_id, ar_id) + bucket_cnt[bucket_id] = 0 + output_df = None + # split files + for path in input_files: + df = pd.read_csv(path) + if output_df is None: + output_df = pd.DataFrame(columns=df.columns) + for i in range(len(df)): + row = df.iloc[i] + t, h, w = row["num_frames"], row["height"], row["width"] + bucket_id = bucket.get_bucket_id(t, h, w, frame_interval) + if bucket_id is None: + continue + if bucket_cnt[bucket_id] < limit: + bucket_cnt[bucket_id] += 1 + output_df = pd.concat([output_df, pd.DataFrame([row])], ignore_index=True) + if len(output_df) >= total_limit: + break + if len(output_df) >= total_limit: + break + assert len(output_df) <= total_limit + if len(output_df) == total_limit: + print(f"All buckets are full ({total_limit} samples)") + else: + print(f"Only {len(output_df)} files are used") + output_df.to_csv(output_path, index=False) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("input", type=str, nargs="+") + parser.add_argument("-o", "--output", required=True) + parser.add_argument("-c", "--config", required=True) + parser.add_argument("-l", "--limit", default=200, type=int) + args = parser.parse_args() + assert args.limit > 0 + + cfg = Config.fromfile(args.config) + bucket_config = cfg.bucket_config + # rewrite bucket_config + for ar, d in bucket_config.items(): + for frames, t in d.items(): + p, bs = t + if p > 0.0: + p = 1.0 + d[frames] = (p, bs) + bucket = Bucket(bucket_config) + split_by_bucket(bucket, args.input, args.output, args.limit, cfg.dataset.frame_interval) diff --git a/exp_code/1_benchmark/pa_vdm/tools/datasets/transform.py b/exp_code/1_benchmark/pa_vdm/tools/datasets/transform.py new file mode 100644 index 0000000000000000000000000000000000000000..bffb191d35c2941cfecf5035cf6982d6a2bef734 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/datasets/transform.py @@ -0,0 +1,124 @@ +import argparse +import os +import random + +import cv2 +import numpy as np +import pandas as pd +from tqdm import tqdm + +from .utils import IMG_EXTENSIONS, extract_frames + +tqdm.pandas() + +try: + from pandarallel import pandarallel + + pandarallel.initialize(progress_bar=True) + pandas_has_parallel = True +except ImportError: + pandas_has_parallel = False + + +def apply(df, func, **kwargs): + if pandas_has_parallel: + return df.parallel_apply(func, **kwargs) + return df.progress_apply(func, **kwargs) + + +def get_new_path(path, input_dir, output): + path_new = os.path.join(output, os.path.relpath(path, input_dir)) + os.makedirs(os.path.dirname(path_new), exist_ok=True) + return path_new + + +def resize(path, length, input_dir, output): + path_new = get_new_path(path, input_dir, output) + ext = 
os.path.splitext(path)[1].lower() + assert ext in IMG_EXTENSIONS + img = cv2.imread(path) + if img is not None: + h, w = img.shape[:2] + if min(h, w) > length: + if h > w: + new_h = length + new_w = int(w * new_h / h) + else: + new_w = length + new_h = int(h * new_w / w) + img = cv2.resize(img, (new_w, new_h)) + cv2.imwrite(path_new, img) + else: + path_new = "" + return path_new + + +def rand_crop(path, input_dir, output): + ext = os.path.splitext(path)[1].lower() + path_new = get_new_path(path, input_dir, output) + assert ext in IMG_EXTENSIONS + img = cv2.imread(path) + if img is not None: + h, w = img.shape[:2] + width, height, _ = img.shape + pos = random.randint(0, 3) + if pos == 0: + img_cropped = img[: width // 2, : height // 2] + elif pos == 1: + img_cropped = img[width // 2 :, : height // 2] + elif pos == 2: + img_cropped = img[: width // 2, height // 2 :] + else: + img_cropped = img[width // 2 :, height // 2 :] + cv2.imwrite(path_new, img_cropped) + else: + path_new = "" + return path_new + + +def main(args): + data = pd.read_csv(args.input) + if args.method == "img_rand_crop": + data["path"] = apply(data["path"], lambda x: rand_crop(x, args.input_dir, args.output)) + output_csv = args.input.replace(".csv", f"_rand_crop.csv") + elif args.method == "img_resize": + data["path"] = apply(data["path"], lambda x: resize(x, args.length, args.input_dir, args.output)) + output_csv = args.input.replace(".csv", f"_resized{args.length}.csv") + elif args.method == "vid_frame_extract": + points = args.points if args.points is not None else args.points_index + data = pd.DataFrame(np.repeat(data.values, 3, axis=0), columns=data.columns) + num_points = len(points) + data["point"] = np.nan + for i, point in enumerate(points): + if isinstance(point, int): + data.loc[i::num_points, "point"] = point + else: + data.loc[i::num_points, "point"] = data.loc[i::num_points, "num_frames"] * point + data["path"] = apply(data, lambda x: extract_frames(x["path"], args.input_dir, args.output, x["point"]), axis=1) + output_csv = args.input.replace(".csv", f"_vid_frame_extract.csv") + + data.to_csv(output_csv, index=False) + print(f"Saved to {output_csv}") + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("method", type=str, choices=["img_resize", "img_rand_crop", "vid_frame_extract"]) + parser.add_argument("input", type=str) + parser.add_argument("input_dir", type=str) + parser.add_argument("output", type=str) + parser.add_argument("--disable-parallel", action="store_true") + parser.add_argument("--length", type=int, default=2160) + parser.add_argument("--seed", type=int, default=42, help="seed for random") + parser.add_argument("--points", nargs="+", type=float, default=None) + parser.add_argument("--points_index", nargs="+", type=int, default=None) + args = parser.parse_args() + return args + + +if __name__ == "__main__": + args = parse_args() + random.seed(args.seed) + if args.disable_parallel: + pandas_has_parallel = False + main(args) diff --git a/exp_code/1_benchmark/pa_vdm/tools/datasets/utils.py b/exp_code/1_benchmark/pa_vdm/tools/datasets/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ec7e1b9e15a204d4cebf30bfa7c48cdf49d30183 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/datasets/utils.py @@ -0,0 +1,130 @@ +import os + +import cv2 +import numpy as np +from PIL import Image + +IMG_EXTENSIONS = (".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp") +VID_EXTENSIONS = (".mp4", ".avi", ".mov", ".mkv") + + +def 
is_video(filename): + ext = os.path.splitext(filename)[-1].lower() + return ext in VID_EXTENSIONS + + +def extract_frames( + video_path, + frame_inds=None, + points=None, + backend="opencv", + return_length=False, + num_frames=None, +): + """ + Args: + video_path (str): path to video + frame_inds (List[int]): indices of frames to extract + points (List[float]): values within [0, 1); multiply #frames to get frame indices + Return: + List[PIL.Image] + """ + assert backend in ["av", "opencv", "decord"] + assert (frame_inds is None) or (points is None) + + if backend == "av": + import av + + container = av.open(video_path) + if num_frames is not None: + total_frames = num_frames + else: + total_frames = container.streams.video[0].frames + + if points is not None: + frame_inds = [int(p * total_frames) for p in points] + + frames = [] + for idx in frame_inds: + if idx >= total_frames: + idx = total_frames - 1 + target_timestamp = int(idx * av.time_base / container.streams.video[0].average_rate) + container.seek(target_timestamp) + frame = next(container.decode(video=0)).to_image() + frames.append(frame) + + if return_length: + return frames, total_frames + return frames + + elif backend == "decord": + import decord + + container = decord.VideoReader(video_path, num_threads=1) + if num_frames is not None: + total_frames = num_frames + else: + total_frames = len(container) + + if points is not None: + frame_inds = [int(p * total_frames) for p in points] + + frame_inds = np.array(frame_inds).astype(np.int32) + frame_inds[frame_inds >= total_frames] = total_frames - 1 + frames = container.get_batch(frame_inds).asnumpy() # [N, H, W, C] + frames = [Image.fromarray(x) for x in frames] + + if return_length: + return frames, total_frames + return frames + + elif backend == "opencv": + cap = cv2.VideoCapture(video_path) + if num_frames is not None: + total_frames = num_frames + else: + total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + if points is not None: + frame_inds = [int(p * total_frames) for p in points] + + frames = [] + for idx in frame_inds: + if idx >= total_frames: + idx = total_frames - 1 + + cap.set(cv2.CAP_PROP_POS_FRAMES, idx) + + # HACK: sometimes OpenCV fails to read frames, return a black frame instead + try: + ret, frame = cap.read() + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + frame = Image.fromarray(frame) + except Exception as e: + print(f"[Warning] Error reading frame {idx} from {video_path}: {e}") + # First, try to read the first frame + try: + print(f"[Warning] Try reading first frame.") + cap.set(cv2.CAP_PROP_POS_FRAMES, 0) + ret, frame = cap.read() + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + frame = Image.fromarray(frame) + # If that fails, return a black frame + except Exception as e: + print(f"[Warning] Error in reading first frame from {video_path}: {e}") + height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + frame = Image.new("RGB", (width, height), (0, 0, 0)) + + # HACK: if height or width is 0, return a black frame instead + if frame.height == 0 or frame.width == 0: + height = width = 256 + frame = Image.new("RGB", (width, height), (0, 0, 0)) + + frames.append(frame) + + if return_length: + return frames, total_frames + return frames + else: + raise ValueError diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/README.md b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..3886018475bacc896a0ec63d4ca81ef00ed0f100 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/README.md @@ -0,0 +1,44 @@ +# Frame Interpolation + +In the current version, we sample 1 frame out of every 3 frames in the video. Although we plan to use a VAE to avoid frame loss, for now we provide a frame interpolation tool to interpolate the video. The frame interpolation tool is based on [AMT](https://github.com/MCG-NKU/AMT). + +Interpolation can be useful for scenery videos, but it may not be suitable for videos with fast motion. + +## Requirement + +Install the required dependencies by following the "Data Dependencies" and "Frame Interpolation" sections of our [installation instructions](../../docs/installation.md). + + + +## Model + +We use **AMT** as our frame interpolation model. After sampling, you can use it to interpolate your video smoothly. + +## Usage + +The checkpoint file is downloaded automatically to the user's `.cache` directory. You can apply frame interpolation to a single video file or to a folder of videos. + +1. Process a video file + +```bash +python -m tools.frame_interpolation.interpolation your_video.mp4 +``` + +2. Process all video files in a target directory + +```bash +python -m tools.frame_interpolation.interpolation your_video_dir --output_path samples/interpolation +``` + +The output video is stored at `output_path`, and its duration equals `the total number of frames after frame interpolation / the frame rate`. + +### Command Line Arguments + +* `input`: Path of the input video. **Video path** or **folder path (with `--folder`)** +* `--ckpt`: Pretrained model of [AMT](https://github.com/MCG-NKU/AMT). Default path: `~/.cache/amt-g.pth`. +* `--niters`: Number of interpolation iterations. With $m$ input frames, `--niters` $=n$ produces $2^n\times (m-1)+1$ output frames. +* `--fps`: Frame rate of the input video. (Default: 8) +* `--output_path`: **Folder path** of the output video. diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/__init__.py b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/interpolation.py b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/interpolation.py new file mode 100644 index 0000000000000000000000000000000000000000..c9d8d1d2e08b4f837d9563efe937fdc445e75185 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/interpolation.py @@ -0,0 +1,219 @@ +# this script is modified from https://github.com/MCG-NKU/AMT/blob/main/demos/demo_2x.py +import argparse +import os +import os.path as osp + +import cv2 +import numpy as np +import torch + +from opensora.utils.ckpt_utils import download_model + +from .networks.amt_g import Model +from .utils.utils import InputPadder, img2tensor, tensor2img + +hf_endpoint = os.environ.get("HF_ENDPOINT") +if hf_endpoint is None: + hf_endpoint = "https://huggingface.co" +VID_EXT = [".mp4", ".avi", ".mov", ".mkv", ".flv", ".wmv", ".webm"] +network_cfg = { + "params": { + "corr_radius": 3, + "corr_lvls": 4, + "num_flows": 5, + }, +} +device = "cuda" if torch.cuda.is_available() else "cpu" + + +def init(): + """ + initialize the device and the anchor resolution.
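+ + returns: + anchor_resolution: int, the anchor pixel count used to compute the rescaling factor. + anchor_memory: int, reference VRAM consumption at the anchor resolution, in bytes. + anchor_memory_bias: int, constant VRAM overhead, in bytes. + vram_avail: int, total VRAM of the device in bytes (1 in cpu mode, where no resizing is applied).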
+ """ + + if device == "cuda": + anchor_resolution = 1024 * 512 + anchor_memory = 1500 * 1024**2 + anchor_memory_bias = 2500 * 1024**2 + vram_avail = torch.cuda.get_device_properties(device).total_memory + print("VRAM available: {:.1f} MB".format(vram_avail / 1024**2)) + else: + # Do not resize in cpu mode + anchor_resolution = 8192 * 8192 + anchor_memory = 1 + anchor_memory_bias = 0 + vram_avail = 1 + + return anchor_resolution, anchor_memory, anchor_memory_bias, vram_avail + + +def get_input_video_from_path(input_path): + """ + Get the input video from the input_path. + + params: + input_path: str, the path of the input video. + devices: str, the device to run the model. + returns: + inputs: list, the list of the input frames. + scale: float, the scale of the input frames. + padder: InputPadder, the padder to pad the input frames. + """ + + anchor_resolution, anchor_memory, anchor_memory_bias, vram_avail = init() + + if osp.splitext(input_path)[-1].lower() in VID_EXT: + vcap = cv2.VideoCapture(input_path) + + inputs = [] + w = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + scale = anchor_resolution / (h * w) * np.sqrt((vram_avail - anchor_memory_bias) / anchor_memory) + scale = 1 if scale > 1 else scale + scale = 1 / np.floor(1 / np.sqrt(scale) * 16) * 16 + if scale < 1: + print(f"Due to the limited VRAM, the video will be scaled by {scale:.2f}") + padding = int(16 / scale) + padder = InputPadder((h, w), padding) + while True: + ret, frame = vcap.read() + if ret is False: + break + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + frame_t = img2tensor(frame).to(device) + frame_t = padder.pad(frame_t) + inputs.append(frame_t) + print(f"Loading the [video] from {input_path}, the number of frames [{len(inputs)}]") + else: + raise TypeError("Input should be a video.") + + return inputs, scale, padder + + +def load_model(ckpt): + """ + load the frame interpolation model. + """ + params = network_cfg.get("params", {}) + model = Model(**params) + model.load_state_dict(ckpt["state_dict"]) + model = model.to(device) + model.eval() + return model + + +def interpolater(model, inputs, scale, padder, iters=1): + """ + interpolating with the interpolation model. + + params: + model: nn.Module, the frame interpolation model. + inputs: list, the list of the input frames. + scale: float, the scale of the input frames. + iters: int, the number of iterations of interpolation. The final frames model generating is 2 ** iters * (m - 1) + 1 and m is input frames. + returns: + outputs: list, the list of the output frames. + """ + + print("Start frame interpolation:") + embt = torch.tensor(1 / 2).float().view(1, 1, 1, 1).to(device) + + for i in range(iters): + print(f"Iter {i+1}. input_frames={len(inputs)} output_frames={2*len(inputs)-1}") + outputs = [inputs[0]] + for in_0, in_1 in zip(inputs[:-1], inputs[1:]): + in_0 = in_0.to(device) + in_1 = in_1.to(device) + with torch.no_grad(): + imgt_pred = model(in_0, in_1, embt, scale_factor=scale, eval=True)["imgt_pred"] + outputs += [imgt_pred.cpu(), in_1.cpu()] + inputs = outputs + + outputs = padder.unpad(*outputs) + return outputs + + +def write(outputs, input_path, output_path, fps=30): + """ + write results to the output_path. 
+ """ + + if osp.exists(output_path) is False: + os.makedirs(output_path) + + size = outputs[0].shape[2:][::-1] + + _, file_name_with_extension = os.path.split(input_path) + file_name, _ = os.path.splitext(file_name_with_extension) + + save_video_path = f"{output_path}/fps{fps}_{file_name}.mp4" + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + writer = cv2.VideoWriter(save_video_path, fourcc, fps, size) + + for i, imgt_pred in enumerate(outputs): + imgt_pred = tensor2img(imgt_pred) + imgt_pred = cv2.cvtColor(imgt_pred, cv2.COLOR_RGB2BGR) + writer.write(imgt_pred) + print(f"Demo video is saved to [{save_video_path}]") + + writer.release() + + +def process( + model, + image_path, + output_path, + fps, + iters, +): + inputs, scale, padder = get_input_video_from_path(image_path) + outputs = interpolater(model, inputs, scale, padder, iters) + write(outputs, image_path, output_path, fps) + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("input", help="Input video.") + parser.add_argument("--ckpt", type=str, default="./pretrained_models/amt-g.pth", help="The pretrained model.") + parser.add_argument( + "--niters", + type=int, + default=1, + help="Iter of Interpolation. The number of frames will be double after per iter.", + ) + parser.add_argument("--output_path", type=str, default="samples", help="Output path.") + parser.add_argument("--fps", type=int, default=8, help="Frames rate of the output video.") + parser.add_argument("--folder", action="store_true", help="If the input is a folder, set this flag.") + args = parser.parse_args() + + times_frame = 2**args.niters + old_fps = args.fps + args.fps = args.fps * times_frame + print(f"Interpolation will turn {old_fps}fps video to {args.fps}fps video.") + args.input = os.path.expanduser(args.input) + args.ckpt = os.path.expanduser(args.ckpt) + args.folder = osp.splitext(args.input)[-1].lower() not in VID_EXT + args.ckpt = download_model(local_path=args.ckpt, url=hf_endpoint + "/lalala125/AMT/resolve/main/amt-g.pth") + return args + + +if __name__ == "__main__": + args = parse_args() + ckpt_path = args.ckpt + input_path = args.input + output_path = args.output_path + iters = int(args.niters) + fps = int(args.fps) + + model = load_model(ckpt_path) + + if args.folder: + for file in os.listdir(input_path): + if osp.splitext(file)[-1].lower() in VID_EXT: + vid_path = os.path.join(input_path, file) + process(model, vid_path, output_path, fps, iters) + else: + process(model, input_path, output_path, fps, iters) + + print("Interpolation is done.") + print(f"Output path: {output_path}") diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/__init__.py b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4db0516c70c506c454be74855adffa9ba686e0fe --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/__init__.py @@ -0,0 +1 @@ +from .amt_g import Model diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/amt_g.py b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/amt_g.py new file mode 100644 index 0000000000000000000000000000000000000000..84b28cbfabfd469be5ff47815babc49cd7ddbe12 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/amt_g.py @@ -0,0 +1,156 @@ +import torch +import torch.nn as nn + +from .blocks.feat_enc import LargeEncoder +from .blocks.ifrnet import Encoder, InitDecoder, IntermediateDecoder, resize +from 
.blocks.multi_flow import MultiFlowDecoder, multi_flow_combine +from .blocks.raft import BasicUpdateBlock, BidirCorrBlock, coords_grid + + +class Model(nn.Module): + def __init__(self, corr_radius=3, corr_lvls=4, num_flows=5, channels=[84, 96, 112, 128], skip_channels=84): + super(Model, self).__init__() + self.radius = corr_radius + self.corr_levels = corr_lvls + self.num_flows = num_flows + + self.feat_encoder = LargeEncoder(output_dim=128, norm_fn="instance", dropout=0.0) + self.encoder = Encoder(channels, large=True) + self.decoder4 = InitDecoder(channels[3], channels[2], skip_channels) + self.decoder3 = IntermediateDecoder(channels[2], channels[1], skip_channels) + self.decoder2 = IntermediateDecoder(channels[1], channels[0], skip_channels) + self.decoder1 = MultiFlowDecoder(channels[0], skip_channels, num_flows) + + self.update4 = self._get_updateblock(112, None) + self.update3_low = self._get_updateblock(96, 2.0) + self.update2_low = self._get_updateblock(84, 4.0) + + self.update3_high = self._get_updateblock(96, None) + self.update2_high = self._get_updateblock(84, None) + + self.comb_block = nn.Sequential( + nn.Conv2d(3 * self.num_flows, 6 * self.num_flows, 7, 1, 3), + nn.PReLU(6 * self.num_flows), + nn.Conv2d(6 * self.num_flows, 3, 7, 1, 3), + ) + + def _get_updateblock(self, cdim, scale_factor=None): + return BasicUpdateBlock( + cdim=cdim, + hidden_dim=192, + flow_dim=64, + corr_dim=256, + corr_dim2=192, + fc_dim=188, + scale_factor=scale_factor, + corr_levels=self.corr_levels, + radius=self.radius, + ) + + def _corr_scale_lookup(self, corr_fn, coord, flow0, flow1, embt, downsample=1): + # convert t -> 0 to 0 -> 1 | convert t -> 1 to 1 -> 0 + # based on linear assumption + t1_scale = 1.0 / embt + t0_scale = 1.0 / (1.0 - embt) + if downsample != 1: + inv = 1 / downsample + flow0 = inv * resize(flow0, scale_factor=inv) + flow1 = inv * resize(flow1, scale_factor=inv) + + corr0, corr1 = corr_fn(coord + flow1 * t1_scale, coord + flow0 * t0_scale) + corr = torch.cat([corr0, corr1], dim=1) + flow = torch.cat([flow0, flow1], dim=1) + return corr, flow + + def forward(self, img0, img1, embt, scale_factor=1.0, eval=False, **kwargs): + mean_ = torch.cat([img0, img1], 2).mean(1, keepdim=True).mean(2, keepdim=True).mean(3, keepdim=True) + img0 = img0 - mean_ + img1 = img1 - mean_ + img0_ = resize(img0, scale_factor) if scale_factor != 1.0 else img0 + img1_ = resize(img1, scale_factor) if scale_factor != 1.0 else img1 + b, _, h, w = img0_.shape + coord = coords_grid(b, h // 8, w // 8, img0.device) + + fmap0, fmap1 = self.feat_encoder([img0_, img1_]) # [1, 128, H//8, W//8] + corr_fn = BidirCorrBlock(fmap0, fmap1, radius=self.radius, num_levels=self.corr_levels) + + # f0_1: [1, c0, H//2, W//2] | f0_2: [1, c1, H//4, W//4] + # f0_3: [1, c2, H//8, W//8] | f0_4: [1, c3, H//16, W//16] + f0_1, f0_2, f0_3, f0_4 = self.encoder(img0_) + f1_1, f1_2, f1_3, f1_4 = self.encoder(img1_) + + ######################################### the 4th decoder ######################################### + up_flow0_4, up_flow1_4, ft_3_ = self.decoder4(f0_4, f1_4, embt) + corr_4, flow_4 = self._corr_scale_lookup(corr_fn, coord, up_flow0_4, up_flow1_4, embt, downsample=1) + + # residue update with lookup corr + delta_ft_3_, delta_flow_4 = self.update4(ft_3_, flow_4, corr_4) + delta_flow0_4, delta_flow1_4 = torch.chunk(delta_flow_4, 2, 1) + up_flow0_4 = up_flow0_4 + delta_flow0_4 + up_flow1_4 = up_flow1_4 + delta_flow1_4 + ft_3_ = ft_3_ + delta_ft_3_ + + ######################################### the 3rd decoder 
######################################### + up_flow0_3, up_flow1_3, ft_2_ = self.decoder3(ft_3_, f0_3, f1_3, up_flow0_4, up_flow1_4) + corr_3, flow_3 = self._corr_scale_lookup(corr_fn, coord, up_flow0_3, up_flow1_3, embt, downsample=2) + + # residue update with lookup corr + delta_ft_2_, delta_flow_3 = self.update3_low(ft_2_, flow_3, corr_3) + delta_flow0_3, delta_flow1_3 = torch.chunk(delta_flow_3, 2, 1) + up_flow0_3 = up_flow0_3 + delta_flow0_3 + up_flow1_3 = up_flow1_3 + delta_flow1_3 + ft_2_ = ft_2_ + delta_ft_2_ + + # residue update with lookup corr (hr) + corr_3 = resize(corr_3, scale_factor=2.0) + up_flow_3 = torch.cat([up_flow0_3, up_flow1_3], dim=1) + delta_ft_2_, delta_up_flow_3 = self.update3_high(ft_2_, up_flow_3, corr_3) + ft_2_ += delta_ft_2_ + up_flow0_3 += delta_up_flow_3[:, 0:2] + up_flow1_3 += delta_up_flow_3[:, 2:4] + + ######################################### the 2nd decoder ######################################### + up_flow0_2, up_flow1_2, ft_1_ = self.decoder2(ft_2_, f0_2, f1_2, up_flow0_3, up_flow1_3) + corr_2, flow_2 = self._corr_scale_lookup(corr_fn, coord, up_flow0_2, up_flow1_2, embt, downsample=4) + + # residue update with lookup corr + delta_ft_1_, delta_flow_2 = self.update2_low(ft_1_, flow_2, corr_2) + delta_flow0_2, delta_flow1_2 = torch.chunk(delta_flow_2, 2, 1) + up_flow0_2 = up_flow0_2 + delta_flow0_2 + up_flow1_2 = up_flow1_2 + delta_flow1_2 + ft_1_ = ft_1_ + delta_ft_1_ + + # residue update with lookup corr (hr) + corr_2 = resize(corr_2, scale_factor=4.0) + up_flow_2 = torch.cat([up_flow0_2, up_flow1_2], dim=1) + delta_ft_1_, delta_up_flow_2 = self.update2_high(ft_1_, up_flow_2, corr_2) + ft_1_ += delta_ft_1_ + up_flow0_2 += delta_up_flow_2[:, 0:2] + up_flow1_2 += delta_up_flow_2[:, 2:4] + + ######################################### the 1st decoder ######################################### + up_flow0_1, up_flow1_1, mask, img_res = self.decoder1(ft_1_, f0_1, f1_1, up_flow0_2, up_flow1_2) + + if scale_factor != 1.0: + up_flow0_1 = resize(up_flow0_1, scale_factor=(1.0 / scale_factor)) * (1.0 / scale_factor) + up_flow1_1 = resize(up_flow1_1, scale_factor=(1.0 / scale_factor)) * (1.0 / scale_factor) + mask = resize(mask, scale_factor=(1.0 / scale_factor)) + img_res = resize(img_res, scale_factor=(1.0 / scale_factor)) + + # Merge multiple predictions + imgt_pred = multi_flow_combine(self.comb_block, img0, img1, up_flow0_1, up_flow1_1, mask, img_res, mean_) + imgt_pred = torch.clamp(imgt_pred, 0, 1) + + if eval: + return { + "imgt_pred": imgt_pred, + } + else: + up_flow0_1 = up_flow0_1.reshape(b, self.num_flows, 2, h, w) + up_flow1_1 = up_flow1_1.reshape(b, self.num_flows, 2, h, w) + return { + "imgt_pred": imgt_pred, + "flow0_pred": [up_flow0_1, up_flow0_2, up_flow0_3, up_flow0_4], + "flow1_pred": [up_flow1_1, up_flow1_2, up_flow1_3, up_flow1_4], + "ft_pred": [ft_1_, ft_2_, ft_3_], + } diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/__init__.py b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/feat_enc.py b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/feat_enc.py new file mode 100644 index 0000000000000000000000000000000000000000..479833824b8b2da7e9e3ba05c84b0359b8c79c37 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/feat_enc.py @@ 
-0,0 +1,335 @@ +import torch +import torch.nn as nn + + +class BottleneckBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn="group", stride=1): + super(BottleneckBlock, self).__init__() + + self.conv1 = nn.Conv2d(in_planes, planes // 4, kernel_size=1, padding=0) + self.conv2 = nn.Conv2d(planes // 4, planes // 4, kernel_size=3, padding=1, stride=stride) + self.conv3 = nn.Conv2d(planes // 4, planes, kernel_size=1, padding=0) + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == "group": + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes // 4) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes // 4) + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == "batch": + self.norm1 = nn.BatchNorm2d(planes // 4) + self.norm2 = nn.BatchNorm2d(planes // 4) + self.norm3 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm4 = nn.BatchNorm2d(planes) + + elif norm_fn == "instance": + self.norm1 = nn.InstanceNorm2d(planes // 4) + self.norm2 = nn.InstanceNorm2d(planes // 4) + self.norm3 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm4 = nn.InstanceNorm2d(planes) + + elif norm_fn == "none": + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + self.norm3 = nn.Sequential() + if not stride == 1: + self.norm4 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4) + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + y = self.relu(self.norm3(self.conv3(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x + y) + + +class ResidualBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn="group", stride=1): + super(ResidualBlock, self).__init__() + + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1) + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == "group": + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == "batch": + self.norm1 = nn.BatchNorm2d(planes) + self.norm2 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm3 = nn.BatchNorm2d(planes) + + elif norm_fn == "instance": + self.norm1 = nn.InstanceNorm2d(planes) + self.norm2 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm3 = nn.InstanceNorm2d(planes) + + elif norm_fn == "none": + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + if not stride == 1: + self.norm3 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3) + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x + y) + + +class SmallEncoder(nn.Module): + def __init__(self, output_dim=128, norm_fn="batch", dropout=0.0): + super(SmallEncoder, self).__init__() + self.norm_fn = norm_fn + + if self.norm_fn 
== "group": + self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32) + + elif self.norm_fn == "batch": + self.norm1 = nn.BatchNorm2d(32) + + elif self.norm_fn == "instance": + self.norm1 = nn.InstanceNorm2d(32) + + elif self.norm_fn == "none": + self.norm1 = nn.Sequential() + + self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3) + self.relu1 = nn.ReLU(inplace=True) + + self.in_planes = 32 + self.layer1 = self._make_layer(32, stride=1) + self.layer2 = self._make_layer(64, stride=2) + self.layer3 = self._make_layer(96, stride=2) + + self.dropout = None + if dropout > 0: + self.dropout = nn.Dropout2d(p=dropout) + + self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1): + layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride) + layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1) + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + def forward(self, x): + # if input is list, combine batch dimension + is_list = isinstance(x, tuple) or isinstance(x, list) + if is_list: + batch_dim = x[0].shape[0] + x = torch.cat(x, dim=0) + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu1(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.conv2(x) + + if self.training and self.dropout is not None: + x = self.dropout(x) + + if is_list: + x = torch.split(x, [batch_dim, batch_dim], dim=0) + + return x + + +class BasicEncoder(nn.Module): + def __init__(self, output_dim=128, norm_fn="batch", dropout=0.0): + super(BasicEncoder, self).__init__() + self.norm_fn = norm_fn + + if self.norm_fn == "group": + self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64) + + elif self.norm_fn == "batch": + self.norm1 = nn.BatchNorm2d(64) + + elif self.norm_fn == "instance": + self.norm1 = nn.InstanceNorm2d(64) + + elif self.norm_fn == "none": + self.norm1 = nn.Sequential() + + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3) + self.relu1 = nn.ReLU(inplace=True) + + self.in_planes = 64 + self.layer1 = self._make_layer(64, stride=1) + self.layer2 = self._make_layer(72, stride=2) + self.layer3 = self._make_layer(128, stride=2) + + # output convolution + self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1) + + self.dropout = None + if dropout > 0: + self.dropout = nn.Dropout2d(p=dropout) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1): + layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride) + layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1) + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + def forward(self, x): + # if input is list, combine batch dimension + is_list = isinstance(x, tuple) or isinstance(x, list) + if is_list: + batch_dim = x[0].shape[0] + x = torch.cat(x, dim=0) + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu1(x) + + x = self.layer1(x) + x = 
self.layer2(x) + x = self.layer3(x) + + x = self.conv2(x) + + if self.training and self.dropout is not None: + x = self.dropout(x) + + if is_list: + x = torch.split(x, [batch_dim, batch_dim], dim=0) + + return x + + +class LargeEncoder(nn.Module): + def __init__(self, output_dim=128, norm_fn="batch", dropout=0.0): + super(LargeEncoder, self).__init__() + self.norm_fn = norm_fn + + if self.norm_fn == "group": + self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64) + + elif self.norm_fn == "batch": + self.norm1 = nn.BatchNorm2d(64) + + elif self.norm_fn == "instance": + self.norm1 = nn.InstanceNorm2d(64) + + elif self.norm_fn == "none": + self.norm1 = nn.Sequential() + + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3) + self.relu1 = nn.ReLU(inplace=True) + + self.in_planes = 64 + self.layer1 = self._make_layer(64, stride=1) + self.layer2 = self._make_layer(112, stride=2) + self.layer3 = self._make_layer(160, stride=2) + self.layer3_2 = self._make_layer(160, stride=1) + + # output convolution + self.conv2 = nn.Conv2d(self.in_planes, output_dim, kernel_size=1) + + self.dropout = None + if dropout > 0: + self.dropout = nn.Dropout2d(p=dropout) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1): + layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride) + layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1) + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + def forward(self, x): + # if input is list, combine batch dimension + is_list = isinstance(x, tuple) or isinstance(x, list) + if is_list: + batch_dim = x[0].shape[0] + x = torch.cat(x, dim=0) + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu1(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer3_2(x) + + x = self.conv2(x) + + if self.training and self.dropout is not None: + x = self.dropout(x) + + if is_list: + x = torch.split(x, [batch_dim, batch_dim], dim=0) + + return x diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/ifrnet.py b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/ifrnet.py new file mode 100644 index 0000000000000000000000000000000000000000..5719a040e102c36a417925e78f5acb4cf4402725 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/ifrnet.py @@ -0,0 +1,115 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from tools.frame_interpolation.utils.flow_utils import warp + + +def resize(x, scale_factor): + return F.interpolate(x, scale_factor=scale_factor, mode="bilinear", align_corners=False) + + +def convrelu(in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True): + return nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=bias), + nn.PReLU(out_channels), + ) + + +class ResBlock(nn.Module): + def __init__(self, in_channels, side_channels, bias=True): + super(ResBlock, self).__init__() + self.side_channels = side_channels + self.conv1 = nn.Sequential( + nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=bias), nn.PReLU(in_channels) + ) + self.conv2 = nn.Sequential( + 
nn.Conv2d(side_channels, side_channels, kernel_size=3, stride=1, padding=1, bias=bias), + nn.PReLU(side_channels), + ) + self.conv3 = nn.Sequential( + nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=bias), nn.PReLU(in_channels) + ) + self.conv4 = nn.Sequential( + nn.Conv2d(side_channels, side_channels, kernel_size=3, stride=1, padding=1, bias=bias), + nn.PReLU(side_channels), + ) + self.conv5 = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=bias) + self.prelu = nn.PReLU(in_channels) + + def forward(self, x): + out = self.conv1(x) + + res_feat = out[:, : -self.side_channels, ...] + side_feat = out[:, -self.side_channels :, :, :] + side_feat = self.conv2(side_feat) + out = self.conv3(torch.cat([res_feat, side_feat], 1)) + + res_feat = out[:, : -self.side_channels, ...] + side_feat = out[:, -self.side_channels :, :, :] + side_feat = self.conv4(side_feat) + out = self.conv5(torch.cat([res_feat, side_feat], 1)) + + out = self.prelu(x + out) + return out + + +class Encoder(nn.Module): + def __init__(self, channels, large=False): + super(Encoder, self).__init__() + self.channels = channels + prev_ch = 3 + for idx, ch in enumerate(channels, 1): + k = 7 if large and idx == 1 else 3 + p = 3 if k == 7 else 1 + self.register_module( + f"pyramid{idx}", nn.Sequential(convrelu(prev_ch, ch, k, 2, p), convrelu(ch, ch, 3, 1, 1)) + ) + prev_ch = ch + + def forward(self, in_x): + fs = [] + for idx in range(len(self.channels)): + out_x = getattr(self, f"pyramid{idx+1}")(in_x) + fs.append(out_x) + in_x = out_x + return fs + + +class InitDecoder(nn.Module): + def __init__(self, in_ch, out_ch, skip_ch) -> None: + super().__init__() + self.convblock = nn.Sequential( + convrelu(in_ch * 2 + 1, in_ch * 2), + ResBlock(in_ch * 2, skip_ch), + nn.ConvTranspose2d(in_ch * 2, out_ch + 4, 4, 2, 1, bias=True), + ) + + def forward(self, f0, f1, embt): + h, w = f0.shape[2:] + embt = embt.repeat(1, 1, h, w) + out = self.convblock(torch.cat([f0, f1, embt], 1)) + flow0, flow1 = torch.chunk(out[:, :4, ...], 2, 1) + ft_ = out[:, 4:, ...] + return flow0, flow1, ft_ + + +class IntermediateDecoder(nn.Module): + def __init__(self, in_ch, out_ch, skip_ch) -> None: + super().__init__() + self.convblock = nn.Sequential( + convrelu(in_ch * 3 + 4, in_ch * 3), + ResBlock(in_ch * 3, skip_ch), + nn.ConvTranspose2d(in_ch * 3, out_ch + 4, 4, 2, 1, bias=True), + ) + + def forward(self, ft_, f0, f1, flow0_in, flow1_in): + f0_warp = warp(f0, flow0_in) + f1_warp = warp(f1, flow1_in) + f_in = torch.cat([ft_, f0_warp, f1_warp, flow0_in, flow1_in], 1) + out = self.convblock(f_in) + flow0, flow1 = torch.chunk(out[:, :4, ...], 2, 1) + ft_ = out[:, 4:, ...] 
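+ # upsample the incoming flows to the decoder's 2x resolution and double their magnitude, then add the predicted residuals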
+ flow0 = flow0 + 2.0 * resize(flow0_in, scale_factor=2.0) + flow1 = flow1 + 2.0 * resize(flow1_in, scale_factor=2.0) + return flow0, flow1, ft_ diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/multi_flow.py b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/multi_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..cbb96a9ef6bcee99627e7c844e45987bfb2d9308 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/multi_flow.py @@ -0,0 +1,62 @@ +import torch +import torch.nn as nn + +from tools.frame_interpolation.utils.flow_utils import warp + +from .ifrnet import ResBlock, convrelu, resize + + +def multi_flow_combine(comb_block, img0, img1, flow0, flow1, mask=None, img_res=None, mean=None): + """ + A parallel implementation of multiple flow field warping + comb_block: An nn.Seqential object. + img shape: [b, c, h, w] + flow shape: [b, 2*num_flows, h, w] + mask (opt): + If 'mask' is None, the function conduct a simple average. + img_res (opt): + If 'img_res' is None, the function adds zero instead. + mean (opt): + If 'mean' is None, the function adds zero instead. + """ + b, c, h, w = flow0.shape + num_flows = c // 2 + flow0 = flow0.reshape(b, num_flows, 2, h, w).reshape(-1, 2, h, w) + flow1 = flow1.reshape(b, num_flows, 2, h, w).reshape(-1, 2, h, w) + + mask = mask.reshape(b, num_flows, 1, h, w).reshape(-1, 1, h, w) if mask is not None else None + img_res = img_res.reshape(b, num_flows, 3, h, w).reshape(-1, 3, h, w) if img_res is not None else 0 + img0 = torch.stack([img0] * num_flows, 1).reshape(-1, 3, h, w) + img1 = torch.stack([img1] * num_flows, 1).reshape(-1, 3, h, w) + mean = torch.stack([mean] * num_flows, 1).reshape(-1, 1, 1, 1) if mean is not None else 0 + + img0_warp = warp(img0, flow0) + img1_warp = warp(img1, flow1) + img_warps = mask * img0_warp + (1 - mask) * img1_warp + mean + img_res + img_warps = img_warps.reshape(b, num_flows, 3, h, w) + imgt_pred = img_warps.mean(1) + comb_block(img_warps.view(b, -1, h, w)) + return imgt_pred + + +class MultiFlowDecoder(nn.Module): + def __init__(self, in_ch, skip_ch, num_flows=3): + super(MultiFlowDecoder, self).__init__() + self.num_flows = num_flows + self.convblock = nn.Sequential( + convrelu(in_ch * 3 + 4, in_ch * 3), + ResBlock(in_ch * 3, skip_ch), + nn.ConvTranspose2d(in_ch * 3, 8 * num_flows, 4, 2, 1, bias=True), + ) + + def forward(self, ft_, f0, f1, flow0, flow1): + n = self.num_flows + f0_warp = warp(f0, flow0) + f1_warp = warp(f1, flow1) + out = self.convblock(torch.cat([ft_, f0_warp, f1_warp, flow0, flow1], 1)) + delta_flow0, delta_flow1, mask, img_res = torch.split(out, [2 * n, 2 * n, n, 3 * n], 1) + mask = torch.sigmoid(mask) + + flow0 = delta_flow0 + 2.0 * resize(flow0, scale_factor=2.0).repeat(1, self.num_flows, 1, 1) + flow1 = delta_flow1 + 2.0 * resize(flow1, scale_factor=2.0).repeat(1, self.num_flows, 1, 1) + + return flow0, flow1, mask, img_res diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/raft.py b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/raft.py new file mode 100644 index 0000000000000000000000000000000000000000..1576889201c49614224450c9a223b871e8031f2d --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/networks/blocks/raft.py @@ -0,0 +1,213 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def resize(x, scale_factor): + return F.interpolate(x, scale_factor=scale_factor, mode="bilinear", 
align_corners=False) + + +def bilinear_sampler(img, coords, mask=False): + """Wrapper for grid_sample, uses pixel coordinates""" + H, W = img.shape[-2:] + xgrid, ygrid = coords.split([1, 1], dim=-1) + xgrid = 2 * xgrid / (W - 1) - 1 + ygrid = 2 * ygrid / (H - 1) - 1 + + grid = torch.cat([xgrid, ygrid], dim=-1) + img = F.grid_sample(img, grid, align_corners=True) + + if mask: + mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1) + return img, mask.float() + + return img + + +def coords_grid(batch, ht, wd, device): + coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device), indexing="ij") + coords = torch.stack(coords[::-1], dim=0).float() + return coords[None].repeat(batch, 1, 1, 1) + + +class SmallUpdateBlock(nn.Module): + def __init__(self, cdim, hidden_dim, flow_dim, corr_dim, fc_dim, corr_levels=4, radius=3, scale_factor=None): + super(SmallUpdateBlock, self).__init__() + cor_planes = corr_levels * (2 * radius + 1) ** 2 + self.scale_factor = scale_factor + + self.convc1 = nn.Conv2d(2 * cor_planes, corr_dim, 1, padding=0) + self.convf1 = nn.Conv2d(4, flow_dim * 2, 7, padding=3) + self.convf2 = nn.Conv2d(flow_dim * 2, flow_dim, 3, padding=1) + self.conv = nn.Conv2d(corr_dim + flow_dim, fc_dim, 3, padding=1) + + self.gru = nn.Sequential( + nn.Conv2d(fc_dim + 4 + cdim, hidden_dim, 3, padding=1), + nn.LeakyReLU(negative_slope=0.1, inplace=True), + nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1), + ) + + self.feat_head = nn.Sequential( + nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1), + nn.LeakyReLU(negative_slope=0.1, inplace=True), + nn.Conv2d(hidden_dim, cdim, 3, padding=1), + ) + + self.flow_head = nn.Sequential( + nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1), + nn.LeakyReLU(negative_slope=0.1, inplace=True), + nn.Conv2d(hidden_dim, 4, 3, padding=1), + ) + + self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True) + + def forward(self, net, flow, corr): + net = resize(net, 1 / self.scale_factor) if self.scale_factor is not None else net + cor = self.lrelu(self.convc1(corr)) + flo = self.lrelu(self.convf1(flow)) + flo = self.lrelu(self.convf2(flo)) + cor_flo = torch.cat([cor, flo], dim=1) + inp = self.lrelu(self.conv(cor_flo)) + inp = torch.cat([inp, flow, net], dim=1) + + out = self.gru(inp) + delta_net = self.feat_head(out) + delta_flow = self.flow_head(out) + + if self.scale_factor is not None: + delta_net = resize(delta_net, scale_factor=self.scale_factor) + delta_flow = self.scale_factor * resize(delta_flow, scale_factor=self.scale_factor) + + return delta_net, delta_flow + + +class BasicUpdateBlock(nn.Module): + def __init__( + self, + cdim, + hidden_dim, + flow_dim, + corr_dim, + corr_dim2, + fc_dim, + corr_levels=4, + radius=3, + scale_factor=None, + out_num=1, + ): + super(BasicUpdateBlock, self).__init__() + cor_planes = corr_levels * (2 * radius + 1) ** 2 + + self.scale_factor = scale_factor + self.convc1 = nn.Conv2d(2 * cor_planes, corr_dim, 1, padding=0) + self.convc2 = nn.Conv2d(corr_dim, corr_dim2, 3, padding=1) + self.convf1 = nn.Conv2d(4, flow_dim * 2, 7, padding=3) + self.convf2 = nn.Conv2d(flow_dim * 2, flow_dim, 3, padding=1) + self.conv = nn.Conv2d(flow_dim + corr_dim2, fc_dim, 3, padding=1) + + self.gru = nn.Sequential( + nn.Conv2d(fc_dim + 4 + cdim, hidden_dim, 3, padding=1), + nn.LeakyReLU(negative_slope=0.1, inplace=True), + nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1), + ) + + self.feat_head = nn.Sequential( + nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1), + nn.LeakyReLU(negative_slope=0.1, inplace=True), + 
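# final 3x3 conv projects the GRU output back to the context-feature dimension (cdim) +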
nn.Conv2d(hidden_dim, cdim, 3, padding=1), + ) + + self.flow_head = nn.Sequential( + nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1), + nn.LeakyReLU(negative_slope=0.1, inplace=True), + nn.Conv2d(hidden_dim, 4 * out_num, 3, padding=1), + ) + + self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True) + + def forward(self, net, flow, corr): + net = resize(net, 1 / self.scale_factor) if self.scale_factor is not None else net + cor = self.lrelu(self.convc1(corr)) + cor = self.lrelu(self.convc2(cor)) + flo = self.lrelu(self.convf1(flow)) + flo = self.lrelu(self.convf2(flo)) + cor_flo = torch.cat([cor, flo], dim=1) + inp = self.lrelu(self.conv(cor_flo)) + inp = torch.cat([inp, flow, net], dim=1) + + out = self.gru(inp) + delta_net = self.feat_head(out) + delta_flow = self.flow_head(out) + + if self.scale_factor is not None: + delta_net = resize(delta_net, scale_factor=self.scale_factor) + delta_flow = self.scale_factor * resize(delta_flow, scale_factor=self.scale_factor) + return delta_net, delta_flow + + +class BidirCorrBlock: + def __init__(self, fmap1, fmap2, num_levels=4, radius=4): + self.num_levels = num_levels + self.radius = radius + self.corr_pyramid = [] + self.corr_pyramid_T = [] + + corr = BidirCorrBlock.corr(fmap1, fmap2) + batch, h1, w1, dim, h2, w2 = corr.shape + corr_T = corr.clone().permute(0, 4, 5, 3, 1, 2) + + corr = corr.reshape(batch * h1 * w1, dim, h2, w2) + corr_T = corr_T.reshape(batch * h2 * w2, dim, h1, w1) + + self.corr_pyramid.append(corr) + self.corr_pyramid_T.append(corr_T) + + for _ in range(self.num_levels - 1): + corr = F.avg_pool2d(corr, 2, stride=2) + corr_T = F.avg_pool2d(corr_T, 2, stride=2) + self.corr_pyramid.append(corr) + self.corr_pyramid_T.append(corr_T) + + def __call__(self, coords0, coords1): + r = self.radius + coords0 = coords0.permute(0, 2, 3, 1) + coords1 = coords1.permute(0, 2, 3, 1) + assert coords0.shape == coords1.shape, f"coords0 shape: [{coords0.shape}] is not equal to [{coords1.shape}]" + batch, h1, w1, _ = coords0.shape + + out_pyramid = [] + out_pyramid_T = [] + for i in range(self.num_levels): + corr = self.corr_pyramid[i] + corr_T = self.corr_pyramid_T[i] + + dx = torch.linspace(-r, r, 2 * r + 1, device=coords0.device) + dy = torch.linspace(-r, r, 2 * r + 1, device=coords0.device) + delta = torch.stack(torch.meshgrid(dy, dx, indexing="ij"), axis=-1) + delta_lvl = delta.view(1, 2 * r + 1, 2 * r + 1, 2) + + centroid_lvl_0 = coords0.reshape(batch * h1 * w1, 1, 1, 2) / 2**i + centroid_lvl_1 = coords1.reshape(batch * h1 * w1, 1, 1, 2) / 2**i + coords_lvl_0 = centroid_lvl_0 + delta_lvl + coords_lvl_1 = centroid_lvl_1 + delta_lvl + + corr = bilinear_sampler(corr, coords_lvl_0) + corr_T = bilinear_sampler(corr_T, coords_lvl_1) + corr = corr.view(batch, h1, w1, -1) + corr_T = corr_T.view(batch, h1, w1, -1) + out_pyramid.append(corr) + out_pyramid_T.append(corr_T) + + out = torch.cat(out_pyramid, dim=-1) + out_T = torch.cat(out_pyramid_T, dim=-1) + return out.permute(0, 3, 1, 2).contiguous().float(), out_T.permute(0, 3, 1, 2).contiguous().float() + + @staticmethod + def corr(fmap1, fmap2): + batch, dim, ht, wd = fmap1.shape + fmap1 = fmap1.view(batch, dim, ht * wd) + fmap2 = fmap2.view(batch, dim, ht * wd) + + corr = torch.matmul(fmap1.transpose(1, 2), fmap2) + corr = corr.view(batch, ht, wd, 1, ht, wd) + return corr / torch.sqrt(torch.tensor(dim).float()) diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/utils/__init__.py b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/utils/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/utils/dist_utils.py b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/utils/dist_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d754d4fc7a6ed1a9bae246b2f895456218d815ea --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/utils/dist_utils.py @@ -0,0 +1,48 @@ +import os + +import torch + + +def get_world_size(): + """Find OMPI world size without calling mpi functions + :rtype: int + """ + if os.environ.get("PMI_SIZE") is not None: + return int(os.environ.get("PMI_SIZE") or 1) + elif os.environ.get("OMPI_COMM_WORLD_SIZE") is not None: + return int(os.environ.get("OMPI_COMM_WORLD_SIZE") or 1) + else: + return torch.cuda.device_count() + + +def get_global_rank(): + """Find OMPI world rank without calling mpi functions + :rtype: int + """ + if os.environ.get("PMI_RANK") is not None: + return int(os.environ.get("PMI_RANK") or 0) + elif os.environ.get("OMPI_COMM_WORLD_RANK") is not None: + return int(os.environ.get("OMPI_COMM_WORLD_RANK") or 0) + else: + return 0 + + +def get_local_rank(): + """Find OMPI local rank without calling mpi functions + :rtype: int + """ + if os.environ.get("MPI_LOCALRANKID") is not None: + return int(os.environ.get("MPI_LOCALRANKID") or 0) + elif os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK") is not None: + return int(os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK") or 0) + else: + return 0 + + +def get_master_ip(): + if os.environ.get("AZ_BATCH_MASTER_NODE") is not None: + return os.environ.get("AZ_BATCH_MASTER_NODE").split(":")[0] + elif os.environ.get("AZ_BATCHAI_MPI_MASTER_NODE") is not None: + return os.environ.get("AZ_BATCHAI_MPI_MASTER_NODE") + else: + return "127.0.0.1" diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/utils/flow_utils.py b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/utils/flow_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d16fe7113e22594f099f1745ed06d10675fb18e3 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/utils/flow_utils.py @@ -0,0 +1,125 @@ +import numpy as np +import torch +import torch.nn.functional as F +from PIL import ImageFile + +ImageFile.LOAD_TRUNCATED_IMAGES = True + + +def warp(img, flow): + B, _, H, W = flow.shape + xx = torch.linspace(-1.0, 1.0, W).view(1, 1, 1, W).expand(B, -1, H, -1) + yy = torch.linspace(-1.0, 1.0, H).view(1, 1, H, 1).expand(B, -1, -1, W) + grid = torch.cat([xx, yy], 1).to(img) + flow_ = torch.cat([flow[:, 0:1, :, :] / ((W - 1.0) / 2.0), flow[:, 1:2, :, :] / ((H - 1.0) / 2.0)], 1) + grid_ = (grid + flow_).permute(0, 2, 3, 1) + output = F.grid_sample(input=img, grid=grid_, mode="bilinear", padding_mode="border", align_corners=True) + return output + + +def make_colorwheel(): + """ + Generates a color wheel for optical flow visualization as presented in: + Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) + URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf + Code follows the original C++ source code of Daniel Scharstein. + Code follows the Matlab source code of Deqing Sun. 
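+    The wheel concatenates six hue transitions (RY, YG, GC, CB, BM, MR), giving 55 colors in total.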
+ Returns: + np.ndarray: Color wheel + """ + + RY = 15 + YG = 6 + GC = 4 + CB = 11 + BM = 13 + MR = 6 + + ncols = RY + YG + GC + CB + BM + MR + colorwheel = np.zeros((ncols, 3)) + col = 0 + + # RY + colorwheel[0:RY, 0] = 255 + colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY) + col = col + RY + # YG + colorwheel[col : col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG) + colorwheel[col : col + YG, 1] = 255 + col = col + YG + # GC + colorwheel[col : col + GC, 1] = 255 + colorwheel[col : col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC) + col = col + GC + # CB + colorwheel[col : col + CB, 1] = 255 - np.floor(255 * np.arange(CB) / CB) + colorwheel[col : col + CB, 2] = 255 + col = col + CB + # BM + colorwheel[col : col + BM, 2] = 255 + colorwheel[col : col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM) + col = col + BM + # MR + colorwheel[col : col + MR, 2] = 255 - np.floor(255 * np.arange(MR) / MR) + colorwheel[col : col + MR, 0] = 255 + return colorwheel + + +def flow_uv_to_colors(u, v, convert_to_bgr=False): + """ + Applies the flow color wheel to (possibly clipped) flow components u and v. + According to the C++ source code of Daniel Scharstein + According to the Matlab source code of Deqing Sun + Args: + u (np.ndarray): Input horizontal flow of shape [H,W] + v (np.ndarray): Input vertical flow of shape [H,W] + convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False. + Returns: + np.ndarray: Flow visualization image of shape [H,W,3] + """ + flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8) + colorwheel = make_colorwheel() # shape [55x3] + ncols = colorwheel.shape[0] + rad = np.sqrt(np.square(u) + np.square(v)) + a = np.arctan2(-v, -u) / np.pi + fk = (a + 1) / 2 * (ncols - 1) + k0 = np.floor(fk).astype(np.int32) + k1 = k0 + 1 + k1[k1 == ncols] = 0 + f = fk - k0 + for i in range(colorwheel.shape[1]): + tmp = colorwheel[:, i] + col0 = tmp[k0] / 255.0 + col1 = tmp[k1] / 255.0 + col = (1 - f) * col0 + f * col1 + idx = rad <= 1 + col[idx] = 1 - rad[idx] * (1 - col[idx]) + col[~idx] = col[~idx] * 0.75 # out of range + # Note the 2-i => BGR instead of RGB + ch_idx = 2 - i if convert_to_bgr else i + flow_image[:, :, ch_idx] = np.floor(255 * col) + return flow_image + + +def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False): + """ + Expects a two dimensional flow image of shape. + Args: + flow_uv (np.ndarray): Flow UV image of shape [H,W,2] + clip_flow (float, optional): Clip maximum of flow values. Defaults to None. + convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False. 
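+    The flow is normalized by its maximum magnitude (plus a small epsilon) before being mapped to colors.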
+ Returns: + np.ndarray: Flow visualization image of shape [H,W,3] + """ + assert flow_uv.ndim == 3, "input flow must have three dimensions" + assert flow_uv.shape[2] == 2, "input flow must have shape [H,W,2]" + if clip_flow is not None: + flow_uv = np.clip(flow_uv, 0, clip_flow) + u = flow_uv[:, :, 0] + v = flow_uv[:, :, 1] + rad = np.sqrt(np.square(u) + np.square(v)) + rad_max = np.max(rad) + epsilon = 1e-5 + u = u / (rad_max + epsilon) + v = v / (rad_max + epsilon) + return flow_uv_to_colors(u, v, convert_to_bgr) diff --git a/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/utils/utils.py b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..285a65fd454e034ce672dcea82d1449bc77ef953 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/frame_interpolation/utils/utils.py @@ -0,0 +1,314 @@ +import random +import re +import sys + +import numpy as np +import torch +import torch.nn.functional as F +from imageio import imread, imwrite +from PIL import ImageFile + +ImageFile.LOAD_TRUNCATED_IMAGES = True + + +class AverageMeter: + def __init__(self): + self.reset() + + def reset(self): + self.val = 0.0 + self.avg = 0.0 + self.sum = 0.0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + +class AverageMeterGroups: + def __init__(self) -> None: + self.meter_dict = dict() + + def update(self, dict, n=1): + for name, val in dict.items(): + if self.meter_dict.get(name) is None: + self.meter_dict[name] = AverageMeter() + self.meter_dict[name].update(val, n) + + def reset(self, name=None): + if name is None: + for v in self.meter_dict.values(): + v.reset() + else: + meter = self.meter_dict.get(name) + if meter is not None: + meter.reset() + + def avg(self, name): + meter = self.meter_dict.get(name) + if meter is not None: + return meter.avg + + +class InputPadder: + """Pads images such that dimensions are divisible by divisor""" + + def __init__(self, dims, divisor=16): + self.ht, self.wd = dims[-2:] + pad_ht = (((self.ht // divisor) + 1) * divisor - self.ht) % divisor + pad_wd = (((self.wd // divisor) + 1) * divisor - self.wd) % divisor + self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, pad_ht // 2, pad_ht - pad_ht // 2] + + def pad(self, *inputs): + if len(inputs) == 1: + return F.pad(inputs[0], self._pad, mode="replicate") + else: + return [F.pad(x, self._pad, mode="replicate") for x in inputs] + + def unpad(self, *inputs): + if len(inputs) == 1: + return self._unpad(inputs[0]) + else: + return [self._unpad(x) for x in inputs] + + def _unpad(self, x): + ht, wd = x.shape[-2:] + c = [self._pad[2], ht - self._pad[3], self._pad[0], wd - self._pad[1]] + return x[..., c[0] : c[1], c[2] : c[3]] + + +def img2tensor(img): + if img.shape[-1] > 3: + img = img[:, :, :3] + return torch.tensor(img).permute(2, 0, 1).unsqueeze(0) / 255.0 + + +def tensor2img(img_t): + return (img_t * 255.0).detach().squeeze(0).permute(1, 2, 0).cpu().numpy().clip(0, 255).astype(np.uint8) + + +def seed_all(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + +def read(file): + if file.endswith(".float3"): + return readFloat(file) + elif file.endswith(".flo"): + return readFlow(file) + elif file.endswith(".ppm"): + return readImage(file) + elif file.endswith(".pgm"): + return readImage(file) + elif file.endswith(".png"): + return readImage(file) + elif file.endswith(".jpg"): + return 
readImage(file) + elif file.endswith(".pfm"): + return readPFM(file)[0] + else: + raise Exception("don't know how to read %s" % file) + + +def write(file, data): + if file.endswith(".float3"): + return writeFloat(file, data) + elif file.endswith(".flo"): + return writeFlow(file, data) + elif file.endswith(".ppm"): + return writeImage(file, data) + elif file.endswith(".pgm"): + return writeImage(file, data) + elif file.endswith(".png"): + return writeImage(file, data) + elif file.endswith(".jpg"): + return writeImage(file, data) + elif file.endswith(".pfm"): + return writePFM(file, data) + else: + raise Exception("don't know how to write %s" % file) + + +def readPFM(file): + file = open(file, "rb") + + color = None + width = None + height = None + scale = None + endian = None + + header = file.readline().rstrip() + if header.decode("ascii") == "PF": + color = True + elif header.decode("ascii") == "Pf": + color = False + else: + raise Exception("Not a PFM file.") + + dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii")) + if dim_match: + width, height = list(map(int, dim_match.groups())) + else: + raise Exception("Malformed PFM header.") + + scale = float(file.readline().decode("ascii").rstrip()) + if scale < 0: + endian = "<" + scale = -scale + else: + endian = ">" + + data = np.fromfile(file, endian + "f") + shape = (height, width, 3) if color else (height, width) + + data = np.reshape(data, shape) + data = np.flipud(data) + return data, scale + + +def writePFM(file, image, scale=1): + file = open(file, "wb") + + color = None + + if image.dtype.name != "float32": + raise Exception("Image dtype must be float32.") + + image = np.flipud(image) + + if len(image.shape) == 3 and image.shape[2] == 3: + color = True + elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: + color = False + else: + raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.") + + file.write("PF\n" if color else "Pf\n".encode()) + file.write("%d %d\n".encode() % (image.shape[1], image.shape[0])) + + endian = image.dtype.byteorder + + if endian == "<" or endian == "=" and sys.byteorder == "little": + scale = -scale + + file.write("%f\n".encode() % scale) + + image.tofile(file) + + +def readFlow(name): + if name.endswith(".pfm") or name.endswith(".PFM"): + return readPFM(name)[0][:, :, 0:2] + + f = open(name, "rb") + + header = f.read(4) + if header.decode("utf-8") != "PIEH": + raise Exception("Flow file header does not contain PIEH") + + width = np.fromfile(f, np.int32, 1).squeeze() + height = np.fromfile(f, np.int32, 1).squeeze() + + flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2)) + + return flow.astype(np.float32) + + +def readImage(name): + if name.endswith(".pfm") or name.endswith(".PFM"): + data = readPFM(name)[0] + if len(data.shape) == 3: + return data[:, :, 0:3] + else: + return data + return imread(name) + + +def writeImage(name, data): + if name.endswith(".pfm") or name.endswith(".PFM"): + return writePFM(name, data, 1) + return imwrite(name, data) + + +def writeFlow(name, flow): + f = open(name, "wb") + f.write("PIEH".encode("utf-8")) + np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f) + flow = flow.astype(np.float32) + flow.tofile(f) + + +def readFloat(name): + f = open(name, "rb") + + if (f.readline().decode("utf-8")) != "float\n": + raise Exception("float file %s did not contain keyword" % name) + + dim = int(f.readline()) + + dims = [] + count = 1 + for i in range(0, dim): + d = 
int(f.readline()) + dims.append(d) + count *= d + + dims = list(reversed(dims)) + + data = np.fromfile(f, np.float32, count).reshape(dims) + if dim > 2: + data = np.transpose(data, (2, 1, 0)) + data = np.transpose(data, (1, 0, 2)) + + return data + + +def writeFloat(name, data): + f = open(name, "wb") + + dim = len(data.shape) + if dim > 3: + raise Exception("bad float file dimension: %d" % dim) + + f.write(("float\n").encode("ascii")) + f.write(("%d\n" % dim).encode("ascii")) + + if dim == 1: + f.write(("%d\n" % data.shape[0]).encode("ascii")) + else: + f.write(("%d\n" % data.shape[1]).encode("ascii")) + f.write(("%d\n" % data.shape[0]).encode("ascii")) + for i in range(2, dim): + f.write(("%d\n" % data.shape[i]).encode("ascii")) + + data = data.astype(np.float32) + if dim == 2: + data.tofile(f) + + else: + np.transpose(data, (2, 0, 1)).tofile(f) + + +def check_dim_and_resize(tensor_list): + shape_list = [] + for t in tensor_list: + shape_list.append(t.shape[2:]) + + if len(set(shape_list)) > 1: + desired_shape = shape_list[0] + print(f"Inconsistent size of input video frames. All frames will be resized to {desired_shape}") + + resize_tensor_list = [] + for t in tensor_list: + resize_tensor_list.append(torch.nn.functional.interpolate(t, size=tuple(desired_shape), mode="bilinear")) + + tensor_list = resize_tensor_list + + return tensor_list diff --git a/exp_code/1_benchmark/pa_vdm/tools/scene_cut/README.md b/exp_code/1_benchmark/pa_vdm/tools/scene_cut/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1c1bae59f5bb438ff600ed5d8bc5dc599bc83cd5 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scene_cut/README.md @@ -0,0 +1,63 @@ +# Scene Detection and Video Splitting + +- [Scene Detection and Video Splitting](#scene-detection-and-video-splitting) + - [Prepare Meta Files](#prepare-meta-files) + - [Scene Detection](#scene-detection) + - [Video Splitting](#video-splitting) + +In many cases, raw videos contain several scenes and are too long for training. Thus, it is essential to split them into shorter +clips based on scenes. Here, we provide code for scene detection and video splitting. + +## Prepare Meta Files +At this step, you should have a raw video dataset prepared. A meta file of the dataset information is needed for data processing. To create a meta file from a folder, run: + +```bash +python -m tools.datasets.convert video /path/to/video/folder --output /path/to/save/meta.csv +``` +This should output a `.csv` file with column `path`. + +If you already have a meta file for the videos and want to keep the information. +**Make sure** the meta file has column `id`, which is the id for each video, and the video is named as `{id}.mp4`. +The following command will add a new column `path` to the meta file. + +```bash +python tools/scene_cut/convert_id_to_path.py /path/to/meta.csv --folder_path /path/to/video/folder +``` +This should output +- `{prefix}_path-filtered.csv` with column `path` (broken videos filtered) +- `{prefix}_path_intact.csv` with column `path` and `intact` (`intact` indicating a video is intact or not) + + +## Scene Detection + +Install the required dependancies by following our [installation instructions](../../docs/installation.md)'s "Data Dependencies" and "Scene Detection" sections. + + + +**Make sure** the input meta file has column `path`, which is the path of a video. + +```bash +python tools/scene_cut/scene_detect.py /path/to/meta.csv +``` +The output is `{prefix}_timestamp.csv` with column `timestamp`. 
Each cell in column `timestamp` is a list of tuples, +with each tuple indicating the start and end timestamp of a scene +(e.g., `[('00:00:01.234', '00:00:02.345'), ('00:00:03.456', '00:00:04.567')]`). + +## Video Splitting +After obtaining timestamps for scenes, we conduct video splitting (cutting). +**Make sure** the meta file contains column `timestamp`. + +```bash +python tools/scene_cut/cut.py /path/to/meta.csv --save_dir /path/to/output/dir +``` + +This will save video clips to `/path/to/output/dir`. The video clips are named as `{video_id}_scene-{scene_id}.mp4` + +To create a new meta file for the generated clips, run: +```bash +python -m tools.datasets.convert video /path/to/video/folder --output /path/to/save/meta.csv +``` diff --git a/exp_code/1_benchmark/pa_vdm/tools/scene_cut/__init__.py b/exp_code/1_benchmark/pa_vdm/tools/scene_cut/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/pa_vdm/tools/scene_cut/convert_id_to_path.py b/exp_code/1_benchmark/pa_vdm/tools/scene_cut/convert_id_to_path.py new file mode 100644 index 0000000000000000000000000000000000000000..b8122d313226aaf89ef8f3f654aabb23a4bf8ab6 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scene_cut/convert_id_to_path.py @@ -0,0 +1,131 @@ +import argparse +import json +import os +from functools import partial + +import cv2 +import numpy as np +import pandas as pd +from mmengine.logging import print_log +from moviepy.editor import VideoFileClip +from pandarallel import pandarallel +from tqdm import tqdm + +tqdm.pandas() + + +def is_intact_video(video_path, mode="moviepy", verbose=False, logger=None): + if not os.path.exists(video_path): + if verbose: + print_log(f"Could not find '{video_path}'", logger=logger) + return False + + if mode == "moviepy": + try: + VideoFileClip(video_path) + if verbose: + print_log(f"The video file '{video_path}' is intact.", logger=logger) + return True + except Exception as e: + if verbose: + print_log(f"Error: {e}", logger=logger) + print_log(f"The video file '{video_path}' is not intact.", logger=logger) + return False + elif mode == "cv2": + try: + cap = cv2.VideoCapture(video_path) + if cap.isOpened(): + if verbose: + print_log(f"The video file '{video_path}' is intact.", logger=logger) + return True + except Exception as e: + if verbose: + print_log(f"Error: {e}", logger=logger) + print_log(f"The video file '{video_path}' is not intact.", logger=logger) + return False + else: + raise ValueError + + +def has_downloaded_success(json_path): + if not os.path.exists(json_path): + return False + + try: + with open(json_path, "r") as f: + data = json.load(f) + if "success" not in data or isinstance(data["success"], bool) is False or data["success"] is False: + return False + except Exception: + return False + + return True + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("meta_path", type=str) + parser.add_argument("--folder_path", type=str, required=True) + parser.add_argument("--mode", type=str, default=None) + parser.add_argument("--num_workers", type=int, default=None, help="#workers for pandarallel") + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + meta_path = args.meta_path + folder_path = args.folder_path + mode = args.mode + + def is_intact(row, mode=None): + video_id = row["id"] + video_path = os.path.join(folder_path, f"{video_id}.mp4") + row["path"] = video_path + + if mode == ".mp4": + if 
is_intact_video(video_path): + return True, video_path + return False, video_path + elif mode == ".json": + # json_path = os.path.join(root_raw, f"data/{split}/{video_id}.json") + json_path = os.path.join(folder_path, f"{video_id}.json") + if has_downloaded_success(json_path): + return True, video_path + return False, video_path + elif mode is None: + return True, video_path + else: + raise ValueError + + meta_dirpath = os.path.dirname(meta_path) + meta_fname = os.path.basename(meta_path) + wo_ext, ext = os.path.splitext(meta_fname) + + if args.num_workers is not None: + pandarallel.initialize(progress_bar=True, nb_workers=args.num_workers) + else: + pandarallel.initialize(progress_bar=True) + is_intact_partial = partial(is_intact, mode=mode) + + meta = pd.read_csv(meta_path) + ret = meta.parallel_apply(is_intact_partial, axis=1) + intact, paths = list(zip(*ret)) + + meta["intact"] = intact + meta["path"] = paths + out_path = os.path.join(meta_dirpath, f"{wo_ext}_path_intact.csv") + meta.to_csv(out_path, index=False) + print(f"New meta (shape={meta.shape}) with intact info saved to '{out_path}'") + + meta_format = meta[np.array(intact)] + meta_format.drop("intact", axis=1, inplace=True) + out_path = os.path.join(meta_dirpath, f"{wo_ext}_path-filtered.csv") + meta_format.to_csv(out_path, index=False) + print(f"New meta (shape={meta_format.shape}) with format info saved to '{out_path}'") + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/pa_vdm/tools/scene_cut/cut.py b/exp_code/1_benchmark/pa_vdm/tools/scene_cut/cut.py new file mode 100644 index 0000000000000000000000000000000000000000..9fe488089c91b5790bb1ff5b6718744d964cf649 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scene_cut/cut.py @@ -0,0 +1,208 @@ +import cv2 # isort:skip + +import argparse +import os +import subprocess +from functools import partial + +import pandas as pd +from imageio_ffmpeg import get_ffmpeg_exe +from pandarallel import pandarallel +from scenedetect import FrameTimecode +from tqdm import tqdm + +tqdm.pandas() + + +def print_log(s, logger=None): + if logger is not None: + logger.info(s) + else: + print(s) + + +def process_single_row(row, args): + video_path = row["path"] + + logger = None + + # check mp4 integrity + # if not is_intact_video(video_path, logger=logger): + # return False + try: + if "timestamp" in row: + timestamp = row["timestamp"] + if not (timestamp.startswith("[") and timestamp.endswith("]")): + return False + scene_list = eval(timestamp) + scene_list = [(FrameTimecode(s, fps=100), FrameTimecode(t, fps=100)) for s, t in scene_list] + else: + scene_list = [None] + if args.drop_invalid_timestamps: + return True + except Exception as e: + if args.drop_invalid_timestamps: + return False + + if "relpath" in row: + save_dir = os.path.dirname(os.path.join(args.save_dir, row["relpath"])) + os.makedirs(save_dir, exist_ok=True) + else: + save_dir = args.save_dir + + shorter_size = args.shorter_size + if (shorter_size is not None) and ("height" in row) and ("width" in row): + min_size = min(row["height"], row["width"]) + if min_size <= shorter_size: + shorter_size = None + + split_video( + video_path, + scene_list, + save_dir=save_dir, + min_seconds=args.min_seconds, + max_seconds=args.max_seconds, + target_fps=args.target_fps, + shorter_size=shorter_size, + logger=logger, + ) + return True + +def split_video( + video_path, + scene_list, + save_dir, + min_seconds=2, + max_seconds=15, + target_fps=30, + shorter_size=None, + verbose=False, + logger=None, +): + """ + scenes 
shorter than min_seconds will be ignored; + scenes longer than max_seconds will be cut to save the beginning max_seconds. + Currently, the saved file name pattern is f'{fname}_scene-{idx}'.mp4 + + Args: + scene_list (List[Tuple[FrameTimecode, FrameTimecode]]): each element is (s, t): start and end of a scene. + min_seconds (float | None) + max_seconds (float | None) + target_fps (int | None) + shorter_size (int | None) + """ + FFMPEG_PATH = get_ffmpeg_exe() + + save_path_list = [] + for idx, scene in enumerate(scene_list): + if scene is not None: + s, t = scene # FrameTimecode + if min_seconds is not None: + if (t - s).get_seconds() < min_seconds: + continue + + duration = t - s + if max_seconds is not None: + fps = s.framerate + max_duration = FrameTimecode(max_seconds, fps=fps) + duration = min(max_duration, duration) + + # save path + fname = os.path.basename(video_path) + fname_wo_ext = os.path.splitext(fname)[0] + # TODO: fname pattern + save_path = os.path.join(save_dir, f"{fname_wo_ext}_scene-{idx}.mp4") + if os.path.exists(save_path): + # print_log(f"File '{save_path}' already exists. Skip.", logger=logger) + continue + + # ffmpeg cmd + cmd = [FFMPEG_PATH] + + # Only show ffmpeg output for the first call, which will display any + # errors if it fails, and then break the loop. We only show error messages + # for the remaining calls. + # cmd += ['-v', 'error'] + + # clip to cut + # Note: -ss after -i is very slow; put -ss before -i !!! + if scene is None: + cmd += ["-nostdin", "-y", "-i", video_path] + else: + cmd += ["-nostdin", "-y", "-ss", str(s.get_seconds()), "-i", video_path, "-t", str(duration.get_seconds())] + + # target fps + if target_fps is not None: + cmd += ["-r", f"{target_fps}"] + + # aspect ratio + if shorter_size is not None: + cmd += ["-vf", f"scale='if(gt(iw,ih),-2,{shorter_size})':'if(gt(iw,ih),{shorter_size},-2)'"] + # cmd += ['-vf', f"scale='if(gt(iw,ih),{shorter_size},trunc(ow/a/2)*2)':-2"] + + cmd += ["-map", "0:v", save_path] + # print(cmd) + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + stdout, stderr = proc.communicate() + # stdout = stdout.decode("utf-8") + # print_log(stdout, logger=logger) + + save_path_list.append(video_path) + if verbose: + print_log(f"Video clip saved to '{save_path}'", logger=logger) + + return save_path_list + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("meta_path", type=str) + parser.add_argument("--save_dir", type=str) + parser.add_argument( + "--min_seconds", type=float, default=None, help="if not None, clip shorter than min_seconds is ignored" + ) + parser.add_argument( + "--max_seconds", type=float, default=None, help="if not None, clip longer than max_seconds is truncated" + ) + parser.add_argument("--target_fps", type=int, default=None, help="target fps of clips") + parser.add_argument( + "--shorter_size", type=int, default=None, help="resize the shorter size by keeping ratio; will not do upscale" + ) + parser.add_argument("--num_workers", type=int, default=None, help="#workers for pandarallel") + parser.add_argument("--disable_parallel", action="store_true", help="disable parallel processing") + parser.add_argument("--drop_invalid_timestamps", action="store_true", help="drop rows with invalid timestamps") + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + meta_path = args.meta_path + if not os.path.exists(meta_path): + print(f"Meta file '{meta_path}' not found. 
Exit.") + exit() + + # create save_dir + os.makedirs(args.save_dir, exist_ok=True) + + # initialize pandarallel + if not args.disable_parallel: + if args.num_workers is not None: + pandarallel.initialize(progress_bar=True, nb_workers=args.num_workers) + else: + pandarallel.initialize(progress_bar=True) + process_single_row_partial = partial(process_single_row, args=args) + + # process + meta = pd.read_csv(args.meta_path) + if not args.disable_parallel: + results = meta.parallel_apply(process_single_row_partial, axis=1) + else: + results = meta.apply(process_single_row_partial, axis=1) + if args.drop_invalid_timestamps: + meta = meta[results] + assert args.meta_path.endswith("timestamp.csv"), "Only support *timestamp.csv" + meta.to_csv(args.meta_path.replace("timestamp.csv", "correct_timestamp.csv"), index=False) + print(f"Corrected timestamp file saved to '{args.meta_path.replace('timestamp.csv', 'correct_timestamp.csv')}'") +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/pa_vdm/tools/scene_cut/scene_detect.py b/exp_code/1_benchmark/pa_vdm/tools/scene_cut/scene_detect.py new file mode 100644 index 0000000000000000000000000000000000000000..c564d73c2388e956995736a3a3edddd6bd3a8042 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scene_cut/scene_detect.py @@ -0,0 +1,69 @@ +import argparse +import os + +import numpy as np +import pandas as pd +from pandarallel import pandarallel +from scenedetect import AdaptiveDetector, detect +from tqdm import tqdm + +tqdm.pandas() + + +def process_single_row(row): + # windows + # from scenedetect import detect, ContentDetector, AdaptiveDetector + + video_path = row["path"] + + detector = AdaptiveDetector( + adaptive_threshold=3.0, + # luma_only=True, + ) + # detector = ContentDetector() + # TODO: catch error here + try: + scene_list = detect(video_path, detector, start_in_scene=True) + timestamp = [(s.get_timecode(), t.get_timecode()) for s, t in scene_list] + return True, str(timestamp) + except Exception as e: + print(f"Video '{video_path}' with error {e}") + return False, "" + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("meta_path", type=str) + parser.add_argument("--num_workers", type=int, default=None, help="#workers for pandarallel") + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + meta_path = args.meta_path + if not os.path.exists(meta_path): + print(f"Meta file '{meta_path}' not found. 
Exit.") + exit() + + if args.num_workers is not None: + pandarallel.initialize(progress_bar=True, nb_workers=args.num_workers) + else: + pandarallel.initialize(progress_bar=True) + + meta = pd.read_csv(meta_path) + ret = meta.parallel_apply(process_single_row, axis=1) + + succ, timestamps = list(zip(*ret)) + meta["timestamp"] = timestamps + meta = meta[np.array(succ)] + + wo_ext, ext = os.path.splitext(meta_path) + out_path = f"{wo_ext}_timestamp{ext}" + meta.to_csv(out_path, index=False) + print(f"New meta (shape={meta.shape}) with timestamp saved to '{out_path}'.") + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/README.md b/exp_code/1_benchmark/pa_vdm/tools/scoring/README.md new file mode 100644 index 0000000000000000000000000000000000000000..40949c7f16d6ff93d8af1e0b96a6ccb5dc77b9b2 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/README.md @@ -0,0 +1,115 @@ +# Scoring and Filtering + +- [Scoring and Filtering](#scoring-and-filtering) + - [Aesthetic Score](#aesthetic-score) + - [Optical Flow Score](#optical-flow-score) + - [OCR](#ocr) + - [Matching Score](#matching-score) + - [Filtering](#filtering) + +## Aesthetic Score + +To evaluate the aesthetic quality of videos, we use the scoring model from [CLIP+MLP Aesthetic Score Predictor](https://github.com/christophschuhmann/improved-aesthetic-predictor). This model is trained on 176K SAC (Simulacra Aesthetic Captions) pairs, 15K LAION-Logos (Logos) pairs, and 250K AVA (The Aesthetic Visual Analysis) image-text pairs. + +The aesthetic score is between 1 and 10, where 5.5 can be considered as the threshold for fair aesthetics, and 6.5 for high aesthetics. Good text-to-image models can achieve a score of 7.0 or higher. + +For videos, we extract the first, last, and the middle frames for evaluation. The script also supports images as input. +The throughput of our code is ~1K videos/s on a single H800 GPU. It also supports running on multiple GPUs for further acceleration. + +First, install the required packages following our [installation instructions](../../docs/installation.md)'s "Data Dependencies". + +Next, download the scoring model to `./pretrained_models/aesthetic.pth`. + +```bash +wget https://github.com/christophschuhmann/improved-aesthetic-predictor/raw/main/sac+logos+ava1-l14-linearMSE.pth -O pretrained_models/aesthetic.pth +``` + + + +Then, run the following command. **Make sure** the meta file has column `path` (path to the sample). +```bash +torchrun --nproc_per_node 8 -m tools.scoring.aesthetic.inference /path/to/meta.csv --bs 1024 --num_workers 16 +``` +This will generate multiple part files, each corresponding to a node . Run `python -m tools.datasets.datautil /path/to/meta_aes_part*.csv --output /path/to/meta_aes.csv` to merge them. + +## Optical Flow Score + +Optical flow scores are used to assess the motion of a video. Higher optical flow scores indicate larger movement. +We use the [UniMatch](https://github.com/autonomousvision/unimatch) model for this task. + +First, install the required packages following our [installation instructions](../../docs/installation.md)'s "Data Dependencies". + +Next, download the pretrained model to `./pretrained_model/unimatch/` +```bash +wget https://s3.eu-central-1.amazonaws.com/avg-projects/unimatch/pretrained/gmflow-scale2-regrefine6-mixdata-train320x576-4e7b215d.pth -P ./pretrained_models/unimatch/ +``` + +Then, run the following command. **Make sure** the meta file has column `path` (path to the sample). 
+```bash +torchrun --standalone --nproc_per_node 8 tools/scoring/optical_flow/inference.py /path/to/meta.csv +``` + +This should output `/path/to/meta_flow.csv` with column `flow`. + +## OCR +Some videos are of dense text scenes like news broadcast and advertisement, which are not desired for training. +We apply Optical Character Recognition (OCR) to detect texts and drop samples with dense texts. Here, we use +the [DBNet++](https://arxiv.org/abs/2202.10304) model implemented by [MMOCR](https://github.com/open-mmlab/mmocr/). + +First, install the required packages following our [installation instructions](../../docs/installation.md)'s "Data Dependencies" and "OCR" section. + + + +Then, run the following command. **Make sure** the meta file has column `path` (path to the sample). + +```bash +torchrun --standalone --nproc_per_node 8 -m tools.scoring.ocr.inference /path/to/meta.csv +``` +This should output `/path/to/meta_ocr.csv` with column `ocr`, indicating the number of text regions with detection confidence > 0.3. + + +## Matching Score + +Matching scores are calculated to evaluate the alignment between an image/video and its caption. +Here, we use the [CLIP](https://github.com/openai/CLIP) model, which is trained on image-text pairs. +We simply use the cosine similarity as the matching score. +For videos, we extract the middle frame and compare it with the caption. + +First, install OpenAI CLIP. +```bash +pip install git+https://github.com/openai/CLIP.git +``` + +Then, run the following command. **Make sure** the meta file has column `path` (path to the sample) and `text` (caption of the sample). + +```bash +torchrun --standalone --nproc_per_node 8 tools/scoring/matching/inference.py /path/to/meta.csv +``` + +This should output `/path/to/meta_match.csv` with column `match`. Higher matching scores indicate better image-text/video-text alignment. + + +## Filtering +Once scores are obtained, it is simple to filter samples based on these scores. Here is an example to remove +samples of aesthetic score < 5.0. 
+``` +python -m tools.datasets.datautil /path/to/meta.csv --aesmin 5.0 +``` +This should output `/path/to/meta_aesmin5.0.csv` with column `aes` >= 5.0 diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/__init__.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/aesthetic/__init__.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/aesthetic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/aesthetic/inference.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/aesthetic/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..be6618f37cd3aff4dc76f3bfd91bd448d79082c4 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/aesthetic/inference.py @@ -0,0 +1,214 @@ +# adapted from https://github.com/christophschuhmann/improved-aesthetic-predictor/blob/main/simple_inference.py +import cv2 # isort:skip + +import argparse +import gc +import os +from datetime import timedelta + +import clip +import numpy as np +import pandas as pd +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from torch.utils.data import DataLoader, DistributedSampler +from torchvision.datasets.folder import pil_loader +from tqdm import tqdm + +from tools.datasets.utils import extract_frames, is_video + +NUM_FRAMES_POINTS = { + 1: (0.5,), + 2: (0.25, 0.5), + 3: (0.1, 0.5, 0.9), +} + + +def merge_scores(gathered_list: list, meta: pd.DataFrame, column): + # reorder + indices_list = list(map(lambda x: x[0], gathered_list)) + scores_list = list(map(lambda x: x[1], gathered_list)) + + flat_indices = [] + for x in zip(*indices_list): + flat_indices.extend(x) + flat_scores = [] + for x in zip(*scores_list): + flat_scores.extend(x) + flat_indices = np.array(flat_indices) + flat_scores = np.array(flat_scores) + + # filter duplicates + unique_indices, unique_indices_idx = np.unique(flat_indices, return_index=True) + meta.loc[unique_indices, column] = flat_scores[unique_indices_idx] + + # drop indices in meta not in unique_indices + meta = meta.loc[unique_indices] + return meta + + +class VideoTextDataset(torch.utils.data.Dataset): + def __init__(self, meta_path, transform=None, num_frames=3): + self.meta_path = meta_path + self.meta = pd.read_csv(meta_path) + self.transform = transform + self.points = NUM_FRAMES_POINTS[num_frames] + + def __getitem__(self, index): + sample = self.meta.iloc[index] + path = sample["path"] + + # extract frames + if not is_video(path): + images = [pil_loader(path)] + else: + num_frames = sample["num_frames"] if "num_frames" in sample else None + images = extract_frames(sample["path"], points=self.points, backend="opencv", num_frames=num_frames) + + # transform + images = [self.transform(img) for img in images] + + # stack + images = torch.stack(images) + + ret = dict(index=index, images=images) + return ret + + def __len__(self): + return len(self.meta) + + +class MLP(nn.Module): + def __init__(self, input_size): + super().__init__() + self.input_size = input_size + self.layers = nn.Sequential( + nn.Linear(self.input_size, 1024), + nn.Dropout(0.2), + nn.Linear(1024, 128), + nn.Dropout(0.2), + nn.Linear(128, 64), + nn.Dropout(0.1), + nn.Linear(64, 16), + nn.Linear(16, 1), + ) + + def forward(self, x): + 
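        # regress a scalar aesthetic score from the CLIP image embedding +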
return self.layers(x) + + +class AestheticScorer(nn.Module): + def __init__(self, input_size, device): + super().__init__() + self.mlp = MLP(input_size) + self.clip, self.preprocess = clip.load("ViT-L/14", device=device) + + self.eval() + self.to(device) + + def forward(self, x): + image_features = self.clip.encode_image(x) + image_features = F.normalize(image_features, p=2, dim=-1).float() + return self.mlp(image_features) + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("meta_path", type=str, help="Path to the input CSV file") + parser.add_argument("--bs", type=int, default=1024, help="Batch size") + parser.add_argument("--num_workers", type=int, default=16, help="Number of workers") + parser.add_argument("--prefetch_factor", type=int, default=3, help="Prefetch factor") + parser.add_argument("--num_frames", type=int, default=3, help="Number of frames to extract") + parser.add_argument("--skip_if_existing", action="store_true") + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + meta_path = args.meta_path + if not os.path.exists(meta_path): + print(f"Meta file '{meta_path}' not found. Exit.") + exit() + + wo_ext, ext = os.path.splitext(meta_path) + out_path = f"{wo_ext}_aes{ext}" + if args.skip_if_existing and os.path.exists(out_path): + print(f"Output meta file '{out_path}' already exists. Exit.") + exit() + + dist.init_process_group(backend="nccl", timeout=timedelta(hours=24)) + torch.cuda.set_device(dist.get_rank() % torch.cuda.device_count()) + + # build model + device = "cuda" if torch.cuda.is_available() else "cpu" + model = AestheticScorer(768, device) + model.mlp.load_state_dict(torch.load("pretrained_models/aesthetic.pth", map_location=device)) + preprocess = model.preprocess + + # build dataset + dataset = VideoTextDataset(args.meta_path, transform=preprocess, num_frames=args.num_frames) + dataloader = DataLoader( + dataset, + batch_size=args.bs, + num_workers=args.num_workers, + sampler=DistributedSampler( + dataset, + num_replicas=dist.get_world_size(), + rank=dist.get_rank(), + shuffle=False, + drop_last=False, + ), + ) + + # compute aesthetic scores + indices_list = [] + scores_list = [] + model.eval() + for batch in tqdm(dataloader, disable=dist.get_rank() != 0): + indices = batch["index"] + images = batch["images"].to(device, non_blocking=True) + + B = images.shape[0] + images = rearrange(images, "B N C H W -> (B N) C H W") + + # compute score + with torch.no_grad(): + scores = model(images) + + scores = rearrange(scores, "(B N) 1 -> B N", B=B) + scores = scores.mean(dim=1) + scores_np = scores.to(torch.float32).cpu().numpy() + + indices_list.extend(indices.tolist()) + scores_list.extend(scores_np.tolist()) + + # save local results + meta_local = merge_scores([(indices_list, scores_list)], dataset.meta, column="aes") + save_dir_local = os.path.join(os.path.dirname(out_path), "parts") + os.makedirs(save_dir_local, exist_ok=True) + out_path_local = os.path.join( + save_dir_local, os.path.basename(out_path).replace(".csv", f"_part_{dist.get_rank()}.csv") + ) + meta_local.to_csv(out_path_local, index=False) + + # wait for all ranks to finish data processing + dist.barrier() + + torch.cuda.empty_cache() + gc.collect() + gathered_list = [None] * dist.get_world_size() + dist.all_gather_object(gathered_list, (indices_list, scores_list)) + if dist.get_rank() == 0: + meta_new = merge_scores(gathered_list, dataset.meta, column="aes") + meta_new.to_csv(out_path, index=False) + print(f"New meta with aesthetic scores 
saved to '{out_path}'.") + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/matching/__init__.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/matching/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/matching/inference.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/matching/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..70209eb750492e8c7ab5ec4a0bac177c7af0f068 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/matching/inference.py @@ -0,0 +1,138 @@ +import argparse +import os + +import clip +import colossalai +import numpy as np +import pandas as pd +import torch +import torch.distributed as dist +import torch.nn.functional as F +from torch.utils.data import DataLoader, DistributedSampler +from torchvision.datasets.folder import pil_loader +from tqdm import tqdm + +from tools.datasets.utils import extract_frames, is_video + + +def merge_scores(gathered_list: list, meta: pd.DataFrame, column): + # reorder + indices_list = list(map(lambda x: x[0], gathered_list)) + scores_list = list(map(lambda x: x[1], gathered_list)) + + flat_indices = [] + for x in zip(*indices_list): + flat_indices.extend(x) + flat_scores = [] + for x in zip(*scores_list): + flat_scores.extend(x) + flat_indices = np.array(flat_indices) + flat_scores = np.array(flat_scores) + + # filter duplicates + unique_indices, unique_indices_idx = np.unique(flat_indices, return_index=True) + meta.loc[unique_indices, column] = flat_scores[unique_indices_idx] + return meta + + +class VideoTextDataset(torch.utils.data.Dataset): + def __init__(self, meta_path, transform): + self.meta_path = meta_path + self.meta = pd.read_csv(meta_path) + self.transform = transform + + def __getitem__(self, index): + row = self.meta.iloc[index] + path = row["path"] + + if is_video(path): + img = extract_frames(path, points=[0.5], backend="opencv")[0] + else: + img = pil_loader(path) + + img = self.transform(img) + + text = row["text"] + text = clip.tokenize(text, truncate=True).squeeze() + + return img, text, index + + def __len__(self): + return len(self.meta) + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("meta_path", type=str, help="Path to the input CSV file") + parser.add_argument("--bs", type=int, default=16, help="Batch size") + parser.add_argument("--num_workers", type=int, default=16, help="Number of workers") + parser.add_argument("--skip_if_existing", action="store_true") + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + meta_path = args.meta_path + if not os.path.exists(meta_path): + print(f"Meta file '{meta_path}' not found. Exit.") + exit() + + wo_ext, ext = os.path.splitext(meta_path) + out_path = f"{wo_ext}_match{ext}" + if args.skip_if_existing and os.path.exists(out_path): + print(f"Output meta file '{out_path}' already exists. 
Exit.") + exit() + + colossalai.launch_from_torch({}) + + # build model + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + model, preprocess = clip.load("ViT-L/14", device=device) + logit_scale = model.logit_scale.exp().item() + + # build dataset + dataset = VideoTextDataset(meta_path=meta_path, transform=preprocess) + dataloader = DataLoader( + dataset, + batch_size=args.bs, + num_workers=args.num_workers, + sampler=DistributedSampler( + dataset, + num_replicas=dist.get_world_size(), + rank=dist.get_rank(), + shuffle=False, + drop_last=False, + ), + ) + + # compute scores + indices_list = [] + scores_list = [] + model.eval() + for imgs, text, indices in tqdm(dataloader, disable=dist.get_rank() != 0): + imgs = imgs.to(device) + text = text.to(device) + + with torch.no_grad(): + feat_img = model.encode_image(imgs) + feat_text = model.encode_text(text) + + feat_img = F.normalize(feat_img, dim=1) + feat_text = F.normalize(feat_text, dim=1) + clip_scores = logit_scale * (feat_img * feat_text).sum(dim=1) + clip_scores = clip_scores.cpu().tolist() + indices_list.extend(indices) + scores_list.extend(clip_scores) + + gathered_list = [None] * dist.get_world_size() + dist.all_gather_object(gathered_list, (indices_list, scores_list)) + if dist.get_rank() == 0: + meta_new = merge_scores(gathered_list, dataset.meta, column="match") + meta_new.to_csv(out_path, index=False) + print(f"New meta with matching scores saved to '{out_path}'.") + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/ocr/__init__.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/ocr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/ocr/dbnetpp.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/ocr/dbnetpp.py new file mode 100644 index 0000000000000000000000000000000000000000..7c64615d413f0def442c1ec4cc020e1e6320f493 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/ocr/dbnetpp.py @@ -0,0 +1,65 @@ +model = dict( + type="DBNet", + backbone=dict( + type="CLIPResNet", + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type="BN", requires_grad=True), + norm_eval=False, + style="pytorch", + dcn=dict(type="DCNv2", deform_groups=1, fallback_on_stride=False), + # init_cfg=dict( + # type='Pretrained', + # checkpoint='https://download.openmmlab.com/mmocr/backbone/resnet50-oclip-7ba0c533.pth'), + stage_with_dcn=(False, True, True, True), + ), + neck=dict( + type="FPNC", + in_channels=[256, 512, 1024, 2048], + lateral_channels=256, + asf_cfg=dict(attention_type="ScaleChannelSpatial"), + ), + det_head=dict( + type="DBHead", + in_channels=256, + module_loss=dict(type="DBModuleLoss"), + postprocessor=dict( + type="DBPostprocessor", + text_repr_type="quad", + epsilon_ratio=0.002, + ), + ), + data_preprocessor=dict( + type="TextDetDataPreprocessor", + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32, + ), + init_cfg=dict( + type="Pretrained", + checkpoint="https://download.openmmlab.com/mmocr/textdet/dbnetpp/" + "dbnetpp_resnet50-oclip_fpnc_1200e_icdar2015/" + "dbnetpp_resnet50-oclip_fpnc_1200e_icdar2015_20221101_124139-4ecb39ac.pth", + ), +) + +test_pipeline = [ + # dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), + dict(type="Resize", scale=(4068, 1024), keep_ratio=True), + dict( + type="PackTextDetInputs", + # 
meta_keys=('img_path', 'ori_shape', 'img_shape', 'scale_factor'), + meta_keys=("img_shape", "scale_factor"), + ), +] + +# Visualization +vis_backends = [dict(type="LocalVisBackend")] +visualizer = dict( + type="TextDetLocalVisualizer", + name="visualizer", + vis_backends=vis_backends, +) diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/ocr/inference.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/ocr/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..95a1737e70fa3e020a9eb5fa1efe90d2acc93f7b --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/ocr/inference.py @@ -0,0 +1,158 @@ +import argparse +import os + +import colossalai +import numpy as np +import pandas as pd +import torch +import torch.distributed as dist +from mmengine import Config +from mmengine.dataset import Compose, default_collate +from mmengine.registry import DefaultScope +from mmocr.datasets import PackTextDetInputs +from mmocr.registry import MODELS +from torch.utils.data import DataLoader, DistributedSampler +from torchvision.datasets.folder import pil_loader +from torchvision.transforms import CenterCrop, Compose, Resize +from tqdm import tqdm + +from tools.datasets.utils import extract_frames, is_video + + +def merge_scores(gathered_list: list, meta: pd.DataFrame): + # reorder + indices_list = list(map(lambda x: x[0], gathered_list)) + scores_list = list(map(lambda x: x[1], gathered_list)) + flat_indices = [] + for x in zip(*indices_list): + flat_indices.extend(x) + flat_scores = [] + for x in zip(*scores_list): + flat_scores.extend(x) + flat_indices = np.array(flat_indices) + flat_scores = np.array(flat_scores) + # filter duplicates + unique_indices, unique_indices_idx = np.unique(flat_indices, return_index=True) + meta.loc[unique_indices, "ocr"] = flat_scores[unique_indices_idx] + + +class VideoTextDataset(torch.utils.data.Dataset): + def __init__(self, meta_path, transform): + self.meta_path = meta_path + self.meta = pd.read_csv(meta_path) + self.transform = transform + self.transform = Compose( + [ + Resize(1024), + CenterCrop(1024), + ] + ) + self.formatting = PackTextDetInputs(meta_keys=["scale_factor"]) + + def __getitem__(self, index): + row = self.meta.iloc[index] + path = row["path"] + + if is_video(path): + img = extract_frames(path, frame_inds=[10], backend="opencv")[0] + else: + img = pil_loader(path) + + img = self.transform(img) + img_array = np.array(img)[:, :, ::-1].copy() # bgr + results = { + "img": img_array, + "scale_factor": 1.0, + # 'img_shape': img_array.shape[-2], + # 'ori_shape': img_array.shape[-2], + } + results = self.formatting(results) + results["index"] = index + + return results + + def __len__(self): + return len(self.meta) + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("meta_path", type=str, help="Path to the input CSV file") + parser.add_argument("--bs", type=int, default=16, help="Batch size") + parser.add_argument("--num_workers", type=int, default=16, help="Number of workers") + parser.add_argument("--skip_if_existing", action="store_true") + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + meta_path = args.meta_path + if not os.path.exists(meta_path): + print(f"Meta file '{meta_path}' not found. Exit.") + exit() + + wo_ext, ext = os.path.splitext(meta_path) + out_path = f"{wo_ext}_ocr{ext}" + if args.skip_if_existing and os.path.exists(out_path): + print(f"Output meta file '{out_path}' already exists. 
Exit.") + exit() + + cfg = Config.fromfile("./tools/scoring/ocr/dbnetpp.py") + colossalai.launch_from_torch({}) + + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + DefaultScope.get_instance("ocr", scope_name="mmocr") # use mmocr Registry as default + + # build model + model = MODELS.build(cfg.model) + model.init_weights() + model.to(device) # set data_preprocessor._device + print("==> Model built.") + + # build dataset + transform = Compose(cfg.test_pipeline) + dataset = VideoTextDataset(meta_path=meta_path, transform=transform) + dataloader = DataLoader( + dataset, + batch_size=args.bs, + num_workers=args.num_workers, + sampler=DistributedSampler( + dataset, + num_replicas=dist.get_world_size(), + rank=dist.get_rank(), + shuffle=False, + drop_last=False, + ), + collate_fn=default_collate, + ) + print("==> Dataloader built.") + + # compute scores + dataset.meta["ocr"] = np.nan + indices_list = [] + scores_list = [] + model.eval() + for data in tqdm(dataloader, disable=dist.get_rank() != 0): + indices_i = data["index"] + indices_list.extend(indices_i.tolist()) + del data["index"] + + pred = model.test_step(data) # this line will cast data to device + + num_texts_i = [(x.pred_instances.scores > 0.3).sum().item() for x in pred] + scores_list.extend(num_texts_i) + + gathered_list = [None] * dist.get_world_size() + dist.all_gather_object(gathered_list, (indices_list, scores_list)) + + if dist.get_rank() == 0: + merge_scores(gathered_list, dataset.meta) + dataset.meta.to_csv(out_path, index=False) + print(f"New meta (shape={dataset.meta.shape}) with ocr results saved to '{out_path}'.") + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/__init__.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/inference.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..09f1b41a0a062f24bca6a4387b01261bfd864e45 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/inference.py @@ -0,0 +1,193 @@ +import cv2 # isort:skip + +import argparse +import gc +import os +from datetime import timedelta + +import numpy as np +import pandas as pd +import torch +import torch.distributed as dist +import torch.nn.functional as F +from einops import rearrange +from torch.utils.data import DataLoader, DistributedSampler +from torchvision.transforms.functional import pil_to_tensor +from tqdm import tqdm + +from tools.datasets.utils import extract_frames +from tools.scoring.optical_flow.unimatch import UniMatch + +# torch.backends.cudnn.enabled = False # This line enables large batch, but the speed is similar + + +def merge_scores(gathered_list: list, meta: pd.DataFrame, column): + # reorder + indices_list = list(map(lambda x: x[0], gathered_list)) + scores_list = list(map(lambda x: x[1], gathered_list)) + + flat_indices = [] + for x in zip(*indices_list): + flat_indices.extend(x) + flat_scores = [] + for x in zip(*scores_list): + flat_scores.extend(x) + flat_indices = np.array(flat_indices) + flat_scores = np.array(flat_scores) + + # filter duplicates + unique_indices, unique_indices_idx = np.unique(flat_indices, return_index=True) + meta.loc[unique_indices, column] = flat_scores[unique_indices_idx] + + # drop indices in meta not in 
unique_indices + meta = meta.loc[unique_indices] + return meta + + +class VideoTextDataset(torch.utils.data.Dataset): + def __init__(self, meta_path, frame_inds=[0, 10, 20, 30]): + self.meta_path = meta_path + self.meta = pd.read_csv(meta_path) + self.frame_inds = frame_inds + + def __getitem__(self, index): + sample = self.meta.iloc[index] + path = sample["path"] + + # extract frames + images = extract_frames(path, frame_inds=self.frame_inds, backend="opencv") + + # transform + images = torch.stack([pil_to_tensor(x) for x in images]) + + # stack + # shape: [N, C, H, W]; dtype: torch.uint8 + images = images.float() + H, W = images.shape[-2:] + if H > W: + images = rearrange(images, "N C H W -> N C W H") + images = F.interpolate(images, size=(320, 576), mode="bilinear", align_corners=True) + + ret = dict(index=index, images=images) + return ret + + def __len__(self): + return len(self.meta) + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("meta_path", type=str, help="Path to the input CSV file") + parser.add_argument("--bs", type=int, default=4, help="Batch size") # don't use too large bs for unimatch + parser.add_argument("--num_workers", type=int, default=16, help="Number of workers") + parser.add_argument("--skip_if_existing", action="store_true") + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + meta_path = args.meta_path + if not os.path.exists(meta_path): + print(f"Meta file '{meta_path}' not found. Exit.") + exit() + + wo_ext, ext = os.path.splitext(meta_path) + out_path = f"{wo_ext}_flow{ext}" + if args.skip_if_existing and os.path.exists(out_path): + print(f"Output meta file '{out_path}' already exists. Exit.") + exit() + + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + dist.init_process_group(backend="nccl", timeout=timedelta(hours=24)) + torch.cuda.set_device(dist.get_rank() % torch.cuda.device_count()) + + # build model + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + model = UniMatch( + feature_channels=128, + num_scales=2, + upsample_factor=4, + num_head=1, + ffn_dim_expansion=4, + num_transformer_layers=6, + reg_refine=True, + task="flow", + ) + ckpt = torch.load("./pretrained_models/unimatch/gmflow-scale2-regrefine6-mixdata-train320x576-4e7b215d.pth") + model.load_state_dict(ckpt["model"]) + model = model.to(device) + + # build dataset + dataset = VideoTextDataset(meta_path=meta_path, frame_inds=[0, 10, 20, 30]) + dataloader = DataLoader( + dataset, + batch_size=args.bs, + num_workers=args.num_workers, + sampler=DistributedSampler( + dataset, + num_replicas=dist.get_world_size(), + rank=dist.get_rank(), + shuffle=False, + drop_last=False, + ), + ) + + # compute optical flow scores + indices_list = [] + scores_list = [] + model.eval() + for batch in tqdm(dataloader, disable=dist.get_rank() != 0): + indices = batch["index"] + images = batch["images"].to(device, non_blocking=True) + + B = images.shape[0] + batch_0 = rearrange(images[:, :-1], "B N C H W -> (B N) C H W").contiguous() + batch_1 = rearrange(images[:, 1:], "B N C H W -> (B N) C H W").contiguous() + + with torch.no_grad(): + res = model( + batch_0, + batch_1, + attn_type="swin", + attn_splits_list=[2, 8], + corr_radius_list=[-1, 4], + prop_radius_list=[-1, 1], + num_reg_refine=6, + task="flow", + pred_bidir_flow=False, + ) + flow_maps = res["flow_preds"][-1].cpu() # [B * (N-1), 2, H, W] + flow_maps = rearrange(flow_maps, "(B N) C H W -> B N H W C", B=B) + flow_scores = 
flow_maps.abs().mean(dim=[1, 2, 3, 4]) + flow_scores = flow_scores.tolist() + + indices_list.extend(indices.tolist()) + scores_list.extend(flow_scores) + + # save local results + meta_local = merge_scores([(indices_list, scores_list)], dataset.meta, column="flow") + save_dir_local = os.path.join(os.path.dirname(out_path), "parts") + os.makedirs(save_dir_local, exist_ok=True) + out_path_local = os.path.join( + save_dir_local, os.path.basename(out_path).replace(".csv", f"_part_{dist.get_rank()}.csv") + ) + meta_local.to_csv(out_path_local, index=False) + + # wait for all ranks to finish data processing + dist.barrier() + + torch.cuda.empty_cache() + gc.collect() + gathered_list = [None] * dist.get_world_size() + dist.all_gather_object(gathered_list, (indices_list, scores_list)) + if dist.get_rank() == 0: + meta_new = merge_scores(gathered_list, dataset.meta, column="flow") + meta_new.to_csv(out_path, index=False) + print(f"New meta with optical flow scores saved to '{out_path}'.") + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/__init__.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c1f4eb2f58e4f32026f301c80331f536918fae7a --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/__init__.py @@ -0,0 +1 @@ +from .unimatch import UniMatch diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/attention.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..23fb9048a07fcbd5228f42de4cca0a0f5ed9b60b --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/attention.py @@ -0,0 +1,280 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .utils import merge_splits, merge_splits_1d, split_feature, split_feature_1d + + +def single_head_full_attention(q, k, v): + # q, k, v: [B, L, C] + assert q.dim() == k.dim() == v.dim() == 3 + + scores = torch.matmul(q, k.permute(0, 2, 1)) / (q.size(2) ** 0.5) # [B, L, L] + attn = torch.softmax(scores, dim=2) # [B, L, L] + out = torch.matmul(attn, v) # [B, L, C] + + return out + + +def single_head_full_attention_1d( + q, + k, + v, + h=None, + w=None, +): + # q, k, v: [B, L, C] + + assert h is not None and w is not None + assert q.size(1) == h * w + + b, _, c = q.size() + + q = q.view(b, h, w, c) # [B, H, W, C] + k = k.view(b, h, w, c) + v = v.view(b, h, w, c) + + scale_factor = c**0.5 + + scores = torch.matmul(q, k.permute(0, 1, 3, 2)) / scale_factor # [B, H, W, W] + + attn = torch.softmax(scores, dim=-1) + + out = torch.matmul(attn, v).view(b, -1, c) # [B, H*W, C] + + return out + + +def single_head_split_window_attention( + q, + k, + v, + num_splits=1, + with_shift=False, + h=None, + w=None, + attn_mask=None, +): + # ref: https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer.py + # q, k, v: [B, L, C] + assert q.dim() == k.dim() == v.dim() == 3 + + assert h is not None and w is not None + assert q.size(1) == h * w + + b, _, c = q.size() + + b_new = b * num_splits * num_splits + + window_size_h = h // num_splits + window_size_w = w // num_splits + + q = q.view(b, h, w, c) # [B, H, W, C] + k = k.view(b, h, w, c) + v = v.view(b, h, w, c) + + scale_factor = c**0.5 + + if with_shift: + assert attn_mask is not None # compute once + shift_size_h = window_size_h // 2 + shift_size_w = 
window_size_w // 2 + + q = torch.roll(q, shifts=(-shift_size_h, -shift_size_w), dims=(1, 2)) + k = torch.roll(k, shifts=(-shift_size_h, -shift_size_w), dims=(1, 2)) + v = torch.roll(v, shifts=(-shift_size_h, -shift_size_w), dims=(1, 2)) + + q = split_feature(q, num_splits=num_splits, channel_last=True) # [B*K*K, H/K, W/K, C] + k = split_feature(k, num_splits=num_splits, channel_last=True) + v = split_feature(v, num_splits=num_splits, channel_last=True) + + scores = ( + torch.matmul(q.view(b_new, -1, c), k.view(b_new, -1, c).permute(0, 2, 1)) / scale_factor + ) # [B*K*K, H/K*W/K, H/K*W/K] + + if with_shift: + scores += attn_mask.repeat(b, 1, 1) + + attn = torch.softmax(scores, dim=-1) + + out = torch.matmul(attn, v.view(b_new, -1, c)) # [B*K*K, H/K*W/K, C] + + out = merge_splits( + out.view(b_new, h // num_splits, w // num_splits, c), num_splits=num_splits, channel_last=True + ) # [B, H, W, C] + + # shift back + if with_shift: + out = torch.roll(out, shifts=(shift_size_h, shift_size_w), dims=(1, 2)) + + out = out.view(b, -1, c) + + return out + + +def single_head_split_window_attention_1d( + q, + k, + v, + relative_position_bias=None, + num_splits=1, + with_shift=False, + h=None, + w=None, + attn_mask=None, +): + # q, k, v: [B, L, C] + + assert h is not None and w is not None + assert q.size(1) == h * w + + b, _, c = q.size() + + b_new = b * num_splits * h + + window_size_w = w // num_splits + + q = q.view(b * h, w, c) # [B*H, W, C] + k = k.view(b * h, w, c) + v = v.view(b * h, w, c) + + scale_factor = c**0.5 + + if with_shift: + assert attn_mask is not None # compute once + shift_size_w = window_size_w // 2 + + q = torch.roll(q, shifts=-shift_size_w, dims=1) + k = torch.roll(k, shifts=-shift_size_w, dims=1) + v = torch.roll(v, shifts=-shift_size_w, dims=1) + + q = split_feature_1d(q, num_splits=num_splits) # [B*H*K, W/K, C] + k = split_feature_1d(k, num_splits=num_splits) + v = split_feature_1d(v, num_splits=num_splits) + + scores = ( + torch.matmul(q.view(b_new, -1, c), k.view(b_new, -1, c).permute(0, 2, 1)) / scale_factor + ) # [B*H*K, W/K, W/K] + + if with_shift: + # attn_mask: [K, W/K, W/K] + scores += attn_mask.repeat(b * h, 1, 1) # [B*H*K, W/K, W/K] + + attn = torch.softmax(scores, dim=-1) + + out = torch.matmul(attn, v.view(b_new, -1, c)) # [B*H*K, W/K, C] + + out = merge_splits_1d(out, h, num_splits=num_splits) # [B, H, W, C] + + # shift back + if with_shift: + out = torch.roll(out, shifts=shift_size_w, dims=2) + + out = out.view(b, -1, c) + + return out + + +class SelfAttnPropagation(nn.Module): + """ + flow propagation with self-attention on feature + query: feature0, key: feature0, value: flow + """ + + def __init__( + self, + in_channels, + **kwargs, + ): + super(SelfAttnPropagation, self).__init__() + + self.q_proj = nn.Linear(in_channels, in_channels) + self.k_proj = nn.Linear(in_channels, in_channels) + + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward( + self, + feature0, + flow, + local_window_attn=False, + local_window_radius=1, + **kwargs, + ): + # q, k: feature [B, C, H, W], v: flow [B, 2, H, W] + if local_window_attn: + return self.forward_local_window_attn(feature0, flow, local_window_radius=local_window_radius) + + b, c, h, w = feature0.size() + + query = feature0.view(b, c, h * w).permute(0, 2, 1) # [B, H*W, C] + + # a note: the ``correct'' implementation should be: + # ``query = self.q_proj(query), key = self.k_proj(query)'' + # this problem is observed while cleaning up the code + # however, this doesn't affect the 
performance since the projection is a linear operation, + # thus the two projection matrices for key can be merged + # so I just leave it as is in order to not re-train all models :) + query = self.q_proj(query) # [B, H*W, C] + key = self.k_proj(query) # [B, H*W, C] + + value = flow.view(b, flow.size(1), h * w).permute(0, 2, 1) # [B, H*W, 2] + + scores = torch.matmul(query, key.permute(0, 2, 1)) / (c**0.5) # [B, H*W, H*W] + prob = torch.softmax(scores, dim=-1) + + out = torch.matmul(prob, value) # [B, H*W, 2] + out = out.view(b, h, w, value.size(-1)).permute(0, 3, 1, 2) # [B, 2, H, W] + + return out + + def forward_local_window_attn( + self, + feature0, + flow, + local_window_radius=1, + ): + assert flow.size(1) == 2 or flow.size(1) == 1 # flow or disparity or depth + assert local_window_radius > 0 + + b, c, h, w = feature0.size() + + value_channel = flow.size(1) + + feature0_reshape = self.q_proj(feature0.view(b, c, -1).permute(0, 2, 1)).reshape( + b * h * w, 1, c + ) # [B*H*W, 1, C] + + kernel_size = 2 * local_window_radius + 1 + + feature0_proj = self.k_proj(feature0.view(b, c, -1).permute(0, 2, 1)).permute(0, 2, 1).reshape(b, c, h, w) + + feature0_window = F.unfold( + feature0_proj, kernel_size=kernel_size, padding=local_window_radius + ) # [B, C*(2R+1)^2), H*W] + + feature0_window = ( + feature0_window.view(b, c, kernel_size**2, h, w) + .permute(0, 3, 4, 1, 2) + .reshape(b * h * w, c, kernel_size**2) + ) # [B*H*W, C, (2R+1)^2] + + flow_window = F.unfold(flow, kernel_size=kernel_size, padding=local_window_radius) # [B, 2*(2R+1)^2), H*W] + + flow_window = ( + flow_window.view(b, value_channel, kernel_size**2, h, w) + .permute(0, 3, 4, 2, 1) + .reshape(b * h * w, kernel_size**2, value_channel) + ) # [B*H*W, (2R+1)^2, 2] + + scores = torch.matmul(feature0_reshape, feature0_window) / (c**0.5) # [B*H*W, 1, (2R+1)^2] + + prob = torch.softmax(scores, dim=-1) + + out = ( + torch.matmul(prob, flow_window).view(b, h, w, value_channel).permute(0, 3, 1, 2).contiguous() + ) # [B, 2, H, W] + + return out diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/backbone.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/backbone.py new file mode 100644 index 0000000000000000000000000000000000000000..5c2cc19f7dae5013da0c6a22d50e4bfabfed8ee6 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/backbone.py @@ -0,0 +1,128 @@ +import torch.nn as nn + +from .trident_conv import MultiScaleTridentConv + + +class ResidualBlock(nn.Module): + def __init__( + self, + in_planes, + planes, + norm_layer=nn.InstanceNorm2d, + stride=1, + dilation=1, + ): + super(ResidualBlock, self).__init__() + + self.conv1 = nn.Conv2d( + in_planes, planes, kernel_size=3, dilation=dilation, padding=dilation, stride=stride, bias=False + ) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, dilation=dilation, padding=dilation, bias=False) + self.relu = nn.ReLU(inplace=True) + + self.norm1 = norm_layer(planes) + self.norm2 = norm_layer(planes) + if not stride == 1 or in_planes != planes: + self.norm3 = norm_layer(planes) + + if stride == 1 and in_planes == planes: + self.downsample = None + else: + self.downsample = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3) + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x + y) + + +class CNNEncoder(nn.Module): + def __init__( + self, + 
output_dim=128, + norm_layer=nn.InstanceNorm2d, + num_output_scales=1, + **kwargs, + ): + super(CNNEncoder, self).__init__() + self.num_branch = num_output_scales + + feature_dims = [64, 96, 128] + + self.conv1 = nn.Conv2d(3, feature_dims[0], kernel_size=7, stride=2, padding=3, bias=False) # 1/2 + self.norm1 = norm_layer(feature_dims[0]) + self.relu1 = nn.ReLU(inplace=True) + + self.in_planes = feature_dims[0] + self.layer1 = self._make_layer(feature_dims[0], stride=1, norm_layer=norm_layer) # 1/2 + self.layer2 = self._make_layer(feature_dims[1], stride=2, norm_layer=norm_layer) # 1/4 + + # highest resolution 1/4 or 1/8 + stride = 2 if num_output_scales == 1 else 1 + self.layer3 = self._make_layer( + feature_dims[2], + stride=stride, + norm_layer=norm_layer, + ) # 1/4 or 1/8 + + self.conv2 = nn.Conv2d(feature_dims[2], output_dim, 1, 1, 0) + + if self.num_branch > 1: + if self.num_branch == 4: + strides = (1, 2, 4, 8) + elif self.num_branch == 3: + strides = (1, 2, 4) + elif self.num_branch == 2: + strides = (1, 2) + else: + raise ValueError + + self.trident_conv = MultiScaleTridentConv( + output_dim, + output_dim, + kernel_size=3, + strides=strides, + paddings=1, + num_branch=self.num_branch, + ) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1, dilation=1, norm_layer=nn.InstanceNorm2d): + layer1 = ResidualBlock(self.in_planes, dim, norm_layer=norm_layer, stride=stride, dilation=dilation) + layer2 = ResidualBlock(dim, dim, norm_layer=norm_layer, stride=1, dilation=dilation) + + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu1(x) + + x = self.layer1(x) # 1/2 + x = self.layer2(x) # 1/4 + x = self.layer3(x) # 1/8 or 1/4 + + x = self.conv2(x) + + if self.num_branch > 1: + out = self.trident_conv([x] * self.num_branch) # high to low res + else: + out = [x] + + return out diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/geometry.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/geometry.py new file mode 100644 index 0000000000000000000000000000000000000000..df4d8e38d8afabe7f4e8a69724c75427dec9bd2b --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/geometry.py @@ -0,0 +1,200 @@ +import torch +import torch.nn.functional as F + + +def coords_grid(b, h, w, homogeneous=False, device=None): + y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) # [H, W] + + stacks = [x, y] + + if homogeneous: + ones = torch.ones_like(x) # [H, W] + stacks.append(ones) + + grid = torch.stack(stacks, dim=0).float() # [2, H, W] or [3, H, W] + + grid = grid[None].repeat(b, 1, 1, 1) # [B, 2, H, W] or [B, 3, H, W] + + if device is not None: + grid = grid.to(device) + + return grid + + +def generate_window_grid(h_min, h_max, w_min, w_max, len_h, len_w, device=None): + assert device is not None + + x, y = torch.meshgrid( + [torch.linspace(w_min, w_max, len_w, device=device), torch.linspace(h_min, h_max, len_h, device=device)], + ) + grid = torch.stack((x, y), -1).transpose(0, 1).float() # [H, W, 2] + + return grid + + +def normalize_coords(coords, h, w): + # coords: [B, H, W, 2] + c = torch.Tensor([(w - 1) / 2.0, (h - 1) / 
2.0]).float().to(coords.device) + return (coords - c) / c # [-1, 1] + + +def bilinear_sample(img, sample_coords, mode="bilinear", padding_mode="zeros", return_mask=False): + # img: [B, C, H, W] + # sample_coords: [B, 2, H, W] in image scale + if sample_coords.size(1) != 2: # [B, H, W, 2] + sample_coords = sample_coords.permute(0, 3, 1, 2) + + b, _, h, w = sample_coords.shape + + # Normalize to [-1, 1] + x_grid = 2 * sample_coords[:, 0] / (w - 1) - 1 + y_grid = 2 * sample_coords[:, 1] / (h - 1) - 1 + + grid = torch.stack([x_grid, y_grid], dim=-1) # [B, H, W, 2] + + img = F.grid_sample(img, grid, mode=mode, padding_mode=padding_mode, align_corners=True) + + if return_mask: + mask = (x_grid >= -1) & (y_grid >= -1) & (x_grid <= 1) & (y_grid <= 1) # [B, H, W] + + return img, mask + + return img + + +def flow_warp(feature, flow, mask=False, padding_mode="zeros"): + b, c, h, w = feature.size() + assert flow.size(1) == 2 + + grid = coords_grid(b, h, w).to(flow.device) + flow # [B, 2, H, W] + + return bilinear_sample(feature, grid, padding_mode=padding_mode, return_mask=mask) + + +def forward_backward_consistency_check(fwd_flow, bwd_flow, alpha=0.01, beta=0.5): + # fwd_flow, bwd_flow: [B, 2, H, W] + # alpha and beta values are following UnFlow (https://arxiv.org/abs/1711.07837) + assert fwd_flow.dim() == 4 and bwd_flow.dim() == 4 + assert fwd_flow.size(1) == 2 and bwd_flow.size(1) == 2 + flow_mag = torch.norm(fwd_flow, dim=1) + torch.norm(bwd_flow, dim=1) # [B, H, W] + + warped_bwd_flow = flow_warp(bwd_flow, fwd_flow) # [B, 2, H, W] + warped_fwd_flow = flow_warp(fwd_flow, bwd_flow) # [B, 2, H, W] + + diff_fwd = torch.norm(fwd_flow + warped_bwd_flow, dim=1) # [B, H, W] + diff_bwd = torch.norm(bwd_flow + warped_fwd_flow, dim=1) + + threshold = alpha * flow_mag + beta + + fwd_occ = (diff_fwd > threshold).float() # [B, H, W] + bwd_occ = (diff_bwd > threshold).float() + + return fwd_occ, bwd_occ + + +def back_project(depth, intrinsics): + # Back project 2D pixel coords to 3D points + # depth: [B, H, W] + # intrinsics: [B, 3, 3] + b, h, w = depth.shape + grid = coords_grid(b, h, w, homogeneous=True, device=depth.device) # [B, 3, H, W] + + intrinsics_inv = torch.inverse(intrinsics) # [B, 3, 3] + + points = intrinsics_inv.bmm(grid.view(b, 3, -1)).view(b, 3, h, w) * depth.unsqueeze(1) # [B, 3, H, W] + + return points + + +def camera_transform(points_ref, extrinsics_ref=None, extrinsics_tgt=None, extrinsics_rel=None): + # Transform 3D points from reference camera to target camera + # points_ref: [B, 3, H, W] + # extrinsics_ref: [B, 4, 4] + # extrinsics_tgt: [B, 4, 4] + # extrinsics_rel: [B, 4, 4], relative pose transform + b, _, h, w = points_ref.shape + + if extrinsics_rel is None: + extrinsics_rel = torch.bmm(extrinsics_tgt, torch.inverse(extrinsics_ref)) # [B, 4, 4] + + points_tgt = ( + torch.bmm(extrinsics_rel[:, :3, :3], points_ref.view(b, 3, -1)) + extrinsics_rel[:, :3, -1:] + ) # [B, 3, H*W] + + points_tgt = points_tgt.view(b, 3, h, w) # [B, 3, H, W] + + return points_tgt + + +def reproject(points_tgt, intrinsics, return_mask=False): + # reproject to target view + # points_tgt: [B, 3, H, W] + # intrinsics: [B, 3, 3] + + b, _, h, w = points_tgt.shape + + proj_points = torch.bmm(intrinsics, points_tgt.view(b, 3, -1)).view(b, 3, h, w) # [B, 3, H, W] + + X = proj_points[:, 0] + Y = proj_points[:, 1] + Z = proj_points[:, 2].clamp(min=1e-3) + + pixel_coords = torch.stack([X / Z, Y / Z], dim=1).view(b, 2, h, w) # [B, 2, H, W] in image scale + + if return_mask: + # valid mask in pixel space + mask = ( + 
(pixel_coords[:, 0] >= 0) + & (pixel_coords[:, 0] <= (w - 1)) + & (pixel_coords[:, 1] >= 0) + & (pixel_coords[:, 1] <= (h - 1)) + ) # [B, H, W] + + return pixel_coords, mask + + return pixel_coords + + +def reproject_coords( + depth_ref, intrinsics, extrinsics_ref=None, extrinsics_tgt=None, extrinsics_rel=None, return_mask=False +): + # Compute reprojection sample coords + points_ref = back_project(depth_ref, intrinsics) # [B, 3, H, W] + points_tgt = camera_transform(points_ref, extrinsics_ref, extrinsics_tgt, extrinsics_rel=extrinsics_rel) + + if return_mask: + reproj_coords, mask = reproject(points_tgt, intrinsics, return_mask=return_mask) # [B, 2, H, W] in image scale + + return reproj_coords, mask + + reproj_coords = reproject(points_tgt, intrinsics, return_mask=return_mask) # [B, 2, H, W] in image scale + + return reproj_coords + + +def compute_flow_with_depth_pose( + depth_ref, intrinsics, extrinsics_ref=None, extrinsics_tgt=None, extrinsics_rel=None, return_mask=False +): + b, h, w = depth_ref.shape + coords_init = coords_grid(b, h, w, device=depth_ref.device) # [B, 2, H, W] + + if return_mask: + reproj_coords, mask = reproject_coords( + depth_ref, + intrinsics, + extrinsics_ref, + extrinsics_tgt, + extrinsics_rel=extrinsics_rel, + return_mask=return_mask, + ) # [B, 2, H, W] + rigid_flow = reproj_coords - coords_init + + return rigid_flow, mask + + reproj_coords = reproject_coords( + depth_ref, intrinsics, extrinsics_ref, extrinsics_tgt, extrinsics_rel=extrinsics_rel, return_mask=return_mask + ) # [B, 2, H, W] + + rigid_flow = reproj_coords - coords_init + + return rigid_flow diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/matching.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/matching.py new file mode 100644 index 0000000000000000000000000000000000000000..fe5e103d742b16edff87835a1cd4db45e15775ad --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/matching.py @@ -0,0 +1,307 @@ +import torch +import torch.nn.functional as F + +from .geometry import coords_grid, generate_window_grid, normalize_coords + + +def global_correlation_softmax( + feature0, + feature1, + pred_bidir_flow=False, +): + # global correlation + b, c, h, w = feature0.shape + feature0 = feature0.view(b, c, -1).permute(0, 2, 1) # [B, H*W, C] + feature1 = feature1.view(b, c, -1) # [B, C, H*W] + + correlation = torch.matmul(feature0, feature1).view(b, h, w, h, w) / (c**0.5) # [B, H, W, H, W] + + # flow from softmax + init_grid = coords_grid(b, h, w).to(correlation.device) # [B, 2, H, W] + grid = init_grid.view(b, 2, -1).permute(0, 2, 1) # [B, H*W, 2] + + correlation = correlation.view(b, h * w, h * w) # [B, H*W, H*W] + + if pred_bidir_flow: + correlation = torch.cat((correlation, correlation.permute(0, 2, 1)), dim=0) # [2*B, H*W, H*W] + init_grid = init_grid.repeat(2, 1, 1, 1) # [2*B, 2, H, W] + grid = grid.repeat(2, 1, 1) # [2*B, H*W, 2] + b = b * 2 + + prob = F.softmax(correlation, dim=-1) # [B, H*W, H*W] + + correspondence = torch.matmul(prob, grid).view(b, h, w, 2).permute(0, 3, 1, 2) # [B, 2, H, W] + + # when predicting bidirectional flow, flow is the concatenation of forward flow and backward flow + flow = correspondence - init_grid + + return flow, prob + + +def local_correlation_softmax( + feature0, + feature1, + local_radius, + padding_mode="zeros", +): + b, c, h, w = feature0.size() + coords_init = coords_grid(b, h, w).to(feature0.device) # [B, 2, H, W] + coords = coords_init.view(b, 2, -1).permute(0, 2, 1) # [B, H*W, 2] + + 
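+ # correlation here is restricted to a (2R+1) x (2R+1) window around each pixel
+ # (R = local_radius), rather than the full H*W x H*W global cost volume built
+ # by global_correlation_softmax above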
local_h = 2 * local_radius + 1 + local_w = 2 * local_radius + 1 + + window_grid = generate_window_grid( + -local_radius, local_radius, -local_radius, local_radius, local_h, local_w, device=feature0.device + ) # [2R+1, 2R+1, 2] + window_grid = window_grid.reshape(-1, 2).repeat(b, 1, 1, 1) # [B, 1, (2R+1)^2, 2] + sample_coords = coords.unsqueeze(-2) + window_grid # [B, H*W, (2R+1)^2, 2] + + sample_coords_softmax = sample_coords + + # exclude coords that are out of image space + valid_x = (sample_coords[:, :, :, 0] >= 0) & (sample_coords[:, :, :, 0] < w) # [B, H*W, (2R+1)^2] + valid_y = (sample_coords[:, :, :, 1] >= 0) & (sample_coords[:, :, :, 1] < h) # [B, H*W, (2R+1)^2] + + valid = valid_x & valid_y # [B, H*W, (2R+1)^2], used to mask out invalid values when softmax + + # normalize coordinates to [-1, 1] + sample_coords_norm = normalize_coords(sample_coords, h, w) # [-1, 1] + window_feature = F.grid_sample(feature1, sample_coords_norm, padding_mode=padding_mode, align_corners=True).permute( + 0, 2, 1, 3 + ) # [B, H*W, C, (2R+1)^2] + feature0_view = feature0.permute(0, 2, 3, 1).view(b, h * w, 1, c) # [B, H*W, 1, C] + + corr = torch.matmul(feature0_view, window_feature).view(b, h * w, -1) / (c**0.5) # [B, H*W, (2R+1)^2] + + # mask invalid locations + corr[~valid] = -1e9 + + prob = F.softmax(corr, -1) # [B, H*W, (2R+1)^2] + + correspondence = ( + torch.matmul(prob.unsqueeze(-2), sample_coords_softmax).squeeze(-2).view(b, h, w, 2).permute(0, 3, 1, 2) + ) # [B, 2, H, W] + + flow = correspondence - coords_init + match_prob = prob + + return flow, match_prob + + +def local_correlation_with_flow( + feature0, + feature1, + flow, + local_radius, + padding_mode="zeros", + dilation=1, +): + b, c, h, w = feature0.size() + coords_init = coords_grid(b, h, w).to(feature0.device) # [B, 2, H, W] + coords = coords_init.view(b, 2, -1).permute(0, 2, 1) # [B, H*W, 2] + + local_h = 2 * local_radius + 1 + local_w = 2 * local_radius + 1 + + window_grid = generate_window_grid( + -local_radius, local_radius, -local_radius, local_radius, local_h, local_w, device=feature0.device + ) # [2R+1, 2R+1, 2] + window_grid = window_grid.reshape(-1, 2).repeat(b, 1, 1, 1) # [B, 1, (2R+1)^2, 2] + sample_coords = coords.unsqueeze(-2) + window_grid * dilation # [B, H*W, (2R+1)^2, 2] + + # flow can be zero when using features after transformer + if not isinstance(flow, float): + sample_coords = sample_coords + flow.view(b, 2, -1).permute(0, 2, 1).unsqueeze(-2) # [B, H*W, (2R+1)^2, 2] + else: + assert flow == 0.0 + + # normalize coordinates to [-1, 1] + sample_coords_norm = normalize_coords(sample_coords, h, w) # [-1, 1] + window_feature = F.grid_sample(feature1, sample_coords_norm, padding_mode=padding_mode, align_corners=True).permute( + 0, 2, 1, 3 + ) # [B, H*W, C, (2R+1)^2] + feature0_view = feature0.permute(0, 2, 3, 1).view(b, h * w, 1, c) # [B, H*W, 1, C] + + corr = torch.matmul(feature0_view, window_feature).view(b, h * w, -1) / (c**0.5) # [B, H*W, (2R+1)^2] + + corr = corr.view(b, h, w, -1).permute(0, 3, 1, 2).contiguous() # [B, (2R+1)^2, H, W] + + return corr + + +def global_correlation_softmax_stereo( + feature0, + feature1, +): + # global correlation on horizontal direction + b, c, h, w = feature0.shape + + x_grid = torch.linspace(0, w - 1, w, device=feature0.device) # [W] + + feature0 = feature0.permute(0, 2, 3, 1) # [B, H, W, C] + feature1 = feature1.permute(0, 2, 1, 3) # [B, H, C, W] + + correlation = torch.matmul(feature0, feature1) / (c**0.5) # [B, H, W, W] + + # mask subsequent positions to make disparity positive + 
mask = torch.triu(torch.ones((w, w)), diagonal=1).type_as(feature0) # [W, W] + valid_mask = (mask == 0).unsqueeze(0).unsqueeze(0).repeat(b, h, 1, 1) # [B, H, W, W] + + correlation[~valid_mask] = -1e9 + + prob = F.softmax(correlation, dim=-1) # [B, H, W, W] + + correspondence = (x_grid.view(1, 1, 1, w) * prob).sum(-1) # [B, H, W] + + # NOTE: unlike flow, disparity is typically positive + disparity = x_grid.view(1, 1, w).repeat(b, h, 1) - correspondence # [B, H, W] + + return disparity.unsqueeze(1), prob # feature resolution + + +def local_correlation_softmax_stereo( + feature0, + feature1, + local_radius, +): + b, c, h, w = feature0.size() + coords_init = coords_grid(b, h, w).to(feature0.device) # [B, 2, H, W] + coords = coords_init.view(b, 2, -1).permute(0, 2, 1).contiguous() # [B, H*W, 2] + + local_h = 1 + local_w = 2 * local_radius + 1 + + window_grid = generate_window_grid( + 0, 0, -local_radius, local_radius, local_h, local_w, device=feature0.device + ) # [1, 2R+1, 2] + window_grid = window_grid.reshape(-1, 2).repeat(b, 1, 1, 1) # [B, 1, (2R+1), 2] + sample_coords = coords.unsqueeze(-2) + window_grid # [B, H*W, (2R+1), 2] + + sample_coords_softmax = sample_coords + + # exclude coords that are out of image space + valid_x = (sample_coords[:, :, :, 0] >= 0) & (sample_coords[:, :, :, 0] < w) # [B, H*W, (2R+1)^2] + valid_y = (sample_coords[:, :, :, 1] >= 0) & (sample_coords[:, :, :, 1] < h) # [B, H*W, (2R+1)^2] + + valid = valid_x & valid_y # [B, H*W, (2R+1)^2], used to mask out invalid values when softmax + + # normalize coordinates to [-1, 1] + sample_coords_norm = normalize_coords(sample_coords, h, w) # [-1, 1] + window_feature = F.grid_sample(feature1, sample_coords_norm, padding_mode="zeros", align_corners=True).permute( + 0, 2, 1, 3 + ) # [B, H*W, C, (2R+1)] + feature0_view = feature0.permute(0, 2, 3, 1).contiguous().view(b, h * w, 1, c) # [B, H*W, 1, C] + + corr = torch.matmul(feature0_view, window_feature).view(b, h * w, -1) / (c**0.5) # [B, H*W, (2R+1)] + + # mask invalid locations + corr[~valid] = -1e9 + + prob = F.softmax(corr, -1) # [B, H*W, (2R+1)] + + correspondence = ( + torch.matmul(prob.unsqueeze(-2), sample_coords_softmax) + .squeeze(-2) + .view(b, h, w, 2) + .permute(0, 3, 1, 2) + .contiguous() + ) # [B, 2, H, W] + + flow = correspondence - coords_init # flow at feature resolution + match_prob = prob + + flow_x = -flow[:, :1] # [B, 1, H, W] + + return flow_x, match_prob + + +def correlation_softmax_depth( + feature0, + feature1, + intrinsics, + pose, + depth_candidates, + depth_from_argmax=False, + pred_bidir_depth=False, +): + b, c, h, w = feature0.size() + assert depth_candidates.dim() == 4 # [B, D, H, W] + scale_factor = c**0.5 + + if pred_bidir_depth: + feature0, feature1 = torch.cat((feature0, feature1), dim=0), torch.cat((feature1, feature0), dim=0) + intrinsics = intrinsics.repeat(2, 1, 1) + pose = torch.cat((pose, torch.inverse(pose)), dim=0) + depth_candidates = depth_candidates.repeat(2, 1, 1, 1) + + # depth candidates are actually inverse depth + warped_feature1 = warp_with_pose_depth_candidates( + feature1, + intrinsics, + pose, + 1.0 / depth_candidates, + ) # [B, C, D, H, W] + + correlation = (feature0.unsqueeze(2) * warped_feature1).sum(1) / scale_factor # [B, D, H, W] + + match_prob = F.softmax(correlation, dim=1) # [B, D, H, W] + + # for cross-task transfer (flow -> depth), extract depth with argmax at test time + if depth_from_argmax: + index = torch.argmax(match_prob, dim=1, keepdim=True) + depth = torch.gather(depth_candidates, dim=1, index=index) + 
else: + depth = (match_prob * depth_candidates).sum(dim=1, keepdim=True) # [B, 1, H, W] + + return depth, match_prob + + +def warp_with_pose_depth_candidates( + feature1, + intrinsics, + pose, + depth, + clamp_min_depth=1e-3, +): + """ + feature1: [B, C, H, W] + intrinsics: [B, 3, 3] + pose: [B, 4, 4] + depth: [B, D, H, W] + """ + + assert intrinsics.size(1) == intrinsics.size(2) == 3 + assert pose.size(1) == pose.size(2) == 4 + assert depth.dim() == 4 + + b, d, h, w = depth.size() + c = feature1.size(1) + + with torch.no_grad(): + # pixel coordinates + grid = coords_grid(b, h, w, homogeneous=True, device=depth.device) # [B, 3, H, W] + # back project to 3D and transform viewpoint + points = torch.inverse(intrinsics).bmm(grid.view(b, 3, -1)) # [B, 3, H*W] + points = torch.bmm(pose[:, :3, :3], points).unsqueeze(2).repeat(1, 1, d, 1) * depth.view( + b, 1, d, h * w + ) # [B, 3, D, H*W] + points = points + pose[:, :3, -1:].unsqueeze(-1) # [B, 3, D, H*W] + # reproject to 2D image plane + points = torch.bmm(intrinsics, points.view(b, 3, -1)).view(b, 3, d, h * w) # [B, 3, D, H*W] + pixel_coords = points[:, :2] / points[:, -1:].clamp(min=clamp_min_depth) # [B, 2, D, H*W] + + # normalize to [-1, 1] + x_grid = 2 * pixel_coords[:, 0] / (w - 1) - 1 + y_grid = 2 * pixel_coords[:, 1] / (h - 1) - 1 + + grid = torch.stack([x_grid, y_grid], dim=-1) # [B, D, H*W, 2] + + # sample features + warped_feature = F.grid_sample( + feature1, grid.view(b, d * h, w, 2), mode="bilinear", padding_mode="zeros", align_corners=True + ).view( + b, c, d, h, w + ) # [B, C, D, H, W] + + return warped_feature diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/position.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/position.py new file mode 100644 index 0000000000000000000000000000000000000000..619f3568d4c81f41316010be6a866a0e115cfc80 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/position.py @@ -0,0 +1,47 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# https://github.com/facebookresearch/detr/blob/main/models/position_encoding.py + +import math + +import torch +import torch.nn as nn + + +class PositionEmbeddingSine(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one + used by the Attention is all you need paper, generalized to work on images. 
+ """ + + def __init__(self, num_pos_feats=64, temperature=10000, normalize=True, scale=None): + super().__init__() + self.num_pos_feats = num_pos_feats + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, x): + # x = tensor_list.tensors # [B, C, H, W] + # mask = tensor_list.mask # [B, H, W], input with padding, valid as 0 + b, c, h, w = x.size() + mask = torch.ones((b, h, w), device=x.device) # [B, H, W] + y_embed = mask.cumsum(1, dtype=torch.float32) + x_embed = mask.cumsum(2, dtype=torch.float32) + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/reg_refine.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/reg_refine.py new file mode 100644 index 0000000000000000000000000000000000000000..965f4cac62a8db3b42187b9cdbc2f679a70e6ac3 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/reg_refine.py @@ -0,0 +1,133 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class FlowHead(nn.Module): + def __init__( + self, + input_dim=128, + hidden_dim=256, + out_dim=2, + ): + super(FlowHead, self).__init__() + + self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1) + self.conv2 = nn.Conv2d(hidden_dim, out_dim, 3, padding=1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + out = self.conv2(self.relu(self.conv1(x))) + + return out + + +class SepConvGRU(nn.Module): + def __init__( + self, + hidden_dim=128, + input_dim=192 + 128, + kernel_size=5, + ): + padding = (kernel_size - 1) // 2 + + super(SepConvGRU, self).__init__() + self.convz1 = nn.Conv2d(hidden_dim + input_dim, hidden_dim, (1, kernel_size), padding=(0, padding)) + self.convr1 = nn.Conv2d(hidden_dim + input_dim, hidden_dim, (1, kernel_size), padding=(0, padding)) + self.convq1 = nn.Conv2d(hidden_dim + input_dim, hidden_dim, (1, kernel_size), padding=(0, padding)) + + self.convz2 = nn.Conv2d(hidden_dim + input_dim, hidden_dim, (kernel_size, 1), padding=(padding, 0)) + self.convr2 = nn.Conv2d(hidden_dim + input_dim, hidden_dim, (kernel_size, 1), padding=(padding, 0)) + self.convq2 = nn.Conv2d(hidden_dim + input_dim, hidden_dim, (kernel_size, 1), padding=(padding, 0)) + + def forward(self, h, x): + # horizontal + hx = torch.cat([h, x], dim=1) + z = torch.sigmoid(self.convz1(hx)) + r = torch.sigmoid(self.convr1(hx)) + q = torch.tanh(self.convq1(torch.cat([r * h, x], dim=1))) + h = (1 - z) * h + z * q + + # vertical + hx = torch.cat([h, x], dim=1) + z = torch.sigmoid(self.convz2(hx)) + r = torch.sigmoid(self.convr2(hx)) + q = torch.tanh(self.convq2(torch.cat([r * h, x], dim=1))) + h = (1 - z) * h + z * q + + return h + + +class BasicMotionEncoder(nn.Module): + def 
__init__( + self, + corr_channels=324, + flow_channels=2, + ): + super(BasicMotionEncoder, self).__init__() + + self.convc1 = nn.Conv2d(corr_channels, 256, 1, padding=0) + self.convc2 = nn.Conv2d(256, 192, 3, padding=1) + self.convf1 = nn.Conv2d(flow_channels, 128, 7, padding=3) + self.convf2 = nn.Conv2d(128, 64, 3, padding=1) + self.conv = nn.Conv2d(64 + 192, 128 - flow_channels, 3, padding=1) + + def forward(self, flow, corr): + cor = F.relu(self.convc1(corr)) + cor = F.relu(self.convc2(cor)) + flo = F.relu(self.convf1(flow)) + flo = F.relu(self.convf2(flo)) + + cor_flo = torch.cat([cor, flo], dim=1) + out = F.relu(self.conv(cor_flo)) + return torch.cat([out, flow], dim=1) + + +class BasicUpdateBlock(nn.Module): + def __init__( + self, + corr_channels=324, + hidden_dim=128, + context_dim=128, + downsample_factor=8, + flow_dim=2, + bilinear_up=False, + ): + super(BasicUpdateBlock, self).__init__() + + self.encoder = BasicMotionEncoder( + corr_channels=corr_channels, + flow_channels=flow_dim, + ) + + self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=context_dim + hidden_dim) + + self.flow_head = FlowHead( + hidden_dim, + hidden_dim=256, + out_dim=flow_dim, + ) + + if bilinear_up: + self.mask = None + else: + self.mask = nn.Sequential( + nn.Conv2d(hidden_dim, 256, 3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, downsample_factor**2 * 9, 1, padding=0), + ) + + def forward(self, net, inp, corr, flow): + motion_features = self.encoder(flow, corr) + + inp = torch.cat([inp, motion_features], dim=1) + + net = self.gru(net, inp) + delta_flow = self.flow_head(net) + + if self.mask is not None: + mask = self.mask(net) + else: + mask = None + + return net, mask, delta_flow diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/transformer.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..7fdffd17feb0328260f1a93b778801337d14a2c3 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/transformer.py @@ -0,0 +1,339 @@ +import torch +import torch.nn as nn + +from .attention import ( + single_head_full_attention, + single_head_full_attention_1d, + single_head_split_window_attention, + single_head_split_window_attention_1d, +) +from .utils import generate_shift_window_attn_mask, generate_shift_window_attn_mask_1d + + +class TransformerLayer(nn.Module): + def __init__( + self, + d_model=128, + nhead=1, + no_ffn=False, + ffn_dim_expansion=4, + ): + super(TransformerLayer, self).__init__() + + self.dim = d_model + self.nhead = nhead + self.no_ffn = no_ffn + + # multi-head attention + self.q_proj = nn.Linear(d_model, d_model, bias=False) + self.k_proj = nn.Linear(d_model, d_model, bias=False) + self.v_proj = nn.Linear(d_model, d_model, bias=False) + + self.merge = nn.Linear(d_model, d_model, bias=False) + + self.norm1 = nn.LayerNorm(d_model) + + # no ffn after self-attn, with ffn after cross-attn + if not self.no_ffn: + in_channels = d_model * 2 + self.mlp = nn.Sequential( + nn.Linear(in_channels, in_channels * ffn_dim_expansion, bias=False), + nn.GELU(), + nn.Linear(in_channels * ffn_dim_expansion, d_model, bias=False), + ) + + self.norm2 = nn.LayerNorm(d_model) + + def forward( + self, + source, + target, + height=None, + width=None, + shifted_window_attn_mask=None, + shifted_window_attn_mask_1d=None, + attn_type="swin", + with_shift=False, + attn_num_splits=None, + ): + # source, target: [B, L, C] + query, key, value = source, target, target + 
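+ # query is taken from `source` and key/value from `target`; when the two
+ # tensors coincide the layer degenerates to plain self-attention, which the
+ # is_self_attn check below detects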
+ # for stereo: 2d attn in self-attn, 1d attn in cross-attn + is_self_attn = (query - key).abs().max() < 1e-6 + + # single-head attention + query = self.q_proj(query) # [B, L, C] + key = self.k_proj(key) # [B, L, C] + value = self.v_proj(value) # [B, L, C] + + if attn_type == "swin" and attn_num_splits > 1: # self, cross-attn: both swin 2d + if self.nhead > 1: + # we observe that multihead attention slows down the speed and increases the memory consumption + # without bringing obvious performance gains and thus the implementation is removed + raise NotImplementedError + else: + message = single_head_split_window_attention( + query, + key, + value, + num_splits=attn_num_splits, + with_shift=with_shift, + h=height, + w=width, + attn_mask=shifted_window_attn_mask, + ) + + elif attn_type == "self_swin2d_cross_1d": # self-attn: swin 2d, cross-attn: full 1d + if self.nhead > 1: + raise NotImplementedError + else: + if is_self_attn: + if attn_num_splits > 1: + message = single_head_split_window_attention( + query, + key, + value, + num_splits=attn_num_splits, + with_shift=with_shift, + h=height, + w=width, + attn_mask=shifted_window_attn_mask, + ) + else: + # full 2d attn + message = single_head_full_attention(query, key, value) # [N, L, C] + + else: + # cross attn 1d + message = single_head_full_attention_1d( + query, + key, + value, + h=height, + w=width, + ) + + elif attn_type == "self_swin2d_cross_swin1d": # self-attn: swin 2d, cross-attn: swin 1d + if self.nhead > 1: + raise NotImplementedError + else: + if is_self_attn: + if attn_num_splits > 1: + # self attn shift window + message = single_head_split_window_attention( + query, + key, + value, + num_splits=attn_num_splits, + with_shift=with_shift, + h=height, + w=width, + attn_mask=shifted_window_attn_mask, + ) + else: + # full 2d attn + message = single_head_full_attention(query, key, value) # [N, L, C] + else: + if attn_num_splits > 1: + assert shifted_window_attn_mask_1d is not None + # cross attn 1d shift + message = single_head_split_window_attention_1d( + query, + key, + value, + num_splits=attn_num_splits, + with_shift=with_shift, + h=height, + w=width, + attn_mask=shifted_window_attn_mask_1d, + ) + else: + message = single_head_full_attention_1d( + query, + key, + value, + h=height, + w=width, + ) + + else: + message = single_head_full_attention(query, key, value) # [B, L, C] + + message = self.merge(message) # [B, L, C] + message = self.norm1(message) + + if not self.no_ffn: + message = self.mlp(torch.cat([source, message], dim=-1)) + message = self.norm2(message) + + return source + message + + +class TransformerBlock(nn.Module): + """self attention + cross attention + FFN""" + + def __init__( + self, + d_model=128, + nhead=1, + ffn_dim_expansion=4, + ): + super(TransformerBlock, self).__init__() + + self.self_attn = TransformerLayer( + d_model=d_model, + nhead=nhead, + no_ffn=True, + ffn_dim_expansion=ffn_dim_expansion, + ) + + self.cross_attn_ffn = TransformerLayer( + d_model=d_model, + nhead=nhead, + ffn_dim_expansion=ffn_dim_expansion, + ) + + def forward( + self, + source, + target, + height=None, + width=None, + shifted_window_attn_mask=None, + shifted_window_attn_mask_1d=None, + attn_type="swin", + with_shift=False, + attn_num_splits=None, + ): + # source, target: [B, L, C] + + # self attention + source = self.self_attn( + source, + source, + height=height, + width=width, + shifted_window_attn_mask=shifted_window_attn_mask, + attn_type=attn_type, + with_shift=with_shift, + attn_num_splits=attn_num_splits, + ) + + # cross 
attention and ffn + source = self.cross_attn_ffn( + source, + target, + height=height, + width=width, + shifted_window_attn_mask=shifted_window_attn_mask, + shifted_window_attn_mask_1d=shifted_window_attn_mask_1d, + attn_type=attn_type, + with_shift=with_shift, + attn_num_splits=attn_num_splits, + ) + + return source + + +class FeatureTransformer(nn.Module): + def __init__( + self, + num_layers=6, + d_model=128, + nhead=1, + ffn_dim_expansion=4, + ): + super(FeatureTransformer, self).__init__() + + self.d_model = d_model + self.nhead = nhead + + self.layers = nn.ModuleList( + [ + TransformerBlock( + d_model=d_model, + nhead=nhead, + ffn_dim_expansion=ffn_dim_expansion, + ) + for i in range(num_layers) + ] + ) + + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward( + self, + feature0, + feature1, + attn_type="swin", + attn_num_splits=None, + **kwargs, + ): + b, c, h, w = feature0.shape + assert self.d_model == c + + feature0 = feature0.flatten(-2).permute(0, 2, 1) # [B, H*W, C] + feature1 = feature1.flatten(-2).permute(0, 2, 1) # [B, H*W, C] + + # 2d attention + if "swin" in attn_type and attn_num_splits > 1: + # global and refine use different number of splits + window_size_h = h // attn_num_splits + window_size_w = w // attn_num_splits + + # compute attn mask once + shifted_window_attn_mask = generate_shift_window_attn_mask( + input_resolution=(h, w), + window_size_h=window_size_h, + window_size_w=window_size_w, + shift_size_h=window_size_h // 2, + shift_size_w=window_size_w // 2, + device=feature0.device, + ) # [K*K, H/K*W/K, H/K*W/K] + else: + shifted_window_attn_mask = None + + # 1d attention + if "swin1d" in attn_type and attn_num_splits > 1: + window_size_w = w // attn_num_splits + + # compute attn mask once + shifted_window_attn_mask_1d = generate_shift_window_attn_mask_1d( + input_w=w, + window_size_w=window_size_w, + shift_size_w=window_size_w // 2, + device=feature0.device, + ) # [K, W/K, W/K] + else: + shifted_window_attn_mask_1d = None + + # concat feature0 and feature1 in batch dimension to compute in parallel + concat0 = torch.cat((feature0, feature1), dim=0) # [2B, H*W, C] + concat1 = torch.cat((feature1, feature0), dim=0) # [2B, H*W, C] + + for i, layer in enumerate(self.layers): + concat0 = layer( + concat0, + concat1, + height=h, + width=w, + attn_type=attn_type, + with_shift="swin" in attn_type and attn_num_splits > 1 and i % 2 == 1, + attn_num_splits=attn_num_splits, + shifted_window_attn_mask=shifted_window_attn_mask, + shifted_window_attn_mask_1d=shifted_window_attn_mask_1d, + ) + + # update feature1 + concat1 = torch.cat(concat0.chunk(chunks=2, dim=0)[::-1], dim=0) + + feature0, feature1 = concat0.chunk(chunks=2, dim=0) # [B, H*W, C] + + # reshape back + feature0 = feature0.view(b, h, w, c).permute(0, 3, 1, 2).contiguous() # [B, C, H, W] + feature1 = feature1.view(b, h, w, c).permute(0, 3, 1, 2).contiguous() # [B, C, H, W] + + return feature0, feature1 diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/trident_conv.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/trident_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..d87579b95dfb5e40d7933264fcf917dbc508bb98 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/trident_conv.py @@ -0,0 +1,88 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+# https://github.com/facebookresearch/detectron2/blob/main/projects/TridentNet/tridentnet/trident_conv.py + +import torch +from torch import nn +from torch.nn import functional as F +from torch.nn.modules.utils import _pair + + +class MultiScaleTridentConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + strides=1, + paddings=0, + dilations=1, + dilation=1, + groups=1, + num_branch=1, + test_branch_idx=-1, + bias=False, + norm=None, + activation=None, + ): + super(MultiScaleTridentConv, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.num_branch = num_branch + self.stride = _pair(stride) + self.groups = groups + self.with_bias = bias + self.dilation = dilation + if isinstance(paddings, int): + paddings = [paddings] * self.num_branch + if isinstance(dilations, int): + dilations = [dilations] * self.num_branch + if isinstance(strides, int): + strides = [strides] * self.num_branch + self.paddings = [_pair(padding) for padding in paddings] + self.dilations = [_pair(dilation) for dilation in dilations] + self.strides = [_pair(stride) for stride in strides] + self.test_branch_idx = test_branch_idx + self.norm = norm + self.activation = activation + + assert len({self.num_branch, len(self.paddings), len(self.strides)}) == 1 + + self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels // groups, *self.kernel_size)) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + if self.bias is not None: + nn.init.constant_(self.bias, 0) + + def forward(self, inputs): + num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1 + assert len(inputs) == num_branch + + if self.training or self.test_branch_idx == -1: + outputs = [ + F.conv2d(input, self.weight, self.bias, stride, padding, self.dilation, self.groups) + for input, stride, padding in zip(inputs, self.strides, self.paddings) + ] + else: + outputs = [ + F.conv2d( + inputs[0], + self.weight, + self.bias, + self.strides[self.test_branch_idx] if self.test_branch_idx == -1 else self.strides[-1], + self.paddings[self.test_branch_idx] if self.test_branch_idx == -1 else self.paddings[-1], + self.dilation, + self.groups, + ) + ] + + if self.norm is not None: + outputs = [self.norm(x) for x in outputs] + if self.activation is not None: + outputs = [self.activation(x) for x in outputs] + return outputs diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/unimatch.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/unimatch.py new file mode 100644 index 0000000000000000000000000000000000000000..c625b991627d7cb378a29ba0b1091e80c32eae65 --- /dev/null +++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/unimatch.py @@ -0,0 +1,393 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .attention import SelfAttnPropagation +from .backbone import CNNEncoder +from .geometry import compute_flow_with_depth_pose, flow_warp +from .matching import ( + correlation_softmax_depth, + global_correlation_softmax, + global_correlation_softmax_stereo, + local_correlation_softmax, + local_correlation_softmax_stereo, + local_correlation_with_flow, +) +from .reg_refine import BasicUpdateBlock +from .transformer import FeatureTransformer +from .utils import feature_add_position, normalize_img, upsample_flow_with_mask + + +class 
UniMatch(nn.Module): + def __init__( + self, + num_scales=1, + feature_channels=128, + upsample_factor=8, + num_head=1, + ffn_dim_expansion=4, + num_transformer_layers=6, + reg_refine=False, # optional local regression refinement + task="flow", + ): + super(UniMatch, self).__init__() + + self.feature_channels = feature_channels + self.num_scales = num_scales + self.upsample_factor = upsample_factor + self.reg_refine = reg_refine + + # CNN + self.backbone = CNNEncoder(output_dim=feature_channels, num_output_scales=num_scales) + + # Transformer + self.transformer = FeatureTransformer( + num_layers=num_transformer_layers, + d_model=feature_channels, + nhead=num_head, + ffn_dim_expansion=ffn_dim_expansion, + ) + + # propagation with self-attn + self.feature_flow_attn = SelfAttnPropagation(in_channels=feature_channels) + + if not self.reg_refine or task == "depth": + # convex upsampling simiar to RAFT + # concat feature0 and low res flow as input + self.upsampler = nn.Sequential( + nn.Conv2d(2 + feature_channels, 256, 3, 1, 1), + nn.ReLU(inplace=True), + nn.Conv2d(256, upsample_factor**2 * 9, 1, 1, 0), + ) + # thus far, all the learnable parameters are task-agnostic + + if reg_refine: + # optional task-specific local regression refinement + self.refine_proj = nn.Conv2d(128, 256, 1) + self.refine = BasicUpdateBlock( + corr_channels=(2 * 4 + 1) ** 2, + downsample_factor=upsample_factor, + flow_dim=2 if task == "flow" else 1, + bilinear_up=task == "depth", + ) + + def extract_feature(self, img0, img1): + concat = torch.cat((img0, img1), dim=0) # [2B, C, H, W] + features = self.backbone(concat) # list of [2B, C, H, W], resolution from high to low + + # reverse: resolution from low to high + features = features[::-1] + + feature0, feature1 = [], [] + + for i in range(len(features)): + feature = features[i] + chunks = torch.chunk(feature, 2, 0) # tuple + feature0.append(chunks[0]) + feature1.append(chunks[1]) + + return feature0, feature1 + + def upsample_flow(self, flow, feature, bilinear=False, upsample_factor=8, is_depth=False): + if bilinear: + multiplier = 1 if is_depth else upsample_factor + up_flow = ( + F.interpolate(flow, scale_factor=upsample_factor, mode="bilinear", align_corners=True) * multiplier + ) + else: + concat = torch.cat((flow, feature), dim=1) + mask = self.upsampler(concat) + up_flow = upsample_flow_with_mask(flow, mask, upsample_factor=self.upsample_factor, is_depth=is_depth) + + return up_flow + + def forward( + self, + img0, + img1, + attn_type=None, + attn_splits_list=None, + corr_radius_list=None, + prop_radius_list=None, + num_reg_refine=1, + pred_bidir_flow=False, + task="flow", + intrinsics=None, + pose=None, # relative pose transform + min_depth=1.0 / 0.5, # inverse depth range + max_depth=1.0 / 10, + num_depth_candidates=64, + depth_from_argmax=False, + pred_bidir_depth=False, + **kwargs, + ): + if pred_bidir_flow: + assert task == "flow" + + if task == "depth": + assert self.num_scales == 1 # multi-scale depth model is not supported yet + + results_dict = {} + flow_preds = [] + + if task == "flow": + # stereo and depth tasks have normalized img in dataloader + img0, img1 = normalize_img(img0, img1) # [B, 3, H, W] + + # list of features, resolution low to high + feature0_list, feature1_list = self.extract_feature(img0, img1) # list of features + + flow = None + + if task != "depth": + assert len(attn_splits_list) == len(corr_radius_list) == len(prop_radius_list) == self.num_scales + else: + assert len(attn_splits_list) == len(prop_radius_list) == self.num_scales == 1 
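+ # coarse-to-fine loop: after the first scale, the previous flow estimate is
+ # bilinearly upsampled (x2) and used to warp feature1, and only a residual
+ # flow is predicted on top of it at the current scale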
+
+        for scale_idx in range(self.num_scales):
+            feature0, feature1 = feature0_list[scale_idx], feature1_list[scale_idx]
+
+            if pred_bidir_flow and scale_idx > 0:
+                # predicting bidirectional flow with refinement
+                feature0, feature1 = torch.cat((feature0, feature1), dim=0), torch.cat((feature1, feature0), dim=0)
+
+            feature0_ori, feature1_ori = feature0, feature1
+
+            upsample_factor = self.upsample_factor * (2 ** (self.num_scales - 1 - scale_idx))
+
+            if task == "depth":
+                # scale intrinsics
+                intrinsics_curr = intrinsics.clone()
+                intrinsics_curr[:, :2] = intrinsics_curr[:, :2] / upsample_factor
+
+            if scale_idx > 0:
+                assert task != "depth"  # not supported for multi-scale depth model
+                flow = F.interpolate(flow, scale_factor=2, mode="bilinear", align_corners=True) * 2
+
+            if flow is not None:
+                assert task != "depth"
+                flow = flow.detach()
+
+                if task == "stereo":
+                    # construct flow vector for disparity
+                    # flow here is actually disparity
+                    zeros = torch.zeros_like(flow)  # [B, 1, H, W]
+                    # NOTE: reverse disp, disparity is positive
+                    displace = torch.cat((-flow, zeros), dim=1)  # [B, 2, H, W]
+                    feature1 = flow_warp(feature1, displace)  # [B, C, H, W]
+                elif task == "flow":
+                    feature1 = flow_warp(feature1, flow)  # [B, C, H, W]
+                else:
+                    raise NotImplementedError
+
+            attn_splits = attn_splits_list[scale_idx]
+            if task != "depth":
+                corr_radius = corr_radius_list[scale_idx]
+            prop_radius = prop_radius_list[scale_idx]
+
+            # add position to features
+            feature0, feature1 = feature_add_position(feature0, feature1, attn_splits, self.feature_channels)
+
+            # Transformer
+            feature0, feature1 = self.transformer(
+                feature0,
+                feature1,
+                attn_type=attn_type,
+                attn_num_splits=attn_splits,
+            )
+
+            # correlation and softmax
+            if task == "depth":
+                # first generate depth candidates
+                b, _, h, w = feature0.size()
+                depth_candidates = torch.linspace(min_depth, max_depth, num_depth_candidates).type_as(feature0)
+                depth_candidates = depth_candidates.view(1, num_depth_candidates, 1, 1).repeat(
+                    b, 1, h, w
+                )  # [B, D, H, W]
+
+                flow_pred = correlation_softmax_depth(
+                    feature0,
+                    feature1,
+                    intrinsics_curr,
+                    pose,
+                    depth_candidates=depth_candidates,
+                    depth_from_argmax=depth_from_argmax,
+                    pred_bidir_depth=pred_bidir_depth,
+                )[0]
+
+            else:
+                if corr_radius == -1:  # global matching
+                    if task == "flow":
+                        flow_pred = global_correlation_softmax(feature0, feature1, pred_bidir_flow)[0]
+                    elif task == "stereo":
+                        flow_pred = global_correlation_softmax_stereo(feature0, feature1)[0]
+                    else:
+                        raise NotImplementedError
+                else:  # local matching
+                    if task == "flow":
+                        flow_pred = local_correlation_softmax(feature0, feature1, corr_radius)[0]
+                    elif task == "stereo":
+                        flow_pred = local_correlation_softmax_stereo(feature0, feature1, corr_radius)[0]
+                    else:
+                        raise NotImplementedError
+
+            # flow or residual flow
+            flow = flow + flow_pred if flow is not None else flow_pred
+
+            if task == "stereo":
+                flow = flow.clamp(min=0)  # positive disparity
+
+            # upsample to the original resolution for supervision at training time only
+            if self.training:
+                flow_bilinear = self.upsample_flow(
+                    flow, None, bilinear=True, upsample_factor=upsample_factor, is_depth=task == "depth"
+                )
+                flow_preds.append(flow_bilinear)
+
+            # flow propagation with self-attn
+            if (pred_bidir_flow or pred_bidir_depth) and scale_idx == 0:
+                feature0 = torch.cat((feature0, feature1), dim=0)  # [2*B, C, H, W] for propagation
+
+            flow = self.feature_flow_attn(
+                feature0,
+                flow.detach(),
+                local_window_attn=prop_radius > 0,
+                local_window_radius=prop_radius,
+            )
+
+            # bilinear upsampling at intermediate scales (exclude the last one)
+            if self.training and scale_idx < self.num_scales - 1:
+                flow_up = self.upsample_flow(
+                    flow, feature0, bilinear=True, upsample_factor=upsample_factor, is_depth=task == "depth"
+                )
+                flow_preds.append(flow_up)
+
+            if scale_idx == self.num_scales - 1:
+                if not self.reg_refine:
+                    # upsample to the original image resolution
+
+                    if task == "stereo":
+                        flow_pad = torch.cat((-flow, torch.zeros_like(flow)), dim=1)  # [B, 2, H, W]
+                        flow_up_pad = self.upsample_flow(flow_pad, feature0)
+                        flow_up = -flow_up_pad[:, :1]  # [B, 1, H, W]
+                    elif task == "depth":
+                        depth_pad = torch.cat((flow, torch.zeros_like(flow)), dim=1)  # [B, 2, H, W]
+                        depth_up_pad = self.upsample_flow(depth_pad, feature0, is_depth=True).clamp(
+                            min=min_depth, max=max_depth
+                        )
+                        flow_up = depth_up_pad[:, :1]  # [B, 1, H, W]
+                    else:
+                        flow_up = self.upsample_flow(flow, feature0)
+
+                    flow_preds.append(flow_up)
+                else:
+                    # task-specific local regression refinement
+                    # supervise current flow
+                    if self.training:
+                        flow_up = self.upsample_flow(
+                            flow, feature0, bilinear=True, upsample_factor=upsample_factor, is_depth=task == "depth"
+                        )
+                        flow_preds.append(flow_up)
+
+                    assert num_reg_refine > 0
+                    for refine_iter_idx in range(num_reg_refine):
+                        flow = flow.detach()
+
+                        if task == "stereo":
+                            zeros = torch.zeros_like(flow)  # [B, 1, H, W]
+                            # NOTE: reverse disp, disparity is positive
+                            displace = torch.cat((-flow, zeros), dim=1)  # [B, 2, H, W]
+                            correlation = local_correlation_with_flow(
+                                feature0_ori,
+                                feature1_ori,
+                                flow=displace,
+                                local_radius=4,
+                            )  # [B, (2R+1)^2, H, W]
+                        elif task == "depth":
+                            if pred_bidir_depth and refine_iter_idx == 0:
+                                intrinsics_curr = intrinsics_curr.repeat(2, 1, 1)
+                                pose = torch.cat((pose, torch.inverse(pose)), dim=0)
+
+                                feature0_ori, feature1_ori = torch.cat((feature0_ori, feature1_ori), dim=0), torch.cat(
+                                    (feature1_ori, feature0_ori), dim=0
+                                )
+
+                            flow_from_depth = compute_flow_with_depth_pose(
+                                1.0 / flow.squeeze(1),
+                                intrinsics_curr,
+                                extrinsics_rel=pose,
+                            )
+
+                            correlation = local_correlation_with_flow(
+                                feature0_ori,
+                                feature1_ori,
+                                flow=flow_from_depth,
+                                local_radius=4,
+                            )  # [B, (2R+1)^2, H, W]
+
+                        else:
+                            correlation = local_correlation_with_flow(
+                                feature0_ori,
+                                feature1_ori,
+                                flow=flow,
+                                local_radius=4,
+                            )  # [B, (2R+1)^2, H, W]
+
+                        proj = self.refine_proj(feature0)
+
+                        net, inp = torch.chunk(proj, chunks=2, dim=1)
+
+                        net = torch.tanh(net)
+                        inp = torch.relu(inp)
+
+                        net, up_mask, residual_flow = self.refine(
+                            net,
+                            inp,
+                            correlation,
+                            flow.clone(),
+                        )
+
+                        if task == "depth":
+                            flow = (flow - residual_flow).clamp(min=min_depth, max=max_depth)
+                        else:
+                            flow = flow + residual_flow
+
+                        if task == "stereo":
+                            flow = flow.clamp(min=0)  # positive
+
+                        if self.training or refine_iter_idx == num_reg_refine - 1:
+                            if task == "depth":
+                                if refine_iter_idx < num_reg_refine - 1:
+                                    # bilinear upsampling
+                                    flow_up = self.upsample_flow(
+                                        flow, feature0, bilinear=True, upsample_factor=upsample_factor, is_depth=True
+                                    )
+                                else:
+                                    # last one convex upsampling
+                                    # NOTE: clamp depth due to the zero padding in the unfold in the convex upsampling
+                                    # pad depth to 2 channels as flow
+                                    depth_pad = torch.cat((flow, torch.zeros_like(flow)), dim=1)  # [B, 2, H, W]
+                                    depth_up_pad = self.upsample_flow(depth_pad, feature0, is_depth=True).clamp(
+                                        min=min_depth, max=max_depth
+                                    )
+                                    flow_up = depth_up_pad[:, :1]  # [B, 1, H, W]
+
+                            else:
+                                flow_up = upsample_flow_with_mask(
+                                    flow, up_mask, upsample_factor=self.upsample_factor, is_depth=task == "depth"
+                                )
+
+                            flow_preds.append(flow_up)
+
+        if task == "stereo":
+            for i in range(len(flow_preds)):
+                flow_preds[i] = flow_preds[i].squeeze(1)  # [B, H, W]
+
+        # convert inverse depth to depth
+        if task == "depth":
+            for i in range(len(flow_preds)):
+                flow_preds[i] = 1.0 / flow_preds[i].squeeze(1)  # [B, H, W]
+
+        results_dict.update({"flow_preds": flow_preds})
+
+        return results_dict
diff --git a/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/utils.py b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..60f40bea290ddd9a3f36adc7b4defb6e26588d1b
--- /dev/null
+++ b/exp_code/1_benchmark/pa_vdm/tools/scoring/optical_flow/unimatch/utils.py
@@ -0,0 +1,219 @@
+import torch
+import torch.nn.functional as F
+
+from .position import PositionEmbeddingSine
+
+
+def generate_window_grid(h_min, h_max, w_min, w_max, len_h, len_w, device=None):
+    assert device is not None
+
+    x, y = torch.meshgrid(
+        [torch.linspace(w_min, w_max, len_w, device=device), torch.linspace(h_min, h_max, len_h, device=device)],
+    )
+    grid = torch.stack((x, y), -1).transpose(0, 1).float()  # [H, W, 2]
+
+    return grid
+
+
+def normalize_coords(coords, h, w):
+    # coords: [B, H, W, 2]
+    c = torch.Tensor([(w - 1) / 2.0, (h - 1) / 2.0]).float().to(coords.device)
+    return (coords - c) / c  # [-1, 1]
+
+
+def normalize_img(img0, img1):
+    # loaded images are in [0, 255]
+    # normalize by ImageNet mean and std
+    mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(img1.device)
+    std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(img1.device)
+    img0 = (img0 / 255.0 - mean) / std
+    img1 = (img1 / 255.0 - mean) / std
+
+    return img0, img1
+
+
+def split_feature(
+    feature,
+    num_splits=2,
+    channel_last=False,
+):
+    if channel_last:  # [B, H, W, C]
+        b, h, w, c = feature.size()
+        assert h % num_splits == 0 and w % num_splits == 0
+
+        b_new = b * num_splits * num_splits
+        h_new = h // num_splits
+        w_new = w // num_splits
+
+        feature = (
+            feature.view(b, num_splits, h // num_splits, num_splits, w // num_splits, c)
+            .permute(0, 1, 3, 2, 4, 5)
+            .reshape(b_new, h_new, w_new, c)
+        )  # [B*K*K, H/K, W/K, C]
+    else:  # [B, C, H, W]
+        b, c, h, w = feature.size()
+        assert h % num_splits == 0 and w % num_splits == 0
+
+        b_new = b * num_splits * num_splits
+        h_new = h // num_splits
+        w_new = w // num_splits
+
+        feature = (
+            feature.view(b, c, num_splits, h // num_splits, num_splits, w // num_splits)
+            .permute(0, 2, 4, 1, 3, 5)
+            .reshape(b_new, c, h_new, w_new)
+        )  # [B*K*K, C, H/K, W/K]
+
+    return feature
+
+
+def merge_splits(
+    splits,
+    num_splits=2,
+    channel_last=False,
+):
+    if channel_last:  # [B*K*K, H/K, W/K, C]
+        b, h, w, c = splits.size()
+        new_b = b // num_splits // num_splits
+
+        splits = splits.view(new_b, num_splits, num_splits, h, w, c)
+        merge = (
+            splits.permute(0, 1, 3, 2, 4, 5).contiguous().view(new_b, num_splits * h, num_splits * w, c)
+        )  # [B, H, W, C]
+    else:  # [B*K*K, C, H/K, W/K]
+        b, c, h, w = splits.size()
+        new_b = b // num_splits // num_splits
+
+        splits = splits.view(new_b, num_splits, num_splits, c, h, w)
+        merge = (
+            splits.permute(0, 3, 1, 4, 2, 5).contiguous().view(new_b, c, num_splits * h, num_splits * w)
+        )  # [B, C, H, W]
+
+    return merge
+
+
+def generate_shift_window_attn_mask(
+    input_resolution, window_size_h, window_size_w, shift_size_h, shift_size_w, device=torch.device("cuda")
+):
+    # ref: https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer.py
+    # calculate attention mask for SW-MSA
+    h, w = input_resolution
+    img_mask = torch.zeros((1, h, w, 1)).to(device)  # 1 H W 1
+    h_slices = (slice(0, -window_size_h), slice(-window_size_h, -shift_size_h), slice(-shift_size_h, None))
+    w_slices = (slice(0, -window_size_w), slice(-window_size_w, -shift_size_w), slice(-shift_size_w, None))
+    cnt = 0
+    for h in h_slices:
+        for w in w_slices:
+            img_mask[:, h, w, :] = cnt
+            cnt += 1
+
+    mask_windows = split_feature(img_mask, num_splits=input_resolution[-1] // window_size_w, channel_last=True)
+
+    mask_windows = mask_windows.view(-1, window_size_h * window_size_w)
+    attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+    attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+
+    return attn_mask
+
+
+def feature_add_position(feature0, feature1, attn_splits, feature_channels):
+    pos_enc = PositionEmbeddingSine(num_pos_feats=feature_channels // 2)
+
+    if attn_splits > 1:  # add position in split window
+        feature0_splits = split_feature(feature0, num_splits=attn_splits)
+        feature1_splits = split_feature(feature1, num_splits=attn_splits)
+
+        position = pos_enc(feature0_splits)
+
+        feature0_splits = feature0_splits + position
+        feature1_splits = feature1_splits + position
+
+        feature0 = merge_splits(feature0_splits, num_splits=attn_splits)
+        feature1 = merge_splits(feature1_splits, num_splits=attn_splits)
+    else:
+        position = pos_enc(feature0)
+
+        feature0 = feature0 + position
+        feature1 = feature1 + position
+
+    return feature0, feature1
+
+
+def upsample_flow_with_mask(flow, up_mask, upsample_factor, is_depth=False):
+    # convex upsampling following RAFT
+
+    mask = up_mask
+    b, flow_channel, h, w = flow.shape
+    mask = mask.view(b, 1, 9, upsample_factor, upsample_factor, h, w)  # [B, 1, 9, K, K, H, W]
+    mask = torch.softmax(mask, dim=2)
+
+    multiplier = 1 if is_depth else upsample_factor
+    up_flow = F.unfold(multiplier * flow, [3, 3], padding=1)
+    up_flow = up_flow.view(b, flow_channel, 9, 1, 1, h, w)  # [B, 2, 9, 1, 1, H, W]
+
+    up_flow = torch.sum(mask * up_flow, dim=2)  # [B, 2, K, K, H, W]
+    up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)  # [B, 2, K, H, K, W]
+    up_flow = up_flow.reshape(b, flow_channel, upsample_factor * h, upsample_factor * w)  # [B, 2, K*H, K*W]
+
+    return up_flow
+
+
+def split_feature_1d(
+    feature,
+    num_splits=2,
+):
+    # feature: [B, W, C]
+    b, w, c = feature.size()
+    assert w % num_splits == 0
+
+    b_new = b * num_splits
+    w_new = w // num_splits
+
+    feature = feature.view(b, num_splits, w // num_splits, c).view(b_new, w_new, c)  # [B*K, W/K, C]
+
+    return feature
+
+
+def merge_splits_1d(
+    splits,
+    h,
+    num_splits=2,
+):
+    b, w, c = splits.size()
+    new_b = b // num_splits // h
+
+    splits = splits.view(new_b, h, num_splits, w, c)
+    merge = splits.view(new_b, h, num_splits * w, c)  # [B, H, W, C]
+
+    return merge
+
+
+def window_partition_1d(x, window_size_w):
+    """
+    Args:
+        x: (B, W, C)
+        window_size_w (int): window size
+
+    Returns:
+        windows: (num_windows*B, window_size, C)
+    """
+    B, W, C = x.shape
+    x = x.view(B, W // window_size_w, window_size_w, C).view(-1, window_size_w, C)
+    return x
+
+
+def generate_shift_window_attn_mask_1d(input_w, window_size_w, shift_size_w, device=torch.device("cuda")):
+    # calculate attention mask for SW-MSA
+    img_mask = torch.zeros((1, input_w, 1)).to(device)  # 1 W 1
+    w_slices = (slice(0, -window_size_w), slice(-window_size_w, -shift_size_w), slice(-shift_size_w, None))
+    cnt = 0
+    for w in w_slices:
+        img_mask[:, w, :] = cnt
+        cnt += 1
+
+    mask_windows = window_partition_1d(img_mask, window_size_w)  # nW, window_size, 1
+    mask_windows = mask_windows.view(-1, window_size_w)
+    attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)  # nW, window_size, window_size
+    attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+
+    return attn_mask
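For orientation, the snippet below is a minimal usage sketch of the UniMatch flow interface added in the diff above; it is not part of the diff itself. It assumes the unimatch package and its relative imports (CNNEncoder, FeatureTransformer, etc.) are importable, and the single-scale settings shown (attn_type="swin", attn_splits_list=[2], corr_radius_list=[-1], prop_radius_list=[-1]) are illustrative assumptions, not values taken from this repository.

import torch

# Hypothetical smoke test with random images in [0, 255]; for the "flow" task the
# input H and W should be divisible by upsample_factor (8) and by the window split.
model = UniMatch(
    num_scales=1,
    feature_channels=128,
    upsample_factor=8,
    num_head=1,
    ffn_dim_expansion=4,
    num_transformer_layers=6,
    reg_refine=False,
    task="flow",
).eval()

img0 = torch.rand(1, 3, 384, 512) * 255.0
img1 = torch.rand(1, 3, 384, 512) * 255.0

with torch.no_grad():
    results = model(
        img0,
        img1,
        attn_type="swin",
        attn_splits_list=[2],
        corr_radius_list=[-1],  # -1 selects global correlation
        prop_radius_list=[-1],  # -1 selects global self-attn propagation
        task="flow",
    )

flow = results["flow_preds"][-1]  # [B, 2, H, W], final full-resolution flow

In eval mode only the final convex-upsampled prediction is appended to flow_preds, so the last element is the full-resolution flow field; during training the intermediate bilinear upsamplings are appended as well for supervision.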