Add files using upload-large-folder tool
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/__pycache__/lora.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/__pycache__/lora_framepack.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/__pycache__/lora_wan.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__init__.py +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/__init__.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/device_utils.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/huggingface_utils.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/model_utils.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/safetensors_utils.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/sai_model_spec.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/train_utils.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/device_utils.py +19 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/huggingface_utils.py +89 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/model_utils.py +151 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/safetensors_utils.py +221 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/sai_model_spec.py +286 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/train_utils.py +178 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/__init__.py +1 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/__pycache__/__init__.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/__init__.py +78 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/__pycache__/__init__.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/__pycache__/shared_config.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/__pycache__/wan_i2v_14B.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/__pycache__/wan_t2v_14B.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/__pycache__/wan_t2v_1_3B.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/shared_config.py +20 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/wan_i2v_14B.py +39 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/wan_t2v_14B.py +33 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/wan_t2v_1_3B.py +33 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__init__.py +16 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/__init__.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/attention.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/clip.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/model.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/t5.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/tokenizers.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/vae.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/xlm_roberta.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/attention.py +312 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/clip.py +546 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/model.py +958 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/t5.py +514 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/tokenizers.py +82 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/vae.py +760 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/xlm_roberta.py +170 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/utils/__init__.py +8 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/utils/__pycache__/__init__.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/utils/__pycache__/fm_solvers.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/utils/__pycache__/fm_solvers_unipc.cpython-312.pyc +0 -0
- exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/utils/fm_solvers.py +857 -0
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/__pycache__/lora.cpython-312.pyc
ADDED (binary file, 39.5 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/__pycache__/lora_framepack.cpython-312.pyc
ADDED (binary file, 2.3 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/__pycache__/lora_wan.cpython-312.pyc
ADDED (binary file, 2.3 kB)
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__init__.py
ADDED (empty file)
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/__init__.cpython-312.pyc
ADDED (binary file, 196 Bytes)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/device_utils.cpython-312.pyc
ADDED (binary file, 1.25 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/huggingface_utils.cpython-312.pyc
ADDED (binary file, 4.56 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/model_utils.cpython-312.pyc
ADDED (binary file, 6.64 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/safetensors_utils.cpython-312.pyc
ADDED (binary file, 12.5 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/sai_model_spec.cpython-312.pyc
ADDED (binary file, 7.29 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/__pycache__/train_utils.cpython-312.pyc
ADDED (binary file, 9.16 kB)
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/device_utils.py
ADDED @@ -0,0 +1,19 @@

import torch


def clean_memory_on_device(device):
    if device.type == "cuda":
        torch.cuda.empty_cache()
    elif device.type == "cpu":
        pass
    elif device.type == "mps":  # not tested
        torch.mps.empty_cache()


def synchronize_device(device: torch.device):
    if device.type == "cuda":
        torch.cuda.synchronize()
    elif device.type == "xpu":
        torch.xpu.synchronize()
    elif device.type == "mps":
        torch.mps.synchronize()
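A minimal usage sketch (not part of the diff) for the two helpers above; it degrades to a no-op on CPU-only builds of PyTorch:

# example (not part of the diff): free cached memory after a large allocation
import torch
from musubi_tuner.utils.device_utils import clean_memory_on_device, synchronize_device

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x = torch.randn(1024, 1024, device=device)
del x
synchronize_device(device)      # wait for pending kernels on cuda/xpu/mps
clean_memory_on_device(device)  # release the allocator's cache (cuda/mps)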
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/huggingface_utils.py
ADDED @@ -0,0 +1,89 @@

import threading
from typing import Union, BinaryIO
from huggingface_hub import HfApi
from pathlib import Path
import argparse
import os
import logging

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def fire_in_thread(f, *args, **kwargs):
    threading.Thread(target=f, args=args, kwargs=kwargs).start()


def exists_repo(repo_id: str, repo_type: str, revision: str = "main", token: str = None):
    api = HfApi(
        token=token,
    )
    try:
        api.repo_info(repo_id=repo_id, revision=revision, repo_type=repo_type)
        return True
    except:
        return False


def upload(
    args: argparse.Namespace,
    src: Union[str, Path, bytes, BinaryIO],
    dest_suffix: str = "",
    force_sync_upload: bool = False,
):
    repo_id = args.huggingface_repo_id
    repo_type = args.huggingface_repo_type
    token = args.huggingface_token
    path_in_repo = args.huggingface_path_in_repo + dest_suffix if args.huggingface_path_in_repo is not None else None
    private = args.huggingface_repo_visibility is None or args.huggingface_repo_visibility != "public"
    api = HfApi(token=token)
    if not exists_repo(repo_id=repo_id, repo_type=repo_type, token=token):
        try:
            api.create_repo(repo_id=repo_id, repo_type=repo_type, private=private)
        except Exception as e:  # RepositoryNotFoundError or something else
            logger.error("===========================================")
            logger.error(f"failed to create HuggingFace repo: {e}")
            logger.error("===========================================")

    is_folder = (type(src) == str and os.path.isdir(src)) or (isinstance(src, Path) and src.is_dir())

    def uploader():
        try:
            if is_folder:
                api.upload_folder(
                    repo_id=repo_id,
                    repo_type=repo_type,
                    folder_path=src,
                    path_in_repo=path_in_repo,
                )
            else:
                api.upload_file(
                    repo_id=repo_id,
                    repo_type=repo_type,
                    path_or_fileobj=src,
                    path_in_repo=path_in_repo,
                )
        except Exception as e:  # RuntimeError or something else
            logger.error("===========================================")
            logger.error(f"failed to upload to HuggingFace: {e}")
            logger.error("===========================================")

    if args.async_upload and not force_sync_upload:
        fire_in_thread(uploader)
    else:
        uploader()


def list_dir(
    repo_id: str,
    subfolder: str,
    repo_type: str,
    revision: str = "main",
    token: str = None,
):
    api = HfApi(
        token=token,
    )
    repo_info = api.repo_info(repo_id=repo_id, revision=revision, repo_type=repo_type)
    file_list = [file for file in repo_info.siblings if file.rfilename.startswith(subfolder)]
    return file_list
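`upload` reads its repository settings from the parsed training arguments. A minimal sketch (not part of the diff) of calling it directly; the attribute names are exactly the ones the function reads, and the repo id is a hypothetical placeholder:

# example (not part of the diff): synchronous upload of an output folder
import argparse
from musubi_tuner.utils.huggingface_utils import upload

args = argparse.Namespace(
    huggingface_repo_id="user/my-lora",  # hypothetical repo id
    huggingface_repo_type="model",
    huggingface_token=None,              # or a token string with write access
    huggingface_path_in_repo="outputs",
    huggingface_repo_visibility=None,    # anything other than "public" means private
    async_upload=False,                  # force the synchronous code path
)
upload(args, "output/my-lora", dest_suffix="/final")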
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/model_utils.py
ADDED @@ -0,0 +1,151 @@

import hashlib
from io import BytesIO
from typing import Optional

import safetensors.torch
import torch


def model_hash(filename):
    """Old model hash used by stable-diffusion-webui"""
    try:
        with open(filename, "rb") as file:
            m = hashlib.sha256()

            file.seek(0x100000)
            m.update(file.read(0x10000))
            return m.hexdigest()[0:8]
    except FileNotFoundError:
        return "NOFILE"
    except IsADirectoryError:  # Linux?
        return "IsADirectory"
    except PermissionError:  # Windows
        return "IsADirectory"


def calculate_sha256(filename):
    """New model hash used by stable-diffusion-webui"""
    try:
        hash_sha256 = hashlib.sha256()
        blksize = 1024 * 1024

        with open(filename, "rb") as f:
            for chunk in iter(lambda: f.read(blksize), b""):
                hash_sha256.update(chunk)

        return hash_sha256.hexdigest()
    except FileNotFoundError:
        return "NOFILE"
    except IsADirectoryError:  # Linux?
        return "IsADirectory"
    except PermissionError:  # Windows
        return "IsADirectory"


def addnet_hash_legacy(b):
    """Old model hash used by sd-webui-additional-networks for .safetensors format files"""
    m = hashlib.sha256()

    b.seek(0x100000)
    m.update(b.read(0x10000))
    return m.hexdigest()[0:8]


def addnet_hash_safetensors(b):
    """New model hash used by sd-webui-additional-networks for .safetensors format files"""
    hash_sha256 = hashlib.sha256()
    blksize = 1024 * 1024

    b.seek(0)
    header = b.read(8)
    n = int.from_bytes(header, "little")

    offset = n + 8
    b.seek(offset)
    for chunk in iter(lambda: b.read(blksize), b""):
        hash_sha256.update(chunk)

    return hash_sha256.hexdigest()


def precalculate_safetensors_hashes(tensors, metadata):
    """Precalculate the model hashes needed by sd-webui-additional-networks to
    save time on indexing the model later."""

    # Because writing user metadata to the file can change the result of
    # sd_models.model_hash(), only retain the training metadata for purposes of
    # calculating the hash, as they are meant to be immutable
    metadata = {k: v for k, v in metadata.items() if k.startswith("ss_")}

    bytes = safetensors.torch.save(tensors, metadata)
    b = BytesIO(bytes)

    model_hash = addnet_hash_safetensors(b)
    legacy_hash = addnet_hash_legacy(b)
    return model_hash, legacy_hash


def dtype_to_str(dtype: torch.dtype) -> str:
    # get name of the dtype
    dtype_name = str(dtype).split(".")[-1]
    return dtype_name


def str_to_dtype(s: Optional[str], default_dtype: Optional[torch.dtype] = None) -> torch.dtype:
    """
    Convert a string to a torch.dtype

    Args:
        s: string representation of the dtype
        default_dtype: default dtype to return if s is None

    Returns:
        torch.dtype: the corresponding torch.dtype

    Raises:
        ValueError: if the dtype is not supported

    Examples:
        >>> str_to_dtype("float32")
        torch.float32
        >>> str_to_dtype("fp32")
        torch.float32
        >>> str_to_dtype("float16")
        torch.float16
        >>> str_to_dtype("fp16")
        torch.float16
        >>> str_to_dtype("bfloat16")
        torch.bfloat16
        >>> str_to_dtype("bf16")
        torch.bfloat16
        >>> str_to_dtype("fp8")
        torch.float8_e4m3fn
        >>> str_to_dtype("fp8_e4m3fn")
        torch.float8_e4m3fn
        >>> str_to_dtype("fp8_e4m3fnuz")
        torch.float8_e4m3fnuz
        >>> str_to_dtype("fp8_e5m2")
        torch.float8_e5m2
        >>> str_to_dtype("fp8_e5m2fnuz")
        torch.float8_e5m2fnuz
    """
    if s is None:
        return default_dtype
    if s in ["bf16", "bfloat16"]:
        return torch.bfloat16
    elif s in ["fp16", "float16"]:
        return torch.float16
    elif s in ["fp32", "float32", "float"]:
        return torch.float32
    elif s in ["fp8_e4m3fn", "e4m3fn", "float8_e4m3fn"]:
        return torch.float8_e4m3fn
    elif s in ["fp8_e4m3fnuz", "e4m3fnuz", "float8_e4m3fnuz"]:
        return torch.float8_e4m3fnuz
    elif s in ["fp8_e5m2", "e5m2", "float8_e5m2"]:
        return torch.float8_e5m2
    elif s in ["fp8_e5m2fnuz", "e5m2fnuz", "float8_e5m2fnuz"]:
        return torch.float8_e5m2fnuz
    elif s in ["fp8", "float8"]:
        return torch.float8_e4m3fn  # default fp8
    else:
        raise ValueError(f"Unsupported dtype: {s}")
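A short sketch (not part of the diff) contrasting the two file-hash helpers above; the checkpoint path is a placeholder:

# example (not part of the diff): quick 8-char hash vs. full-file SHA-256
from musubi_tuner.utils.model_utils import model_hash, calculate_sha256, str_to_dtype

ckpt = "output/my-lora.safetensors"  # placeholder path
print(model_hash(ckpt))              # hashes only 64 KiB at offset 1 MiB (fast, legacy)
print(calculate_sha256(ckpt))        # streams the whole file (slow, exact)
print(str_to_dtype("bf16"))          # torch.bfloat16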
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/safetensors_utils.py
ADDED @@ -0,0 +1,221 @@

import os
import re
import torch
import json
import struct
from typing import Dict, Any, Union, Optional

from safetensors.torch import load_file


def mem_eff_save_file(tensors: Dict[str, torch.Tensor], filename: str, metadata: Dict[str, Any] = None):
    """
    memory efficient save file
    """

    _TYPES = {
        torch.float64: "F64",
        torch.float32: "F32",
        torch.float16: "F16",
        torch.bfloat16: "BF16",
        torch.int64: "I64",
        torch.int32: "I32",
        torch.int16: "I16",
        torch.int8: "I8",
        torch.uint8: "U8",
        torch.bool: "BOOL",
        getattr(torch, "float8_e5m2", None): "F8_E5M2",
        getattr(torch, "float8_e4m3fn", None): "F8_E4M3",
    }
    _ALIGN = 256

    def validate_metadata(metadata: Dict[str, Any]) -> Dict[str, str]:
        validated = {}
        for key, value in metadata.items():
            if not isinstance(key, str):
                raise ValueError(f"Metadata key must be a string, got {type(key)}")
            if not isinstance(value, str):
                print(f"Warning: Metadata value for key '{key}' is not a string. Converting to string.")
                validated[key] = str(value)
            else:
                validated[key] = value
        return validated

    # print(f"Using memory efficient save file: {filename}")

    header = {}
    offset = 0
    if metadata:
        header["__metadata__"] = validate_metadata(metadata)
    for k, v in tensors.items():
        if v.numel() == 0:  # empty tensor
            header[k] = {"dtype": _TYPES[v.dtype], "shape": list(v.shape), "data_offsets": [offset, offset]}
        else:
            size = v.numel() * v.element_size()
            header[k] = {"dtype": _TYPES[v.dtype], "shape": list(v.shape), "data_offsets": [offset, offset + size]}
            offset += size

    hjson = json.dumps(header).encode("utf-8")
    hjson += b" " * (-(len(hjson) + 8) % _ALIGN)

    with open(filename, "wb") as f:
        f.write(struct.pack("<Q", len(hjson)))
        f.write(hjson)

        for k, v in tensors.items():
            if v.numel() == 0:
                continue
            if v.is_cuda:
                # Direct GPU to disk save
                with torch.cuda.device(v.device):
                    if v.dim() == 0:  # if scalar, need to add a dimension to work with view
                        v = v.unsqueeze(0)
                    tensor_bytes = v.contiguous().view(torch.uint8)
                    tensor_bytes.cpu().numpy().tofile(f)
            else:
                # CPU tensor save
                if v.dim() == 0:  # if scalar, need to add a dimension to work with view
                    v = v.unsqueeze(0)
                v.contiguous().view(torch.uint8).numpy().tofile(f)


class MemoryEfficientSafeOpen:
    # does not support metadata loading
    def __init__(self, filename):
        self.filename = filename
        self.file = open(filename, "rb")
        self.header, self.header_size = self._read_header()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.file.close()

    def keys(self):
        return [k for k in self.header.keys() if k != "__metadata__"]

    def metadata(self) -> Dict[str, str]:
        return self.header.get("__metadata__", {})

    def get_tensor(self, key):
        if key not in self.header:
            raise KeyError(f"Tensor '{key}' not found in the file")

        metadata = self.header[key]
        offset_start, offset_end = metadata["data_offsets"]

        if offset_start == offset_end:
            tensor_bytes = None
        else:
            # adjust offset by header size
            self.file.seek(self.header_size + 8 + offset_start)
            tensor_bytes = self.file.read(offset_end - offset_start)

        return self._deserialize_tensor(tensor_bytes, metadata)

    def _read_header(self):
        header_size = struct.unpack("<Q", self.file.read(8))[0]
        header_json = self.file.read(header_size).decode("utf-8")
        return json.loads(header_json), header_size

    def _deserialize_tensor(self, tensor_bytes, metadata):
        dtype = self._get_torch_dtype(metadata["dtype"])
        shape = metadata["shape"]

        if tensor_bytes is None:
            byte_tensor = torch.empty(0, dtype=torch.uint8)
        else:
            tensor_bytes = bytearray(tensor_bytes)  # make it writable
            byte_tensor = torch.frombuffer(tensor_bytes, dtype=torch.uint8)

        # process float8 types
        if metadata["dtype"] in ["F8_E5M2", "F8_E4M3"]:
            return self._convert_float8(byte_tensor, metadata["dtype"], shape)

        # convert to the target dtype and reshape
        return byte_tensor.view(dtype).reshape(shape)

    @staticmethod
    def _get_torch_dtype(dtype_str):
        dtype_map = {
            "F64": torch.float64,
            "F32": torch.float32,
            "F16": torch.float16,
            "BF16": torch.bfloat16,
            "I64": torch.int64,
            "I32": torch.int32,
            "I16": torch.int16,
            "I8": torch.int8,
            "U8": torch.uint8,
            "BOOL": torch.bool,
        }
        # add float8 types if available
        if hasattr(torch, "float8_e5m2"):
            dtype_map["F8_E5M2"] = torch.float8_e5m2
        if hasattr(torch, "float8_e4m3fn"):
            dtype_map["F8_E4M3"] = torch.float8_e4m3fn
        return dtype_map.get(dtype_str)

    @staticmethod
    def _convert_float8(byte_tensor, dtype_str, shape):
        if dtype_str == "F8_E5M2" and hasattr(torch, "float8_e5m2"):
            return byte_tensor.view(torch.float8_e5m2).reshape(shape)
        elif dtype_str == "F8_E4M3" and hasattr(torch, "float8_e4m3fn"):
            return byte_tensor.view(torch.float8_e4m3fn).reshape(shape)
        else:
            # # convert to float16 if float8 is not supported
            # print(f"Warning: {dtype_str} is not supported in this PyTorch version. Converting to float16.")
            # return byte_tensor.view(torch.uint8).to(torch.float16).reshape(shape)
            raise ValueError(f"Unsupported float8 type: {dtype_str} (upgrade PyTorch to support float8 types)")


def load_safetensors(
    path: str, device: Union[str, torch.device], disable_mmap: bool = False, dtype: Optional[torch.dtype] = None
) -> dict[str, torch.Tensor]:
    if disable_mmap:
        # return safetensors.torch.load(open(path, "rb").read())
        # use experimental loader
        # logger.info(f"Loading without mmap (experimental)")
        state_dict = {}
        with MemoryEfficientSafeOpen(path) as f:
            for key in f.keys():
                state_dict[key] = f.get_tensor(key).to(device, dtype=dtype)
        return state_dict
    else:
        try:
            state_dict = load_file(path, device=device)
        except:
            state_dict = load_file(path)  # prevent device invalid Error
        if dtype is not None:
            for key in state_dict.keys():
                state_dict[key] = state_dict[key].to(dtype=dtype)
        return state_dict


def load_split_weights(
    file_path: str, device: Union[str, torch.device] = "cpu", disable_mmap: bool = False
) -> Dict[str, torch.Tensor]:
    """
    Load split weights from a file. If the file name ends with 00001-of-00004 etc, it will load all files with the same prefix.
    dtype is as is, no conversion is done.
    """
    device = torch.device(device)

    # if the file name ends with 00001-of-00004 etc, we need to load the files with the same prefix
    basename = os.path.basename(file_path)
    match = re.match(r"^(.*?)(\d+)-of-(\d+)\.safetensors$", basename)
    if match:
        prefix = basename[: match.start(2)]
        count = int(match.group(3))
        state_dict = {}
        for i in range(count):
            filename = f"{prefix}{i+1:05d}-of-{count:05d}.safetensors"
            filepath = os.path.join(os.path.dirname(file_path), filename)
            if os.path.exists(filepath):
                state_dict.update(load_safetensors(filepath, device=device, disable_mmap=disable_mmap))
            else:
                raise FileNotFoundError(f"File {filepath} not found")
    else:
        state_dict = load_safetensors(file_path, device=device, disable_mmap=disable_mmap)
    return state_dict
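A round-trip sketch (not part of the diff) for the writer and reader above, using a throwaway file name:

# example (not part of the diff): save with the memory-efficient writer, load without mmap
import torch
from musubi_tuner.utils.safetensors_utils import mem_eff_save_file, load_safetensors

tensors = {"weight": torch.randn(4, 4), "bias": torch.zeros(4)}
mem_eff_save_file(tensors, "tmp_model.safetensors", metadata={"ss_note": "demo"})

state_dict = load_safetensors("tmp_model.safetensors", device="cpu", disable_mmap=True)
assert torch.equal(state_dict["weight"], tensors["weight"])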
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/sai_model_spec.py
ADDED @@ -0,0 +1,286 @@

# based on https://github.com/Stability-AI/ModelSpec
import datetime
import hashlib
from io import BytesIO
import os
from typing import List, Optional, Tuple, Union
import safetensors
import logging

from musubi_tuner.dataset.image_video_dataset import ARCHITECTURE_HUNYUAN_VIDEO, ARCHITECTURE_WAN, ARCHITECTURE_FRAMEPACK

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


r"""
# Metadata Example
metadata = {
    # === Must ===
    "modelspec.sai_model_spec": "1.0.0",  # Required version ID for the spec
    "modelspec.architecture": "stable-diffusion-xl-v1-base",  # Architecture, reference the ID of the original model of the arch to match the ID
    "modelspec.implementation": "sgm",
    "modelspec.title": "Example Model Version 1.0",  # Clean, human-readable title. May use your own phrasing/language/etc
    # === Should ===
    "modelspec.author": "Example Corp",  # Your name or company name
    "modelspec.description": "This is my example model to show you how to do it!",  # Describe the model in your own words/language/etc. Focus on what users need to know
    "modelspec.date": "2023-07-20",  # ISO-8601 compliant date of when the model was created
    # === Can ===
    "modelspec.license": "ExampleLicense-1.0",  # eg CreativeML Open RAIL, etc.
    "modelspec.usage_hint": "Use keyword 'example'"  # In your own language, very short hints about how the user should use the model
}
"""

BASE_METADATA = {
    # === Must ===
    "modelspec.sai_model_spec": "1.0.0",  # Required version ID for the spec
    "modelspec.architecture": None,
    "modelspec.implementation": None,
    "modelspec.title": None,
    "modelspec.resolution": None,
    # === Should ===
    "modelspec.description": None,
    "modelspec.author": None,
    "modelspec.date": None,
    # === Can ===
    "modelspec.license": None,
    "modelspec.tags": None,
    "modelspec.merged_from": None,
    "modelspec.prediction_type": None,
    "modelspec.timestep_range": None,
    "modelspec.encoder_layer": None,
}

# define separately only the keys that are used on their own
MODELSPEC_TITLE = "modelspec.title"

ARCH_HUNYUAN_VIDEO = "hunyuan-video"

# Official Wan2.1 weights do not have sai_model_spec, so we use this as an architecture name
ARCH_WAN = "wan2.1"

ARCH_FRAMEPACK = "framepack"

ADAPTER_LORA = "lora"

IMPL_HUNYUAN_VIDEO = "https://github.com/Tencent/HunyuanVideo"
IMPL_WAN = "https://github.com/Wan-Video/Wan2.1"
IMPL_FRAMEPACK = "https://github.com/lllyasviel/FramePack"

PRED_TYPE_EPSILON = "epsilon"
# PRED_TYPE_V = "v"


def load_bytes_in_safetensors(tensors):
    bytes = safetensors.torch.save(tensors)
    b = BytesIO(bytes)

    b.seek(0)
    header = b.read(8)
    n = int.from_bytes(header, "little")

    offset = n + 8
    b.seek(offset)

    return b.read()


def precalculate_safetensors_hashes(state_dict):
    # calculate each tensor one by one to reduce memory usage
    hash_sha256 = hashlib.sha256()
    for tensor in state_dict.values():
        single_tensor_sd = {"tensor": tensor}
        bytes_for_tensor = load_bytes_in_safetensors(single_tensor_sd)
        hash_sha256.update(bytes_for_tensor)

    return f"0x{hash_sha256.hexdigest()}"


def update_hash_sha256(metadata: dict, state_dict: dict):
    raise NotImplementedError


def build_metadata(
    state_dict: Optional[dict],
    architecture: str,
    timestamp: float,
    title: Optional[str] = None,
    reso: Optional[Union[int, Tuple[int, int]]] = None,
    author: Optional[str] = None,
    description: Optional[str] = None,
    license: Optional[str] = None,
    tags: Optional[str] = None,
    merged_from: Optional[str] = None,
    timesteps: Optional[Tuple[int, int]] = None,
    is_lora: bool = True,
):
    metadata = {}
    metadata.update(BASE_METADATA)

    # TODO implement if we can calculate hash without loading all tensors
    # if state_dict is not None:
    #     hash = precalculate_safetensors_hashes(state_dict)
    #     metadata["modelspec.hash_sha256"] = hash

    # arch = ARCH_HUNYUAN_VIDEO
    if architecture == ARCHITECTURE_HUNYUAN_VIDEO:
        arch = ARCH_HUNYUAN_VIDEO
        impl = IMPL_HUNYUAN_VIDEO
    elif architecture == ARCHITECTURE_WAN:
        arch = ARCH_WAN
        impl = IMPL_WAN
    elif architecture == ARCHITECTURE_FRAMEPACK:
        arch = ARCH_FRAMEPACK
        impl = IMPL_FRAMEPACK
    else:
        raise ValueError(f"Unknown architecture: {architecture}")

    if is_lora:
        arch += f"/{ADAPTER_LORA}"
    metadata["modelspec.architecture"] = arch

    metadata["modelspec.implementation"] = impl

    if title is None:
        title = "LoRA" if is_lora else "Hunyuan-Video"
        title += f"@{timestamp}"
    metadata[MODELSPEC_TITLE] = title

    if author is not None:
        metadata["modelspec.author"] = author
    else:
        del metadata["modelspec.author"]

    if description is not None:
        metadata["modelspec.description"] = description
    else:
        del metadata["modelspec.description"]

    if merged_from is not None:
        metadata["modelspec.merged_from"] = merged_from
    else:
        del metadata["modelspec.merged_from"]

    if license is not None:
        metadata["modelspec.license"] = license
    else:
        del metadata["modelspec.license"]

    if tags is not None:
        metadata["modelspec.tags"] = tags
    else:
        del metadata["modelspec.tags"]

    # remove microsecond from time
    int_ts = int(timestamp)

    # time to iso-8601 compliant date
    date = datetime.datetime.fromtimestamp(int_ts).isoformat()
    metadata["modelspec.date"] = date

    if reso is not None:
        # comma separated to tuple
        if isinstance(reso, str):
            reso = tuple(map(int, reso.split(",")))
        if len(reso) == 1:
            reso = (reso[0], reso[0])
    else:
        # resolution is defined in dataset, so use default
        reso = (1280, 720)
    if isinstance(reso, int):
        reso = (reso, reso)

    metadata["modelspec.resolution"] = f"{reso[0]}x{reso[1]}"

    # metadata["modelspec.prediction_type"] = PRED_TYPE_EPSILON
    del metadata["modelspec.prediction_type"]

    if timesteps is not None:
        if isinstance(timesteps, str) or isinstance(timesteps, int):
            timesteps = (timesteps, timesteps)
        if len(timesteps) == 1:
            timesteps = (timesteps[0], timesteps[0])
        metadata["modelspec.timestep_range"] = f"{timesteps[0]},{timesteps[1]}"
    else:
        del metadata["modelspec.timestep_range"]

    # if clip_skip is not None:
    #     metadata["modelspec.encoder_layer"] = f"{clip_skip}"
    # else:
    del metadata["modelspec.encoder_layer"]

    # # assert all values are filled
    # assert all([v is not None for v in metadata.values()]), metadata
    if not all([v is not None for v in metadata.values()]):
        logger.error(f"Internal error: some metadata values are None: {metadata}")

    return metadata


# region utils


def get_title(metadata: dict) -> Optional[str]:
    return metadata.get(MODELSPEC_TITLE, None)


def load_metadata_from_safetensors(model: str) -> dict:
    if not model.endswith(".safetensors"):
        return {}

    with safetensors.safe_open(model, framework="pt") as f:
        metadata = f.metadata()
    if metadata is None:
        metadata = {}
    return metadata


def build_merged_from(models: List[str]) -> str:
    def get_title(model: str):
        metadata = load_metadata_from_safetensors(model)
        title = metadata.get(MODELSPEC_TITLE, None)
        if title is None:
            title = os.path.splitext(os.path.basename(model))[0]  # use filename
        return title

    titles = [get_title(model) for model in models]
    return ", ".join(titles)


# endregion


r"""
if __name__ == "__main__":
    import argparse
    import torch
    from safetensors.torch import load_file
    from library import train_util

    parser = argparse.ArgumentParser()
    parser.add_argument("--ckpt", type=str, required=True)
    args = parser.parse_args()

    print(f"Loading {args.ckpt}")
    state_dict = load_file(args.ckpt)

    print(f"Calculating metadata")
    metadata = get(state_dict, False, False, False, False, "sgm", False, False, "title", "date", 256, 1000, 0)
    print(metadata)
    del state_dict

    # by reference implementation
    with open(args.ckpt, mode="rb") as file_data:
        file_hash = hashlib.sha256()
        head_len = struct.unpack("Q", file_data.read(8))  # int64 header length prefix
        header = json.loads(file_data.read(head_len[0]))  # header itself, json string
        content = (
            file_data.read()
        )  # All other content is tightly packed tensors. Copy to RAM for simplicity, but you can avoid this read with a more careful FS-dependent impl.
        file_hash.update(content)
        # ===== Update the hash for modelspec =====
        by_ref = f"0x{file_hash.hexdigest()}"
    print(by_ref)
    print("is same?", by_ref == metadata["modelspec.hash_sha256"])

"""
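A minimal sketch (not part of the diff) of building the metadata dict for a trained LoRA; `ARCHITECTURE_WAN` comes from the dataset module imported at the top of the file:

# example (not part of the diff): build sai-model-spec metadata for a Wan LoRA
import time
from musubi_tuner.dataset.image_video_dataset import ARCHITECTURE_WAN
from musubi_tuner.utils.sai_model_spec import build_metadata

metadata = build_metadata(
    None,             # state_dict is unused until hashing is implemented
    ARCHITECTURE_WAN,
    time.time(),
    title="my-wan-lora",
    reso=(960, 544),
    is_lora=True,
)
print(metadata["modelspec.architecture"])  # "wan2.1/lora"
print(metadata["modelspec.resolution"])    # "960x544"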
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/utils/train_utils.py
ADDED @@ -0,0 +1,178 @@

import argparse
import logging
import os
import shutil

import accelerate
import torch

from musubi_tuner.utils import huggingface_utils

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


# checkpoint file names
EPOCH_STATE_NAME = "{}-{:06d}-state"
EPOCH_FILE_NAME = "{}-{:06d}"
EPOCH_DIFFUSERS_DIR_NAME = "{}-{:06d}"
LAST_STATE_NAME = "{}-state"
STEP_STATE_NAME = "{}-step{:08d}-state"
STEP_FILE_NAME = "{}-step{:08d}"
STEP_DIFFUSERS_DIR_NAME = "{}-step{:08d}"


def get_sanitized_config_or_none(args: argparse.Namespace):
    # if `--log_config` is enabled, return args for logging. if not, return None.
    # when `--log_config` is enabled, filter out sensitive values from args
    # if wandb is not enabled, the log is not exposed to the public, but it is fine to filter out sensitive values to be safe

    if not args.log_config:
        return None

    sensitive_args = ["wandb_api_key", "huggingface_token"]
    sensitive_path_args = [
        "dit",
        "vae",
        "text_encoder1",
        "text_encoder2",
        "image_encoder",
        "base_weights",
        "network_weights",
        "output_dir",
        "logging_dir",
    ]
    filtered_args = {}
    for k, v in vars(args).items():
        # filter out sensitive values and convert to string if necessary
        if k not in sensitive_args + sensitive_path_args:
            # Accelerate values need to have type `bool`, `str`, `float`, `int`, or `None`.
            if v is None or isinstance(v, bool) or isinstance(v, str) or isinstance(v, float) or isinstance(v, int):
                filtered_args[k] = v
            # accelerate does not support lists
            elif isinstance(v, list):
                filtered_args[k] = f"{v}"
            # accelerate does not support objects
            elif isinstance(v, object):
                filtered_args[k] = f"{v}"

    return filtered_args


class LossRecorder:
    def __init__(self):
        self.loss_list: list[float] = []
        self.loss_total: float = 0.0

    def add(self, *, epoch: int, step: int, loss: float) -> None:
        if epoch == 0:
            self.loss_list.append(loss)
        else:
            while len(self.loss_list) <= step:
                self.loss_list.append(0.0)
            self.loss_total -= self.loss_list[step]
            self.loss_list[step] = loss
        self.loss_total += loss

    @property
    def moving_average(self) -> float:
        return self.loss_total / len(self.loss_list)


def get_epoch_ckpt_name(model_name, epoch_no: int):
    return EPOCH_FILE_NAME.format(model_name, epoch_no) + ".safetensors"


def get_step_ckpt_name(model_name, step_no: int):
    return STEP_FILE_NAME.format(model_name, step_no) + ".safetensors"


def get_last_ckpt_name(model_name):
    return model_name + ".safetensors"


def get_remove_epoch_no(args: argparse.Namespace, epoch_no: int):
    if args.save_last_n_epochs is None:
        return None

    remove_epoch_no = epoch_no - args.save_every_n_epochs * args.save_last_n_epochs
    if remove_epoch_no < 0:
        return None
    return remove_epoch_no


def get_remove_step_no(args: argparse.Namespace, step_no: int):
    if args.save_last_n_steps is None:
        return None

    # calculate the step number to remove from the last_n_steps and save_every_n_steps
    # e.g. if save_every_n_steps=10, save_last_n_steps=30, at step 50, keep 30 steps and remove step 10
    remove_step_no = step_no - args.save_last_n_steps - 1
    remove_step_no = remove_step_no - (remove_step_no % args.save_every_n_steps)
    if remove_step_no < 0:
        return None
    return remove_step_no


def save_and_remove_state_on_epoch_end(args: argparse.Namespace, accelerator: accelerate.Accelerator, epoch_no: int):
    model_name = args.output_name

    logger.info("")
    logger.info(f"saving state at epoch {epoch_no}")
    os.makedirs(args.output_dir, exist_ok=True)

    state_dir = os.path.join(args.output_dir, EPOCH_STATE_NAME.format(model_name, epoch_no))
    accelerator.save_state(state_dir)
    if args.save_state_to_huggingface:
        logger.info("uploading state to huggingface.")
        huggingface_utils.upload(args, state_dir, "/" + EPOCH_STATE_NAME.format(model_name, epoch_no))

    last_n_epochs = args.save_last_n_epochs_state if args.save_last_n_epochs_state else args.save_last_n_epochs
    if last_n_epochs is not None:
        remove_epoch_no = epoch_no - args.save_every_n_epochs * last_n_epochs
        state_dir_old = os.path.join(args.output_dir, EPOCH_STATE_NAME.format(model_name, remove_epoch_no))
        if os.path.exists(state_dir_old):
            logger.info(f"removing old state: {state_dir_old}")
            shutil.rmtree(state_dir_old)


def save_and_remove_state_stepwise(args: argparse.Namespace, accelerator: accelerate.Accelerator, step_no: int):
    model_name = args.output_name

    logger.info("")
    logger.info(f"saving state at step {step_no}")
    os.makedirs(args.output_dir, exist_ok=True)

    state_dir = os.path.join(args.output_dir, STEP_STATE_NAME.format(model_name, step_no))
    accelerator.save_state(state_dir)
    if args.save_state_to_huggingface:
        logger.info("uploading state to huggingface.")
        huggingface_utils.upload(args, state_dir, "/" + STEP_STATE_NAME.format(model_name, step_no))

    last_n_steps = args.save_last_n_steps_state if args.save_last_n_steps_state else args.save_last_n_steps
    if last_n_steps is not None:
        # from the step last_n_steps back, compute the step_no that is a multiple of save_every_n_steps and remove it
        remove_step_no = step_no - last_n_steps - 1
        remove_step_no = remove_step_no - (remove_step_no % args.save_every_n_steps)

        if remove_step_no > 0:
            state_dir_old = os.path.join(args.output_dir, STEP_STATE_NAME.format(model_name, remove_step_no))
            if os.path.exists(state_dir_old):
                logger.info(f"removing old state: {state_dir_old}")
                shutil.rmtree(state_dir_old)


def save_state_on_train_end(args: argparse.Namespace, accelerator: accelerate.Accelerator):
    model_name = args.output_name

    logger.info("")
    logger.info("saving last state.")
    os.makedirs(args.output_dir, exist_ok=True)

    state_dir = os.path.join(args.output_dir, LAST_STATE_NAME.format(model_name))
    accelerator.save_state(state_dir)

    if args.save_state_to_huggingface:
        logger.info("uploading last state to huggingface.")
        huggingface_utils.upload(args, state_dir, "/" + LAST_STATE_NAME.format(model_name))
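A worked sketch (not part of the diff) of the checkpoint-rotation arithmetic in `get_remove_step_no`, matching the example in its comment:

# example (not part of the diff): which old step checkpoint gets deleted
import argparse
from musubi_tuner.utils.train_utils import get_remove_step_no, get_step_ckpt_name

args = argparse.Namespace(save_every_n_steps=10, save_last_n_steps=30)
# at step 50: 50 - 30 - 1 = 19, rounded down to a multiple of 10 -> step 10
print(get_remove_step_no(args, 50))       # 10
print(get_step_ckpt_name("my-lora", 50))  # my-lora-step00000050.safetensors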
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/__init__.py
ADDED @@ -0,0 +1 @@

# from . import configs, distributed, modules
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/__pycache__/__init__.cpython-312.pyc
ADDED (binary file, 194 Bytes)
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/__init__.py
ADDED @@ -0,0 +1,78 @@

# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
import copy
import os
import torch

os.environ["TOKENIZERS_PARALLELISM"] = "false"

from musubi_tuner.wan.configs.wan_i2v_14B import i2v_14B
from musubi_tuner.wan.configs.wan_t2v_1_3B import t2v_1_3B
from musubi_tuner.wan.configs.wan_t2v_14B import t2v_14B

# the config of t2i_14B is the same as t2v_14B
t2i_14B = copy.deepcopy(t2v_14B)
t2i_14B.__name__ = "Config: Wan T2I 14B"

# the config of flf2v_14B is the same as i2v_14B
flf2v_14B = copy.deepcopy(i2v_14B)
flf2v_14B.__name__ = "Config: Wan FLF2V 14B"
flf2v_14B.sample_neg_prompt = "镜头切换," + flf2v_14B.sample_neg_prompt
flf2v_14B.i2v = False
flf2v_14B.flf2v = True  # this is a first and last frame model, so set flf2v to True

# support Fun models: deepcopy and change some configs. FC denotes Fun Control
t2v_1_3B_FC = copy.deepcopy(t2v_1_3B)
t2v_1_3B_FC.__name__ = "Config: Wan-Fun-Control T2V 1.3B"
t2v_1_3B_FC.i2v = True  # this is strange, but Fun-Control model needs this because it has img cross-attention
t2v_1_3B_FC.in_dim = 48
t2v_1_3B_FC.is_fun_control = True

t2v_14B_FC = copy.deepcopy(t2v_14B)
t2v_14B_FC.__name__ = "Config: Wan-Fun-Control T2V 14B"
t2v_14B_FC.i2v = True  # this is strange, but Fun-Control model needs this because it has img cross-attention
t2v_14B_FC.in_dim = 48  # same as i2v_14B, use zeros for image latents
t2v_14B_FC.is_fun_control = True

i2v_14B_FC = copy.deepcopy(i2v_14B)
i2v_14B_FC.__name__ = "Config: Wan-Fun-Control I2V 14B"
i2v_14B_FC.in_dim = 48
i2v_14B_FC.is_fun_control = True

WAN_CONFIGS = {
    "t2v-14B": t2v_14B,
    "t2v-1.3B": t2v_1_3B,
    "i2v-14B": i2v_14B,
    "t2i-14B": t2i_14B,
    "flf2v-14B": flf2v_14B,
    # Fun Control models
    "t2v-1.3B-FC": t2v_1_3B_FC,
    "t2v-14B-FC": t2v_14B_FC,
    "i2v-14B-FC": i2v_14B_FC,
}

SIZE_CONFIGS = {
    "720*1280": (720, 1280),
    "1280*720": (1280, 720),
    "480*832": (480, 832),
    "832*480": (832, 480),
    "1024*1024": (1024, 1024),
}

MAX_AREA_CONFIGS = {
    "720*1280": 720 * 1280,
    "1280*720": 1280 * 720,
    "480*832": 480 * 832,
    "832*480": 832 * 480,
}

SUPPORTED_SIZES = {
    "t2v-14B": ("720*1280", "1280*720", "480*832", "832*480"),
    "t2v-1.3B": ("480*832", "832*480"),
    "i2v-14B": ("720*1280", "1280*720", "480*832", "832*480"),
    "t2i-14B": tuple(SIZE_CONFIGS.keys()),
    "flf2v-14B": ("720*1280", "1280*720", "480*832", "832*480"),
    # Fun Control models
    "t2v-1.3B-FC": ("480*832", "832*480"),
    "t2v-14B-FC": ("720*1280", "1280*720", "480*832", "832*480"),
    "i2v-14B-FC": ("720*1280", "1280*720", "480*832", "832*480"),
}
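A minimal sketch (not part of the diff) of looking up a model configuration and validating a requested size against it:

# example (not part of the diff): pick the Wan T2V 1.3B config and check a size
from musubi_tuner.wan.configs import WAN_CONFIGS, SIZE_CONFIGS, SUPPORTED_SIZES

cfg = WAN_CONFIGS["t2v-1.3B"]
print(cfg.__name__, cfg.sample_fps)  # config name plus a value from the shared config

size = "832*480"
assert size in SUPPORTED_SIZES["t2v-1.3B"]
height, width = SIZE_CONFIGS[size]   # (832, 480)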
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/__pycache__/__init__.cpython-312.pyc
ADDED (binary file, 2.06 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/__pycache__/shared_config.cpython-312.pyc
ADDED (binary file, 1.09 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/__pycache__/wan_i2v_14B.cpython-312.pyc
ADDED (binary file, 1.49 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/__pycache__/wan_t2v_14B.cpython-312.pyc
ADDED (binary file, 1.19 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/__pycache__/wan_t2v_1_3B.cpython-312.pyc
ADDED (binary file, 1.2 kB)
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/shared_config.py
ADDED @@ -0,0 +1,20 @@

# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
import torch
from easydict import EasyDict

# ------------------------ Wan shared config ------------------------#
wan_shared_cfg = EasyDict()

# t5
wan_shared_cfg.t5_model = 'umt5_xxl'
wan_shared_cfg.t5_dtype = torch.bfloat16
wan_shared_cfg.text_len = 512

# transformer
wan_shared_cfg.param_dtype = torch.bfloat16
wan_shared_cfg.out_dim = 16

# inference
wan_shared_cfg.num_train_timesteps = 1000
wan_shared_cfg.sample_fps = 16
wan_shared_cfg.sample_neg_prompt = '色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走'
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/wan_i2v_14B.py
ADDED
@@ -0,0 +1,39 @@
+# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
+import torch
+from easydict import EasyDict
+
+from musubi_tuner.wan.configs.shared_config import wan_shared_cfg
+
+# ------------------------ Wan I2V 14B ------------------------#
+
+i2v_14B = EasyDict(__name__="Config: Wan I2V 14B")
+i2v_14B.update(wan_shared_cfg)
+i2v_14B.i2v = True
+i2v_14B.is_fun_control = False
+i2v_14B.flf2v = False
+
+i2v_14B.t5_checkpoint = "models_t5_umt5-xxl-enc-bf16.pth"
+i2v_14B.t5_tokenizer = "google/umt5-xxl"
+
+# clip
+i2v_14B.clip_model = "clip_xlm_roberta_vit_h_14"
+i2v_14B.clip_dtype = torch.float16
+i2v_14B.clip_checkpoint = "models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth"
+i2v_14B.clip_tokenizer = "xlm-roberta-large"
+
+# vae
+i2v_14B.vae_checkpoint = "Wan2.1_VAE.pth"
+i2v_14B.vae_stride = (4, 8, 8)
+
+# transformer
+i2v_14B.patch_size = (1, 2, 2)
+i2v_14B.dim = 5120
+i2v_14B.ffn_dim = 13824
+i2v_14B.freq_dim = 256
+i2v_14B.in_dim = 36
+i2v_14B.num_heads = 40
+i2v_14B.num_layers = 40
+i2v_14B.window_size = (-1, -1)
+i2v_14B.qk_norm = True
+i2v_14B.cross_attn_norm = True
+i2v_14B.eps = 1e-6
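Note: `vae_stride` and `patch_size` jointly set the transformer sequence length. A back-of-the-envelope sketch, not repository code; the input shape is hypothetical and the `(frames - 1) // 4 + 1` formula assumes the causal-VAE convention where the first frame is encoded on its own:

    vae_stride, patch_size = (4, 8, 8), (1, 2, 2)   # values from the config above

    frames, height, width = 81, 480, 832            # illustrative input video
    lat_f = (frames - 1) // vae_stride[0] + 1       # 21 latent frames
    lat_h = height // vae_stride[1]                 # 60
    lat_w = width // vae_stride[2]                  # 104
    tokens = (lat_f // patch_size[0]) * (lat_h // patch_size[1]) * (lat_w // patch_size[2])
    print(tokens)                                   # 21 * 30 * 52 = 32760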
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/wan_t2v_14B.py
ADDED
@@ -0,0 +1,33 @@
+# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
+from easydict import EasyDict
+
+from musubi_tuner.wan.configs.shared_config import wan_shared_cfg
+
+# ------------------------ Wan T2V 14B ------------------------#
+
+t2v_14B = EasyDict(__name__="Config: Wan T2V 14B")
+t2v_14B.update(wan_shared_cfg)
+t2v_14B.i2v = False
+t2v_14B.is_fun_control = False
+t2v_14B.flf2v = False
+
+# t5
+t2v_14B.t5_checkpoint = "models_t5_umt5-xxl-enc-bf16.pth"
+t2v_14B.t5_tokenizer = "google/umt5-xxl"
+
+# vae
+t2v_14B.vae_checkpoint = "Wan2.1_VAE.pth"
+t2v_14B.vae_stride = (4, 8, 8)
+
+# transformer
+t2v_14B.patch_size = (1, 2, 2)
+t2v_14B.dim = 5120
+t2v_14B.ffn_dim = 13824
+t2v_14B.freq_dim = 256
+t2v_14B.in_dim = 16
+t2v_14B.num_heads = 40
+t2v_14B.num_layers = 40
+t2v_14B.window_size = (-1, -1)
+t2v_14B.qk_norm = True
+t2v_14B.cross_attn_norm = True
+t2v_14B.eps = 1e-6
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/configs/wan_t2v_1_3B.py
ADDED
@@ -0,0 +1,33 @@
+# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
+from easydict import EasyDict
+
+from musubi_tuner.wan.configs.shared_config import wan_shared_cfg
+
+# ------------------------ Wan T2V 1.3B ------------------------#
+
+t2v_1_3B = EasyDict(__name__="Config: Wan T2V 1.3B")
+t2v_1_3B.update(wan_shared_cfg)
+t2v_1_3B.i2v = False
+t2v_1_3B.is_fun_control = False
+t2v_1_3B.flf2v = False
+
+# t5
+t2v_1_3B.t5_checkpoint = "models_t5_umt5-xxl-enc-bf16.pth"
+t2v_1_3B.t5_tokenizer = "google/umt5-xxl"
+
+# vae
+t2v_1_3B.vae_checkpoint = "Wan2.1_VAE.pth"
+t2v_1_3B.vae_stride = (4, 8, 8)
+
+# transformer
+t2v_1_3B.patch_size = (1, 2, 2)
+t2v_1_3B.dim = 1536
+t2v_1_3B.ffn_dim = 8960
+t2v_1_3B.freq_dim = 256
+t2v_1_3B.in_dim = 16
+t2v_1_3B.num_heads = 12
+t2v_1_3B.num_layers = 30
+t2v_1_3B.window_size = (-1, -1)
+t2v_1_3B.qk_norm = True
+t2v_1_3B.cross_attn_norm = True
+t2v_1_3B.eps = 1e-6
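Note: despite the very different widths, both T2V variants keep the per-head width at 128 (5120 / 40 and 1536 / 12), so rope tables built from `dim // num_heads` are shaped identically across variants. A one-line check using only the numbers from the diffs above:

    for name, (dim, heads) in {"t2v-14B": (5120, 40), "t2v-1.3B": (1536, 12)}.items():
        assert dim % heads == 0 and dim // heads == 128  # identical head_dim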
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__init__.py
ADDED
@@ -0,0 +1,16 @@
+from musubi_tuner.wan.modules.attention import flash_attention
+from musubi_tuner.wan.modules.model import WanModel
+from musubi_tuner.wan.modules.t5 import T5Decoder, T5Encoder, T5EncoderModel, T5Model
+from musubi_tuner.wan.modules.tokenizers import HuggingfaceTokenizer
+from musubi_tuner.wan.modules.vae import WanVAE
+
+__all__ = [
+    'WanVAE',
+    'WanModel',
+    'T5Model',
+    'T5Encoder',
+    'T5Decoder',
+    'T5EncoderModel',
+    'HuggingfaceTokenizer',
+    'flash_attention',
+]
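Note: the package `__init__` re-exports the public surface, so downstream code can import from the package directly (assuming the submodules it pulls in, such as `tokenizers` and `vae`, are importable in the environment):

    from musubi_tuner.wan.modules import WanModel, WanVAE, T5EncoderModel, flash_attention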
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (678 Bytes)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/attention.cpython-312.pyc
ADDED
Binary file (10.8 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/clip.cpython-312.pyc
ADDED
Binary file (24 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/model.cpython-312.pyc
ADDED
Binary file (46.1 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/t5.cpython-312.pyc
ADDED
Binary file (27.3 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/tokenizers.cpython-312.pyc
ADDED
Binary file (4.12 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/vae.cpython-312.pyc
ADDED
Binary file (37.4 kB)

exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/__pycache__/xlm_roberta.cpython-312.pyc
ADDED
Binary file (8.05 kB)
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/attention.py
ADDED
@@ -0,0 +1,312 @@
+# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
+from typing import Optional
+import torch
+
+try:
+    import flash_attn_interface
+
+    FLASH_ATTN_3_AVAILABLE = True
+except ModuleNotFoundError:
+    FLASH_ATTN_3_AVAILABLE = False
+
+try:
+    import flash_attn
+
+    FLASH_ATTN_2_AVAILABLE = True
+except ModuleNotFoundError:
+    FLASH_ATTN_2_AVAILABLE = False
+
+try:
+    import sageattention
+
+    SAGE_ATTN_AVAILABLE = True
+except ModuleNotFoundError:
+    SAGE_ATTN_AVAILABLE = False
+
+try:
+    import xformers.ops as xops
+
+    XFORMERS_AVAILABLE = True
+except ImportError:
+    XFORMERS_AVAILABLE = False
+
+
+import warnings
+
+__all__ = [
+    "flash_attention",
+    "attention",
+]
+
+
+def flash_attention(
+    qkv,
+    q_lens=None,
+    k_lens=None,
+    dropout_p=0.0,
+    softmax_scale=None,
+    q_scale=None,
+    causal=False,
+    window_size=(-1, -1),
+    deterministic=False,
+    dtype=torch.bfloat16,
+    version=None,
+    attn_mode: Optional[str] = "torch",
+    split_attn: bool = False,
+):
+    """
+    q: [B, Lq, Nq, C1].
+    k: [B, Lk, Nk, C1].
+    v: [B, Lk, Nk, C2]. Nq must be divisible by Nk.
+    q_lens: [B].
+    k_lens: [B].
+    dropout_p: float. Dropout probability.
+    softmax_scale: float. The scaling of QK^T before applying softmax.
+    causal: bool. Whether to apply causal attention mask.
+    window_size: (left, right). If not (-1, -1), apply sliding window local attention.
+    deterministic: bool. If True, slightly slower and uses more memory.
+    dtype: torch.dtype. Apply when dtype of q/k/v is not float16/bfloat16.
+    """
+    q, k, v = qkv
+    qkv.clear()
+
+    half_dtypes = (torch.float16, torch.bfloat16)
+    assert dtype in half_dtypes
+    # assert q.device.type == "cuda" and q.size(-1) <= 256
+
+    # params
+    b, lq, lk, out_dtype = q.size(0), q.size(1), k.size(1), q.dtype
+
+    def half(x):
+        return x if x.dtype in half_dtypes else x.to(dtype)
+
+    # We cannot test Flash attention 3 in musubi tuner, so keep the original code.
+    # Customized code (except for flash attention 3) does not support q_lens and k_lens.
+    if attn_mode != "flash3" and attn_mode != "sageattn":
+        assert q_lens is None, "q_lens is not supported except for flash attention 3."
+        assert k_lens is None or (
+            min(k_lens) == max(k_lens) and k_lens[0] == lk
+        ), f"k_lens is not supported except for flash attention 3 or sage attention. k_lens={k_lens}, lk={lk}."
+
+    # SDPA
+    if attn_mode == "torch" or attn_mode == "sdpa":
+        assert not deterministic, "deterministic is not supported in scaled_dot_product_attention."
+        if q_scale is not None:
+            q = q * q_scale
+        q = half(q.transpose(1, 2))
+        k = half(k.transpose(1, 2))
+        v = half(v.transpose(1, 2))
+
+        if not split_attn:
+            q = torch.nn.functional.scaled_dot_product_attention(
+                q, k, v, is_causal=causal, dropout_p=dropout_p, scale=softmax_scale
+            )
+            x = q
+        else:
+            x = torch.empty_like(q)
+            for i in range(q.size(0)):
+                x[i : i + 1] = torch.nn.functional.scaled_dot_product_attention(
+                    q[i : i + 1], k[i : i + 1], v[i : i + 1], is_causal=causal, dropout_p=dropout_p, scale=softmax_scale
+                )
+
+        del q, k, v
+        x = x.transpose(1, 2).contiguous()
+        return x.type(out_dtype)
+
+    # flash attention 2
+    if attn_mode == "flash" or attn_mode == "flash2":
+        if q_scale is not None:
+            q = q * q_scale
+        q = half(q)
+        k = half(k)
+        v = half(v)
+
+        if not split_attn:
+            q = flash_attn.flash_attn_func(q, k, v, dropout_p, softmax_scale, causal, window_size, deterministic=deterministic)
+            x = q
+        else:
+            x = torch.empty_like(q)
+            for i in range(q.size(0)):
+                x[i : i + 1] = flash_attn.flash_attn_func(
+                    q[i : i + 1],
+                    k[i : i + 1],
+                    v[i : i + 1],
+                    dropout_p,
+                    softmax_scale,
+                    causal,
+                    window_size,
+                    deterministic=deterministic,
+                )
+        del q, k, v
+        return x.type(out_dtype)
+
+    # xformers
+    if attn_mode == "xformers":
+        assert not deterministic, "deterministic is not supported in xformers."
+        assert not causal, "causal is not supported in xformers."
+        if q_scale is not None:
+            q = q * q_scale
+        q = half(q)
+        k = half(k)
+        v = half(v)
+
+        if not split_attn:
+            q = xops.memory_efficient_attention(q, k, v, p=dropout_p, scale=softmax_scale)
+            x = q
+        else:
+            x = torch.empty_like(q)
+            for i in range(q.size(0)):
+                x[i : i + 1] = xops.memory_efficient_attention(
+                    q[i : i + 1], k[i : i + 1], v[i : i + 1], p=dropout_p, scale=softmax_scale
+                )
+
+        del q, k, v
+        return x.type(out_dtype)
+
+    # sage attention with fixed length seems to cause NaN in I2V inference.
+    # # sage attention
+    # if attn_mode == "sageattn":
+    #     print("Using sage attention")
+    #     assert not deterministic, "deterministic is not supported in sage attention."
+    #     if q_scale is not None:
+    #         q = q * q_scale
+    #     q, k, v = half(q), half(k), half(v)
+    #     x = sageattention.sageattn(q, k, v, "NHD", is_causal=causal, sm_scale=softmax_scale)
+    #     del q, k, v
+    #     return x.type(out_dtype)
+
+    assert not split_attn, "split_attn is not supported in flash attention 3 or sage attention."
+
+    # preprocess query: in Wan 2.1, q_lens is always None.
+    if q_lens is None:
+        q = half(q.flatten(0, 1))
+        q_lens = torch.tensor([lq] * b, dtype=torch.int32).to(device=q.device, non_blocking=True)
+    else:
+        q = half(torch.cat([u[:v] for u, v in zip(q, q_lens)]))
+
+    # preprocess key, value
+    if k_lens is None:
+        k = half(k.flatten(0, 1))
+        v = half(v.flatten(0, 1))
+        k_lens = torch.tensor([lk] * b, dtype=torch.int32).to(device=k.device, non_blocking=True)
+    else:
+        # Note: in Wan 2.1, all k_lens are the same if we have the same image size in the batch.
+        if min(k_lens) == max(k_lens) and k.shape[1] == k_lens[0]:
+            # B, L, N, C -> BL, N, C
+            k = half(k.flatten(0, 1))
+            v = half(v.flatten(0, 1))
+        else:
+            k = half(torch.cat([u[:v] for u, v in zip(k, k_lens)]))
+            v = half(torch.cat([u[:v] for u, v in zip(v, k_lens)]))
+
+    q = q.to(v.dtype)
+    k = k.to(v.dtype)
+
+    if q_scale is not None:
+        q = q * q_scale
+
+    # if version is not None and version == 3 and not FLASH_ATTN_3_AVAILABLE:
+    #     warnings.warn("Flash attention 3 is not available, use flash attention 2 instead.")
+
+    # apply attention
+    # if (version is None or version == 3) and FLASH_ATTN_3_AVAILABLE:
+    if attn_mode == "flash3":
+        # Not tested yet in musubi tuner.
+        # Note: dropout_p, window_size are not supported in FA3 now.
+        x = flash_attn_interface.flash_attn_varlen_func(
+            q=q,
+            k=k,
+            v=v,
+            cu_seqlens_q=torch.cat([q_lens.new_zeros([1]), q_lens]).cumsum(0, dtype=torch.int32).to(q.device, non_blocking=True),
+            cu_seqlens_k=torch.cat([k_lens.new_zeros([1]), k_lens]).cumsum(0, dtype=torch.int32).to(q.device, non_blocking=True),
+            seqused_q=None,
+            seqused_k=None,
+            max_seqlen_q=lq,
+            max_seqlen_k=lk,
+            softmax_scale=softmax_scale,
+            causal=causal,
+            deterministic=deterministic,
+        )[0].unflatten(0, (b, lq))
+    # elif (version is None or version == 2) and FLASH_ATTN_2_AVAILABLE:
+    #     # assert FLASH_ATTN_2_AVAILABLE
+    #     x = flash_attn.flash_attn_varlen_func(
+    #         q=q,
+    #         k=k,
+    #         v=v,
+    #         cu_seqlens_q=torch.cat([q_lens.new_zeros([1]), q_lens]).cumsum(0, dtype=torch.int32).to(q.device, non_blocking=True),
+    #         cu_seqlens_k=torch.cat([k_lens.new_zeros([1]), k_lens]).cumsum(0, dtype=torch.int32).to(q.device, non_blocking=True),
+    #         max_seqlen_q=lq,
+    #         max_seqlen_k=lk,
+    #         dropout_p=dropout_p,
+    #         softmax_scale=softmax_scale,
+    #         causal=causal,
+    #         window_size=window_size,
+    #         deterministic=deterministic,
+    #     ).unflatten(0, (b, lq))
+    # elif version is None and SAGE_ATTN_AVAILABLE:
+    elif attn_mode == "sageattn":
+        # print("Using sage attention")
+        assert not causal, "SAGE attention does not support causal attention."
+        x = sageattention.sageattn_varlen(
+            q=q,
+            k=k,
+            v=v,
+            cu_seqlens_q=torch.cat([q_lens.new_zeros([1]), q_lens]).cumsum(0, dtype=torch.int32).to(q.device, non_blocking=True),
+            cu_seqlens_k=torch.cat([k_lens.new_zeros([1]), k_lens]).cumsum(0, dtype=torch.int32).to(q.device, non_blocking=True),
+            max_seqlen_q=lq,
+            max_seqlen_k=lk,
+            sm_scale=softmax_scale,
+        ).unflatten(0, (b, lq))
+    else:
+        raise ValueError(f"Unknown attention mode: {attn_mode}")
+
+    # output
+    return x.type(out_dtype)
+
+
+def attention(
+    q,
+    k,
+    v,
+    q_lens=None,
+    k_lens=None,
+    dropout_p=0.0,
+    softmax_scale=None,
+    q_scale=None,
+    causal=False,
+    window_size=(-1, -1),
+    deterministic=False,
+    dtype=torch.bfloat16,
+    fa_version=None,
+):
+    if FLASH_ATTN_2_AVAILABLE or FLASH_ATTN_3_AVAILABLE:
+        # flash_attention expects q/k/v packed into a list as its first argument
+        return flash_attention(
+            [q, k, v],
+            q_lens=q_lens,
+            k_lens=k_lens,
+            dropout_p=dropout_p,
+            softmax_scale=softmax_scale,
+            q_scale=q_scale,
+            causal=causal,
+            window_size=window_size,
+            deterministic=deterministic,
+            dtype=dtype,
+            version=fa_version,
+        )
+    else:
+        if q_lens is not None or k_lens is not None:
+            warnings.warn(
+                "Padding mask is disabled when using scaled_dot_product_attention. It can have a significant impact on performance."
+            )
+        attn_mask = None
+
+        q = q.transpose(1, 2).to(dtype)
+        k = k.transpose(1, 2).to(dtype)
+        v = v.transpose(1, 2).to(dtype)
+
+        out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask, is_causal=causal, dropout_p=dropout_p)
+
+        out = out.transpose(1, 2).contiguous()
+        return out
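Note: two easy-to-miss details of `flash_attention`: its first argument is a *list* of q/k/v tensors that the function empties in place (to release references early), and float32 inputs are cast to the half dtype before the kernel and cast back on return. A minimal sketch of the default SDPA path, runnable on CPU without flash-attn installed (shapes illustrative):

    import torch
    from musubi_tuner.wan.modules.attention import flash_attention

    b, l, n, d = 2, 128, 8, 64
    q, k, v = (torch.randn(b, l, n, d) for _ in range(3))
    qkv = [q, k, v]                                 # consumed and cleared by the callee
    out = flash_attention(qkv, attn_mode="torch")   # SDPA backend
    assert out.shape == (b, l, n, d) and out.dtype == torch.float32 and len(qkv) == 0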
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/clip.py
ADDED
@@ -0,0 +1,546 @@
+# Modified from ``https://github.com/openai/CLIP'' and ``https://github.com/mlfoundations/open_clip''
+# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
+import logging
+import math
+import os
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision.transforms as T
+from accelerate import init_empty_weights
+
+from musubi_tuner.wan.modules.attention import flash_attention
+from musubi_tuner.wan.modules.tokenizers import HuggingfaceTokenizer
+from musubi_tuner.wan.modules.xlm_roberta import XLMRoberta
+
+from musubi_tuner.utils.safetensors_utils import load_safetensors
+
+__all__ = [
+    "XLMRobertaCLIP",
+    "clip_xlm_roberta_vit_h_14",
+    "CLIPModel",
+]
+
+
+def pos_interpolate(pos, seq_len):
+    if pos.size(1) == seq_len:
+        return pos
+    else:
+        src_grid = int(math.sqrt(pos.size(1)))
+        tar_grid = int(math.sqrt(seq_len))
+        n = pos.size(1) - src_grid * src_grid
+        return torch.cat(
+            [
+                pos[:, :n],
+                F.interpolate(
+                    pos[:, n:].float().reshape(1, src_grid, src_grid, -1).permute(0, 3, 1, 2),
+                    size=(tar_grid, tar_grid),
+                    mode="bicubic",
+                    align_corners=False,
+                )
+                .flatten(2)
+                .transpose(1, 2),
+            ],
+            dim=1,
+        )
+
+
+class QuickGELU(nn.Module):
+
+    def forward(self, x):
+        return x * torch.sigmoid(1.702 * x)
+
+
+class LayerNorm(nn.LayerNorm):
+
+    def forward(self, x):
+        return super().forward(x.float()).type_as(x)
+
+
+class SelfAttention(nn.Module):
+
+    def __init__(self, dim, num_heads, causal=False, attn_dropout=0.0, proj_dropout=0.0):
+        assert dim % num_heads == 0
+        super().__init__()
+        self.dim = dim
+        self.num_heads = num_heads
+        self.head_dim = dim // num_heads
+        self.causal = causal
+        self.attn_dropout = attn_dropout
+        self.proj_dropout = proj_dropout
+
+        # layers
+        self.to_qkv = nn.Linear(dim, dim * 3)
+        self.proj = nn.Linear(dim, dim)
+
+    def forward(self, x):
+        """
+        x: [B, L, C].
+        """
+        b, s, c, n, d = *x.size(), self.num_heads, self.head_dim
+
+        # compute query, key, value
+        q, k, v = self.to_qkv(x).view(b, s, 3, n, d).unbind(2)
+
+        # compute attention
+        p = self.attn_dropout if self.training else 0.0
+        # x = flash_attention(q, k, v, dropout_p=p, causal=self.causal, version=2)
+        # print(q.shape, k.shape, v.shape)
+        q = q.transpose(1, 2)
+        k = k.transpose(1, 2)
+        v = v.transpose(1, 2)
+        x = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=p, is_causal=self.causal)
+        # print(x.shape)
+        x = x.transpose(1, 2).contiguous()
+        x = x.reshape(b, s, c)
+
+        # output
+        x = self.proj(x)
+        x = F.dropout(x, self.proj_dropout, self.training)
+        return x
+
+
+class SwiGLU(nn.Module):
+
+    def __init__(self, dim, mid_dim):
+        super().__init__()
+        self.dim = dim
+        self.mid_dim = mid_dim
+
+        # layers
+        self.fc1 = nn.Linear(dim, mid_dim)
+        self.fc2 = nn.Linear(dim, mid_dim)
+        self.fc3 = nn.Linear(mid_dim, dim)
+
+    def forward(self, x):
+        x = F.silu(self.fc1(x)) * self.fc2(x)
+        x = self.fc3(x)
+        return x
+
+
+class AttentionBlock(nn.Module):
+
+    def __init__(
+        self,
+        dim,
+        mlp_ratio,
+        num_heads,
+        post_norm=False,
+        causal=False,
+        activation="quick_gelu",
+        attn_dropout=0.0,
+        proj_dropout=0.0,
+        norm_eps=1e-5,
+    ):
+        assert activation in ["quick_gelu", "gelu", "swi_glu"]
+        super().__init__()
+        self.dim = dim
+        self.mlp_ratio = mlp_ratio
+        self.num_heads = num_heads
+        self.post_norm = post_norm
+        self.causal = causal
+        self.norm_eps = norm_eps
+
+        # layers
+        self.norm1 = LayerNorm(dim, eps=norm_eps)
+        self.attn = SelfAttention(dim, num_heads, causal, attn_dropout, proj_dropout)
+        self.norm2 = LayerNorm(dim, eps=norm_eps)
+        if activation == "swi_glu":
+            self.mlp = SwiGLU(dim, int(dim * mlp_ratio))
+        else:
+            self.mlp = nn.Sequential(
+                nn.Linear(dim, int(dim * mlp_ratio)),
+                QuickGELU() if activation == "quick_gelu" else nn.GELU(),
+                nn.Linear(int(dim * mlp_ratio), dim),
+                nn.Dropout(proj_dropout),
+            )
+
+    def forward(self, x):
+        if self.post_norm:
+            x = x + self.norm1(self.attn(x))
+            x = x + self.norm2(self.mlp(x))
+        else:
+            x = x + self.attn(self.norm1(x))
+            x = x + self.mlp(self.norm2(x))
+        return x
+
+
+class AttentionPool(nn.Module):
+
+    def __init__(self, dim, mlp_ratio, num_heads, activation="gelu", proj_dropout=0.0, norm_eps=1e-5):
+        assert dim % num_heads == 0
+        super().__init__()
+        self.dim = dim
+        self.mlp_ratio = mlp_ratio
+        self.num_heads = num_heads
+        self.head_dim = dim // num_heads
+        self.proj_dropout = proj_dropout
+        self.norm_eps = norm_eps
+
+        # layers
+        gain = 1.0 / math.sqrt(dim)
+        self.cls_embedding = nn.Parameter(gain * torch.randn(1, 1, dim))
+        self.to_q = nn.Linear(dim, dim)
+        self.to_kv = nn.Linear(dim, dim * 2)
+        self.proj = nn.Linear(dim, dim)
+        self.norm = LayerNorm(dim, eps=norm_eps)
+        self.mlp = nn.Sequential(
+            nn.Linear(dim, int(dim * mlp_ratio)),
+            QuickGELU() if activation == "quick_gelu" else nn.GELU(),
+            nn.Linear(int(dim * mlp_ratio), dim),
+            nn.Dropout(proj_dropout),
+        )
+
+    def forward(self, x):
+        """
+        x: [B, L, C].
+        """
+        b, s, c, n, d = *x.size(), self.num_heads, self.head_dim
+
+        # compute query, key, value
+        q = self.to_q(self.cls_embedding).view(1, 1, n, d).expand(b, -1, -1, -1)
+        k, v = self.to_kv(x).view(b, s, 2, n, d).unbind(2)
+
+        # compute attention
+        # this line is never used because pool_type="token" in Wan2.1
+        x = flash_attention(q, k, v, version=2)
+        x = x.reshape(b, 1, c)
+
+        # output
+        x = self.proj(x)
+        x = F.dropout(x, self.proj_dropout, self.training)
+
+        # mlp
+        x = x + self.mlp(self.norm(x))
+        return x[:, 0]
+
+
+class VisionTransformer(nn.Module):
+
+    def __init__(
+        self,
+        image_size=224,
+        patch_size=16,
+        dim=768,
+        mlp_ratio=4,
+        out_dim=512,
+        num_heads=12,
+        num_layers=12,
+        pool_type="token",
+        pre_norm=True,
+        post_norm=False,
+        activation="quick_gelu",
+        attn_dropout=0.0,
+        proj_dropout=0.0,
+        embedding_dropout=0.0,
+        norm_eps=1e-5,
+    ):
+        if image_size % patch_size != 0:
+            print("[WARNING] image_size is not divisible by patch_size", flush=True)
+        assert pool_type in ("token", "token_fc", "attn_pool")
+        out_dim = out_dim or dim
+        super().__init__()
+        self.image_size = image_size
+        self.patch_size = patch_size
+        self.num_patches = (image_size // patch_size) ** 2
+        self.dim = dim
+        self.mlp_ratio = mlp_ratio
+        self.out_dim = out_dim
+        self.num_heads = num_heads
+        self.num_layers = num_layers
+        self.pool_type = pool_type
+        self.post_norm = post_norm
+        self.norm_eps = norm_eps
+
+        # embeddings
+        gain = 1.0 / math.sqrt(dim)
+        self.patch_embedding = nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size, bias=not pre_norm)
+        if pool_type in ("token", "token_fc"):
+            self.cls_embedding = nn.Parameter(gain * torch.randn(1, 1, dim))
+        self.pos_embedding = nn.Parameter(
+            gain * torch.randn(1, self.num_patches + (1 if pool_type in ("token", "token_fc") else 0), dim)
+        )
+        self.dropout = nn.Dropout(embedding_dropout)
+
+        # transformer
+        self.pre_norm = LayerNorm(dim, eps=norm_eps) if pre_norm else None
+        self.transformer = nn.Sequential(
+            *[
+                AttentionBlock(dim, mlp_ratio, num_heads, post_norm, False, activation, attn_dropout, proj_dropout, norm_eps)
+                for _ in range(num_layers)
+            ]
+        )
+        self.post_norm = LayerNorm(dim, eps=norm_eps)
+
+        # head
+        if pool_type == "token":
+            self.head = nn.Parameter(gain * torch.randn(dim, out_dim))
+        elif pool_type == "token_fc":
+            self.head = nn.Linear(dim, out_dim)
+        elif pool_type == "attn_pool":
+            self.head = AttentionPool(dim, mlp_ratio, num_heads, activation, proj_dropout, norm_eps)
+
+    def forward(self, x, interpolation=False, use_31_block=False):
+        b = x.size(0)
+
+        # embeddings
+        x = self.patch_embedding(x).flatten(2).permute(0, 2, 1)
+        if self.pool_type in ("token", "token_fc"):
+            x = torch.cat([self.cls_embedding.expand(b, -1, -1), x], dim=1)
+        if interpolation:
+            e = pos_interpolate(self.pos_embedding, x.size(1))
+        else:
+            e = self.pos_embedding
+        x = self.dropout(x + e)
+        if self.pre_norm is not None:
+            x = self.pre_norm(x)
+
+        # transformer
+        if use_31_block:
+            x = self.transformer[:-1](x)
+            return x
+        else:
+            x = self.transformer(x)
+            return x
+
+
+class XLMRobertaWithHead(XLMRoberta):
+
+    def __init__(self, **kwargs):
+        self.out_dim = kwargs.pop("out_dim")
+        super().__init__(**kwargs)
+
+        # head
+        mid_dim = (self.dim + self.out_dim) // 2
+        self.head = nn.Sequential(nn.Linear(self.dim, mid_dim, bias=False), nn.GELU(), nn.Linear(mid_dim, self.out_dim, bias=False))
+
+    def forward(self, ids):
+        # xlm-roberta
+        x = super().forward(ids)
+
+        # average pooling
+        mask = ids.ne(self.pad_id).unsqueeze(-1).to(x)
+        x = (x * mask).sum(dim=1) / mask.sum(dim=1)
+
+        # head
+        x = self.head(x)
+        return x
+
+
+class XLMRobertaCLIP(nn.Module):
+
+    def __init__(
+        self,
+        embed_dim=1024,
+        image_size=224,
+        patch_size=14,
+        vision_dim=1280,
+        vision_mlp_ratio=4,
+        vision_heads=16,
+        vision_layers=32,
+        vision_pool="token",
+        vision_pre_norm=True,
+        vision_post_norm=False,
+        activation="gelu",
+        vocab_size=250002,
+        max_text_len=514,
+        type_size=1,
+        pad_id=1,
+        text_dim=1024,
+        text_heads=16,
+        text_layers=24,
+        text_post_norm=True,
+        text_dropout=0.1,
+        attn_dropout=0.0,
+        proj_dropout=0.0,
+        embedding_dropout=0.0,
+        norm_eps=1e-5,
+    ):
+        super().__init__()
+        self.embed_dim = embed_dim
+        self.image_size = image_size
+        self.patch_size = patch_size
+        self.vision_dim = vision_dim
+        self.vision_mlp_ratio = vision_mlp_ratio
+        self.vision_heads = vision_heads
+        self.vision_layers = vision_layers
+        self.vision_pre_norm = vision_pre_norm
+        self.vision_post_norm = vision_post_norm
+        self.activation = activation
+        self.vocab_size = vocab_size
+        self.max_text_len = max_text_len
+        self.type_size = type_size
+        self.pad_id = pad_id
+        self.text_dim = text_dim
+        self.text_heads = text_heads
+        self.text_layers = text_layers
+        self.text_post_norm = text_post_norm
+        self.norm_eps = norm_eps
+
+        # models
+        self.visual = VisionTransformer(
+            image_size=image_size,
+            patch_size=patch_size,
+            dim=vision_dim,
+            mlp_ratio=vision_mlp_ratio,
+            out_dim=embed_dim,
+            num_heads=vision_heads,
+            num_layers=vision_layers,
+            pool_type=vision_pool,
+            pre_norm=vision_pre_norm,
+            post_norm=vision_post_norm,
+            activation=activation,
+            attn_dropout=attn_dropout,
+            proj_dropout=proj_dropout,
+            embedding_dropout=embedding_dropout,
+            norm_eps=norm_eps,
+        )
+        self.textual = XLMRobertaWithHead(
+            vocab_size=vocab_size,
+            max_seq_len=max_text_len,
+            type_size=type_size,
+            pad_id=pad_id,
+            dim=text_dim,
+            out_dim=embed_dim,
+            num_heads=text_heads,
+            num_layers=text_layers,
+            post_norm=text_post_norm,
+            dropout=text_dropout,
+        )
+        self.log_scale = nn.Parameter(math.log(1 / 0.07) * torch.ones([]))
+
+    def forward(self, imgs, txt_ids):
+        """
+        imgs: [B, 3, H, W] of torch.float32.
+        - mean: [0.48145466, 0.4578275, 0.40821073]
+        - std: [0.26862954, 0.26130258, 0.27577711]
+        txt_ids: [B, L] of torch.long.
+        Encoded by data.CLIPTokenizer.
+        """
+        xi = self.visual(imgs)
+        xt = self.textual(txt_ids)
+        return xi, xt
+
+    def param_groups(self):
+        groups = [
+            {"params": [p for n, p in self.named_parameters() if "norm" in n or n.endswith("bias")], "weight_decay": 0.0},
+            {"params": [p for n, p in self.named_parameters() if not ("norm" in n or n.endswith("bias"))]},
+        ]
+        return groups
+
+
+def _clip(
+    pretrained=False,
+    pretrained_name=None,
+    model_cls=XLMRobertaCLIP,
+    return_transforms=False,
+    return_tokenizer=False,
+    tokenizer_padding="eos",
+    dtype=torch.float32,
+    device="cpu",
+    **kwargs,
+):
+    # # init a model on device
+    # with torch.device(device):
+    model = model_cls(**kwargs)
+
+    # # set device
+    # model = model.to(dtype=dtype, device=device)
+    output = (model,)
+
+    # init transforms
+    if return_transforms:
+        # mean and std
+        if "siglip" in pretrained_name.lower():
+            mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
+        else:
+            mean = [0.48145466, 0.4578275, 0.40821073]
+            std = [0.26862954, 0.26130258, 0.27577711]
+
+        # transforms
+        transforms = T.Compose(
+            [
+                T.Resize((model.image_size, model.image_size), interpolation=T.InterpolationMode.BICUBIC),
+                T.ToTensor(),
+                T.Normalize(mean=mean, std=std),
+            ]
+        )
+        output += (transforms,)
+    return output[0] if len(output) == 1 else output
+
+
+def clip_xlm_roberta_vit_h_14(pretrained=False, pretrained_name="open-clip-xlm-roberta-large-vit-huge-14", **kwargs):
+    cfg = dict(
+        embed_dim=1024,
+        image_size=224,
+        patch_size=14,
+        vision_dim=1280,
+        vision_mlp_ratio=4,
+        vision_heads=16,
+        vision_layers=32,
+        vision_pool="token",
+        activation="gelu",
+        vocab_size=250002,
+        max_text_len=514,
+        type_size=1,
+        pad_id=1,
+        text_dim=1024,
+        text_heads=16,
+        text_layers=24,
+        text_post_norm=True,
+        text_dropout=0.1,
+        attn_dropout=0.0,
+        proj_dropout=0.0,
+        embedding_dropout=0.0,
+    )
+    cfg.update(**kwargs)
+    return _clip(pretrained, pretrained_name, XLMRobertaCLIP, **cfg)
+
+
+class CLIPModel:
+
+    def __init__(self, dtype, device, checkpoint_path=None, tokenizer_path=None, weight_path=None):
+        self.dtype = dtype
+        self.device = device
+        self.checkpoint_path = checkpoint_path
+        self.tokenizer_path = tokenizer_path
+        self.weight_path = weight_path
+
+        # init model
+        with init_empty_weights():
+            self.model, self.transforms = clip_xlm_roberta_vit_h_14(
+                pretrained=False, return_transforms=True, return_tokenizer=False, dtype=dtype, device=device
+            )
+        self.model = self.model.eval().requires_grad_(False)
+
+        logging.info(f"loading {weight_path}")
+        if os.path.splitext(weight_path)[-1] == ".safetensors":
+            sd = load_safetensors(weight_path, device=device, disable_mmap=True, dtype=dtype)
+        else:
+            sd = torch.load(weight_path, map_location=device, weights_only=True)
+        info = self.model.load_state_dict(sd, strict=True, assign=True)
+        self.model = self.model.to(dtype=dtype, device=device)
+        logging.info(f"weights loaded from {weight_path}: {info}")
+
+        # init tokenizer
+        if tokenizer_path is None:
+            tokenizer_path = "Wan-AI/Wan2.1-I2V-14B-720P"
+            subfolder = "xlm-roberta-large"
+        else:
+            subfolder = None
+
+        self.tokenizer = HuggingfaceTokenizer(
+            name=tokenizer_path, seq_len=self.model.max_text_len - 2, clean="whitespace", subfolder=subfolder
+        )
+
+    def visual(self, videos):
+        # preprocess
+        size = (self.model.image_size,) * 2
+        videos = torch.cat([F.interpolate(u.transpose(0, 1), size=size, mode="bicubic", align_corners=False) for u in videos])
+        videos = self.transforms.transforms[-1](videos.mul_(0.5).add_(0.5))
+
+        # forward
+        # with torch.cuda.amp.autocast(dtype=self.dtype):
+        out = self.model.visual(videos, use_31_block=True)
+        return out
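Note: `CLIPModel.visual` assumes decoder-range video in [-1, 1]; it resizes each frame to the model's 224x224 input, shifts to [0, 1], and applies the CLIP normalization stored as the last transform. A standalone sketch of that preprocessing with illustrative shapes, using only torch/torchvision:

    import torch
    import torch.nn.functional as F
    import torchvision.transforms as T

    normalize = T.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
                            std=[0.26862954, 0.26130258, 0.27577711])
    video = torch.rand(3, 5, 360, 640) * 2 - 1      # [C, F, H, W] in [-1, 1]
    frames = F.interpolate(video.transpose(0, 1), size=(224, 224),
                           mode="bicubic", align_corners=False)  # [F, C, 224, 224]
    frames = normalize(frames.mul_(0.5).add_(0.5))  # [-1, 1] -> [0, 1] -> CLIP stats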
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/model.py
ADDED
|
@@ -0,0 +1,958 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
|
| 2 |
+
import math
|
| 3 |
+
from typing import Optional, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
from torch.utils.checkpoint import checkpoint
|
| 8 |
+
from accelerate import init_empty_weights
|
| 9 |
+
|
| 10 |
+
import logging
|
| 11 |
+
|
| 12 |
+
from musubi_tuner.utils.safetensors_utils import MemoryEfficientSafeOpen, load_safetensors
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
logging.basicConfig(level=logging.INFO)
|
| 16 |
+
|
| 17 |
+
from musubi_tuner.utils.device_utils import clean_memory_on_device
|
| 18 |
+
|
| 19 |
+
from musubi_tuner.wan.modules.attention import flash_attention
|
| 20 |
+
from musubi_tuner.utils.device_utils import clean_memory_on_device
|
| 21 |
+
from musubi_tuner.modules.custom_offloading_utils import ModelOffloader
|
| 22 |
+
from musubi_tuner.modules.fp8_optimization_utils import apply_fp8_monkey_patch, optimize_state_dict_with_fp8
|
| 23 |
+
|
| 24 |
+
__all__ = ["WanModel"]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def sinusoidal_embedding_1d(dim, position):
|
| 28 |
+
# preprocess
|
| 29 |
+
assert dim % 2 == 0
|
| 30 |
+
half = dim // 2
|
| 31 |
+
position = position.type(torch.float64)
|
| 32 |
+
|
| 33 |
+
# calculation
|
| 34 |
+
sinusoid = torch.outer(position, torch.pow(10000, -torch.arange(half).to(position).div(half)))
|
| 35 |
+
x = torch.cat([torch.cos(sinusoid), torch.sin(sinusoid)], dim=1)
|
| 36 |
+
return x
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# @amp.autocast(enabled=False)
|
| 40 |
+
# no autocast is needed for rope_apply, because it is already in float64
|
| 41 |
+
def rope_params(max_seq_len, dim, theta=10000):
|
| 42 |
+
assert dim % 2 == 0
|
| 43 |
+
freqs = torch.outer(torch.arange(max_seq_len), 1.0 / torch.pow(theta, torch.arange(0, dim, 2).to(torch.float64).div(dim)))
|
| 44 |
+
freqs = torch.polar(torch.ones_like(freqs), freqs)
|
| 45 |
+
return freqs
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# @amp.autocast(enabled=False)
|
| 49 |
+
def rope_apply(x, grid_sizes, freqs):
|
| 50 |
+
device_type = x.device.type
|
| 51 |
+
with torch.amp.autocast(device_type=device_type, enabled=False):
|
| 52 |
+
n, c = x.size(2), x.size(3) // 2
|
| 53 |
+
|
| 54 |
+
# split freqs
|
| 55 |
+
freqs = freqs.split([c - 2 * (c // 3), c // 3, c // 3], dim=1)
|
| 56 |
+
|
| 57 |
+
# loop over samples
|
| 58 |
+
output = []
|
| 59 |
+
for i, (f, h, w) in enumerate(grid_sizes.tolist()):
|
| 60 |
+
seq_len = f * h * w
|
| 61 |
+
|
| 62 |
+
# precompute multipliers
|
| 63 |
+
x_i = torch.view_as_complex(x[i, :seq_len].to(torch.float64).reshape(seq_len, n, -1, 2))
|
| 64 |
+
freqs_i = torch.cat(
|
| 65 |
+
[
|
| 66 |
+
freqs[0][:f].view(f, 1, 1, -1).expand(f, h, w, -1),
|
| 67 |
+
freqs[1][:h].view(1, h, 1, -1).expand(f, h, w, -1),
|
| 68 |
+
freqs[2][:w].view(1, 1, w, -1).expand(f, h, w, -1),
|
| 69 |
+
],
|
| 70 |
+
dim=-1,
|
| 71 |
+
).reshape(seq_len, 1, -1)
|
| 72 |
+
|
| 73 |
+
# apply rotary embedding
|
| 74 |
+
x_i = torch.view_as_real(x_i * freqs_i).flatten(2)
|
| 75 |
+
x_i = torch.cat([x_i, x[i, seq_len:]])
|
| 76 |
+
|
| 77 |
+
# append to collection
|
| 78 |
+
output.append(x_i)
|
| 79 |
+
return torch.stack(output).float()
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def calculate_freqs_i(fhw, c, freqs, f_indices=None):
|
| 83 |
+
"""f_indices is used to select specific frames for rotary embedding. e.g. [0,8] (with start image) or [0,8,20] (with start and end images)"""
|
| 84 |
+
f, h, w = fhw[:3]
|
| 85 |
+
freqs = freqs.split([c - 2 * (c // 3), c // 3, c // 3], dim=1)
|
| 86 |
+
|
| 87 |
+
if f_indices is None:
|
| 88 |
+
freqs_f = freqs[0][:f]
|
| 89 |
+
else:
|
| 90 |
+
logger.info(f"Using f_indices: {f_indices} for rotary embedding. fhw: {fhw}")
|
| 91 |
+
freqs_f = freqs[0][f_indices]
|
| 92 |
+
|
| 93 |
+
freqs_i = torch.cat(
|
| 94 |
+
[
|
| 95 |
+
freqs_f.view(f, 1, 1, -1).expand(f, h, w, -1),
|
| 96 |
+
freqs[1][:h].view(1, h, 1, -1).expand(f, h, w, -1),
|
| 97 |
+
freqs[2][:w].view(1, 1, w, -1).expand(f, h, w, -1),
|
| 98 |
+
],
|
| 99 |
+
dim=-1,
|
| 100 |
+
).reshape(f * h * w, 1, -1)
|
| 101 |
+
return freqs_i
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
# inplace version of rope_apply
|
| 105 |
+
def rope_apply_inplace_cached(x, grid_sizes, freqs_list):
|
| 106 |
+
# with torch.amp.autocast(device_type=device_type, enabled=False):
|
| 107 |
+
rope_dtype = torch.float64 # float32 does not reduce memory usage significantly
|
| 108 |
+
|
| 109 |
+
n, c = x.size(2), x.size(3) // 2
|
| 110 |
+
|
| 111 |
+
# loop over samples
|
| 112 |
+
for i, (f, h, w) in enumerate(grid_sizes.tolist()):
|
| 113 |
+
seq_len = f * h * w
|
| 114 |
+
|
| 115 |
+
# precompute multipliers
|
| 116 |
+
x_i = torch.view_as_complex(x[i, :seq_len].to(rope_dtype).reshape(seq_len, n, -1, 2))
|
| 117 |
+
freqs_i = freqs_list[i]
|
| 118 |
+
|
| 119 |
+
# apply rotary embedding
|
| 120 |
+
x_i = torch.view_as_real(x_i * freqs_i).flatten(2)
|
| 121 |
+
# x_i = torch.cat([x_i, x[i, seq_len:]])
|
| 122 |
+
|
| 123 |
+
# inplace update
|
| 124 |
+
x[i, :seq_len] = x_i.to(x.dtype)
|
| 125 |
+
|
| 126 |
+
return x
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
class WanRMSNorm(nn.Module):
|
| 130 |
+
|
| 131 |
+
def __init__(self, dim, eps=1e-5):
|
| 132 |
+
super().__init__()
|
| 133 |
+
self.dim = dim
|
| 134 |
+
self.eps = eps
|
| 135 |
+
self.weight = nn.Parameter(torch.ones(dim))
|
| 136 |
+
|
| 137 |
+
def forward(self, x):
|
| 138 |
+
r"""
|
| 139 |
+
Args:
|
| 140 |
+
x(Tensor): Shape [B, L, C]
|
| 141 |
+
"""
|
| 142 |
+
# return self._norm(x.float()).type_as(x) * self.weight
|
| 143 |
+
# support fp8
|
| 144 |
+
return self._norm(x.float()).type_as(x) * self.weight.to(x.dtype)
|
| 145 |
+
|
| 146 |
+
def _norm(self, x):
|
| 147 |
+
return x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)
|
| 148 |
+
|
| 149 |
+
# def forward(self, x):
|
| 150 |
+
# r"""
|
| 151 |
+
# Args:
|
| 152 |
+
# x(Tensor): Shape [B, L, C]
|
| 153 |
+
# """
|
| 154 |
+
# # inplace version, also supports fp8 -> does not have significant performance improvement
|
| 155 |
+
# original_dtype = x.dtype
|
| 156 |
+
# x = x.float()
|
| 157 |
+
# y = x.pow(2).mean(dim=-1, keepdim=True)
|
| 158 |
+
# y.add_(self.eps)
|
| 159 |
+
# y.rsqrt_()
|
| 160 |
+
# x *= y
|
| 161 |
+
# x = x.to(original_dtype)
|
| 162 |
+
# x *= self.weight.to(original_dtype)
|
| 163 |
+
# return x
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
class WanLayerNorm(nn.LayerNorm):
|
| 167 |
+
|
| 168 |
+
def __init__(self, dim, eps=1e-6, elementwise_affine=False):
|
| 169 |
+
super().__init__(dim, elementwise_affine=elementwise_affine, eps=eps)
|
| 170 |
+
|
| 171 |
+
def forward(self, x):
|
| 172 |
+
r"""
|
| 173 |
+
Args:
|
| 174 |
+
x(Tensor): Shape [B, L, C]
|
| 175 |
+
"""
|
| 176 |
+
return super().forward(x.float()).type_as(x)


class WanSelfAttention(nn.Module):

    def __init__(self, dim, num_heads, window_size=(-1, -1), qk_norm=True, eps=1e-6, attn_mode="torch", split_attn=False):
        assert dim % num_heads == 0
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.window_size = window_size
        self.qk_norm = qk_norm
        self.eps = eps
        self.attn_mode = attn_mode
        self.split_attn = split_attn

        # layers
        self.q = nn.Linear(dim, dim)
        self.k = nn.Linear(dim, dim)
        self.v = nn.Linear(dim, dim)
        self.o = nn.Linear(dim, dim)
        self.norm_q = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()
        self.norm_k = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()

    def forward(self, x, seq_lens, grid_sizes, freqs):
        r"""
        Args:
            x(Tensor): Shape [B, L, num_heads, C / num_heads]
            seq_lens(Tensor): Shape [B]
            grid_sizes(Tensor): Shape [B, 3], the second dimension contains (F, H, W)
            freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2]
        """
        b, s, n, d = *x.shape[:2], self.num_heads, self.head_dim

        # # query, key, value function
        # def qkv_fn(x):
        #     q = self.norm_q(self.q(x)).view(b, s, n, d)
        #     k = self.norm_k(self.k(x)).view(b, s, n, d)
        #     v = self.v(x).view(b, s, n, d)
        #     return q, k, v
        # q, k, v = qkv_fn(x)
        # del x
        # query, key, value function

        q = self.q(x)
        k = self.k(x)
        v = self.v(x)
        del x
        q = self.norm_q(q)
        k = self.norm_k(k)
        q = q.view(b, s, n, d)
        k = k.view(b, s, n, d)
        v = v.view(b, s, n, d)

        rope_apply_inplace_cached(q, grid_sizes, freqs)
        rope_apply_inplace_cached(k, grid_sizes, freqs)
        qkv = [q, k, v]
        del q, k, v
        x = flash_attention(
            qkv, k_lens=seq_lens, window_size=self.window_size, attn_mode=self.attn_mode, split_attn=self.split_attn
        )

        # output
        x = x.flatten(2)
        x = self.o(x)
        return x
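

# Shape sketch for WanSelfAttention: the module consumes hidden states of shape
# [B, L, C] (L = F*H*W) together with the cached rotary tables and the grid sizes.
# All sizes below are illustrative assumptions, and the sketch presumes the
# `flash_attention` helper defined earlier in this file accepts attn_mode="torch".
def _self_attn_shape_sketch():
    f, h, w, dim, num_heads = 2, 4, 4, 24, 2
    attn = WanSelfAttention(dim, num_heads, attn_mode="torch")
    d = dim // num_heads
    freqs = torch.cat(
        [rope_params(1024, d - 4 * (d // 6)), rope_params(1024, 2 * (d // 6)), rope_params(1024, 2 * (d // 6))], dim=1
    )
    freqs_i = calculate_freqs_i((f, h, w), d // 2, freqs)
    x = torch.randn(1, f * h * w, dim)
    seq_lens = torch.tensor([f * h * w], dtype=torch.long)
    grid_sizes = torch.tensor([[f, h, w]], dtype=torch.long)
    return attn(x, seq_lens, grid_sizes, [freqs_i])  # [1, f*h*w, dim]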


class WanT2VCrossAttention(WanSelfAttention):

    def forward(self, x, context, context_lens):
        r"""
        Args:
            x(Tensor): Shape [B, L1, C]
            context(Tensor): Shape [B, L2, C]
            context_lens(Tensor): Shape [B]
        """
        b, n, d = x.size(0), self.num_heads, self.head_dim

        # compute query, key, value
        # q = self.norm_q(self.q(x)).view(b, -1, n, d)
        # k = self.norm_k(self.k(context)).view(b, -1, n, d)
        # v = self.v(context).view(b, -1, n, d)
        q = self.q(x)
        del x
        k = self.k(context)
        v = self.v(context)
        del context
        q = self.norm_q(q)
        k = self.norm_k(k)
        q = q.view(b, -1, n, d)
        k = k.view(b, -1, n, d)
        v = v.view(b, -1, n, d)

        # compute attention
        qkv = [q, k, v]
        del q, k, v
        x = flash_attention(qkv, k_lens=context_lens, attn_mode=self.attn_mode, split_attn=self.split_attn)

        # output
        x = x.flatten(2)
        x = self.o(x)
        return x


class WanI2VCrossAttention(WanSelfAttention):

    def __init__(self, dim, num_heads, window_size=(-1, -1), qk_norm=True, eps=1e-6, attn_mode="torch", split_attn=False):
        super().__init__(dim, num_heads, window_size, qk_norm, eps, attn_mode, split_attn)

        self.k_img = nn.Linear(dim, dim)
        self.v_img = nn.Linear(dim, dim)
        # self.alpha = nn.Parameter(torch.zeros((1, )))
        self.norm_k_img = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()

    def forward(self, x, context, context_lens):
        r"""
        Args:
            x(Tensor): Shape [B, L1, C]
            context(Tensor): Shape [B, L2, C]
            context_lens(Tensor): Shape [B]
        """
        context_img = context[:, :257]
        context = context[:, 257:]
        b, n, d = x.size(0), self.num_heads, self.head_dim

        # compute query, key, value
        q = self.q(x)
        del x
        q = self.norm_q(q)
        q = q.view(b, -1, n, d)
        k = self.k(context)
        k = self.norm_k(k).view(b, -1, n, d)
        v = self.v(context).view(b, -1, n, d)
        del context

        # compute attention
        qkv = [q, k, v]
        del k, v
        x = flash_attention(qkv, k_lens=context_lens, attn_mode=self.attn_mode, split_attn=self.split_attn)

        # compute query, key, value
        k_img = self.norm_k_img(self.k_img(context_img)).view(b, -1, n, d)
        v_img = self.v_img(context_img).view(b, -1, n, d)
        del context_img

        # compute attention
        qkv = [q, k_img, v_img]
        del q, k_img, v_img
        img_x = flash_attention(qkv, k_lens=None, attn_mode=self.attn_mode, split_attn=self.split_attn)

        # output
        x = x.flatten(2)
        img_x = img_x.flatten(2)
        if self.training:
            x = x + img_x  # avoid inplace
        else:
            x += img_x
        del img_x

        x = self.o(x)
        return x
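

# Layout sketch for WanI2VCrossAttention: the first 257 context tokens are the
# CLIP image tokens produced by MLPProj (see `img_emb` in WanModel below); the
# remainder are text tokens. This helper just names the two slices the forward
# pass splits on; it exists only for illustration.
def _i2v_context_layout_sketch(context: torch.Tensor):
    clip_tokens = context[:, :257]  # image branch (k_img / v_img)
    text_tokens = context[:, 257:]  # text branch (k / v)
    return clip_tokens, text_tokens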


WAN_CROSSATTENTION_CLASSES = {
    "t2v_cross_attn": WanT2VCrossAttention,
    "i2v_cross_attn": WanI2VCrossAttention,
}


class WanAttentionBlock(nn.Module):

    def __init__(
        self,
        cross_attn_type,
        dim,
        ffn_dim,
        num_heads,
        window_size=(-1, -1),
        qk_norm=True,
        cross_attn_norm=False,
        eps=1e-6,
        attn_mode="torch",
        split_attn=False,
    ):
        super().__init__()
        self.dim = dim
        self.ffn_dim = ffn_dim
        self.num_heads = num_heads
        self.window_size = window_size
        self.qk_norm = qk_norm
        self.cross_attn_norm = cross_attn_norm
        self.eps = eps

        # layers
        self.norm1 = WanLayerNorm(dim, eps)
        self.self_attn = WanSelfAttention(dim, num_heads, window_size, qk_norm, eps, attn_mode, split_attn)
        self.norm3 = WanLayerNorm(dim, eps, elementwise_affine=True) if cross_attn_norm else nn.Identity()
        self.cross_attn = WAN_CROSSATTENTION_CLASSES[cross_attn_type](dim, num_heads, (-1, -1), qk_norm, eps, attn_mode, split_attn)
        self.norm2 = WanLayerNorm(dim, eps)
        self.ffn = nn.Sequential(nn.Linear(dim, ffn_dim), nn.GELU(approximate="tanh"), nn.Linear(ffn_dim, dim))

        # modulation
        self.modulation = nn.Parameter(torch.randn(1, 6, dim) / dim**0.5)

        self.gradient_checkpointing = False

    def enable_gradient_checkpointing(self):
        self.gradient_checkpointing = True

    def disable_gradient_checkpointing(self):
        self.gradient_checkpointing = False

    def _forward(self, x, e, seq_lens, grid_sizes, freqs, context, context_lens):
        r"""
        Args:
            x(Tensor): Shape [B, L, C]
            e(Tensor): Shape [B, 6, C]
            seq_lens(Tensor): Shape [B], length of each sequence in batch
            grid_sizes(Tensor): Shape [B, 3], the second dimension contains (F, H, W)
            freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2]
        """
        assert e.dtype == torch.float32
        # with amp.autocast(dtype=torch.float32):
        #     e = (self.modulation + e).chunk(6, dim=1)
        # support fp8
        e = self.modulation.to(torch.float32) + e
        e = e.chunk(6, dim=1)
        assert e[0].dtype == torch.float32

        # self-attention
        y = self.self_attn(self.norm1(x).float() * (1 + e[1]) + e[0], seq_lens, grid_sizes, freqs)
        # with amp.autocast(dtype=torch.float32):
        #     x = x + y * e[2]
        x = x + y.to(torch.float32) * e[2]
        del y

        # cross-attention & ffn function
        # def cross_attn_ffn(x, context, context_lens, e):
        #     x += self.cross_attn(self.norm3(x), context, context_lens)
        #     y = self.ffn(self.norm2(x).float() * (1 + e[4]) + e[3])
        #     # with amp.autocast(dtype=torch.float32):
        #     #     x = x + y * e[5]
        #     x += y.to(torch.float32) * e[5]
        #     return x
        # x = cross_attn_ffn(x, context, context_lens, e)

        # x += self.cross_attn(self.norm3(x), context, context_lens)  # backward error
        x = x + self.cross_attn(self.norm3(x), context, context_lens)
        del context
        y = self.ffn(self.norm2(x).float() * (1 + e[4]) + e[3])
        x = x + y.to(torch.float32) * e[5]
        del y
        return x

    def forward(self, x, e, seq_lens, grid_sizes, freqs, context, context_lens):
        if self.training and self.gradient_checkpointing:
            return checkpoint(self._forward, x, e, seq_lens, grid_sizes, freqs, context, context_lens, use_reentrant=False)
        return self._forward(x, e, seq_lens, grid_sizes, freqs, context, context_lens)
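

# Modulation sketch: after the chunk in `_forward`, `e` is six [B, 1, C] slots,
# used as shift/scale/gate for self-attention and then shift/scale/gate for the
# FFN. This reading of the slot order follows the indexing in `_forward` above;
# the helper itself is purely illustrative.
def _modulation_slots_sketch(block: "WanAttentionBlock", e: torch.Tensor):
    e = (block.modulation.to(torch.float32) + e).chunk(6, dim=1)
    shift_attn, scale_attn, gate_attn, shift_ffn, scale_ffn, gate_ffn = e
    return shift_attn, scale_attn, gate_attn, shift_ffn, scale_ffn, gate_ffn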


class Head(nn.Module):

    def __init__(self, dim, out_dim, patch_size, eps=1e-6):
        super().__init__()
        self.dim = dim
        self.out_dim = out_dim
        self.patch_size = patch_size
        self.eps = eps

        # layers
        out_dim = math.prod(patch_size) * out_dim
        self.norm = WanLayerNorm(dim, eps)
        self.head = nn.Linear(dim, out_dim)

        # modulation
        self.modulation = nn.Parameter(torch.randn(1, 2, dim) / dim**0.5)

    def forward(self, x, e):
        r"""
        Args:
            x(Tensor): Shape [B, L1, C]
            e(Tensor): Shape [B, C]
        """
        assert e.dtype == torch.float32
        # with amp.autocast(dtype=torch.float32):
        #     e = (self.modulation + e.unsqueeze(1)).chunk(2, dim=1)
        #     x = self.head(self.norm(x) * (1 + e[1]) + e[0])
        # support fp8
        e = (self.modulation.to(torch.float32) + e.unsqueeze(1)).chunk(2, dim=1)
        x = self.head(self.norm(x) * (1 + e[1]) + e[0])
        return x


FIRST_LAST_FRAME_CONTEXT_TOKEN_NUMBER = 257 * 2


class MLPProj(torch.nn.Module):

    def __init__(self, in_dim, out_dim, flf_pos_emb=False):
        super().__init__()

        self.proj = torch.nn.Sequential(
            torch.nn.LayerNorm(in_dim),
            torch.nn.Linear(in_dim, in_dim),
            torch.nn.GELU(),
            torch.nn.Linear(in_dim, out_dim),
            torch.nn.LayerNorm(out_dim),
        )
        if flf_pos_emb:  # NOTE: we only use this for `flf2v`
            self.emb_pos = nn.Parameter(torch.zeros(1, FIRST_LAST_FRAME_CONTEXT_TOKEN_NUMBER, 1280))
        else:
            self.emb_pos = None

    def forward(self, image_embeds):
        if self.emb_pos is not None:  # for `flf2v`
            bs, n, d = image_embeds.shape
            image_embeds = image_embeds.view(-1, 2 * n, d)
            image_embeds = image_embeds + self.emb_pos
        clip_extra_context_tokens = self.proj(image_embeds)
        return clip_extra_context_tokens
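

# Shape sketch for MLPProj with flf_pos_emb=True (`flf2v`): first- and last-frame
# CLIP embeddings arrive stacked on the batch axis as [2*bs, 257, 1280] and are
# folded into [bs, 514, 1280] before the positional embedding is added. The
# concrete sizes are illustrative assumptions.
def _flf2v_proj_sketch(out_dim: int = 2048):
    proj = MLPProj(1280, out_dim, flf_pos_emb=True)
    image_embeds = torch.randn(2, 257, 1280)  # assumed first + last frame for one sample
    tokens = proj(image_embeds)  # [1, 514, out_dim]
    return tokens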


class WanModel(nn.Module):  # ModelMixin, ConfigMixin):
    r"""
    Wan diffusion backbone supporting both text-to-video and image-to-video.
    """

    ignore_for_config = ["patch_size", "cross_attn_norm", "qk_norm", "text_dim", "window_size"]
    _no_split_modules = ["WanAttentionBlock"]

    # @register_to_config
    def __init__(
        self,
        model_type="t2v",
        patch_size=(1, 2, 2),
        text_len=512,
        in_dim=16,
        dim=2048,
        ffn_dim=8192,
        freq_dim=256,
        text_dim=4096,
        out_dim=16,
        num_heads=16,
        num_layers=32,
        window_size=(-1, -1),
        qk_norm=True,
        cross_attn_norm=True,
        eps=1e-6,
        attn_mode=None,
        split_attn=False,
    ):
        r"""
        Initialize the diffusion model backbone.

        Args:
            model_type (`str`, *optional*, defaults to 't2v'):
                Model variant - 't2v' (text-to-video), 'i2v' (image-to-video), or 'flf2v' (first-last-frame-to-video)
            patch_size (`tuple`, *optional*, defaults to (1, 2, 2)):
                3D patch dimensions for video embedding (t_patch, h_patch, w_patch)
            text_len (`int`, *optional*, defaults to 512):
                Fixed length for text embeddings
            in_dim (`int`, *optional*, defaults to 16):
                Input video channels (C_in)
            dim (`int`, *optional*, defaults to 2048):
                Hidden dimension of the transformer
            ffn_dim (`int`, *optional*, defaults to 8192):
                Intermediate dimension in feed-forward network
            freq_dim (`int`, *optional*, defaults to 256):
                Dimension for sinusoidal time embeddings
            text_dim (`int`, *optional*, defaults to 4096):
                Input dimension for text embeddings
            out_dim (`int`, *optional*, defaults to 16):
                Output video channels (C_out)
            num_heads (`int`, *optional*, defaults to 16):
                Number of attention heads
            num_layers (`int`, *optional*, defaults to 32):
                Number of transformer blocks
            window_size (`tuple`, *optional*, defaults to (-1, -1)):
                Window size for local attention (-1 indicates global attention)
            qk_norm (`bool`, *optional*, defaults to True):
                Enable query/key normalization
            cross_attn_norm (`bool`, *optional*, defaults to True):
                Enable cross-attention normalization
            eps (`float`, *optional*, defaults to 1e-6):
                Epsilon value for normalization layers
        """

        super().__init__()

        assert model_type in ["t2v", "i2v", "flf2v"], f"Invalid model_type: {model_type}. Must be one of ['t2v', 'i2v', 'flf2v']."
        self.model_type = model_type

        self.patch_size = patch_size
        self.text_len = text_len
        self.in_dim = in_dim
        self.dim = dim
        self.ffn_dim = ffn_dim
        self.freq_dim = freq_dim
        self.text_dim = text_dim
        self.out_dim = out_dim
        self.num_heads = num_heads
        self.num_layers = num_layers
        self.window_size = window_size
        self.qk_norm = qk_norm
        self.cross_attn_norm = cross_attn_norm
        self.eps = eps
        self.attn_mode = attn_mode if attn_mode is not None else "torch"
        self.split_attn = split_attn

        # embeddings
        self.patch_embedding = nn.Conv3d(in_dim, dim, kernel_size=patch_size, stride=patch_size)
        self.text_embedding = nn.Sequential(nn.Linear(text_dim, dim), nn.GELU(approximate="tanh"), nn.Linear(dim, dim))

        self.time_embedding = nn.Sequential(nn.Linear(freq_dim, dim), nn.SiLU(), nn.Linear(dim, dim))
        self.time_projection = nn.Sequential(nn.SiLU(), nn.Linear(dim, dim * 6))

        # blocks
        cross_attn_type = "t2v_cross_attn" if model_type == "t2v" else "i2v_cross_attn"
        self.blocks = nn.ModuleList(
            [
                WanAttentionBlock(
                    cross_attn_type, dim, ffn_dim, num_heads, window_size, qk_norm, cross_attn_norm, eps, attn_mode, split_attn
                )
                for _ in range(num_layers)
            ]
        )

        # head
        self.head = Head(dim, out_dim, patch_size, eps)

        # buffers (don't use register_buffer otherwise dtype will be changed in to())
        assert (dim % num_heads) == 0 and (dim // num_heads) % 2 == 0
        d = dim // num_heads
        self.freqs = torch.cat(
            [rope_params(1024, d - 4 * (d // 6)), rope_params(1024, 2 * (d // 6)), rope_params(1024, 2 * (d // 6))], dim=1
        )
        self.freqs_fhw = {}

        if model_type == "i2v" or model_type == "flf2v":
            self.img_emb = MLPProj(1280, dim, flf_pos_emb=model_type == "flf2v")

        # initialize weights
        self.init_weights()

        self.gradient_checkpointing = False

        # offloading
        self.blocks_to_swap = None
        self.offloader = None

    @property
    def dtype(self):
        return next(self.parameters()).dtype

    @property
    def device(self):
        return next(self.parameters()).device

    def fp8_optimization(
        self, state_dict: dict[str, torch.Tensor], device: torch.device, move_to_device: bool, use_scaled_mm: bool = False
    ) -> dict[str, torch.Tensor]:
        """
        Optimize the model state_dict with fp8.

        Args:
            state_dict (dict[str, torch.Tensor]):
                The state_dict of the model.
            device (torch.device):
                The device to calculate the weight.
            move_to_device (bool):
                Whether to move the weight to the device after optimization.
        """
        TARGET_KEYS = ["blocks"]
        EXCLUDE_KEYS = [
            "norm",
            "patch_embedding",
            "text_embedding",
            "time_embedding",
            "time_projection",
            "head",
            "modulation",
            "img_emb",
        ]

        # inplace optimization
        state_dict = optimize_state_dict_with_fp8(state_dict, device, TARGET_KEYS, EXCLUDE_KEYS, move_to_device=move_to_device)

        # apply monkey patching
        apply_fp8_monkey_patch(self, state_dict, use_scaled_mm=use_scaled_mm)

        return state_dict

    def enable_gradient_checkpointing(self):
        self.gradient_checkpointing = True

        for block in self.blocks:
            block.enable_gradient_checkpointing()

        print("WanModel: Gradient checkpointing enabled.")

    def disable_gradient_checkpointing(self):
        self.gradient_checkpointing = False

        for block in self.blocks:
            block.disable_gradient_checkpointing()

        print("WanModel: Gradient checkpointing disabled.")

    def enable_block_swap(self, blocks_to_swap: int, device: torch.device, supports_backward: bool):
        self.blocks_to_swap = blocks_to_swap
        self.num_blocks = len(self.blocks)

        assert (
            self.blocks_to_swap <= self.num_blocks - 1
        ), f"Cannot swap more than {self.num_blocks - 1} blocks. Requested {self.blocks_to_swap} blocks to swap."

        self.offloader = ModelOffloader(
            "wan_attn_block", self.blocks, self.num_blocks, self.blocks_to_swap, supports_backward, device  # , debug=True
        )
        print(
            f"WanModel: Block swap enabled. Swapping {self.blocks_to_swap} blocks out of {self.num_blocks} blocks. Supports backward: {supports_backward}"
        )

    def switch_block_swap_for_inference(self):
        if self.blocks_to_swap:
            self.offloader.set_forward_only(True)
            self.prepare_block_swap_before_forward()
            print("WanModel: Block swap set to forward only.")

    def switch_block_swap_for_training(self):
        if self.blocks_to_swap:
            self.offloader.set_forward_only(False)
            self.prepare_block_swap_before_forward()
            print("WanModel: Block swap set to forward and backward.")

    def move_to_device_except_swap_blocks(self, device: torch.device):
        # assume model is on cpu. do not move blocks to device to reduce temporary memory usage
        if self.blocks_to_swap:
            save_blocks = self.blocks
            self.blocks = None

        self.to(device)

        if self.blocks_to_swap:
            self.blocks = save_blocks

    def prepare_block_swap_before_forward(self):
        if self.blocks_to_swap is None or self.blocks_to_swap == 0:
            return
        self.offloader.prepare_block_devices_before_forward(self.blocks)
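
    # Usage sketch for block swap (illustrative; the block count and device are
    # assumptions): enable swapping before moving weights, leave the swapped
    # blocks on CPU, and prime the offloader before each forward pass, e.g.
    #   model.enable_block_swap(10, device, supports_backward=True)
    #   model.move_to_device_except_swap_blocks(device)  # swapped blocks stay on CPU
    #   model.prepare_block_swap_before_forward()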

    def forward(self, x, t, context, seq_len, clip_fea=None, y=None, skip_block_indices=None, f_indices=None):
        r"""
        Forward pass through the diffusion model

        Args:
            x (List[Tensor]):
                List of input video tensors, each with shape [C_in, F, H, W]
            t (Tensor):
                Diffusion timesteps tensor of shape [B]
            context (List[Tensor]):
                List of text embeddings each with shape [L, C]
            seq_len (`int`):
                Maximum sequence length for positional encoding
            clip_fea (Tensor, *optional*):
                CLIP image features for image-to-video mode
            y (List[Tensor], *optional*):
                Conditional video inputs for image-to-video mode, same shape as x
            skip_block_indices (List[int], *optional*):
                Indices of blocks to skip during forward pass
            f_indices (List[List[int]], *optional*):
                Indices of frames used for rotary embeddings, list of lists for each video in the batch

        Returns:
            List[Tensor]:
                List of denoised video tensors with original input shapes [C_out, F, H / 8, W / 8]
        """
        # remove assertions to work with Fun-Control T2V
        # if self.model_type == "i2v":
        #     assert clip_fea is not None and y is not None
        # params
        device = self.patch_embedding.weight.device
        if self.freqs.device != device:
            self.freqs = self.freqs.to(device)

        if y is not None:
            x = [torch.cat([u, v], dim=0) for u, v in zip(x, y)]
            y = None

        # embeddings
        x = [self.patch_embedding(u.unsqueeze(0)) for u in x]  # x[0].shape = [1, 5120, F, H, W]
        grid_sizes = torch.stack([torch.tensor(u.shape[2:], dtype=torch.long) for u in x])  # list of [F, H, W]

        freqs_list = []
        for i, fhw in enumerate(grid_sizes):
            fhw = tuple(fhw.tolist())
            if f_indices is not None:
                fhw = tuple(list(fhw) + f_indices[i])  # add f_indices to fhw for cache key
            if fhw not in self.freqs_fhw:
                c = self.dim // self.num_heads // 2
                self.freqs_fhw[fhw] = calculate_freqs_i(fhw, c, self.freqs, None if f_indices is None else f_indices[i])
            freqs_list.append(self.freqs_fhw[fhw])

        x = [u.flatten(2).transpose(1, 2) for u in x]
        seq_lens = torch.tensor([u.size(1) for u in x], dtype=torch.long)
        assert seq_lens.max() <= seq_len, f"Sequence length exceeds maximum allowed length {seq_len}. Got {seq_lens.max()}"
        x = torch.cat([torch.cat([u, u.new_zeros(1, seq_len - u.size(1), u.size(2))], dim=1) for u in x])

        # time embeddings
        # with amp.autocast(dtype=torch.float32):
        with torch.amp.autocast(device_type=device.type, dtype=torch.float32):
            e = self.time_embedding(sinusoidal_embedding_1d(self.freq_dim, t).float())
            e0 = self.time_projection(e).unflatten(1, (6, self.dim))
        assert e.dtype == torch.float32 and e0.dtype == torch.float32

        # context
        context_lens = None
        if type(context) is list:
            context = torch.stack([torch.cat([u, u.new_zeros(self.text_len - u.size(0), u.size(1))]) for u in context])
        context = self.text_embedding(context)

        if clip_fea is not None:
            context_clip = self.img_emb(clip_fea)  # bs x 257 x dim
            context = torch.concat([context_clip, context], dim=1)
            clip_fea = None
            context_clip = None

        # arguments
        kwargs = dict(e=e0, seq_lens=seq_lens, grid_sizes=grid_sizes, freqs=freqs_list, context=context, context_lens=context_lens)

        if self.blocks_to_swap:
            clean_memory_on_device(device)

        # print(f"x: {x.shape}, e: {e0.shape}, context: {context.shape}, seq_lens: {seq_lens}")
        for block_idx, block in enumerate(self.blocks):
            is_block_skipped = skip_block_indices is not None and block_idx in skip_block_indices

            if self.blocks_to_swap and not is_block_skipped:
                self.offloader.wait_for_block(block_idx)

            if not is_block_skipped:
                x = block(x, **kwargs)

            if self.blocks_to_swap:
                self.offloader.submit_move_blocks_forward(self.blocks, block_idx)

        # head
        x = self.head(x, e)

        # unpatchify
        x = self.unpatchify(x, grid_sizes)
        return [u.float() for u in x]

    def unpatchify(self, x, grid_sizes):
        r"""
        Reconstruct video tensors from patch embeddings.

        Args:
            x (List[Tensor]):
                List of patchified features, each with shape [L, C_out * prod(patch_size)]
            grid_sizes (Tensor):
                Original spatial-temporal grid dimensions before patching,
                shape [B, 3] (3 dimensions correspond to F_patches, H_patches, W_patches)

        Returns:
            List[Tensor]:
                Reconstructed video tensors with shape [C_out, F, H / 8, W / 8]
        """

        c = self.out_dim
        out = []
        for u, v in zip(x, grid_sizes.tolist()):
            u = u[: math.prod(v)].view(*v, *self.patch_size, c)
            u = torch.einsum("fhwpqrc->cfphqwr", u)
            u = u.reshape(c, *[i * j for i, j in zip(v, self.patch_size)])
            out.append(u)
        return out
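
    # Index sketch for `unpatchify` above: after the `view`, `u` is laid out as
    # [F, H, W, pf, ph, pw, C]; the einsum "fhwpqrc->cfphqwr" pairs each grid
    # axis with its patch axis so the reshape yields [C, F*pf, H*ph, W*pw].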

    def init_weights(self):
        r"""
        Initialize model parameters using Xavier initialization.
        """

        # basic init
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)

        # init embeddings
        nn.init.xavier_uniform_(self.patch_embedding.weight.flatten(1))
        for m in self.text_embedding.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, std=0.02)
        for m in self.time_embedding.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, std=0.02)

        # init output layer
        nn.init.zeros_(self.head.head.weight)
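

# Usage sketch for WanModel.forward: inputs are lists of per-sample latents
# [C_in, F, H, W] plus timesteps and text embeddings, as the docstring above
# describes. All concrete sizes below are illustrative assumptions.
def _forward_usage_sketch(model: "WanModel"):
    x = [torch.randn(model.in_dim, 5, 16, 16)]  # one latent video
    t = torch.tensor([500.0])
    context = [torch.randn(77, model.text_dim)]  # one text embedding, L <= text_len
    seq_len = 5 * (16 // 2) * (16 // 2)  # F * H/2 * W/2 for patch_size (1, 2, 2)
    return model(x, t, context, seq_len)  # list of [C_out, 5, 16, 16] tensors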


def detect_wan_sd_dtype(path: str) -> torch.dtype:
    # get dtype from model weights
    with MemoryEfficientSafeOpen(path) as f:
        keys = set(f.keys())
        key1 = "model.diffusion_model.blocks.0.cross_attn.k.weight"  # 1.3B
        key2 = "blocks.0.cross_attn.k.weight"  # 14B
        if key1 in keys:
            dit_dtype = f.get_tensor(key1).dtype
        elif key2 in keys:
            dit_dtype = f.get_tensor(key2).dtype
        else:
            raise ValueError(f"Could not find the dtype in the model weights: {path}")
    logger.info(f"Detected DiT dtype: {dit_dtype}")
    return dit_dtype


def load_wan_model(
    config: any,
    device: Union[str, torch.device],
    dit_path: str,
    attn_mode: str,
    split_attn: bool,
    loading_device: Union[str, torch.device],
    dit_weight_dtype: Optional[torch.dtype],
    fp8_scaled: bool = False,
) -> WanModel:
    # dit_weight_dtype is None for fp8_scaled
    assert (not fp8_scaled and dit_weight_dtype is not None) or (fp8_scaled and dit_weight_dtype is None)

    device = torch.device(device)
    loading_device = torch.device(loading_device)

    with init_empty_weights():
        logger.info("Creating WanModel")
        model = WanModel(
            model_type="i2v" if config.i2v else ("flf2v" if config.flf2v else "t2v"),
            dim=config.dim,
            eps=config.eps,
            ffn_dim=config.ffn_dim,
            freq_dim=config.freq_dim,
            in_dim=config.in_dim,
            num_heads=config.num_heads,
            num_layers=config.num_layers,
            out_dim=config.out_dim,
            text_len=config.text_len,
            attn_mode=attn_mode,
            split_attn=split_attn,
        )
        if dit_weight_dtype is not None:
            model.to(dit_weight_dtype)

    # if fp8_scaled, load model weights to CPU to reduce VRAM usage. Otherwise, load to the specified device (CPU for block swap or CUDA for others)
    wan_loading_device = torch.device("cpu") if fp8_scaled else loading_device
    logger.info(f"Loading DiT model from {dit_path}, device={wan_loading_device}, dtype={dit_weight_dtype}")

    # load model weights with the specified dtype or as is
    sd = load_safetensors(dit_path, wan_loading_device, disable_mmap=True, dtype=dit_weight_dtype)

    # remove "model.diffusion_model." prefix: 1.3B model has this prefix
    for key in list(sd.keys()):
        if key.startswith("model.diffusion_model."):
            sd[key[22:]] = sd.pop(key)

    if fp8_scaled:
        # fp8 optimization: calculate on CUDA, move back to CPU if loading_device is CPU (block swap)
        logger.info("Optimizing model weights to fp8. This may take a while.")
        sd = model.fp8_optimization(sd, device, move_to_device=loading_device.type == "cpu")

        if loading_device.type != "cpu":
            # make sure all the model weights are on the loading_device
            logger.info(f"Moving weights to {loading_device}")
            for key in sd.keys():
                sd[key] = sd[key].to(loading_device)

    info = model.load_state_dict(sd, strict=True, assign=True)
    logger.info(f"Loaded DiT model from {dit_path}, info={info}")

    return model
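

# Usage sketch for `load_wan_model`: detect the checkpoint dtype first, then
# either load with that dtype or request fp8 scaling (which requires
# dit_weight_dtype=None, per the assertion above). `config` is assumed to expose
# the attributes read in `load_wan_model`; the path is a placeholder assumption.
def _load_wan_model_sketch(config, dit_path: str):
    dtype = detect_wan_sd_dtype(dit_path)
    model = load_wan_model(
        config, "cuda", dit_path, attn_mode="torch", split_attn=False,
        loading_device="cuda", dit_weight_dtype=dtype, fp8_scaled=False,
    )
    return model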
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/t5.py
ADDED @@ -0,0 +1,514 @@
# Modified from transformers.models.t5.modeling_t5
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
# import logging
import math
import os

import torch
import torch.nn as nn
import torch.nn.functional as F

from musubi_tuner.wan.modules.tokenizers import HuggingfaceTokenizer
from accelerate import init_empty_weights
from safetensors.torch import load_file

import logging

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

__all__ = [
    "T5Model",
    "T5Encoder",
    "T5Decoder",
    "T5EncoderModel",
]


def fp16_clamp(x):
    if x.dtype == torch.float16 and torch.isinf(x).any():
        clamp = torch.finfo(x.dtype).max - 1000
        x = torch.clamp(x, min=-clamp, max=clamp)
    return x


def init_weights(m):
    if isinstance(m, T5LayerNorm):
        nn.init.ones_(m.weight)
    elif isinstance(m, T5Model):
        nn.init.normal_(m.token_embedding.weight, std=1.0)
    elif isinstance(m, T5FeedForward):
        nn.init.normal_(m.gate[0].weight, std=m.dim**-0.5)
        nn.init.normal_(m.fc1.weight, std=m.dim**-0.5)
        nn.init.normal_(m.fc2.weight, std=m.dim_ffn**-0.5)
    elif isinstance(m, T5Attention):
        nn.init.normal_(m.q.weight, std=(m.dim * m.dim_attn) ** -0.5)
        nn.init.normal_(m.k.weight, std=m.dim**-0.5)
        nn.init.normal_(m.v.weight, std=m.dim**-0.5)
        nn.init.normal_(m.o.weight, std=(m.num_heads * m.dim_attn) ** -0.5)
    elif isinstance(m, T5RelativeEmbedding):
        nn.init.normal_(m.embedding.weight, std=(2 * m.num_buckets * m.num_heads) ** -0.5)


class GELU(nn.Module):

    def forward(self, x):
        return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
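

# Note: this GELU is the tanh approximation; it should match
# torch.nn.functional.gelu(x, approximate="tanh") to float precision.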


class T5LayerNorm(nn.Module):

    def __init__(self, dim, eps=1e-6):
        super(T5LayerNorm, self).__init__()
        self.dim = dim
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        x = x * torch.rsqrt(x.float().pow(2).mean(dim=-1, keepdim=True) + self.eps)
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            x = x.type_as(self.weight)
        return self.weight * x


class T5Attention(nn.Module):

    def __init__(self, dim, dim_attn, num_heads, dropout=0.1):
        assert dim_attn % num_heads == 0
        super(T5Attention, self).__init__()
        self.dim = dim
        self.dim_attn = dim_attn
        self.num_heads = num_heads
        self.head_dim = dim_attn // num_heads

        # layers
        self.q = nn.Linear(dim, dim_attn, bias=False)
        self.k = nn.Linear(dim, dim_attn, bias=False)
        self.v = nn.Linear(dim, dim_attn, bias=False)
        self.o = nn.Linear(dim_attn, dim, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, context=None, mask=None, pos_bias=None):
        """
        x: [B, L1, C].
        context: [B, L2, C] or None.
        mask: [B, L2] or [B, L1, L2] or None.
        """
        # check inputs
        context = x if context is None else context
        b, n, c = x.size(0), self.num_heads, self.head_dim

        # compute query, key, value
        q = self.q(x).view(b, -1, n, c)
        k = self.k(context).view(b, -1, n, c)
        v = self.v(context).view(b, -1, n, c)

        # attention bias
        attn_bias = x.new_zeros(b, n, q.size(1), k.size(1))
        if pos_bias is not None:
            attn_bias += pos_bias
        if mask is not None:
            assert mask.ndim in [2, 3]
            mask = mask.view(b, 1, 1, -1) if mask.ndim == 2 else mask.unsqueeze(1)
            attn_bias.masked_fill_(mask == 0, torch.finfo(x.dtype).min)

        # compute attention (T5 does not use scaling)
        attn = torch.einsum("binc,bjnc->bnij", q, k) + attn_bias
        attn = F.softmax(attn.float(), dim=-1).type_as(attn)
        x = torch.einsum("bnij,bjnc->binc", attn, v)

        # output
        x = x.reshape(b, -1, n * c)
        x = self.o(x)
        x = self.dropout(x)
        return x


class T5FeedForward(nn.Module):

    def __init__(self, dim, dim_ffn, dropout=0.1):
        super(T5FeedForward, self).__init__()
        self.dim = dim
        self.dim_ffn = dim_ffn

        # layers
        self.gate = nn.Sequential(nn.Linear(dim, dim_ffn, bias=False), GELU())
        self.fc1 = nn.Linear(dim, dim_ffn, bias=False)
        self.fc2 = nn.Linear(dim_ffn, dim, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        x = self.fc1(x) * self.gate(x)
        x = self.dropout(x)
        x = self.fc2(x)
        x = self.dropout(x)
        return x


class T5SelfAttention(nn.Module):

    def __init__(self, dim, dim_attn, dim_ffn, num_heads, num_buckets, shared_pos=True, dropout=0.1):
        super(T5SelfAttention, self).__init__()
        self.dim = dim
        self.dim_attn = dim_attn
        self.dim_ffn = dim_ffn
        self.num_heads = num_heads
        self.num_buckets = num_buckets
        self.shared_pos = shared_pos

        # layers
        self.norm1 = T5LayerNorm(dim)
        self.attn = T5Attention(dim, dim_attn, num_heads, dropout)
        self.norm2 = T5LayerNorm(dim)
        self.ffn = T5FeedForward(dim, dim_ffn, dropout)
        self.pos_embedding = None if shared_pos else T5RelativeEmbedding(num_buckets, num_heads, bidirectional=True)

    def forward(self, x, mask=None, pos_bias=None):
        e = pos_bias if self.shared_pos else self.pos_embedding(x.size(1), x.size(1))
        x = fp16_clamp(x + self.attn(self.norm1(x), mask=mask, pos_bias=e))
        x = fp16_clamp(x + self.ffn(self.norm2(x)))
        return x


class T5CrossAttention(nn.Module):

    def __init__(self, dim, dim_attn, dim_ffn, num_heads, num_buckets, shared_pos=True, dropout=0.1):
        super(T5CrossAttention, self).__init__()
        self.dim = dim
        self.dim_attn = dim_attn
        self.dim_ffn = dim_ffn
        self.num_heads = num_heads
        self.num_buckets = num_buckets
        self.shared_pos = shared_pos

        # layers
        self.norm1 = T5LayerNorm(dim)
        self.self_attn = T5Attention(dim, dim_attn, num_heads, dropout)
        self.norm2 = T5LayerNorm(dim)
        self.cross_attn = T5Attention(dim, dim_attn, num_heads, dropout)
        self.norm3 = T5LayerNorm(dim)
        self.ffn = T5FeedForward(dim, dim_ffn, dropout)
        self.pos_embedding = None if shared_pos else T5RelativeEmbedding(num_buckets, num_heads, bidirectional=False)

    def forward(self, x, mask=None, encoder_states=None, encoder_mask=None, pos_bias=None):
        e = pos_bias if self.shared_pos else self.pos_embedding(x.size(1), x.size(1))
        x = fp16_clamp(x + self.self_attn(self.norm1(x), mask=mask, pos_bias=e))
        x = fp16_clamp(x + self.cross_attn(self.norm2(x), context=encoder_states, mask=encoder_mask))
        x = fp16_clamp(x + self.ffn(self.norm3(x)))
        return x


class T5RelativeEmbedding(nn.Module):

    def __init__(self, num_buckets, num_heads, bidirectional, max_dist=128):
        super(T5RelativeEmbedding, self).__init__()
        self.num_buckets = num_buckets
        self.num_heads = num_heads
        self.bidirectional = bidirectional
        self.max_dist = max_dist

        # layers
        self.embedding = nn.Embedding(num_buckets, num_heads)

    def forward(self, lq, lk):
        device = self.embedding.weight.device
        # rel_pos = torch.arange(lk).unsqueeze(0).to(device) - \
        #     torch.arange(lq).unsqueeze(1).to(device)
        rel_pos = torch.arange(lk, device=device).unsqueeze(0) - torch.arange(lq, device=device).unsqueeze(1)
        rel_pos = self._relative_position_bucket(rel_pos)
        rel_pos_embeds = self.embedding(rel_pos)
        rel_pos_embeds = rel_pos_embeds.permute(2, 0, 1).unsqueeze(0)  # [1, N, Lq, Lk]
        return rel_pos_embeds.contiguous()

    def _relative_position_bucket(self, rel_pos):
        # preprocess
        if self.bidirectional:
            num_buckets = self.num_buckets // 2
            rel_buckets = (rel_pos > 0).long() * num_buckets
            rel_pos = torch.abs(rel_pos)
        else:
            num_buckets = self.num_buckets
            rel_buckets = 0
            rel_pos = -torch.min(rel_pos, torch.zeros_like(rel_pos))

        # embeddings for small and large positions
        max_exact = num_buckets // 2
        rel_pos_large = (
            max_exact
            + (torch.log(rel_pos.float() / max_exact) / math.log(self.max_dist / max_exact) * (num_buckets - max_exact)).long()
        )
        rel_pos_large = torch.min(rel_pos_large, torch.full_like(rel_pos_large, num_buckets - 1))
        rel_buckets += torch.where(rel_pos < max_exact, rel_pos, rel_pos_large)
        return rel_buckets
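

# Bucketing sketch: with bidirectional=True, half of the buckets encode the sign
# of the offset; offsets smaller than num_buckets // 4 keep exact buckets and
# larger ones are binned logarithmically up to max_dist. The toy numbers below
# are illustrative assumptions.
def _relative_bucket_sketch():
    emb = T5RelativeEmbedding(num_buckets=32, num_heads=2, bidirectional=True)
    rel_pos = torch.arange(-4, 5).unsqueeze(0)  # small offsets around zero
    return emb._relative_position_bucket(rel_pos)  # small offsets stay exact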


class T5Encoder(nn.Module):

    def __init__(self, vocab, dim, dim_attn, dim_ffn, num_heads, num_layers, num_buckets, shared_pos=True, dropout=0.1):
        super(T5Encoder, self).__init__()
        self.dim = dim
        self.dim_attn = dim_attn
        self.dim_ffn = dim_ffn
        self.num_heads = num_heads
        self.num_layers = num_layers
        self.num_buckets = num_buckets
        self.shared_pos = shared_pos

        # layers
        self.token_embedding = vocab if isinstance(vocab, nn.Embedding) else nn.Embedding(vocab, dim)
        self.pos_embedding = T5RelativeEmbedding(num_buckets, num_heads, bidirectional=True) if shared_pos else None
        self.dropout = nn.Dropout(dropout)
        self.blocks = nn.ModuleList(
            [T5SelfAttention(dim, dim_attn, dim_ffn, num_heads, num_buckets, shared_pos, dropout) for _ in range(num_layers)]
        )
        self.norm = T5LayerNorm(dim)

        # initialize weights
        self.apply(init_weights)

    def prepare_fp8(self, target_dtype=torch.bfloat16):
        def forward_hook(module):
            def forward(hidden_states):
                hidden_gelu = module.act(module.wi_0(hidden_states))
                hidden_linear = module.wi_1(hidden_states)
                hidden_states = hidden_gelu * hidden_linear
                hidden_states = module.dropout(hidden_states)

                hidden_states = module.wo(hidden_states)
                return hidden_states

            return forward

        for module in self.modules():
            if module.__class__.__name__ in ["T5LayerNorm", "Embedding"]:
                # print("set", module.__class__.__name__, "to", target_dtype)
                module.to(target_dtype)
            if module.__class__.__name__ in ["T5DenseGatedActDense"]:
                # print("set", module.__class__.__name__, "hooks")
                module.forward = forward_hook(module)

    def forward(self, ids, mask=None):
        x = self.token_embedding(ids)
        x = self.dropout(x)
        e = self.pos_embedding(x.size(1), x.size(1)) if self.shared_pos else None
        for block in self.blocks:
            x = block(x, mask, pos_bias=e)
        x = self.norm(x)
        x = self.dropout(x)
        return x


class T5Decoder(nn.Module):

    def __init__(self, vocab, dim, dim_attn, dim_ffn, num_heads, num_layers, num_buckets, shared_pos=True, dropout=0.1):
        super(T5Decoder, self).__init__()
        self.dim = dim
        self.dim_attn = dim_attn
        self.dim_ffn = dim_ffn
        self.num_heads = num_heads
        self.num_layers = num_layers
        self.num_buckets = num_buckets
        self.shared_pos = shared_pos

        # layers
        self.token_embedding = vocab if isinstance(vocab, nn.Embedding) else nn.Embedding(vocab, dim)
        self.pos_embedding = T5RelativeEmbedding(num_buckets, num_heads, bidirectional=False) if shared_pos else None
        self.dropout = nn.Dropout(dropout)
        self.blocks = nn.ModuleList(
            [T5CrossAttention(dim, dim_attn, dim_ffn, num_heads, num_buckets, shared_pos, dropout) for _ in range(num_layers)]
        )
        self.norm = T5LayerNorm(dim)

        # initialize weights
        self.apply(init_weights)

    def forward(self, ids, mask=None, encoder_states=None, encoder_mask=None):
        b, s = ids.size()

        # causal mask
        if mask is None:
            mask = torch.tril(torch.ones(1, s, s).to(ids.device))
        elif mask.ndim == 2:
            mask = torch.tril(mask.unsqueeze(1).expand(-1, s, -1))

        # layers
        x = self.token_embedding(ids)
        x = self.dropout(x)
        e = self.pos_embedding(x.size(1), x.size(1)) if self.shared_pos else None
        for block in self.blocks:
            x = block(x, mask, encoder_states, encoder_mask, pos_bias=e)
        x = self.norm(x)
        x = self.dropout(x)
        return x


class T5Model(nn.Module):

    def __init__(
        self,
        vocab_size,
        dim,
        dim_attn,
        dim_ffn,
        num_heads,
        encoder_layers,
        decoder_layers,
        num_buckets,
        shared_pos=True,
        dropout=0.1,
    ):
        super(T5Model, self).__init__()
        self.vocab_size = vocab_size
        self.dim = dim
        self.dim_attn = dim_attn
        self.dim_ffn = dim_ffn
        self.num_heads = num_heads
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_buckets = num_buckets

        # layers
        self.token_embedding = nn.Embedding(vocab_size, dim)
        self.encoder = T5Encoder(
            self.token_embedding, dim, dim_attn, dim_ffn, num_heads, encoder_layers, num_buckets, shared_pos, dropout
        )
        self.decoder = T5Decoder(
            self.token_embedding, dim, dim_attn, dim_ffn, num_heads, decoder_layers, num_buckets, shared_pos, dropout
        )
        self.head = nn.Linear(dim, vocab_size, bias=False)

        # initialize weights
        self.apply(init_weights)

    def forward(self, encoder_ids, encoder_mask, decoder_ids, decoder_mask):
        x = self.encoder(encoder_ids, encoder_mask)
        x = self.decoder(decoder_ids, decoder_mask, x, encoder_mask)
        x = self.head(x)
        return x


def _t5(
    name,
    encoder_only=False,
    decoder_only=False,
    return_tokenizer=False,
    tokenizer_kwargs={},
    **kwargs,
):
    # dtype=torch.float32,
    # device="cpu",
    # sanity check
    assert not (encoder_only and decoder_only)

    # params
    if encoder_only:
        model_cls = T5Encoder
        kwargs["vocab"] = kwargs.pop("vocab_size")
        kwargs["num_layers"] = kwargs.pop("encoder_layers")
        _ = kwargs.pop("decoder_layers")
    elif decoder_only:
        model_cls = T5Decoder
        kwargs["vocab"] = kwargs.pop("vocab_size")
        kwargs["num_layers"] = kwargs.pop("decoder_layers")
        _ = kwargs.pop("encoder_layers")
    else:
        model_cls = T5Model

    # # init model
    # with torch.device(device):
    model = model_cls(**kwargs)

    # # set device
    # model = model.to(dtype=dtype, device=device)

    # init tokenizer
    if return_tokenizer:
        from musubi_tuner.wan.modules.tokenizers import HuggingfaceTokenizer

        tokenizer = HuggingfaceTokenizer(f"google/{name}", **tokenizer_kwargs)
        return model, tokenizer
    else:
        return model


def umt5_xxl(**kwargs):
    cfg = dict(
        vocab_size=256384,
        dim=4096,
        dim_attn=4096,
        dim_ffn=10240,
        num_heads=64,
        encoder_layers=24,
        decoder_layers=24,
        num_buckets=32,
        shared_pos=False,
        dropout=0.1,
    )
    cfg.update(**kwargs)
    return _t5("umt5-xxl", **cfg)


class T5EncoderModel:

    def __init__(
        self,
        text_len,
        dtype=torch.bfloat16,
        device=torch.cuda.current_device(),
        checkpoint_path=None,
        tokenizer_path=None,
        shard_fn=None,
        weight_path=None,
        fp8=False,
    ):
        self.text_len = text_len
        self.dtype = dtype if not fp8 else torch.float8_e4m3fn
        self.device = device
        self.checkpoint_path = checkpoint_path
        self.tokenizer_path = tokenizer_path

        # init model
        with init_empty_weights():
            model = umt5_xxl(encoder_only=True, return_tokenizer=False)

        model = model.eval().requires_grad_(False)
        if checkpoint_path is not None:
            logger.info(f"loading {checkpoint_path}")
            model.load_state_dict(torch.load(checkpoint_path, map_location="cpu"))
        else:
            logger.info(f"loading weights from {weight_path}")
            if os.path.splitext(weight_path)[1] == ".safetensors":
                sd = load_file(weight_path)
            else:
                sd = torch.load(weight_path, map_location="cpu", weights_only=True)
            # remove prefix "encoder." from the state dict
            sd = {k.replace("encoder.", ""): v for k, v in sd.items()}
            model.load_state_dict(sd, strict=True, assign=True)

        logger.info(f"moving model to {device} and casting to {self.dtype}")
        model = model.to(device, dtype=self.dtype)

        if fp8:
            logger.info("preparing model for fp8")
            model.prepare_fp8(dtype)

        self.model = model
        # if shard_fn is not None:
        #     self.model = shard_fn(self.model, sync_module_states=False)
        # else:
        #     self.model.to(self.device)
        # init tokenizer
        if tokenizer_path is None:
            tokenizer_path = "Wan-AI/Wan2.1-T2V-14B"
            subfolder = "google/umt5-xxl"
        else:
            subfolder = None
        self.tokenizer = HuggingfaceTokenizer(name=tokenizer_path, seq_len=text_len, clean="whitespace", subfolder=subfolder)

    def __call__(self, texts, device):
        ids, mask = self.tokenizer(texts, return_mask=True, add_special_tokens=True)
        ids = ids.to(device)
        mask = mask.to(device)
        seq_lens = mask.gt(0).sum(dim=1).long()
        context = self.model(ids, mask)
        return [u[:v] for u, v in zip(context, seq_lens)]
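

# Usage sketch for T5EncoderModel: load the UMT5-XXL encoder weights and encode
# a batch of prompts into variable-length embeddings. The weight path is an
# assumed local file, not a path shipped with this repository.
def _t5_encoder_usage_sketch():
    text_encoder = T5EncoderModel(
        text_len=512, dtype=torch.bfloat16, device="cuda",
        weight_path="models_t5_umt5-xxl-enc-bf16.pth",  # assumed local file
    )
    embeds = text_encoder(["a cat walking on the grass"], device="cuda")
    return embeds  # list of [seq_len_i, 4096] tensors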
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/tokenizers.py
ADDED @@ -0,0 +1,82 @@
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
import html
import string

import ftfy
import regex as re
from transformers import AutoTokenizer

__all__ = ['HuggingfaceTokenizer']


def basic_clean(text):
    text = ftfy.fix_text(text)
    text = html.unescape(html.unescape(text))
    return text.strip()


def whitespace_clean(text):
    text = re.sub(r'\s+', ' ', text)
    text = text.strip()
    return text


def canonicalize(text, keep_punctuation_exact_string=None):
    text = text.replace('_', ' ')
    if keep_punctuation_exact_string:
        text = keep_punctuation_exact_string.join(
            part.translate(str.maketrans('', '', string.punctuation))
            for part in text.split(keep_punctuation_exact_string))
    else:
        text = text.translate(str.maketrans('', '', string.punctuation))
    text = text.lower()
    text = re.sub(r'\s+', ' ', text)
    return text.strip()
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class HuggingfaceTokenizer:
|
| 38 |
+
|
| 39 |
+
def __init__(self, name, seq_len=None, clean=None, **kwargs):
|
| 40 |
+
assert clean in (None, 'whitespace', 'lower', 'canonicalize')
|
| 41 |
+
self.name = name
|
| 42 |
+
self.seq_len = seq_len
|
| 43 |
+
self.clean = clean
|
| 44 |
+
|
| 45 |
+
# init tokenizer
|
| 46 |
+
self.tokenizer = AutoTokenizer.from_pretrained(name, **kwargs)
|
| 47 |
+
self.vocab_size = self.tokenizer.vocab_size
|
| 48 |
+
|
| 49 |
+
def __call__(self, sequence, **kwargs):
|
| 50 |
+
return_mask = kwargs.pop('return_mask', False)
|
| 51 |
+
|
| 52 |
+
# arguments
|
| 53 |
+
_kwargs = {'return_tensors': 'pt'}
|
| 54 |
+
if self.seq_len is not None:
|
| 55 |
+
_kwargs.update({
|
| 56 |
+
'padding': 'max_length',
|
| 57 |
+
'truncation': True,
|
| 58 |
+
'max_length': self.seq_len
|
| 59 |
+
})
|
| 60 |
+
_kwargs.update(**kwargs)
|
| 61 |
+
|
| 62 |
+
# tokenization
|
| 63 |
+
if isinstance(sequence, str):
|
| 64 |
+
sequence = [sequence]
|
| 65 |
+
if self.clean:
|
| 66 |
+
sequence = [self._clean(u) for u in sequence]
|
| 67 |
+
ids = self.tokenizer(sequence, **_kwargs)
|
| 68 |
+
|
| 69 |
+
# output
|
| 70 |
+
if return_mask:
|
| 71 |
+
return ids.input_ids, ids.attention_mask
|
| 72 |
+
else:
|
| 73 |
+
return ids.input_ids
|
| 74 |
+
|
| 75 |
+
def _clean(self, text):
|
| 76 |
+
if self.clean == 'whitespace':
|
| 77 |
+
text = whitespace_clean(basic_clean(text))
|
| 78 |
+
elif self.clean == 'lower':
|
| 79 |
+
text = whitespace_clean(basic_clean(text)).lower()
|
| 80 |
+
elif self.clean == 'canonicalize':
|
| 81 |
+
text = canonicalize(basic_clean(text))
|
| 82 |
+
return text
|
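A quick illustrative call (the model name is an example, not something this file pins down): with seq_len set, the wrapper pads and truncates to max_length, and return_mask=True also returns the attention mask; extra kwargs such as add_special_tokens are forwarded to the underlying HuggingFace tokenizer:

    tokenizer = HuggingfaceTokenizer(name="google/umt5-xxl", seq_len=512, clean="whitespace")
    ids, mask = tokenizer(["Hello   world!"], return_mask=True, add_special_tokens=True)
    print(ids.shape, mask.shape)  # both [1, 512] after max_length padding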
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/vae.py
ADDED
@@ -0,0 +1,760 @@
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
import logging
import os
from typing import Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange

from safetensors.torch import load_file

__all__ = [
    "WanVAE",
]

CACHE_T = 2


class CausalConv3d(nn.Conv3d):
    """
    Causal 3d convolution.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._padding = (self.padding[2], self.padding[2], self.padding[1], self.padding[1], 2 * self.padding[0], 0)
        self.padding = (0, 0, 0)

    def forward(self, x, cache_x=None):
        padding = list(self._padding)
        if cache_x is not None and self._padding[4] > 0:
            cache_x = cache_x.to(x.device)
            x = torch.cat([cache_x, x], dim=2)
            padding[4] -= cache_x.shape[2]
        x = F.pad(x, padding)

        return super().forward(x)


class RMS_norm(nn.Module):

    def __init__(self, dim, channel_first=True, images=True, bias=False):
        super().__init__()
        broadcastable_dims = (1, 1, 1) if not images else (1, 1)
        shape = (dim, *broadcastable_dims) if channel_first else (dim,)

        self.channel_first = channel_first
        self.scale = dim**0.5
        self.gamma = nn.Parameter(torch.ones(shape))
        self.bias = nn.Parameter(torch.zeros(shape)) if bias else 0.0

    def forward(self, x):
        return F.normalize(x, dim=(1 if self.channel_first else -1)) * self.scale * self.gamma + self.bias


class Upsample(nn.Upsample):

    def forward(self, x):
        """
        Fix bfloat16 support for nearest neighbor interpolation.
        """
        return super().forward(x.float()).type_as(x)


class Resample(nn.Module):

    def __init__(self, dim, mode):
        assert mode in ("none", "upsample2d", "upsample3d", "downsample2d", "downsample3d")
        super().__init__()
        self.dim = dim
        self.mode = mode

        # layers
        if mode == "upsample2d":
            self.resample = nn.Sequential(
                Upsample(scale_factor=(2.0, 2.0), mode="nearest-exact"), nn.Conv2d(dim, dim // 2, 3, padding=1)
            )
        elif mode == "upsample3d":
            self.resample = nn.Sequential(
                Upsample(scale_factor=(2.0, 2.0), mode="nearest-exact"), nn.Conv2d(dim, dim // 2, 3, padding=1)
            )
            self.time_conv = CausalConv3d(dim, dim * 2, (3, 1, 1), padding=(1, 0, 0))

        elif mode == "downsample2d":
            self.resample = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)), nn.Conv2d(dim, dim, 3, stride=(2, 2)))
        elif mode == "downsample3d":
            self.resample = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)), nn.Conv2d(dim, dim, 3, stride=(2, 2)))
            self.time_conv = CausalConv3d(dim, dim, (3, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0))

        else:
            self.resample = nn.Identity()

        self.cache_device = None

    def set_cache_device(self, device):
        self.cache_device = device

    def forward(self, x, feat_cache=None, feat_idx=[0]):
        cache_device = self.cache_device if self.cache_device is not None else x.device

        b, c, t, h, w = x.size()
        if self.mode == "upsample3d":
            if feat_cache is not None:
                idx = feat_idx[0]
                if feat_cache[idx] is None:
                    feat_cache[idx] = "Rep"
                    feat_idx[0] += 1
                else:

                    cache_x = x[:, :, -CACHE_T:, :, :].clone().to(cache_device)
                    if cache_x.shape[2] < 2 and feat_cache[idx] is not None and feat_cache[idx] != "Rep":
                        # cache the last frame of the last two chunks
                        cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
                    if cache_x.shape[2] < 2 and feat_cache[idx] is not None and feat_cache[idx] == "Rep":
                        cache_x = torch.cat([torch.zeros_like(cache_x).to(cache_x.device), cache_x], dim=2)
                    if feat_cache[idx] == "Rep":
                        x = self.time_conv(x)
                    else:
                        x = self.time_conv(x, feat_cache[idx].to(x.device) if feat_cache[idx] is not None else None)
                    feat_cache[idx] = cache_x
                    feat_idx[0] += 1

                    x = x.reshape(b, 2, c, t, h, w)
                    x = torch.stack((x[:, 0, :, :, :, :], x[:, 1, :, :, :, :]), 3)
                    x = x.reshape(b, c, t * 2, h, w)
        t = x.shape[2]
        x = rearrange(x, "b c t h w -> (b t) c h w")
        x = self.resample(x)
        x = rearrange(x, "(b t) c h w -> b c t h w", t=t)

        if self.mode == "downsample3d":
            if feat_cache is not None:
                idx = feat_idx[0]
                if feat_cache[idx] is None:
                    feat_cache[idx] = x.clone().to(cache_device)
                    feat_idx[0] += 1
                else:

                    cache_x = x[:, :, -1:, :, :].clone().to(cache_device)
                    # if cache_x.shape[2] < 2 and feat_cache[idx] is not None and feat_cache[idx]!='Rep':
                    #     # cache the last frame of the last two chunks
                    #     cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)

                    x = self.time_conv(torch.cat([feat_cache[idx][:, :, -1:, :, :].to(x.device), x], 2))
                    feat_cache[idx] = cache_x
                    feat_idx[0] += 1
        return x

    def init_weight(self, conv):
        conv_weight = conv.weight
        nn.init.zeros_(conv_weight)
        c1, c2, t, h, w = conv_weight.size()
        one_matrix = torch.eye(c1, c2)
        init_matrix = one_matrix
        nn.init.zeros_(conv_weight)
        # conv_weight.data[:,:,-1,1,1] = init_matrix * 0.5
        conv_weight.data[:, :, 1, 0, 0] = init_matrix  # * 0.5
        conv.weight.data.copy_(conv_weight)
        nn.init.zeros_(conv.bias.data)

    def init_weight2(self, conv):
        conv_weight = conv.weight.data
        nn.init.zeros_(conv_weight)
        c1, c2, t, h, w = conv_weight.size()
        init_matrix = torch.eye(c1 // 2, c2)
        # init_matrix = repeat(init_matrix, 'o ... -> (o 2) ...').permute(1,0,2).contiguous().reshape(c1,c2)
        conv_weight[: c1 // 2, :, -1, 0, 0] = init_matrix
        conv_weight[c1 // 2 :, :, -1, 0, 0] = init_matrix
        conv.weight.data.copy_(conv_weight)
        nn.init.zeros_(conv.bias.data)


class ResidualBlock(nn.Module):

    def __init__(self, in_dim, out_dim, dropout=0.0):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim

        # layers
        self.residual = nn.Sequential(
            RMS_norm(in_dim, images=False),
            nn.SiLU(),
            CausalConv3d(in_dim, out_dim, 3, padding=1),
            RMS_norm(out_dim, images=False),
            nn.SiLU(),
            nn.Dropout(dropout),
            CausalConv3d(out_dim, out_dim, 3, padding=1),
        )
        self.shortcut = CausalConv3d(in_dim, out_dim, 1) if in_dim != out_dim else nn.Identity()

        self.cache_device = None

    def set_cache_device(self, device):
        self.cache_device = device

    def forward(self, x, feat_cache=None, feat_idx=[0]):
        cache_device = self.cache_device if self.cache_device is not None else x.device

        h = self.shortcut(x)
        for layer in self.residual:
            if isinstance(layer, CausalConv3d) and feat_cache is not None:
                idx = feat_idx[0]
                cache_x = x[:, :, -CACHE_T:, :, :].clone().to(cache_device)
                if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                    # cache the last frame of the last two chunks
                    cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
                x = layer(x, feat_cache[idx].to(x.device) if feat_cache[idx] is not None else None)
                feat_cache[idx] = cache_x
                feat_idx[0] += 1
            else:
                x = layer(x)
        return x + h


class AttentionBlock(nn.Module):
    """
    Causal self-attention with a single head.
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

        # layers
        self.norm = RMS_norm(dim)
        self.to_qkv = nn.Conv2d(dim, dim * 3, 1)
        self.proj = nn.Conv2d(dim, dim, 1)

        # zero out the last layer params
        nn.init.zeros_(self.proj.weight)

    def forward(self, x):
        identity = x
        b, c, t, h, w = x.size()
        x = rearrange(x, "b c t h w -> (b t) c h w")
        x = self.norm(x)
        # compute query, key, value
        q, k, v = self.to_qkv(x).reshape(b * t, 1, c * 3, -1).permute(0, 1, 3, 2).contiguous().chunk(3, dim=-1)

        # apply attention
        x = F.scaled_dot_product_attention(
            q,
            k,
            v,
        )
        x = x.squeeze(1).permute(0, 2, 1).reshape(b * t, c, h, w)

        # output
        x = self.proj(x)
        x = rearrange(x, "(b t) c h w -> b c t h w", t=t)
        return x + identity


class Encoder3d(nn.Module):

    def __init__(
        self,
        dim=128,
        z_dim=4,
        dim_mult=[1, 2, 4, 4],
        num_res_blocks=2,
        attn_scales=[],
        temperal_downsample=[True, True, False],
        dropout=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.z_dim = z_dim
        self.dim_mult = dim_mult
        self.num_res_blocks = num_res_blocks
        self.attn_scales = attn_scales
        self.temperal_downsample = temperal_downsample

        # dimensions
        dims = [dim * u for u in [1] + dim_mult]
        scale = 1.0

        # init block
        self.conv1 = CausalConv3d(3, dims[0], 3, padding=1)

        # downsample blocks
        downsamples = []
        for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
            # residual (+attention) blocks
            for _ in range(num_res_blocks):
                downsamples.append(ResidualBlock(in_dim, out_dim, dropout))
                if scale in attn_scales:
                    downsamples.append(AttentionBlock(out_dim))
                in_dim = out_dim

            # downsample block
            if i != len(dim_mult) - 1:
                mode = "downsample3d" if temperal_downsample[i] else "downsample2d"
                downsamples.append(Resample(out_dim, mode=mode))
                scale /= 2.0
        self.downsamples = nn.Sequential(*downsamples)

        # middle blocks
        self.middle = nn.Sequential(
            ResidualBlock(out_dim, out_dim, dropout), AttentionBlock(out_dim), ResidualBlock(out_dim, out_dim, dropout)
        )

        # output blocks
        self.head = nn.Sequential(RMS_norm(out_dim, images=False), nn.SiLU(), CausalConv3d(out_dim, z_dim, 3, padding=1))

        self.cache_device = None

    def set_cache_device(self, device):
        self.cache_device = device

        # set cache device for all layers
        for layer in self.downsamples + self.middle + self.head:
            if isinstance(layer, Resample) or isinstance(layer, ResidualBlock):
                layer.set_cache_device(device)

    def forward(self, x, feat_cache=None, feat_idx=[0]):
        cache_device = self.cache_device if self.cache_device is not None else x.device

        if feat_cache is not None:
            idx = feat_idx[0]
            cache_x = x[:, :, -CACHE_T:, :, :].clone().to(cache_device)
            if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                # cache the last frame of the last two chunks
                cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
            x = self.conv1(x, feat_cache[idx].to(x.device) if feat_cache[idx] is not None else None)
            feat_cache[idx] = cache_x
            feat_idx[0] += 1
        else:
            x = self.conv1(x)

        ## downsamples
        for layer in self.downsamples:
            if feat_cache is not None:
                x = layer(x, feat_cache, feat_idx)
            else:
                x = layer(x)

        ## middle
        for layer in self.middle:
            if isinstance(layer, ResidualBlock) and feat_cache is not None:
                x = layer(x, feat_cache, feat_idx)
            else:
                x = layer(x)

        ## head
        for layer in self.head:
            if isinstance(layer, CausalConv3d) and feat_cache is not None:
                idx = feat_idx[0]
                cache_x = x[:, :, -CACHE_T:, :, :].clone().to(cache_device)
                if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                    # cache the last frame of the last two chunks
                    cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
                x = layer(x, feat_cache[idx].to(x.device) if feat_cache[idx] is not None else None)
                feat_cache[idx] = cache_x
                feat_idx[0] += 1
            else:
                x = layer(x)
        return x


class Decoder3d(nn.Module):

    def __init__(
        self,
        dim=128,
        z_dim=4,
        dim_mult=[1, 2, 4, 4],
        num_res_blocks=2,
        attn_scales=[],
        temperal_upsample=[False, True, True],
        dropout=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.z_dim = z_dim
        self.dim_mult = dim_mult
        self.num_res_blocks = num_res_blocks
        self.attn_scales = attn_scales
        self.temperal_upsample = temperal_upsample

        # dimensions
        dims = [dim * u for u in [dim_mult[-1]] + dim_mult[::-1]]
        scale = 1.0 / 2 ** (len(dim_mult) - 2)

        # init block
        self.conv1 = CausalConv3d(z_dim, dims[0], 3, padding=1)

        # middle blocks
        self.middle = nn.Sequential(
            ResidualBlock(dims[0], dims[0], dropout), AttentionBlock(dims[0]), ResidualBlock(dims[0], dims[0], dropout)
        )

        # upsample blocks
        upsamples = []
        for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
            # residual (+attention) blocks
            if i == 1 or i == 2 or i == 3:
                in_dim = in_dim // 2
            for _ in range(num_res_blocks + 1):
                upsamples.append(ResidualBlock(in_dim, out_dim, dropout))
                if scale in attn_scales:
                    upsamples.append(AttentionBlock(out_dim))
                in_dim = out_dim

            # upsample block
            if i != len(dim_mult) - 1:
                mode = "upsample3d" if temperal_upsample[i] else "upsample2d"
                upsamples.append(Resample(out_dim, mode=mode))
                scale *= 2.0
        self.upsamples = nn.Sequential(*upsamples)

        # output blocks
        self.head = nn.Sequential(RMS_norm(out_dim, images=False), nn.SiLU(), CausalConv3d(out_dim, 3, 3, padding=1))

        self.cache_device = None

    def set_cache_device(self, device):
        self.cache_device = device

        # set cache device for all layers
        for layer in self.middle + self.upsamples + self.head:
            if isinstance(layer, Resample) or isinstance(layer, ResidualBlock):
                layer.set_cache_device(device)

    def forward(self, x, feat_cache=None, feat_idx=[0]):
        cache_device = self.cache_device if self.cache_device is not None else x.device

        ## conv1
        if feat_cache is not None:
            idx = feat_idx[0]
            cache_x = x[:, :, -CACHE_T:, :, :].clone().to(cache_device)
            if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                # cache the last frame of the last two chunks
                cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
            x = self.conv1(x, feat_cache[idx].to(x.device) if feat_cache[idx] is not None else None)
            feat_cache[idx] = cache_x
            feat_idx[0] += 1
        else:
            x = self.conv1(x)

        ## middle
        for layer in self.middle:
            if isinstance(layer, ResidualBlock) and feat_cache is not None:
                x = layer(x, feat_cache, feat_idx)
            else:
                x = layer(x)

        ## upsamples
        for layer in self.upsamples:
            if feat_cache is not None:
                x = layer(x, feat_cache, feat_idx)
            else:
                x = layer(x)

        ## head
        for layer in self.head:
            if isinstance(layer, CausalConv3d) and feat_cache is not None:
                idx = feat_idx[0]
                cache_x = x[:, :, -CACHE_T:, :, :].clone().to(cache_device)
                if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                    # cache the last frame of the last two chunks
                    cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
                x = layer(x, feat_cache[idx].to(x.device) if feat_cache[idx] is not None else None)
                feat_cache[idx] = cache_x
                feat_idx[0] += 1
            else:
                x = layer(x)
        return x


def count_conv3d(model):
    count = 0
    for m in model.modules():
        if isinstance(m, CausalConv3d):
            count += 1
    return count


class WanVAE_(nn.Module):

    def __init__(
        self,
        dim=128,
        z_dim=4,
        dim_mult=[1, 2, 4, 4],
        num_res_blocks=2,
        attn_scales=[],
        temperal_downsample=[True, True, False],
        dropout=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.z_dim = z_dim
        self.dim_mult = dim_mult
        self.num_res_blocks = num_res_blocks
        self.attn_scales = attn_scales
        self.temperal_downsample = temperal_downsample
        self.temperal_upsample = temperal_downsample[::-1]

        # modules
        self.encoder = Encoder3d(dim, z_dim * 2, dim_mult, num_res_blocks, attn_scales, self.temperal_downsample, dropout)
        self.conv1 = CausalConv3d(z_dim * 2, z_dim * 2, 1)
        self.conv2 = CausalConv3d(z_dim, z_dim, 1)
        self.decoder = Decoder3d(dim, z_dim, dim_mult, num_res_blocks, attn_scales, self.temperal_upsample, dropout)

        self.cache_device = None

    @property
    def dtype(self):
        return self.conv1.weight.dtype

    @property
    def device(self):
        return self.conv1.weight.device

    def set_cache_device(self, device):
        # set cache device
        self.cache_device = device
        self.encoder.set_cache_device(device)
        self.decoder.set_cache_device(device)

    def forward(self, x):
        mu, log_var = self.encode(x)
        z = self.reparameterize(mu, log_var)
        x_recon = self.decode(z)
        return x_recon, mu, log_var

    def encode(self, x, scale):
        self.clear_cache()
        ## cache
        t = x.shape[2]
        iter_ = 1 + (t - 1) // 4
        # split the input x along the time axis into chunks of 1, 4, 4, 4, ... frames

        # if self.cache_device is None:
        for i in range(iter_):
            self._enc_conv_idx = [0]
            if i == 0:
                out = self.encoder(x[:, :, :1, :, :], feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx)
            else:
                out_ = self.encoder(
                    x[:, :, 1 + 4 * (i - 1) : 1 + 4 * i, :, :], feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx
                )
                out = torch.cat([out, out_], 2)
        # else:
        #     # VRAM optimization
        #     device = x.device
        #     clean_memory_on_device(device)
        #     outs = []
        #     for i in range(iter_):
        #         self._enc_conv_idx = [0]
        #         if i == 0:
        #             out = self.encoder(x[:, :, :1, :, :], feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx)
        #         else:
        #             out = self.encoder(
        #                 x[:, :, 1 + 4 * (i - 1) : 1 + 4 * i, :, :], feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx
        #             )
        #         outs.append(out.to(self.cache_device))
        #     out = torch.cat(outs, 2).to(device)
        mu, log_var = self.conv1(out).chunk(2, dim=1)
        if isinstance(scale[0], torch.Tensor):
            mu = (mu - scale[0].view(1, self.z_dim, 1, 1, 1)) * scale[1].view(1, self.z_dim, 1, 1, 1)
        else:
            mu = (mu - scale[0]) * scale[1]
        self.clear_cache()
        return mu

    def decode(self, z, scale):
        self.clear_cache()
        # z: [b,c,t,h,w]
        if isinstance(scale[0], torch.Tensor):
            z = z / scale[1].view(1, self.z_dim, 1, 1, 1) + scale[0].view(1, self.z_dim, 1, 1, 1)
        else:
            z = z / scale[1] + scale[0]
        iter_ = z.shape[2]
        x = self.conv2(z)

        # if self.cache_device is None:
        for i in range(iter_):
            self._conv_idx = [0]
            if i == 0:
                out = self.decoder(x[:, :, i : i + 1, :, :], feat_cache=self._feat_map, feat_idx=self._conv_idx)
            else:
                out_ = self.decoder(x[:, :, i : i + 1, :, :], feat_cache=self._feat_map, feat_idx=self._conv_idx)
                out = torch.cat([out, out_], 2)
        # else:
        #     # VRAM optimization
        #     device = z.device
        #     x = x.to("cpu")
        #     clean_memory_on_device(device)
        #     outs = []
        #     for i in range(iter_):
        #         self._conv_idx = [0]
        #         out = self.decoder(x[:, :, i : i + 1, :, :].to(device), feat_cache=self._feat_map, feat_idx=self._conv_idx).to(
        #             self.cache_device
        #         )
        #         outs.append(out)
        #     out = torch.cat(outs, 2)  # on cache_device
        self.clear_cache()
        return out

    def reparameterize(self, mu, log_var):
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return eps * std + mu

    def sample(self, imgs, deterministic=False):
        mu, log_var = self.encode(imgs)
        if deterministic:
            return mu
        std = torch.exp(0.5 * log_var.clamp(-30.0, 20.0))
        return mu + std * torch.randn_like(std)

    def clear_cache(self):
        self._conv_num = count_conv3d(self.decoder)
        self._conv_idx = [0]
        self._feat_map = [None] * self._conv_num
        # cache encode
        self._enc_conv_num = count_conv3d(self.encoder)
        self._enc_conv_idx = [0]
        self._enc_feat_map = [None] * self._enc_conv_num


def _video_vae(pretrained_path=None, z_dim=None, device="cpu", **kwargs):
    """
    Autoencoder3d adapted from Stable Diffusion 1.x, 2.x and XL.
    """
    # params
    cfg = dict(
        dim=96,
        z_dim=z_dim,
        dim_mult=[1, 2, 4, 4],
        num_res_blocks=2,
        attn_scales=[],
        temperal_downsample=[False, True, True],
        dropout=0.0,
    )
    cfg.update(**kwargs)

    # init model
    with torch.device("meta"):
        model = WanVAE_(**cfg)

    # load checkpoint
    logging.info(f"loading {pretrained_path}")
    if os.path.splitext(pretrained_path)[-1] == ".safetensors":
        sd = load_file(pretrained_path)
        model.load_state_dict(sd, strict=False, assign=True)
    else:
        model.load_state_dict(torch.load(pretrained_path, map_location=device, weights_only=True), assign=True)

    return model


class WanVAE:

    def __init__(self, z_dim=16, vae_path="cache/vae_step_411000.pth", dtype=torch.float, device="cuda", cache_device=None):
        self.dtype = dtype
        self.device = device

        mean = [
            -0.7571,
            -0.7089,
            -0.9113,
            0.1075,
            -0.1745,
            0.9653,
            -0.1517,
            1.5508,
            0.4134,
            -0.0715,
            0.5517,
            -0.3632,
            -0.1922,
            -0.9497,
            0.2503,
            -0.2921,
        ]
        std = [
            2.8184,
            1.4541,
            2.3275,
            2.6558,
            1.2196,
            1.7708,
            2.6052,
            2.0743,
            3.2687,
            2.1526,
            2.8652,
            1.5579,
            1.6382,
            1.1253,
            2.8251,
            1.9160,
        ]
        self.mean = torch.tensor(mean, dtype=dtype, device=device)
        self.std = torch.tensor(std, dtype=dtype, device=device)
        self.scale = [self.mean, 1.0 / self.std]

        # init model
        self.model = (
            _video_vae(
                pretrained_path=vae_path,
                z_dim=z_dim,
            )
            .eval()
            .requires_grad_(False)
            .to(device, dtype=dtype)
        )
        if cache_device is not None:
            self.model.set_cache_device(torch.device(cache_device))

    def to_device(self, device):
        self.device = device
        self.model.to(device)
        self.mean = self.mean.to(device)
        self.std = self.std.to(device)
        self.scale = [t.to(device) for t in self.scale]

    def to_dtype(self, dtype):
        self.dtype = dtype
        self.model.to(dtype=dtype)
        self.mean = self.mean.to(dtype)
        self.std = self.std.to(dtype)
        self.scale = [t.to(dtype) for t in self.scale]

    def eval(self):
        self.model.eval()

    def train(self, mode: bool = True):
        self.model.train(mode)

    def requires_grad_(self, requires_grad: bool = True):
        self.model.requires_grad_(requires_grad)

    def to(self, device_or_dtype: Union[torch.device, torch.dtype, str], dtype: Optional[torch.dtype] = None):
        """
        Add nn.Module.to() support for device and dtype.
        """
        if isinstance(device_or_dtype, str) or isinstance(device_or_dtype, torch.device):
            self.to_device(device_or_dtype)
        else:
            self.to_dtype(device_or_dtype)

        if dtype is not None:
            self.to_dtype(dtype)

    def encode(self, videos):
        """
        videos: A list of videos each with shape [C, T, H, W].
        """
        # with amp.autocast(dtype=self.dtype):
        return [self.model.encode(u.unsqueeze(0), self.scale).float().squeeze(0) for u in videos]

    def decode(self, zs):
        # with amp.autocast(dtype=self.dtype):
        return [self.model.decode(u.unsqueeze(0), self.scale).float().clamp_(-1, 1).squeeze(0) for u in zs]
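A round-trip sketch of the list-in/list-out interface above (the vae_path value is a placeholder, not a path this diff ships). Frame counts follow the 1 + 4k temporal chunking used by encode (4x temporal compression after the first frame), and spatial downsampling is 8x:

    import torch

    # placeholder checkpoint path; requires a CUDA device as written
    vae = WanVAE(z_dim=16, vae_path="Wan2.1_VAE.pth", dtype=torch.float32, device="cuda")
    video = torch.randn(3, 17, 256, 256, device="cuda")  # [C, T, H, W] with T = 1 + 4k
    latents = vae.encode([video])   # -> list of [16, 5, 32, 32] latents
    recon = vae.decode(latents)     # -> list of [3, 17, 256, 256] videos clamped to [-1, 1]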
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/modules/xlm_roberta.py
ADDED
@@ -0,0 +1,170 @@
# Modified from transformers.models.xlm_roberta.modeling_xlm_roberta
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F

__all__ = ['XLMRoberta', 'xlm_roberta_large']


class SelfAttention(nn.Module):

    def __init__(self, dim, num_heads, dropout=0.1, eps=1e-5):
        assert dim % num_heads == 0
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.eps = eps

        # layers
        self.q = nn.Linear(dim, dim)
        self.k = nn.Linear(dim, dim)
        self.v = nn.Linear(dim, dim)
        self.o = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        """
        x: [B, L, C].
        """
        b, s, c, n, d = *x.size(), self.num_heads, self.head_dim

        # compute query, key, value
        q = self.q(x).reshape(b, s, n, d).permute(0, 2, 1, 3)
        k = self.k(x).reshape(b, s, n, d).permute(0, 2, 1, 3)
        v = self.v(x).reshape(b, s, n, d).permute(0, 2, 1, 3)

        # compute attention
        p = self.dropout.p if self.training else 0.0
        x = F.scaled_dot_product_attention(q, k, v, mask, p)
        x = x.permute(0, 2, 1, 3).reshape(b, s, c)

        # output
        x = self.o(x)
        x = self.dropout(x)
        return x


class AttentionBlock(nn.Module):

    def __init__(self, dim, num_heads, post_norm, dropout=0.1, eps=1e-5):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.post_norm = post_norm
        self.eps = eps

        # layers
        self.attn = SelfAttention(dim, num_heads, dropout, eps)
        self.norm1 = nn.LayerNorm(dim, eps=eps)
        self.ffn = nn.Sequential(
            nn.Linear(dim, dim * 4), nn.GELU(), nn.Linear(dim * 4, dim),
            nn.Dropout(dropout))
        self.norm2 = nn.LayerNorm(dim, eps=eps)

    def forward(self, x, mask):
        if self.post_norm:
            x = self.norm1(x + self.attn(x, mask))
            x = self.norm2(x + self.ffn(x))
        else:
            x = x + self.attn(self.norm1(x), mask)
            x = x + self.ffn(self.norm2(x))
        return x


class XLMRoberta(nn.Module):
    """
    XLMRobertaModel with no pooler and no LM head.
    """

    def __init__(self,
                 vocab_size=250002,
                 max_seq_len=514,
                 type_size=1,
                 pad_id=1,
                 dim=1024,
                 num_heads=16,
                 num_layers=24,
                 post_norm=True,
                 dropout=0.1,
                 eps=1e-5):
        super().__init__()
        self.vocab_size = vocab_size
        self.max_seq_len = max_seq_len
        self.type_size = type_size
        self.pad_id = pad_id
        self.dim = dim
        self.num_heads = num_heads
        self.num_layers = num_layers
        self.post_norm = post_norm
        self.eps = eps

        # embeddings
        self.token_embedding = nn.Embedding(vocab_size, dim, padding_idx=pad_id)
        self.type_embedding = nn.Embedding(type_size, dim)
        self.pos_embedding = nn.Embedding(max_seq_len, dim, padding_idx=pad_id)
        self.dropout = nn.Dropout(dropout)

        # blocks
        self.blocks = nn.ModuleList([
            AttentionBlock(dim, num_heads, post_norm, dropout, eps)
            for _ in range(num_layers)
        ])

        # norm layer
        self.norm = nn.LayerNorm(dim, eps=eps)

    def forward(self, ids):
        """
        ids: [B, L] of torch.LongTensor.
        """
        b, s = ids.shape
        mask = ids.ne(self.pad_id).long()

        # embeddings
        x = self.token_embedding(ids) + \
            self.type_embedding(torch.zeros_like(ids)) + \
            self.pos_embedding(self.pad_id + torch.cumsum(mask, dim=1) * mask)
        if self.post_norm:
            x = self.norm(x)
        x = self.dropout(x)

        # blocks
        mask = torch.where(
            mask.view(b, 1, 1, s).gt(0), 0.0,
            torch.finfo(x.dtype).min)
        for block in self.blocks:
            x = block(x, mask)

        # output
        if not self.post_norm:
            x = self.norm(x)
        return x


def xlm_roberta_large(pretrained=False,
                      return_tokenizer=False,
                      device='cpu',
                      **kwargs):
    """
    XLMRobertaLarge adapted from Huggingface.
    """
    # params
    cfg = dict(
        vocab_size=250002,
        max_seq_len=514,
        type_size=1,
        pad_id=1,
        dim=1024,
        num_heads=16,
        num_layers=24,
        post_norm=True,
        dropout=0.1,
        eps=1e-5)
    cfg.update(**kwargs)

    # init a model on device
    with torch.device(device):
        model = XLMRoberta(**cfg)
    return model
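The forward pass takes raw token ids and handles padding internally: positions equal to pad_id are masked out via an additive -inf attention bias, and position ids are derived from the cumulative sum of the non-pad mask. A small-config sketch (reduced depth is purely for illustration):

    import torch

    model = XLMRoberta(num_layers=2)  # small config just for this example
    ids = torch.tensor([[0, 152, 864, 2, 1, 1]])  # trailing 1s are padding (pad_id=1)
    features = model(ids)
    print(features.shape)  # [1, 6, 1024]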
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/utils/__init__.py
ADDED
@@ -0,0 +1,8 @@
from musubi_tuner.wan.utils.fm_solvers import (FlowDPMSolverMultistepScheduler, get_sampling_sigmas,
                                               retrieve_timesteps)
from musubi_tuner.wan.utils.fm_solvers_unipc import FlowUniPCMultistepScheduler

__all__ = [
    'get_sampling_sigmas', 'retrieve_timesteps',
    'FlowDPMSolverMultistepScheduler', 'FlowUniPCMultistepScheduler'
]
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/utils/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (518 Bytes).
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/utils/__pycache__/fm_solvers.cpython-312.pyc
ADDED
Binary file (42.7 kB).
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/utils/__pycache__/fm_solvers_unipc.cpython-312.pyc
ADDED
Binary file (35.2 kB).
exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan/utils/fm_solvers.py
ADDED
@@ -0,0 +1,857 @@
| 1 |
+
# Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py
|
| 2 |
+
# Convert dpm solver for flow matching
|
| 3 |
+
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
|
| 4 |
+
|
| 5 |
+
import inspect
|
| 6 |
+
import math
|
| 7 |
+
from typing import List, Optional, Tuple, Union
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
from diffusers.configuration_utils import ConfigMixin, register_to_config
|
| 12 |
+
from diffusers.schedulers.scheduling_utils import (KarrasDiffusionSchedulers,
|
| 13 |
+
SchedulerMixin,
|
| 14 |
+
SchedulerOutput)
|
| 15 |
+
from diffusers.utils import deprecate, is_scipy_available
|
| 16 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 17 |
+
|
| 18 |
+
if is_scipy_available():
|
| 19 |
+
pass
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def get_sampling_sigmas(sampling_steps, shift):
|
| 23 |
+
sigma = np.linspace(1, 0, sampling_steps + 1)[:sampling_steps]
|
| 24 |
+
sigma = (shift * sigma / (1 + (shift - 1) * sigma))
|
| 25 |
+
|
| 26 |
+
return sigma
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def retrieve_timesteps(
|
| 30 |
+
scheduler,
|
| 31 |
+
num_inference_steps=None,
|
| 32 |
+
device=None,
|
| 33 |
+
timesteps=None,
|
| 34 |
+
sigmas=None,
|
| 35 |
+
**kwargs,
|
| 36 |
+
):
|
| 37 |
+
if timesteps is not None and sigmas is not None:
|
| 38 |
+
raise ValueError(
|
| 39 |
+
"Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values"
|
| 40 |
+
)
|
| 41 |
+
if timesteps is not None:
|
| 42 |
+
accepts_timesteps = "timesteps" in set(
|
| 43 |
+
inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 44 |
+
if not accepts_timesteps:
|
| 45 |
+
raise ValueError(
|
| 46 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 47 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
| 48 |
+
)
|
| 49 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
| 50 |
+
timesteps = scheduler.timesteps
|
| 51 |
+
num_inference_steps = len(timesteps)
|
| 52 |
+
elif sigmas is not None:
|
| 53 |
+
accept_sigmas = "sigmas" in set(
|
| 54 |
+
inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 55 |
+
if not accept_sigmas:
|
| 56 |
+
raise ValueError(
|
| 57 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 58 |
+
f" sigmas schedules. Please check whether you are using the correct scheduler."
|
| 59 |
+
)
|
| 60 |
+
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
|
| 61 |
+
timesteps = scheduler.timesteps
|
| 62 |
+
num_inference_steps = len(timesteps)
|
| 63 |
+
else:
|
| 64 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
| 65 |
+
timesteps = scheduler.timesteps
|
| 66 |
+
return timesteps, num_inference_steps
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class FlowDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
|
| 70 |
+
"""
|
| 71 |
+
`FlowDPMSolverMultistepScheduler` is a fast dedicated high-order solver for diffusion ODEs.
|
| 72 |
+
This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
|
| 73 |
+
methods the library implements for all schedulers such as loading and saving.
|
| 74 |
+
Args:
|
| 75 |
+
num_train_timesteps (`int`, defaults to 1000):
|
| 76 |
+
The number of diffusion steps to train the model. This determines the resolution of the diffusion process.
|
| 77 |
+
solver_order (`int`, defaults to 2):
|
| 78 |
+
The DPMSolver order which can be `1`, `2`, or `3`. It is recommended to use `solver_order=2` for guided
|
| 79 |
+
sampling, and `solver_order=3` for unconditional sampling. This affects the number of model outputs stored
|
| 80 |
+
and used in multistep updates.
|
| 81 |
+
prediction_type (`str`, defaults to "flow_prediction"):
|
| 82 |
+
Prediction type of the scheduler function; must be `flow_prediction` for this scheduler, which predicts
|
| 83 |
+
the flow of the diffusion process.
|
| 84 |
+
shift (`float`, *optional*, defaults to 1.0):
|
| 85 |
+
A factor used to adjust the sigmas in the noise schedule. It modifies the step sizes during the sampling
|
| 86 |
+
process.
|
| 87 |
+
use_dynamic_shifting (`bool`, defaults to `False`):
|
| 88 |
+
Whether to apply dynamic shifting to the timesteps based on image resolution. If `True`, the shifting is
|
| 89 |
+
applied on the fly.
|
| 90 |
+
thresholding (`bool`, defaults to `False`):
|
| 91 |
+
Whether to use the "dynamic thresholding" method. This method adjusts the predicted sample to prevent
|
| 92 |
+
saturation and improve photorealism.
|
| 93 |
+
dynamic_thresholding_ratio (`float`, defaults to 0.995):
|
| 94 |
+
The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
|
| 95 |
+
sample_max_value (`float`, defaults to 1.0):
|
| 96 |
+
The threshold value for dynamic thresholding. Valid only when `thresholding=True` and
|
| 97 |
+
`algorithm_type="dpmsolver++"`.
|
| 98 |
+
algorithm_type (`str`, defaults to `dpmsolver++`):
|
| 99 |
+
Algorithm type for the solver; can be `dpmsolver`, `dpmsolver++`, `sde-dpmsolver` or `sde-dpmsolver++`. The
|
| 100 |
+
`dpmsolver` type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927)
|
| 101 |
+
paper, and the `dpmsolver++` type implements the algorithms in the
|
| 102 |
+
[DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or
|
| 103 |
+
`sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion.
|
| 104 |
+
solver_type (`str`, defaults to `midpoint`):
|
| 105 |
+
Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the
|
| 106 |
+
sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers.
|
| 107 |
+
lower_order_final (`bool`, defaults to `True`):
|
| 108 |
+
Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can
|
| 109 |
+
stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10.
|
| 110 |
+
euler_at_final (`bool`, defaults to `False`):
|
| 111 |
+
Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail
|
| 112 |
+
richness. This can stabilize the sampling of the SDE variant of DPMSolver for small number of inference
|
| 113 |
+
steps, but sometimes may result in blurring.
|
| 114 |
+
final_sigmas_type (`str`, *optional*, defaults to "zero"):
|
| 115 |
+
The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final
|
| 116 |
+
sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0.
|
| 117 |
+
lambda_min_clipped (`float`, defaults to `-inf`):
|
| 118 |
+
Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the
|
| 119 |
+
cosine (`squaredcos_cap_v2`) noise schedule.
|
| 120 |
+
variance_type (`str`, *optional*):
|
| 121 |
+
Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output
|
| 122 |
+
contains the predicted Gaussian variance.
|
| 123 |
+
"""
|
| 124 |
+
|
| 125 |
+
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        solver_order: int = 2,
        prediction_type: str = "flow_prediction",
        shift: Optional[float] = 1.0,
        use_dynamic_shifting=False,
        thresholding: bool = False,
        dynamic_thresholding_ratio: float = 0.995,
        sample_max_value: float = 1.0,
        algorithm_type: str = "dpmsolver++",
        solver_type: str = "midpoint",
        lower_order_final: bool = True,
        euler_at_final: bool = False,
        final_sigmas_type: Optional[str] = "zero",  # "zero", "sigma_min"
        lambda_min_clipped: float = -float("inf"),
        variance_type: Optional[str] = None,
        invert_sigmas: bool = False,
    ):
        if algorithm_type in ["dpmsolver", "sde-dpmsolver"]:
            deprecation_message = (
                f"algorithm_type {algorithm_type} is deprecated and will be removed in a future version. "
                "Choose from `dpmsolver++` or `sde-dpmsolver++` instead"
            )
            deprecate("algorithm_types dpmsolver and sde-dpmsolver", "1.0.0", deprecation_message)

        # settings for DPM-Solver
        if algorithm_type not in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]:
            if algorithm_type == "deis":
                self.register_to_config(algorithm_type="dpmsolver++")
            else:
                raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}")

        if solver_type not in ["midpoint", "heun"]:
            if solver_type in ["logrho", "bh1", "bh2"]:
                self.register_to_config(solver_type="midpoint")
            else:
                raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}")

        if algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"] and final_sigmas_type == "zero":
            raise ValueError(
                f"`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please choose `sigma_min` instead."
            )

        # settable values
        self.num_inference_steps = None
        alphas = np.linspace(1, 1 / num_train_timesteps, num_train_timesteps)[::-1].copy()
        sigmas = 1.0 - alphas
        sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32)

        if not use_dynamic_shifting:
            # when use_dynamic_shifting is True, we apply the timestep shifting on the fly based on the image resolution
            sigmas = shift * sigmas / (1 + (shift - 1) * sigmas)  # pyright: ignore

        self.sigmas = sigmas
        self.timesteps = sigmas * num_train_timesteps

        self.model_outputs = [None] * solver_order
        self.lower_order_nums = 0
        self._step_index = None
        self._begin_index = None

        # self.sigmas = self.sigmas.to("cpu")  # to avoid too much CPU/GPU communication
        self.sigma_min = self.sigmas[-1].item()
        self.sigma_max = self.sigmas[0].item()

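    # Note on the `shift` warp above (an illustrative aside, not upstream
    # commentary): sigma' = shift * sigma / (1 + (shift - 1) * sigma) maps
    # [0, 1] onto itself but biases the schedule toward high noise when
    # shift > 1. For example, with shift = 3.0, a mid-schedule value
    # sigma = 0.5 becomes 3.0 * 0.5 / (1 + 2.0 * 0.5) = 0.75.
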
    @property
    def step_index(self):
        """
        The index counter for the current timestep. It increases by 1 after each scheduler step.
        """
        return self._step_index

    @property
    def begin_index(self):
        """
        The index for the first timestep. It should be set from the pipeline with the `set_begin_index` method.
        """
        return self._begin_index

    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
    def set_begin_index(self, begin_index: int = 0):
        """
        Sets the begin index for the scheduler. This function should be run from the pipeline before the inference.

        Args:
            begin_index (`int`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index

    # Modified from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler.set_timesteps
    def set_timesteps(
        self,
        num_inference_steps: Union[int, None] = None,
        device: Union[str, torch.device] = None,
        sigmas: Optional[List[float]] = None,
        mu: Optional[Union[float, None]] = None,
        shift: Optional[Union[float, None]] = None,
    ):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        """

        if self.config.use_dynamic_shifting and mu is None:
            raise ValueError(
                "you have to pass a value for `mu` when `use_dynamic_shifting` is set to `True`"
            )

        if sigmas is None:
            sigmas = np.linspace(self.sigma_max, self.sigma_min,
                                 num_inference_steps + 1).copy()[:-1]  # pyright: ignore

        if self.config.use_dynamic_shifting:
            sigmas = self.time_shift(mu, 1.0, sigmas)  # pyright: ignore
        else:
            if shift is None:
                shift = self.config.shift
            sigmas = shift * sigmas / (1 + (shift - 1) * sigmas)  # pyright: ignore

        if self.config.final_sigmas_type == "sigma_min":
            sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0])**0.5
        elif self.config.final_sigmas_type == "zero":
            sigma_last = 0
        else:
            raise ValueError(
                f"`final_sigmas_type` must be one of 'zero' or 'sigma_min', but got {self.config.final_sigmas_type}"
            )

        timesteps = sigmas * self.config.num_train_timesteps
        sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32)  # pyright: ignore

        self.sigmas = torch.from_numpy(sigmas)
        self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64)

        self.num_inference_steps = len(timesteps)

        self.model_outputs = [
            None,
        ] * self.config.solver_order
        self.lower_order_nums = 0

        self._step_index = None
        self._begin_index = None
        # self.sigmas = self.sigmas.to("cpu")  # to avoid too much CPU/GPU communication

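    # Typical call sequence (an illustrative sketch, not upstream commentary):
    #
    #     scheduler.set_timesteps(num_inference_steps=50, device="cuda", shift=5.0)
    #     for t in scheduler.timesteps:
    #         ...  # one denoising `step` per timestep
    #
    # After this call `self.sigmas` holds num_inference_steps + 1 values (the
    # trailing `sigma_last`), while `self.timesteps` holds num_inference_steps.
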
    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
    def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor:
        """
        "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
        prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
        s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
        pixels from saturation at each step. We find that dynamic thresholding results in significantly better
        photorealism as well as better image-text alignment, especially when using very large guidance weights."

        https://arxiv.org/abs/2205.11487
        """
        dtype = sample.dtype
        batch_size, channels, *remaining_dims = sample.shape

        if dtype not in (torch.float32, torch.float64):
            sample = sample.float()  # upcast for quantile calculation, and clamp not implemented for cpu half

        # Flatten sample for doing quantile calculation along each image
        sample = sample.reshape(batch_size, channels * np.prod(remaining_dims))

        abs_sample = sample.abs()  # "a certain percentile absolute pixel value"

        s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
        s = torch.clamp(
            s, min=1, max=self.config.sample_max_value
        )  # When clamped to min=1, equivalent to standard clipping to [-1, 1]
        s = s.unsqueeze(1)  # (batch_size, 1) because clamp will broadcast along dim=0
        sample = torch.clamp(
            sample, -s, s
        ) / s  # "we threshold xt0 to the range [-s, s] and then divide by s"

        sample = sample.reshape(batch_size, channels, *remaining_dims)
        sample = sample.to(dtype)

        return sample

    # Copied from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler._sigma_to_t
    def _sigma_to_t(self, sigma):
        return sigma * self.config.num_train_timesteps

    def _sigma_to_alpha_sigma_t(self, sigma):
        return 1 - sigma, sigma

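    # Reading of the helper above (illustrative): with alpha_t = 1 - sigma and
    # sigma_t = sigma, a noisy sample in this flow-matching parameterization is
    # the linear interpolation x_t = (1 - sigma) * x_0 + sigma * noise, and
    # lambda_t = log(alpha_t / sigma_t) = log((1 - sigma) / sigma).
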
    # Copied from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler.time_shift
    def time_shift(self, mu: float, sigma: float, t: torch.Tensor):
        return math.exp(mu) / (math.exp(mu) + (1 / t - 1)**sigma)

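    # Worked example for `time_shift` (illustrative): with mu = math.log(3.0)
    # and sigma = 1.0, t = 0.5 maps to 3 / (3 + (1 / 0.5 - 1)) = 0.75, the same
    # warp as the static shift = 3.0 case applied in `__init__`.
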
    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.convert_model_output
    def convert_model_output(
        self,
        model_output: torch.Tensor,
        *args,
        sample: torch.Tensor = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is
        designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an
        integral of the data prediction model.

        <Tip>

        The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise
        prediction and data prediction models.

        </Tip>

        Args:
            model_output (`torch.Tensor`):
                The direct output from the learned diffusion model.
            sample (`torch.Tensor`):
                A current instance of a sample created by the diffusion process.

        Returns:
            `torch.Tensor`:
                The converted model output.
        """
        timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None)
        if sample is None:
            if len(args) > 1:
                sample = args[1]
            else:
                raise ValueError("missing `sample` as a required keyword argument")
        if timestep is not None:
            deprecate(
                "timesteps",
                "1.0.0",
                "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
            )

        # DPM-Solver++ needs to solve an integral of the data prediction model.
        if self.config.algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]:
            if self.config.prediction_type == "flow_prediction":
                sigma_t = self.sigmas[self.step_index]
                x0_pred = sample - sigma_t * model_output
            else:
                raise ValueError(
                    f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`,"
                    " `v_prediction`, or `flow_prediction` for the FlowDPMSolverMultistepScheduler."
                )

            if self.config.thresholding:
                x0_pred = self._threshold_sample(x0_pred)

            return x0_pred

        # DPM-Solver needs to solve an integral of the noise prediction model.
        elif self.config.algorithm_type in ["dpmsolver", "sde-dpmsolver"]:
            if self.config.prediction_type == "flow_prediction":
                sigma_t = self.sigmas[self.step_index]
                epsilon = sample - (1 - sigma_t) * model_output
            else:
                raise ValueError(
                    f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`,"
                    " `v_prediction` or `flow_prediction` for the FlowDPMSolverMultistepScheduler."
                )

            if self.config.thresholding:
                sigma_t = self.sigmas[self.step_index]
                x0_pred = sample - sigma_t * model_output
                x0_pred = self._threshold_sample(x0_pred)
                epsilon = model_output + x0_pred

            return epsilon

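    # Derivation sketch for the data-prediction branch above (assuming the
    # flow-matching convention x_t = (1 - sigma_t) * x_0 + sigma_t * eps and a
    # model predicting the velocity v = eps - x_0):
    #
    #     x_t - sigma_t * v = (1 - sigma_t) * x_0 + sigma_t * eps - sigma_t * (eps - x_0)
    #                       = x_0,
    #
    # which is exactly `x0_pred = sample - sigma_t * model_output`.
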
    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.dpm_solver_first_order_update
    def dpm_solver_first_order_update(
        self,
        model_output: torch.Tensor,
        *args,
        sample: torch.Tensor = None,
        noise: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        One step for the first-order DPMSolver (equivalent to DDIM).

        Args:
            model_output (`torch.Tensor`):
                The direct output from the learned diffusion model.
            sample (`torch.Tensor`):
                A current instance of a sample created by the diffusion process.

        Returns:
            `torch.Tensor`:
                The sample tensor at the previous timestep.
        """
        timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None)
        prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None)
        if sample is None:
            if len(args) > 2:
                sample = args[2]
            else:
                raise ValueError("missing `sample` as a required keyword argument")
        if timestep is not None:
            deprecate(
                "timesteps",
                "1.0.0",
                "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
            )

        if prev_timestep is not None:
            deprecate(
                "prev_timestep",
                "1.0.0",
                "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
            )

        sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index]  # pyright: ignore
        alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
        alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s)
        lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
        lambda_s = torch.log(alpha_s) - torch.log(sigma_s)

        h = lambda_t - lambda_s
        if self.config.algorithm_type == "dpmsolver++":
            x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output
        elif self.config.algorithm_type == "dpmsolver":
            x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output
        elif self.config.algorithm_type == "sde-dpmsolver++":
            assert noise is not None
            x_t = ((sigma_t / sigma_s * torch.exp(-h)) * sample +
                   (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output +
                   sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise)
        elif self.config.algorithm_type == "sde-dpmsolver":
            assert noise is not None
            x_t = ((alpha_t / alpha_s) * sample - 2.0 *
                   (sigma_t * (torch.exp(h) - 1.0)) * model_output +
                   sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise)
        return x_t  # pyright: ignore

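    # Sanity check for the `dpmsolver++` branch above (illustrative): with
    # alpha = 1 - sigma and lambda = log(alpha / sigma), exp(-h) equals
    # (alpha_s * sigma_t) / (alpha_t * sigma_s), so substituting
    # x_s = alpha_s * x_0 + sigma_s * eps into the update gives
    # x_t = alpha_t * x_0 + sigma_t * eps: the predicted x_0 is re-noised at
    # the next sigma level, the flow-matching analogue of a DDIM step.
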
    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.multistep_dpm_solver_second_order_update
    def multistep_dpm_solver_second_order_update(
        self,
        model_output_list: List[torch.Tensor],
        *args,
        sample: torch.Tensor = None,
        noise: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        One step for the second-order multistep DPMSolver.

        Args:
            model_output_list (`List[torch.Tensor]`):
                The direct outputs from the learned diffusion model at the current and latter timesteps.
            sample (`torch.Tensor`):
                A current instance of a sample created by the diffusion process.

        Returns:
            `torch.Tensor`:
                The sample tensor at the previous timestep.
        """
        timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None)
        prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None)
        if sample is None:
            if len(args) > 2:
                sample = args[2]
            else:
                raise ValueError("missing `sample` as a required keyword argument")
        if timestep_list is not None:
            deprecate(
                "timestep_list",
                "1.0.0",
                "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
            )

        if prev_timestep is not None:
            deprecate(
                "prev_timestep",
                "1.0.0",
                "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
            )

        sigma_t, sigma_s0, sigma_s1 = (
            self.sigmas[self.step_index + 1],  # pyright: ignore
            self.sigmas[self.step_index],
            self.sigmas[self.step_index - 1],  # pyright: ignore
        )

        alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
        alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0)
        alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1)

        lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
        lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0)
        lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1)

        m0, m1 = model_output_list[-1], model_output_list[-2]

        h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1
        r0 = h_0 / h
        D0, D1 = m0, (1.0 / r0) * (m0 - m1)
        if self.config.algorithm_type == "dpmsolver++":
            # See https://arxiv.org/abs/2211.01095 for detailed derivations
            if self.config.solver_type == "midpoint":
                x_t = ((sigma_t / sigma_s0) * sample -
                       (alpha_t * (torch.exp(-h) - 1.0)) * D0 - 0.5 *
                       (alpha_t * (torch.exp(-h) - 1.0)) * D1)
            elif self.config.solver_type == "heun":
                x_t = ((sigma_t / sigma_s0) * sample -
                       (alpha_t * (torch.exp(-h) - 1.0)) * D0 +
                       (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1)
        elif self.config.algorithm_type == "dpmsolver":
            # See https://arxiv.org/abs/2206.00927 for detailed derivations
            if self.config.solver_type == "midpoint":
                x_t = ((alpha_t / alpha_s0) * sample -
                       (sigma_t * (torch.exp(h) - 1.0)) * D0 - 0.5 *
                       (sigma_t * (torch.exp(h) - 1.0)) * D1)
            elif self.config.solver_type == "heun":
                x_t = ((alpha_t / alpha_s0) * sample -
                       (sigma_t * (torch.exp(h) - 1.0)) * D0 -
                       (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1)
        elif self.config.algorithm_type == "sde-dpmsolver++":
            assert noise is not None
            if self.config.solver_type == "midpoint":
                x_t = ((sigma_t / sigma_s0 * torch.exp(-h)) * sample +
                       (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + 0.5 *
                       (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 +
                       sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise)
            elif self.config.solver_type == "heun":
                x_t = ((sigma_t / sigma_s0 * torch.exp(-h)) * sample +
                       (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 +
                       (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 +
                       sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise)
        elif self.config.algorithm_type == "sde-dpmsolver":
            assert noise is not None
            if self.config.solver_type == "midpoint":
                x_t = ((alpha_t / alpha_s0) * sample - 2.0 *
                       (sigma_t * (torch.exp(h) - 1.0)) * D0 -
                       (sigma_t * (torch.exp(h) - 1.0)) * D1 +
                       sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise)
            elif self.config.solver_type == "heun":
                x_t = ((alpha_t / alpha_s0) * sample - 2.0 *
                       (sigma_t * (torch.exp(h) - 1.0)) * D0 - 2.0 *
                       (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 +
                       sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise)
        return x_t  # pyright: ignore

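    # Reading guide for the multistep algebra above (illustrative): m0 and m1
    # are the converted model outputs at the two most recent sigmas, D0 = m0 is
    # the zeroth-order (constant) term, and D1 = (m0 - m1) / r0 is a
    # finite-difference estimate of the derivative of the prediction with
    # respect to lambda; the `midpoint` and `heun` branches differ only in how
    # the D1 correction term is weighted.
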
    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.multistep_dpm_solver_third_order_update
    def multistep_dpm_solver_third_order_update(
        self,
        model_output_list: List[torch.Tensor],
        *args,
        sample: torch.Tensor = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        One step for the third-order multistep DPMSolver.

        Args:
            model_output_list (`List[torch.Tensor]`):
                The direct outputs from the learned diffusion model at the current and latter timesteps.
            sample (`torch.Tensor`):
                A current instance of a sample created by the diffusion process.

        Returns:
            `torch.Tensor`:
                The sample tensor at the previous timestep.
        """

        timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None)
        prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None)
        if sample is None:
            if len(args) > 2:
                sample = args[2]
            else:
                raise ValueError("missing `sample` as a required keyword argument")
        if timestep_list is not None:
            deprecate(
                "timestep_list",
                "1.0.0",
                "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
            )

        if prev_timestep is not None:
            deprecate(
                "prev_timestep",
                "1.0.0",
                "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
            )

        sigma_t, sigma_s0, sigma_s1, sigma_s2 = (
            self.sigmas[self.step_index + 1],  # pyright: ignore
            self.sigmas[self.step_index],
            self.sigmas[self.step_index - 1],  # pyright: ignore
            self.sigmas[self.step_index - 2],  # pyright: ignore
        )

        alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
        alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0)
        alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1)
        alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2)

        lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
        lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0)
        lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1)
        lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2)

        m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3]

        h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2
        r0, r1 = h_0 / h, h_1 / h
        D0 = m0
        D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2)
        D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)
        D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1)
        if self.config.algorithm_type == "dpmsolver++":
            # See https://arxiv.org/abs/2206.00927 for detailed derivations
            x_t = ((sigma_t / sigma_s0) * sample -
                   (alpha_t * (torch.exp(-h) - 1.0)) * D0 +
                   (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 -
                   (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2)
        elif self.config.algorithm_type == "dpmsolver":
            # See https://arxiv.org/abs/2206.00927 for detailed derivations
            x_t = ((alpha_t / alpha_s0) * sample -
                   (sigma_t * (torch.exp(h) - 1.0)) * D0 -
                   (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 -
                   (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2)
        return x_t  # pyright: ignore

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        pos = 1 if len(indices) > 1 else 0

        return indices[pos].item()

    def _init_step_index(self, timestep):
        """
        Initialize the step_index counter for the scheduler.
        """

        if self.begin_index is None:
            if isinstance(timestep, torch.Tensor):
                timestep = timestep.to(self.timesteps.device)
            self._step_index = self.index_for_timestep(timestep)
        else:
            self._step_index = self._begin_index

    # Modified from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.step
    def step(
        self,
        model_output: torch.Tensor,
        timestep: Union[int, torch.Tensor],
        sample: torch.Tensor,
        generator=None,
        variance_noise: Optional[torch.Tensor] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with
        the multistep DPMSolver.

        Args:
            model_output (`torch.Tensor`):
                The direct output from the learned diffusion model.
            timestep (`int`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.Tensor`):
                A current instance of a sample created by the diffusion process.
            generator (`torch.Generator`, *optional*):
                A random number generator.
            variance_noise (`torch.Tensor`):
                Alternative to generating noise with `generator` by directly providing the noise for the variance
                itself. Useful for methods such as [`LEdits++`].
            return_dict (`bool`):
                Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`.

        Returns:
            [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`:
                If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a
                tuple is returned where the first element is the sample tensor.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        if self.step_index is None:
            self._init_step_index(timestep)

        # Improve numerical stability for small number of steps
        lower_order_final = (self.step_index == len(self.timesteps) - 1) and (
            self.config.euler_at_final or
            (self.config.lower_order_final and len(self.timesteps) < 15) or
            self.config.final_sigmas_type == "zero")
        lower_order_second = ((self.step_index == len(self.timesteps) - 2) and
                              self.config.lower_order_final and
                              len(self.timesteps) < 15)

        model_output = self.convert_model_output(model_output, sample=sample)
        for i in range(self.config.solver_order - 1):
            self.model_outputs[i] = self.model_outputs[i + 1]
        self.model_outputs[-1] = model_output

        # Upcast to avoid precision issues when computing prev_sample
        sample = sample.to(torch.float32)
        if self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"] and variance_noise is None:
            noise = randn_tensor(
                model_output.shape,
                generator=generator,
                device=model_output.device,
                dtype=torch.float32)
        elif self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]:
            noise = variance_noise.to(
                device=model_output.device, dtype=torch.float32)  # pyright: ignore
        else:
            noise = None

        if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final:
            prev_sample = self.dpm_solver_first_order_update(
                model_output, sample=sample, noise=noise)
        elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second:
            prev_sample = self.multistep_dpm_solver_second_order_update(
                self.model_outputs, sample=sample, noise=noise)
        else:
            prev_sample = self.multistep_dpm_solver_third_order_update(
                self.model_outputs, sample=sample)

        if self.lower_order_nums < self.config.solver_order:
            self.lower_order_nums += 1

        # Cast sample back to expected dtype
        prev_sample = prev_sample.to(model_output.dtype)

        # upon completion increase step index by one
        self._step_index += 1  # pyright: ignore

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

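    # Summary of the order selection in `step` (illustrative): the solver warms
    # up through first- and second-order updates until `solver_order` previous
    # outputs are available, and `lower_order_final` / `lower_order_second`
    # drop back to a lower order near the end of short (< 15 step) schedules
    # for numerical stability.
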
    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.scale_model_input
    def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
        current timestep.

        Args:
            sample (`torch.Tensor`):
                The input sample.

        Returns:
            `torch.Tensor`:
                A scaled input sample.
        """
        return sample

    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.add_noise
    def add_noise(
        self,
        original_samples: torch.Tensor,
        noise: torch.Tensor,
        timesteps: torch.IntTensor,
    ) -> torch.Tensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(
            device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(
                original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(
                original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        # begin_index is None when the scheduler is used for training or the pipeline does not implement set_begin_index
        if self.begin_index is None:
            step_indices = [
                self.index_for_timestep(t, schedule_timesteps) for t in timesteps
            ]
        elif self.step_index is not None:
            # add_noise is called after the first denoising step (for inpainting)
            step_indices = [self.step_index] * timesteps.shape[0]
        else:
            # add_noise is called before the first denoising step to create the initial latent (img2img)
            step_indices = [self.begin_index] * timesteps.shape[0]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
        noisy_samples = alpha_t * original_samples + sigma_t * noise
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
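
# Usage sketch (an illustrative addition, not part of the upstream module):
# drives the scheduler above with a dummy model that predicts zero velocity,
# just to exercise the `set_timesteps` -> `step` loop. Shapes and the `shift`
# value are arbitrary assumptions.
if __name__ == "__main__":
    scheduler = FlowDPMSolverMultistepScheduler(shift=5.0)
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 4, 8, 8)  # stand-in latent
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for a real flow model
        sample = scheduler.step(model_output, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 4, 8, 8])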