sypyp committed
Commit · 3b928bb
Parent(s): cc51aab
init upload
- image_encoder/config.json +22 -0
- image_encoder/model.safetensors +3 -0
- image_processor/preprocessor_config.json +28 -0
- model_index.json +32 -0
- scheduler/scheduler_config.json +28 -0
- transformer/config.json +26 -0
- transformer/diffusion_pytorch_model-00001-of-00007.safetensors +3 -0
- transformer/diffusion_pytorch_model-00002-of-00007.safetensors +3 -0
- transformer/diffusion_pytorch_model-00003-of-00007.safetensors +3 -0
- transformer/diffusion_pytorch_model-00004-of-00007.safetensors +3 -0
- transformer/diffusion_pytorch_model-00005-of-00007.safetensors +3 -0
- transformer/diffusion_pytorch_model-00006-of-00007.safetensors +3 -0
- transformer/diffusion_pytorch_model-00007-of-00007.safetensors +3 -0
- transformer/diffusion_pytorch_model.safetensors.index.json +0 -0
image_encoder/config.json
ADDED
@@ -0,0 +1,22 @@
+{
+  "architectures": [
+    "CLIPVisionModel"
+  ],
+  "attention_dropout": 0.0,
+  "dropout": 0.0,
+  "hidden_act": "gelu",
+  "hidden_size": 1280,
+  "image_size": 224,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 5120,
+  "layer_norm_eps": 1e-05,
+  "model_type": "clip_vision_model",
+  "num_attention_heads": 16,
+  "num_channels": 3,
+  "num_hidden_layers": 32,
+  "patch_size": 14,
+  "projection_dim": 1024,
+  "torch_dtype": "float32",
+  "transformers_version": "4.49.0"
+}
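This is a stock transformers CLIPVisionModel configuration at ViT-H/14 scale (hidden size 1280, 32 layers, patch size 14). A minimal sketch of loading this component on its own, where `REPO_ID` is a placeholder for wherever this repository is hosted:

```python
import torch
from transformers import CLIPVisionModel

# REPO_ID is a placeholder for the Hub id of this repository.
image_encoder = CLIPVisionModel.from_pretrained(
    "REPO_ID",
    subfolder="image_encoder",
    torch_dtype=torch.float32,  # matches "torch_dtype" in the config above
)

print(image_encoder.config.hidden_size)        # 1280
print(image_encoder.config.num_hidden_layers)  # 32
```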
image_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8eb46f477ef5e1859b659014aed6ca56cdc207c12cb7a0f9d61b4d80a1a7bb84
+size 2523128312
image_processor/preprocessor_config.json
ADDED
@@ -0,0 +1,28 @@
+{
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": false,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "CLIPImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
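The processor uses the standard CLIP normalization statistics, resizes directly to 224×224 (bicubic, `resample: 3`) and skips center cropping. A small sketch of running it on an input frame, with `REPO_ID` again a placeholder and the image path purely illustrative:

```python
from PIL import Image
from transformers import CLIPImageProcessor

# REPO_ID is a placeholder; the config above lives in the "image_processor" subfolder.
image_processor = CLIPImageProcessor.from_pretrained("REPO_ID", subfolder="image_processor")

image = Image.open("reference_frame.png").convert("RGB")  # any input image
inputs = image_processor(images=image, return_tensors="pt")

print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```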
model_index.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "_class_name": "WanI2VPipeline",
+  "_diffusers_version": "0.33.0.dev0",
+  "image_encoder": [
+    "transformers",
+    "CLIPVisionModel"
+  ],
+  "image_processor": [
+    "transformers",
+    "CLIPImageProcessor"
+  ],
+  "scheduler": [
+    "diffusers",
+    "UniPCMultistepScheduler"
+  ],
+  "text_encoder": [
+    "transformers",
+    "UMT5EncoderModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "T5TokenizerFast"
+  ],
+  "transformer": [
+    "diffusers",
+    "WanTransformer3DModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKLWan"
+  ]
+}
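model_index.json is what `DiffusionPipeline.from_pretrained` reads to assemble the pipeline: each entry maps a component name to the library and class used to load it. Note that the `_class_name` recorded here, `WanI2VPipeline`, comes from diffusers 0.33.0.dev0, and that this commit only uploads the image_encoder, image_processor, scheduler and transformer folders; the text_encoder, tokenizer and vae listed above would also need to be present for a full load. A hedged sketch, assuming `REPO_ID` is a placeholder and the installed diffusers build registers the pipeline class named in this file:

```python
import torch
from diffusers import DiffusionPipeline

# REPO_ID is a placeholder. DiffusionPipeline resolves the pipeline class and
# every component (image_encoder, image_processor, scheduler, text_encoder,
# tokenizer, transformer, vae) from model_index.json. This assumes a diffusers
# version that knows the "WanI2VPipeline" class recorded above.
pipe = DiffusionPipeline.from_pretrained("REPO_ID", torch_dtype=torch.bfloat16)
pipe.to("cuda")
```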
scheduler/scheduler_config.json
ADDED
@@ -0,0 +1,28 @@
+{
+  "_class_name": "UniPCMultistepScheduler",
+  "_diffusers_version": "0.33.0.dev0",
+  "beta_end": 0.02,
+  "beta_schedule": "linear",
+  "beta_start": 0.0001,
+  "disable_corrector": [],
+  "dynamic_thresholding_ratio": 0.995,
+  "final_sigmas_type": "zero",
+  "flow_shift": 7.0,
+  "lower_order_final": true,
+  "num_train_timesteps": 1000,
+  "predict_x0": true,
+  "prediction_type": "flow_prediction",
+  "rescale_betas_zero_snr": false,
+  "sample_max_value": 1.0,
+  "solver_order": 2,
+  "solver_p": null,
+  "solver_type": "bh2",
+  "steps_offset": 0,
+  "thresholding": false,
+  "timestep_spacing": "linspace",
+  "trained_betas": null,
+  "use_beta_sigmas": false,
+  "use_exponential_sigmas": false,
+  "use_flow_sigmas": true,
+  "use_karras_sigmas": false
+}
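The scheduler is configured for flow matching rather than epsilon prediction (`prediction_type: "flow_prediction"`, `use_flow_sigmas: true`) with a flow shift of 7.0. A minimal sketch of loading it standalone and inspecting its timestep schedule, with `REPO_ID` a placeholder:

```python
from diffusers import UniPCMultistepScheduler

# REPO_ID is a placeholder; this reads the flow-matching UniPC config above
# (prediction_type="flow_prediction", use_flow_sigmas=True, flow_shift=7.0).
scheduler = UniPCMultistepScheduler.from_pretrained("REPO_ID", subfolder="scheduler")

scheduler.set_timesteps(num_inference_steps=50)
print(len(scheduler.timesteps))  # 50
print(scheduler.timesteps[:3])   # first few (shifted) timesteps
```

If a different shift is wanted, the value can be overridden when re-creating the scheduler, e.g. `UniPCMultistepScheduler.from_config(scheduler.config, flow_shift=5.0)`.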
transformer/config.json
ADDED
@@ -0,0 +1,26 @@
+{
+  "_class_name": "WanTransformer3DModel",
+  "_diffusers_version": "0.33.0.dev0",
+  "add_img_emb": true,
+  "added_kv_proj_dim": 5120,
+  "attention_head_dim": 128,
+  "cross_attn_norm": true,
+  "eps": 1e-06,
+  "ffn_dim": 13824,
+  "freq_dim": 256,
+  "in_channels": 36,
+  "num_attention_heads": 40,
+  "num_layers": 40,
+  "out_channels": 16,
+  "patch_size": [
+    1,
+    2,
+    2
+  ],
+  "qk_norm": true,
+  "text_dim": 4096,
+  "window_size": [
+    -1,
+    -1
+  ]
+}
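This is the image-to-video variant of the Wan transformer: `in_channels` is 36 rather than the 16 channels it outputs, reflecting the conditioning channels concatenated onto the noisy latents, and `add_img_emb` / `added_kv_proj_dim` enable the extra cross-attention on the CLIP image embeddings. A sketch of loading just this component, assuming a diffusers build that accepts this dev-version config (0.33.0.dev0) and with `REPO_ID` a placeholder; the seven shards listed below are resolved through the accompanying index file:

```python
import torch
from diffusers import WanTransformer3DModel

# REPO_ID is a placeholder. from_pretrained resolves the seven shards via
# diffusion_pytorch_model.safetensors.index.json in the "transformer" subfolder.
transformer = WanTransformer3DModel.from_pretrained(
    "REPO_ID",
    subfolder="transformer",
    torch_dtype=torch.bfloat16,
)

n_params = sum(p.numel() for p in transformer.parameters())
print(f"{n_params / 1e9:.1f}B parameters")
```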
transformer/diffusion_pytorch_model-00001-of-00007.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:457639497b204e838c0b5e7f5955e8b1b0f9f04213bd9853e40cd77771569685
+size 9952163512

transformer/diffusion_pytorch_model-00002-of-00007.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eda5a7f06db0164b852b52fc56db9cb82c502e963f0e7d407af8e13bac31826b
+size 9797226656

transformer/diffusion_pytorch_model-00003-of-00007.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be4dfe5a12ac35c857fe307e91899d0f9c473551c2a828e80718f340489b27bd
+size 9975437232

transformer/diffusion_pytorch_model-00004-of-00007.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:744a014df498e5b579d34a12e9ea836b2cd9adf9e0ef77b4f7378ad762091573
+size 9975566544

transformer/diffusion_pytorch_model-00005-of-00007.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b49e16fed3592ffc07d65503f39517f3190d8e52130418706ea1ba678f207050
+size 9902022768

transformer/diffusion_pytorch_model-00006-of-00007.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d173bad73ebcdde9fe4efef487d1df9c993dfc3590b041e05e7806418479be52
+size 9902063944

transformer/diffusion_pytorch_model-00007-of-00007.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68b4173328f39217e177fe40cf94e509a213955aea842b086f3fd17e6c286832
+size 6075990120

transformer/diffusion_pytorch_model.safetensors.index.json
ADDED
The diff for this file is too large to render. See raw diff.