diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..24b9f063ac27374c17be54b7f2824d1cdff75e4c
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,16 @@
+# Dockerfile Public T4
+
+FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-devel
+ENV DEBIAN_FRONTEND=noninteractive
+
+WORKDIR /content
+RUN pip install numexpr einops transformers k_diffusion safetensors gradio diffusers xformers controlnet_aux opencv-python-headless insightface matplotlib
+
+ADD . .
+RUN adduser --disabled-password --gecos '' user
+RUN chown -R user:user /content
+RUN chmod -R 777 /content
+USER user
+
+EXPOSE 7860
+CMD python /content/app.py
diff --git a/README.md b/README.md
index f9c3df5a3686e0d93a7f373eb27407285b4aff0b..cade33ccecc366f372783a459de112259c3a446b 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,13 @@
---
-title: Spatial Control For SD
-emoji: ๐
-colorFrom: blue
+title: Sd Diffusers Webui
+emoji: ๐ณ
+colorFrom: purple
colorTo: gray
sdk: docker
+sdk_version: 3.9
pinned: false
-license: apache-2.0
+license: openrail
+app_port: 7860
---
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e42a066f09a746fb1b2e4464c04eb0f4e5da0b5
--- /dev/null
+++ b/app.py
@@ -0,0 +1,3966 @@
+import transformers
+transformers.utils.move_cache()
+import random
+import tempfile
+import time
+import gradio as gr
+import numpy as np
+import torch
+import math
+import re
+import sys
+from gradio import inputs
+from diffusers import (
+ AutoencoderKL,
+ #UNet2DConditionModel,
+ ControlNetModel,
+ DPMSolverMultistepScheduler,
+ EulerAncestralDiscreteScheduler,
+ EulerDiscreteScheduler,
+ HeunDiscreteScheduler,
+ KDPM2AncestralDiscreteScheduler,
+ KDPM2DiscreteScheduler,
+ LMSDiscreteScheduler,
+ PNDMScheduler,
+ UniPCMultistepScheduler,
+ DEISMultistepScheduler,
+ DDPMScheduler,
+ DDIMScheduler,
+ DPMSolverSDEScheduler,
+ DPMSolverSinglestepScheduler,
+ T2IAdapter,
+ SASolverScheduler,
+ EDMEulerScheduler,
+ EDMDPMSolverMultistepScheduler,
+ ConsistencyDecoderVAE,
+)
+from modules.u_net_condition_modify import UNet2DConditionModel
+from modules.model_diffusers import (
+ StableDiffusionPipeline_finetune,
+ StableDiffusionControlNetPipeline_finetune,
+ StableDiffusionControlNetImg2ImgPipeline_finetune,
+ StableDiffusionImg2ImgPipeline_finetune,
+ StableDiffusionInpaintPipeline_finetune,
+ StableDiffusionControlNetInpaintPipeline_finetune,
+)
+from modules.attention_modify import AttnProcessor,IPAdapterAttnProcessor,AttnProcessor2_0,IPAdapterAttnProcessor2_0
+from modules.model_k_diffusion import StableDiffusionPipeline
+from torchvision import transforms
+from transformers import CLIPTokenizer, CLIPTextModel,CLIPImageProcessor
+from PIL import Image,ImageOps, ImageChops
+from pathlib import Path
+from safetensors.torch import load_file
+import modules.safe as _
+from modules.lora import LoRANetwork
+import os
+import cv2
+import matplotlib.cm  # colorize() below uses matplotlib.cm.get_cmap
+from controlnet_aux import PidiNetDetector, HEDdetector,LineartAnimeDetector,LineartDetector,MLSDdetector,OpenposeDetector,MidasDetector,NormalBaeDetector,ContentShuffleDetector,ZoeDetector
+from transformers import pipeline
+from modules import samplers_extra_k_diffusion
+import gc
+import copy
+from modules.preprocessing_segmentation import preprocessing_segmentation
+import torch.nn.functional as F
+from modules.t2i_adapter import setup_model_t2i_adapter
+from diffusers.image_processor import IPAdapterMaskProcessor
+from typing import Callable, Dict, List, Optional, Union
+from insightface.app import FaceAnalysis
+from insightface.utils import face_align
+from diffusers.utils import load_image
+from transformers import (
+ CLIPImageProcessor,
+ CLIPVisionModelWithProjection,
+)
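+# Global registries mapping embedding / LoRA names to their files, plus per-LoRA scales.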
+embeddings_dict = dict()
+lora_dict = dict()
+lora_scale_dict = dict()
+# lora_dict = {'Not using Lora':None,}
+# lora_scale_dict = {'Not using Lora':1.0,}
+# lora_lst = ['Not using Lora']
+lora_lst = ['Not using Lora']
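+# Cross-attention re-weighting formulas for prompt emphasis; the integer index is looked up again in inference().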
+formula = [
+ ['w = token_weight_martix * sigma * std(qk)',0],
+ ['w = token_weight_martix * log(1 + sigma) * max(qk)',1],
+ ['w = token_weight_martix * log(1 + sigma) * std(qk)',2],
+ ['w = token_weight_martix * log(1 + sigma^2) * std(qk)',3],
+]
+
+encoding_type ={
+ "Automatic111 Encoding": 0,
+ "Long Prompt Encoding": 1,
+ "Short Prompt Encoding": 2,
+}
+model_ip_adapter_lst = ['IP-Adapter','IP-Adapter VIT-G','IP-Adapter Light','IP-Adapter Light v1.1','IP-Adapter Face','IP-Adapter FaceID','IP-Adapter Plus','IP-Adapter Plus Face',"IP-Adapter Plus FaceID","IP-Adapter Plus FaceIDv2"]
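+# UI label -> IP-Adapter checkpoint filename for each supported variant.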
+
+model_ip_adapter_type = {
+ "IP-Adapter": "ip-adapter_sd15.bin",
+ "IP-Adapter VIT-G": "ip-adapter_sd15_vit-G.bin",
+ "IP-Adapter Light": "ip-adapter_sd15_light.bin",
+ "IP-Adapter Light v1.1": "ip-adapter_sd15_light_v11.bin",
+ "IP-Adapter Face":"ip-adapter-full-face_sd15.bin",
+ "IP-Adapter FaceID":"ip-adapter-faceid_sd15.bin",
+ "IP-Adapter Plus": "ip-adapter-plus_sd15.bin",
+ "IP-Adapter Plus Face": "ip-adapter-plus-face_sd15.bin",
+ "IP-Adapter Plus FaceID": "ip-adapter-faceid-plus_sd15.bin",
+ "IP-Adapter Plus FaceIDv2": "ip-adapter-faceid-plusv2_sd15.bin",
+}
+
+controlnet_lst = ["Canny","Depth","Openpose","Soft Edge","Lineart","Lineart (anime)","Scribble","MLSD","Semantic Segmentation","Normal Map","Shuffle","Instruct Pix2Pix"]
+adapter_lst = ["Canny","Sketch","Color","Depth","Openpose","Semantic Segmentation","Zoedepth"]
+controlnet_type ={
+ "Canny": "lllyasviel/control_v11p_sd15_canny",
+ "Depth": "lllyasviel/control_v11f1p_sd15_depth",
+ "Openpose": "lllyasviel/control_v11p_sd15_openpose",
+ "Soft Edge": "lllyasviel/control_v11p_sd15_softedge",
+ "Lineart":"ControlNet-1-1-preview/control_v11p_sd15_lineart",
+ "Lineart (anime)":"lllyasviel/control_v11p_sd15s2_lineart_anime",
+ "Scribble":"lllyasviel/control_v11p_sd15_scribble",
+ "MLSD":"lllyasviel/control_v11p_sd15_mlsd",
+ "Semantic Segmentation":"lllyasviel/control_v11p_sd15_seg",
+ "Normal Map":"lllyasviel/control_v11p_sd15_normalbae",
+ "Shuffle":"lllyasviel/control_v11e_sd15_shuffle",
+ "Instruct Pix2Pix":"lllyasviel/control_v11e_sd15_ip2p",
+}
+adapter_type ={
+ "Canny": "TencentARC/t2iadapter_canny_sd15v2",
+ "Sketch": "TencentARC/t2iadapter_sketch_sd15v2",
+ "Color": "TencentARC/t2iadapter_color_sd14v1",
+ "Depth": "TencentARC/t2iadapter_depth_sd15v2",
+ "Openpose":"TencentARC/t2iadapter_openpose_sd14v1",
+ "Semantic Segmentation":"TencentARC/t2iadapter_seg_sd14v1",
+ "Zoedepth":"TencentARC/t2iadapter_zoedepth_sd15v1",
+}
+models_single_file = []
+models = [
+ ("AbyssOrangeMix2", "Korakoe/AbyssOrangeMix2-HF"),
+ ("BloodOrangeMix", "WarriorMama777/BloodOrangeMix"),
+ ("ElyOrangeMix", "WarriorMama777/ElyOrangeMix"),
+ ("Pastal Mix", "JamesFlare/pastel-mix"),
+ ("Basil Mix", "nuigurumi/basil_mix"),
+ ("Stable Diffusion v1.5", "runwayml/stable-diffusion-v1-5"),
+ ("Stable Diffusion v2.1", "stabilityai/stable-diffusion-2-1-base"),
+ ("Realistic Vision v1.4", "SG161222/Realistic_Vision_V1.4"),
+ ("Dreamlike Photoreal v2.0", "dreamlike-art/dreamlike-photoreal-2.0"),
+ ("Waifu-diffusion v1.4", "hakurei/waifu-diffusion"),
+ ("Stable diffusion PixelArt v1.4", "Onodofthenorth/SD_PixelArt_SpriteSheet_Generator"),
+ ("Anything v3", "Linaqruf/anything-v3.0"),
+ ("Sketch style", "Cosk/sketchstyle-cutesexyrobutts"),
+ ("Anything v5", "stablediffusionapi/anything-v5"),
+ ("Counterfeit v2.5", "gsdf/Counterfeit-V2.5"),
+ ("Edge of realism", "stablediffusionapi/edge-of-realism"),
+ ("Photorealistic fuen", "claudfuen/photorealistic-fuen-v1"),
+ ("Protogen x5.8 (Scifi-Anime)", "darkstorm2150/Protogen_x5.8_Official_Release"),
+ ("Dreamlike Anime", "dreamlike-art/dreamlike-anime-1.0"),
+ ("Something V2.2", "NoCrypt/SomethingV2_2"),
+ ("Realistic Vision v3.0", "SG161222/Realistic_Vision_V3.0_VAE"),
+ ("Noosphere v3.0", "digiplay/Noosphere_v3"),
+ ("Beauty Fool v1.2", "digiplay/BeautyFool_v1.2VAE_pruned"),
+ ("Prefix RealisticMix v1.0", "digiplay/PrefixRealisticMix_v1"),
+ ("Prefix FantasyMix v1.0", "digiplay/PrefixFantasyMix_v1"),
+ ("Unstable Diffusers YamerMIX v3.0", "digiplay/unstableDiffusersYamerMIX_v3"),
+ ("GTA5 Artwork Diffusion", "ItsJayQz/GTA5_Artwork_Diffusion"),
+ ("Open Journey", "prompthero/openjourney"),
+ ("SoapMix2.5D v2.0", "digiplay/SoapMix2.5D_v2"),
+ ("CoffeeMix v2.0", "digiplay/CoffeeMix_v2"),
+ ("helloworld v3.0", "digiplay/helloworld_v3"),
+ ("ARRealVX v1.1", "digiplay/ARRealVX1.1"),
+ ("Fishmix v1.0", "digiplay/fishmix_other_v1"),
+ ("DiamondCoalMix v2.0", "digiplay/DiamondCoalMix_v2_pruned_diffusers"),
+ ("ISOMix v3.22", "digiplay/ISOmix_v3.22"),
+ ("Pika v2", "digiplay/Pika_v2"),
+ ("BluePencil v0.9b", "digiplay/bluePencil_v09b"),
+ ("MeinaPastel v6", "Meina/MeinaPastel_V6"),
+ ("Realistic Vision v4", "SG161222/Realistic_Vision_V4.0"),
+ ("Revanimated v1.2.2", "stablediffusionapi/revanimated"),
+ ("NeverEnding Dream v1.2.2", "Lykon/NeverEnding-Dream"),
+ ("CetusMixCoda", "Stax124/CetusMixCoda"),
+ ("NewMarsMix R11", "digiplay/NewMarsMix_R11"),
+ ("Juggernaut Final", "digiplay/Juggernaut_final"),
+ ("BlankCanvas v1.0", "digiplay/BlankCanvas_v1"),
+ ("FumizukiMix v1.0", "digiplay/FumizukiMix_v1"),
+ ("CampurSari v1.0", "digiplay/CampurSari_Gen1"),
+ ("Realisian v1.0", "digiplay/Realisian_v5"),
+ ("Real Epic Majic Revolution v1.0", "digiplay/RealEpicMajicRevolution_v1"),
+ ("QuinceMix v2.0", "digiplay/quincemix_v2"),
+ ("Counterfeit v3.0", "stablediffusionapi/counterfeit-v30"),
+ ("MeinaMix v11.0", "Meina/MeinaMix_V11"),
+ ("MeinaPastel V7.0", "Meina/MeinaPastel_V7"),
+ ("Alter V3.0", "Meina/Alter_V3"),
+ ("MeinaUnreal V5.0", "Meina/MeinaUnreal_V5"),
+ ("MeinaHentai V5.0", "Meina/MeinaHentai_V5"),
+ ("AnyOrangeMix Mint", "GraydientPlatformAPI/anyorange-mint"),
+]
+
+# Name -> repo or file path; vae_single_file below marks whether the VAE loads via from_single_file (True) or needs a repo with config.json (False)
+vae_link ={
+ "Vae ft MSE": "stabilityai/sd-vae-ft-mse",
+ "Vae ft MSE original": "stabilityai/sd-vae-ft-mse-original/vae-ft-mse-840000-ema-pruned.safetensors",
+ "Vae ft EMA": "stabilityai/sd-vae-ft-ema",
+ "Vae ft EMA original": "stabilityai/sd-vae-ft-ema-original/vae-ft-ema-560000-ema-pruned.safetensors",
+ "ClearVAE V2.1" : "digiplay/VAE/ClearVAE_V2.1.safetensors",
+ "Blessed": "digiplay/VAE/blessed.vae.pt",
+ "Color101VAE v1": "digiplay/VAE/color101VAE_v1.safetensors",
+ "kl-f8-anime2": "digiplay/VAE/klF8Anime2VAE_klF8Anime2VAE.ckpt",
+ "Mangled Merge": "digiplay/VAE/mangledMergeVAE_v10.pt",
+ "Orangemix": "digiplay/VAE/orangemix.vae.pt",
+ "Stable 780000": "digiplay/VAE/stable-780000.vae.pt",
+ "CustomVAE Q6": "duongve/VAE/customvae_q6.safetensors",
+ "Voidnoise VAE": "duongve/VAE/voidnoiseVAE_baseonR0829.safetensors",
+ "Lastpiece Contrast": "duongve/VAE/lastpieceVAE_contrast.safetensors",
+ "Lastpiece Brightness": "duongve/VAE/lastpieceVAE_brightness.safetensors",
+ "Berry's Mix v1.0": "duongve/VAE/berrysMixVAE_v10.safetensors",
+ "Async's VAE v1.0": "duongve/VAE/asyncsVAE_v10.safetensors",
+ "WD-VAE v1.0": "duongve/VAE/wdVAE_v10.safetensors",
+ "Nocturnal": "duongve/VAE/nocturnalVAE_.safetensors",
+ "Apricots": "duongve/VAE/apricotsVAESeries_tensorQuantizerV10.safetensors",
+ "Earth & Dusk v1.0": "duongve/VAE/earthDuskVAE_v10.safetensors",
+ "HotaruVAE Anime v1.0": "duongve/VAE/hotaruvae_AnimeV10.safetensors",
+ "HotaruVAE Real v1.0": "duongve/VAE/hotaruvae_RealV10.safetensors",
+ "Consistency Decoder": "openai/consistency-decoder",
+}
+
+vae_single_file ={
+ "Vae ft MSE": False,
+ "Vae ft MSE original": True,
+ "Vae ft EMA": False,
+ "Vae ft EMA original": True,
+ "ClearVAE V2.1": True,
+ "Blessed": True,
+ "Color101VAE v1": True,
+ "kl-f8-anime2": True,
+ "Mangled Merge": True,
+ "Orangemix": True,
+ "Stable 780000": True,
+ "CustomVAE Q6": True,
+ "Voidnoise VAE": True,
+ "Lastpiece Contrast": True,
+ "Lastpiece Brightness": True,
+ "Berry's Mix v1.0": True,
+ "Async's VAE v1.0": True,
+ "WD-VAE v1.0": True,
+ "Nocturnal": True,
+ "Apricots": True,
+ "Earth & Dusk v1.0": True,
+ "HotaruVAE Anime v1.0": True,
+ "HotaruVAE Real v1.0": True,
+ "Consistency Decoder": False,
+}
+
+
+vae_lst = [
+ "Default",
+ "Vae ft MSE",
+ "Vae ft MSE original",
+ "Vae ft EMA",
+ "Vae ft EMA original",
+ "ClearVAE V2.1",
+ "Blessed",
+ "Color101VAE v1",
+ "kl-f8-anime2",
+ "Mangled Merge",
+ "Orangemix",
+ "Stable 780000",
+ "CustomVAE Q6",
+ "Voidnoise VAE",
+ "Lastpiece Contrast",
+ "Lastpiece Brightness",
+ "Berry's Mix v1.0",
+ "Async's VAE v1.0",
+ "WD-VAE v1.0",
+ "Nocturnal",
+ "Apricots",
+ "Earth & Dusk v1.0",
+ "HotaruVAE Anime v1.0",
+ "HotaruVAE Real v1.0",
+ "Consistency Decoder",
+]
+
+keep_vram = [
+ "Korakoe/AbyssOrangeMix2-HF",
+ "WarriorMama777/BloodOrangeMix",
+ "WarriorMama777/ElyOrangeMix",
+ "JamesFlare/pastel-mix",
+ "nuigurumi/basil_mix",
+ "runwayml/stable-diffusion-v1-5",
+ "stabilityai/stable-diffusion-2-1-base",
+ "SG161222/Realistic_Vision_V1.4",
+ "dreamlike-art/dreamlike-photoreal-2.0",
+ "hakurei/waifu-diffusion",
+ "Onodofthenorth/SD_PixelArt_SpriteSheet_Generator",
+ "Linaqruf/anything-v3.0",
+ "Cosk/sketchstyle-cutesexyrobutts",
+ "stablediffusionapi/anything-v5",
+ "gsdf/Counterfeit-V2.5",
+ "stablediffusionapi/edge-of-realism",
+ "claudfuen/photorealistic-fuen-v1",
+ "darkstorm2150/Protogen_x5.8_Official_Release",
+ "dreamlike-art/dreamlike-anime-1.0",
+ "NoCrypt/SomethingV2_2",
+ "SG161222/Realistic_Vision_V3.0_VAE",
+ "digiplay/Noosphere_v3",
+ "digiplay/BeautyFool_v1.2VAE_pruned",
+ "digiplay/PrefixRealisticMix_v1",
+ "digiplay/PrefixFantasyMix_v1",
+ "digiplay/unstableDiffusersYamerMIX_v3",
+ "ItsJayQz/GTA5_Artwork_Diffusion",
+ "prompthero/openjourney",
+ "digiplay/SoapMix2.5D_v2",
+ "digiplay/CoffeeMix_v2",
+ "digiplay/helloworld_v3",
+ "digiplay/ARRealVX1.1",
+ "digiplay/fishmix_other_v1",
+ "digiplay/DiamondCoalMix_v2_pruned_diffusers",
+ "digiplay/ISOmix_v3.22",
+ "digiplay/Pika_v2",
+ "digiplay/bluePencil_v09b",
+ "Meina/MeinaPastel_V6",
+ "SG161222/Realistic_Vision_V4.0",
+ "stablediffusionapi/revanimated",
+ "Lykon/NeverEnding-Dream",
+ "Stax124/CetusMixCoda",
+ "digiplay/NewMarsMix_R11",
+ "digiplay/Juggernaut_final",
+ "digiplay/BlankCanvas_v1",
+ "digiplay/FumizukiMix_v1",
+ "digiplay/CampurSari_Gen1",
+ "digiplay/Realisian_v5",
+ "digiplay/RealEpicMajicRevolution_v1",
+ "stablediffusionapi/counterfeit-v30",
+ "Meina/MeinaMix_V11",
+ "Meina/MeinaPastel_V7",
+ "Meina/Alter_V3",
+ "Meina/MeinaUnreal_V5",
+ "Meina/MeinaHentai_V5",
+ "GraydientPlatformAPI/anyorange-mint",
+]
+base_name, base_model = models[0]
+
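+# (label, k-diffusion sampler function or name, options); options pick the sigma schedule, second-order handling, Brownian noise, etc.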
+samplers_k_diffusion = [
+ ('Euler', 'sample_euler', {}),
+ ('Euler a', 'sample_euler_ancestral', {"uses_ensd": True}),
+ ('LMS', 'sample_lms', {}),
+ ('LCM', samplers_extra_k_diffusion.sample_lcm, {"second_order": True}),
+ ('Heun', 'sample_heun', {"second_order": True}),
+ ('Heun++', samplers_extra_k_diffusion.sample_heunpp2, {"second_order": True}),
+ ('DDPM', samplers_extra_k_diffusion.sample_ddpm, {"second_order": True}),
+ ('DPM2', 'sample_dpm_2', {'discard_next_to_last_sigma': True}),
+ ('DPM2 a', 'sample_dpm_2_ancestral', {'discard_next_to_last_sigma': True, "uses_ensd": True}),
+ ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', {"uses_ensd": True, "second_order": True}),
+ ('DPM++ 2M', 'sample_dpmpp_2m', {}),
+ ('DPM++ SDE', 'sample_dpmpp_sde', {"second_order": True, "brownian_noise": True}),
+ ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', {"brownian_noise": True}),
+ ('DPM++ 3M SDE', 'sample_dpmpp_3m_sde', {'discard_next_to_last_sigma': True, "brownian_noise": True}),
+ ('DPM fast (img-to-img)', 'sample_dpm_fast', {"uses_ensd": True}),
+ ('DPM adaptive (img-to-img)', 'sample_dpm_adaptive', {"uses_ensd": True}),
+ ('DPM++ 2M SDE Heun', 'sample_dpmpp_2m_sde', {"brownian_noise": True, "solver_type": "heun"}),
+ ('Restart', samplers_extra_k_diffusion.restart_sampler, {"second_order": True}),
+ ('Euler Karras', 'sample_euler', {'scheduler': 'karras'}),
+ ('Euler a Karras', 'sample_euler_ancestral', {'scheduler': 'karras',"uses_ensd": True}),
+ ('LMS Karras', 'sample_lms', {'scheduler': 'karras'}),
+ ('LCM Karras', samplers_extra_k_diffusion.sample_lcm, {'scheduler': 'karras',"second_order": True}),
+ ('Heun Karras', 'sample_heun', {'scheduler': 'karras',"second_order": True}),
+ ('Heun++ Karras', samplers_extra_k_diffusion.sample_heunpp2, {'scheduler': 'karras',"second_order": True}),
+ ('DDPM Karras', samplers_extra_k_diffusion.sample_ddpm, {'scheduler': 'karras', "second_order": True}),
+ ('DPM2 Karras', 'sample_dpm_2', {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
+ ('DPM2 a Karras', 'sample_dpm_2_ancestral', {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
+ ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', {'scheduler': 'karras', "uses_ensd": True, "second_order": True}),
+ ('DPM++ 2M Karras', 'sample_dpmpp_2m', {'scheduler': 'karras'}),
+ ('DPM++ SDE Karras', 'sample_dpmpp_sde', {'scheduler': 'karras', "second_order": True, "brownian_noise": True}),
+ ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', {'scheduler': 'karras', "brownian_noise": True}),
+ ('DPM++ 2M SDE Heun Karras', 'sample_dpmpp_2m_sde', {'scheduler': 'karras', "brownian_noise": True, "solver_type": "heun"}),
+ ('DPM++ 3M SDE Karras', 'sample_dpmpp_3m_sde', {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "brownian_noise": True}),
+ ('Restart Karras', samplers_extra_k_diffusion.restart_sampler, {'scheduler': 'karras', "second_order": True}),
+ ('Euler Exponential', 'sample_euler', {'scheduler': 'exponential'}),
+ ('Euler a Exponential', 'sample_euler_ancestral', {'scheduler': 'exponential',"uses_ensd": True}),
+ ('LMS Exponential', 'sample_lms', {'scheduler': 'exponential'}),
+ ('LCM Exponential', samplers_extra_k_diffusion.sample_lcm, {'scheduler': 'exponential',"second_order": True}),
+ ('Heun Exponential', 'sample_heun', {'scheduler': 'exponential',"second_order": True}),
+ ('Heun++ Exponential', samplers_extra_k_diffusion.sample_heunpp2, {'scheduler': 'exponential',"second_order": True}),
+ ('DDPM Exponential', samplers_extra_k_diffusion.sample_ddpm, {'scheduler': 'exponential', "second_order": True}),
+ ('DPM++ 2M Exponential', 'sample_dpmpp_2m', {'scheduler': 'exponential'}),
+ ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', {'scheduler': 'exponential', "brownian_noise": True}),
+ ('DPM++ 2M SDE Heun Exponential', 'sample_dpmpp_2m_sde', {'scheduler': 'exponential', "brownian_noise": True, "solver_type": "heun"}),
+ ('DPM++ 3M SDE Exponential', 'sample_dpmpp_3m_sde', {'scheduler': 'exponential', 'discard_next_to_last_sigma': True, "brownian_noise": True}),
+ ('Restart Exponential', samplers_extra_k_diffusion.restart_sampler, {'scheduler': 'exponential', "second_order": True}),
+ ('Euler Polyexponential', 'sample_euler', {'scheduler': 'polyexponential'}),
+ ('Euler a Polyexponential', 'sample_euler_ancestral', {'scheduler': 'polyexponential',"uses_ensd": True}),
+ ('LMS Polyexponential', 'sample_lms', {'scheduler': 'polyexponential'}),
+ ('LCM Polyexponential', samplers_extra_k_diffusion.sample_lcm, {'scheduler': 'polyexponential',"second_order": True}),
+ ('Heun Polyexponential', 'sample_heun', {'scheduler': 'polyexponential',"second_order": True}),
+ ('Heun++ Polyexponential', samplers_extra_k_diffusion.sample_heunpp2, {'scheduler': 'polyexponential',"second_order": True}),
+ ('DDPM Polyexponential', samplers_extra_k_diffusion.sample_ddpm, {'scheduler': 'polyexponential', "second_order": True}),
+ ('DPM++ 2M Polyexponential', 'sample_dpmpp_2m', {'scheduler': 'polyexponential'}),
+ ('DPM++ 2M SDE Heun Polyexponential', 'sample_dpmpp_2m_sde', {'scheduler': 'polyexponential', "brownian_noise": True, "solver_type": "heun"}),
+ ('DPM++ 3M SDE Polyexponential', 'sample_dpmpp_3m_sde', {'scheduler': 'polyexponential', 'discard_next_to_last_sigma': True, "brownian_noise": True}),
+ ('Restart Polyexponential', samplers_extra_k_diffusion.restart_sampler, {'scheduler': 'polyexponential', "second_order": True}),
+]
+
+# Adds Karras sigma support to schedulers that lack it in the installed diffusers version (kept as reference).
+'''class DEISMultistepScheduler_modify(DEISMultistepScheduler):
+ def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
+ """Constructs the noise schedule of Karras et al. (2022)."""
+
+ sigma_min: float = in_sigmas[-1].item()
+ sigma_max: float = in_sigmas[0].item()
+
+ rho = 7.0 # 7.0 is the value used in the paper
+ ramp = np.linspace(0, 1, num_inference_steps)
+ min_inv_rho = sigma_min ** (1 / rho)
+ max_inv_rho = sigma_max ** (1 / rho)
+ sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
+ return sigmas
+
+ def _sigma_to_t(self, sigma, log_sigmas):
+ # get log sigma
+ log_sigma = np.log(sigma)
+
+ # get distribution
+ dists = log_sigma - log_sigmas[:, np.newaxis]
+
+ # get sigmas range
+ low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
+ high_idx = low_idx + 1
+
+ low = log_sigmas[low_idx]
+ high = log_sigmas[high_idx]
+
+ # interpolate sigmas
+ w = (low - log_sigma) / (low - high)
+ w = np.clip(w, 0, 1)
+
+ # transform interpolation to time range
+ t = (1 - w) * low_idx + w * high_idx
+ t = t.reshape(sigma.shape)
+ return t'''
+
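+# Diffusers scheduler factories keyed by UI label; each lambda builds a scheduler from the base scheduler config.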
+samplers_diffusers = [
+ ('Euler a', lambda ddim_scheduler_config: EulerAncestralDiscreteScheduler.from_config(ddim_scheduler_config), {}),
+ ('Euler', lambda ddim_scheduler_config: EulerDiscreteScheduler.from_config(ddim_scheduler_config), {}),
+ #('EDM Euler', lambda ddim_scheduler_config: EDMEulerScheduler.from_config(ddim_scheduler_config), {}),
+ ('LMS', lambda ddim_scheduler_config: LMSDiscreteScheduler.from_config(ddim_scheduler_config), {}),
+ ('Heun',lambda ddim_scheduler_config: HeunDiscreteScheduler.from_config(ddim_scheduler_config), {}),
+ ('DPM2',lambda ddim_scheduler_config: KDPM2DiscreteScheduler.from_config(ddim_scheduler_config), {}),
+ ('DPM2 a',lambda ddim_scheduler_config: KDPM2AncestralDiscreteScheduler.from_config(ddim_scheduler_config), {}),
+ ('DPM++ 2S a',lambda ddim_scheduler_config: DPMSolverSinglestepScheduler.from_config(ddim_scheduler_config), {}),
+ ('DPM++ 2M',lambda ddim_scheduler_config: DPMSolverMultistepScheduler.from_config(ddim_scheduler_config), {}),
+ #('EDM DPM++ 2M',lambda ddim_scheduler_config: EDMDPMSolverMultistepScheduler.from_config(ddim_scheduler_config), {}),
+ ('DPM++ SDE',lambda ddim_scheduler_config: DPMSolverSDEScheduler.from_config(ddim_scheduler_config), {}),
+ ('DPM++ 2M SDE',lambda ddim_scheduler_config: DPMSolverMultistepScheduler.from_config(ddim_scheduler_config,algorithm_type="sde-dpmsolver++"), {}),
+ #('EDM DPM++ 2M SDE',lambda ddim_scheduler_config: EDMDPMSolverMultistepScheduler.from_config(ddim_scheduler_config,algorithm_type="sde-dpmsolver++"), {}),
+ ('DEIS',lambda ddim_scheduler_config: DEISMultistepScheduler.from_config(ddim_scheduler_config), {}),
+ ('UniPC Time Uniform 1',lambda ddim_scheduler_config: UniPCMultistepScheduler.from_config(ddim_scheduler_config,solver_type = "bh1"), {}),
+ ('UniPC Time Uniform 2',lambda ddim_scheduler_config: UniPCMultistepScheduler.from_config(ddim_scheduler_config,solver_type = "bh2"), {}),
+ ('SA-Solver',lambda ddim_scheduler_config: SASolverScheduler.from_config(ddim_scheduler_config), {}),
+ ('Euler Karras', lambda ddim_scheduler_config: EulerDiscreteScheduler.from_config(ddim_scheduler_config,use_karras_sigmas=True), {}),
+ ('LMS Karras',lambda ddim_scheduler_config: LMSDiscreteScheduler.from_config(ddim_scheduler_config,use_karras_sigmas=True), {}),
+ ('Heun Karras',lambda ddim_scheduler_config: HeunDiscreteScheduler.from_config(ddim_scheduler_config,use_karras_sigmas=True), {}),
+ ('DPM2 Karras',lambda ddim_scheduler_config: KDPM2DiscreteScheduler.from_config(ddim_scheduler_config,use_karras_sigmas=True), {}),
+ ('DPM2 a Karras',lambda ddim_scheduler_config: KDPM2AncestralDiscreteScheduler.from_config(ddim_scheduler_config,use_karras_sigmas=True), {}),
+ ('DPM++ 2S a Karras',lambda ddim_scheduler_config: DPMSolverSinglestepScheduler.from_config(ddim_scheduler_config,use_karras_sigmas=True), {}),
+ ('DPM++ 2M Karras',lambda ddim_scheduler_config: DPMSolverMultistepScheduler.from_config(ddim_scheduler_config,use_karras_sigmas=True), {}),
+ ('DPM++ SDE Karras',lambda ddim_scheduler_config: DPMSolverSDEScheduler.from_config(ddim_scheduler_config,use_karras_sigmas=True), {}),
+ ('DPM++ 2M SDE Karras',lambda ddim_scheduler_config: DPMSolverMultistepScheduler.from_config(ddim_scheduler_config,use_karras_sigmas=True,algorithm_type="sde-dpmsolver++"), {}),
+ ('DEIS Karras',lambda ddim_scheduler_config: DEISMultistepScheduler.from_config(ddim_scheduler_config,use_karras_sigmas=True), {}),
+ ('UniPC Time Uniform 1 Karras',lambda ddim_scheduler_config: UniPCMultistepScheduler.from_config(ddim_scheduler_config,solver_type = "bh1",use_karras_sigmas=True), {}),
+ ('UniPC Time Uniform 2 Karras',lambda ddim_scheduler_config: UniPCMultistepScheduler.from_config(ddim_scheduler_config,solver_type = "bh2",use_karras_sigmas=True), {}),
+ ('SA-Solver Karras',lambda ddim_scheduler_config: SASolverScheduler.from_config(ddim_scheduler_config,use_karras_sigmas=True), {}),
+]
+
+
+# samplers_diffusers = [
+# ("DDIMScheduler", "diffusers.schedulers.DDIMScheduler", {})
+# ("DDPMScheduler", "diffusers.schedulers.DDPMScheduler", {})
+# ("DEISMultistepScheduler", "diffusers.schedulers.DEISMultistepScheduler", {})
+# ]
+
+start_time = time.time()
+timeout = 360
+
+scheduler = DDIMScheduler.from_pretrained(
+ base_model,
+ subfolder="scheduler",
+)
+'''vae = AutoencoderKL.from_pretrained(
+ "stabilityai/sd-vae-ft-mse",
+ torch_dtype=torch.float16
+)'''
+
+try:
+    vae = AutoencoderKL.from_pretrained(
+        base_model,
+        subfolder="vae",
+        torch_dtype=torch.float16,
+    )
+except OSError:
+    # fall back to the standard SD VAE when the checkpoint ships without one
+    vae = AutoencoderKL.from_pretrained(
+        "stabilityai/sd-vae-ft-mse",
+        torch_dtype=torch.float16,
+    )
+text_encoder = CLIPTextModel.from_pretrained(
+ base_model,
+ subfolder="text_encoder",
+ torch_dtype=torch.float16,
+)
+tokenizer = CLIPTokenizer.from_pretrained(
+ base_model,
+ subfolder="tokenizer",
+ torch_dtype=torch.float16,
+)
+unet = UNet2DConditionModel.from_pretrained(
+ base_model,
+ subfolder="unet",
+ torch_dtype=torch.float16,
+)
+feature_extract = CLIPImageProcessor.from_pretrained(
+ base_model,
+ subfolder="feature_extractor",
+)
+pipe = StableDiffusionPipeline(
+ text_encoder=text_encoder,
+ tokenizer=tokenizer,
+ unet=unet,
+ vae=vae,
+ scheduler=scheduler,
+ feature_extractor = feature_extract,
+)
+
+if torch.cuda.is_available():
+ pipe = pipe.to("cuda")
+
+def get_model_list():
+ return models
+
+scheduler_cache ={
+ base_name: scheduler
+}
+te_cache = {
+ base_name: text_encoder
+}
+vae_cache = {
+ base_name: vae
+}
+unet_cache = {
+ base_name: unet
+}
+
+lora_cache = {
+ base_name: LoRANetwork(text_encoder, unet)
+}
+tokenizer_cache ={
+ base_name: tokenizer
+}
+feature_cache ={
+ base_name: feature_extract
+}
+controlnetmodel_cache = {}
+adapter_cache = {}
+vae_enhance_cache = {}
+te_base_weight_length = text_encoder.get_input_embeddings().weight.data.shape[0]
+original_prepare_for_tokenization = tokenizer.prepare_for_tokenization
+current_model = base_name
+
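+# Lazily download and cache ControlNet / T2I-Adapter weights so later requests reuse them.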
+def setup_controlnet(name_control,device):
+ global controlnet_type,controlnetmodel_cache
+ if name_control not in controlnetmodel_cache:
+ model_control = ControlNetModel.from_pretrained(name_control, torch_dtype=torch.float16).to(device)
+ controlnetmodel_cache[name_control] = model_control
+ return controlnetmodel_cache[name_control]
+
+def setup_adapter(adapter_sp,device):
+ global model_ip_adapter_type,adapter_cache
+ if adapter_sp not in adapter_cache:
+ model_control = T2IAdapter.from_pretrained(adapter_sp, torch_dtype=torch.float16).to(device)
+ adapter_cache[adapter_sp] = model_control
+ return adapter_cache[adapter_sp]
+
+def setup_vae(model,vae_used = "Default"):
+ global vae_link,vae_single_file
+ vae_model = None
+ if vae_used == "Default":
+ vae_model = AutoencoderKL.from_pretrained(model,subfolder="vae",torch_dtype=torch.float16)
+ elif vae_used == "Consistency Decoder":
+ vae_model = ConsistencyDecoderVAE.from_pretrained(vae_link[vae_used], torch_dtype=torch.float16)
+ else:
+ if vae_single_file[vae_used]:
+ vae_model = AutoencoderKL.from_single_file(vae_link[vae_used],torch_dtype=torch.float16)
+ else:
+ vae_model = AutoencoderKL.from_pretrained(vae_link[vae_used],torch_dtype=torch.float16)
+ if vae_model is None:
+ vae_model = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
+ return vae_model
+
+
+
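+# Build the requested pipeline (txt2img / img2img / inpainting, with or without ControlNet) from cached
+# components, attaching the selected VAE, LoRA adapters and attention processors.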
+def setup_model(name,clip_skip, lora_group=None,diffuser_pipeline = False ,control_net_model = None,img_input = None,device = "cpu",mask_inpaiting = None,vae_used = "Default"):
+ global current_model,vae_link,vae_single_file,models_single_file
+
+ keys = [k[0] for k in models]
+ model = models[keys.index(name)][1]
+ if name not in unet_cache:
+ if name not in models_single_file:
+ try:
+ vae_model = AutoencoderKL.from_pretrained(model,subfolder="vae",torch_dtype=torch.float16)
+ except OSError:
+ vae_model = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
+
+ try:
+ unet = UNet2DConditionModel.from_pretrained(model, subfolder="unet", torch_dtype=torch.float16)
+ except OSError:
+ unet = UNet2DConditionModel.from_pretrained(base_model, subfolder="unet", torch_dtype=torch.float16)
+
+ try:
+ text_encoder = CLIPTextModel.from_pretrained(model, subfolder="text_encoder", torch_dtype=torch.float16)
+ except OSError:
+ text_encoder = CLIPTextModel.from_pretrained(base_model, subfolder="text_encoder", torch_dtype=torch.float16)
+
+ try:
+ tokenizer = CLIPTokenizer.from_pretrained(model,subfolder="tokenizer",torch_dtype=torch.float16)
+ except OSError:
+ tokenizer = CLIPTokenizer.from_pretrained(base_model,subfolder="tokenizer",torch_dtype=torch.float16)
+
+ try:
+ scheduler = DDIMScheduler.from_pretrained(model,subfolder="scheduler")
+ except OSError:
+ scheduler = DDIMScheduler.from_pretrained(base_model,subfolder="scheduler")
+
+ try:
+ feature_extract = CLIPImageProcessor.from_pretrained(model,subfolder="feature_extractor")
+ except OSError:
+ feature_extract = CLIPImageProcessor.from_pretrained(base_model,subfolder="feature_extractor")
+ else:
+ pipe_get = StableDiffusionPipeline_finetune.from_single_file(model,safety_checker= None,requires_safety_checker = False,torch_dtype=torch.float16).to(device)
+ vae_model = pipe_get.vae
+ unet = pipe_get.unet
+ text_encoder = pipe_get.text_encoder
+ tokenizer = pipe_get.tokenizer
+ scheduler = pipe_get.scheduler
+ feature_extract = pipe_get.feature_extractor if pipe_get.feature_extractor is not None else CLIPImageProcessor.from_pretrained(base_model,subfolder="feature_extractor")
+ del pipe_get
+
+ # if vae_model is None:
+ # vae_model = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
+ scheduler_cache[name] = scheduler
+ unet_cache[name] = unet
+ te_cache[name] = text_encoder
+ vae_cache[name] = vae_model
+ tokenizer_cache[name] = tokenizer
+ feature_cache[name] = feature_extract
+ #lora_cache[model] = LoRANetwork(text_encoder, unet)
+
+ if vae_used != "Default" and vae_used not in vae_enhance_cache:
+ vae_enhance_cache[vae_used] = setup_vae(model,vae_used)
+
+ if current_model != name:
+ #if current_model not in keep_vram:
+ # offload current model
+ unet_cache[current_model].to(device)
+ te_cache[current_model].to(device)
+ vae_cache[current_model].to(device)
+ current_model = name
+
+ local_te, local_unet,local_sche,local_vae,local_token,local_feature = copy.deepcopy(te_cache[name]), copy.deepcopy(unet_cache[name]),scheduler_cache[name],vae_cache[name], copy.deepcopy(tokenizer_cache[name]),feature_cache[name]
+ if vae_used != "Default":
+ local_vae = vae_enhance_cache[vae_used]
+ if torch.cuda.is_available():
+ local_unet.to("cuda")
+ local_te.to("cuda")
+ local_vae.to("cuda")
+ #local_unet.set_attn_processor(AttnProcessor())
+ #local_lora.reset()
+
+
+ if diffuser_pipeline:
+ if control_net_model is not None:
+ if mask_inpaiting and img_input:
+ pipe = StableDiffusionControlNetInpaintPipeline_finetune(
+ vae= local_vae,
+ text_encoder= local_te,
+ tokenizer=local_token,
+ unet=local_unet,
+ controlnet = control_net_model,
+ safety_checker= None,
+ scheduler = local_sche,
+ feature_extractor=local_feature,
+ requires_safety_checker = False,
+ ).to(device)
+ elif img_input is not None:
+ #pipe = StableDiffusionControlNetImg2ImgPipeline_finetune.from_pretrained(model,safety_checker = None,controlnet=control_net_model, torch_dtype=torch.float16).to(device)
+ pipe = StableDiffusionControlNetImg2ImgPipeline_finetune(
+ vae= local_vae,
+ text_encoder= local_te,
+ tokenizer=local_token,
+ unet=local_unet,
+ controlnet = control_net_model,
+ safety_checker= None,
+ scheduler = local_sche,
+ feature_extractor=local_feature,
+ requires_safety_checker = False,
+ ).to(device)
+ else:
+ #pipe = StableDiffusionControlNetPipeline_finetune.from_pretrained(model,safety_checker = None,controlnet=control_net_model, torch_dtype=torch.float16).to(device)
+ pipe = StableDiffusionControlNetPipeline_finetune(
+ vae= local_vae,
+ text_encoder= local_te,
+ tokenizer=local_token,
+ unet=local_unet,
+ controlnet = control_net_model,
+ scheduler = local_sche,
+ safety_checker= None,
+ feature_extractor=local_feature,
+ requires_safety_checker = False,
+ ).to(device)
+ else:
+ if mask_inpaiting and img_input:
+ pipe = StableDiffusionInpaintPipeline_finetune(
+ vae= local_vae,
+ text_encoder= local_te,
+ tokenizer=local_token,
+ unet=local_unet,
+ scheduler = local_sche,
+ safety_checker= None,
+ feature_extractor=local_feature,
+ requires_safety_checker = False,
+ ).to(device)
+ elif img_input is not None:
+ #pipe = StableDiffusionImg2ImgPipeline_finetune.from_pretrained(model,safety_checker = None, torch_dtype=torch.float16).to(device)
+ pipe = StableDiffusionImg2ImgPipeline_finetune(
+ vae= local_vae,
+ text_encoder= local_te,
+ tokenizer=local_token,
+ unet=local_unet,
+ scheduler = local_sche,
+ safety_checker= None,
+ feature_extractor=local_feature,
+ requires_safety_checker = False,
+ ).to(device)
+ else:
+ #pipe = StableDiffusionPipeline_finetune.from_pretrained(model,safety_checker = None, torch_dtype=torch.float16).to(device)
+ pipe = StableDiffusionPipeline_finetune(
+ vae= local_vae,
+ text_encoder= local_te,
+ tokenizer=local_token,
+ unet=local_unet,
+ scheduler = local_sche,
+ safety_checker= None,
+ feature_extractor=local_feature,
+ requires_safety_checker = False,
+ ).to(device)
+ else:
+ #global pipe
+ #pipe.text_encoder, pipe.unet,pipe.scheduler,pipe.vae = local_te, local_unet,local_sche,local_vae
+
+ pipe = StableDiffusionPipeline(
+ text_encoder=local_te,
+ tokenizer=local_token,
+ unet=local_unet,
+ vae=local_vae,
+ scheduler=local_sche,
+ feature_extractor=local_feature,
+ ).to(device)
+
+
+ #if lora_state is not None and lora_state != "":
+ if lora_group is not None and len(lora_group) > 0:
+ global lora_scale_dict
+ adapter_name_lst = []
+ adapter_weights_lst = []
+ for name, file in lora_group.items():
+ pipe.load_lora_weights(file, adapter_name = name)
+ adapter_name_lst.append(name)
+ adapter_weights_lst.append(lora_scale_dict[name])
+ pipe.set_adapters(adapter_name_lst, adapter_weights=adapter_weights_lst)
+ #pipe.fuse_lora(lora_scale=lora_scale_dict[name])
+ #pipe = load_lora_control_pipeline(pipe,lora_state,lora_scale,device)
+
+ pipe.unet.set_attn_processor(AttnProcessor())
+ if hasattr(F, "scaled_dot_product_attention"):
+ pipe.unet.set_attn_processor(AttnProcessor2_0())
+
+ if diffuser_pipeline == False:
+ pipe.setup_unet(pipe.unet)
+ pipe.tokenizer.prepare_for_tokenization = local_token.prepare_for_tokenization
+ #pipe.tokenizer.added_tokens_encoder = {}
+ #pipe.tokenizer.added_tokens_decoder = {}
+ #pipe.setup_text_encoder(clip_skip, local_te)
+ '''if lora_state is not None and lora_state != "":
+ local_lora.load(lora_state, lora_scale)
+ local_lora.to(local_unet.device, dtype=local_unet.dtype)
+
+ pipe.text_encoder, pipe.unet,pipe.scheduler,pipe.vae = local_te, local_unet,local_sche,local_vae
+ pipe.setup_unet(local_unet)
+ pipe.tokenizer.prepare_for_tokenization = local_token.prepare_for_tokenization
+ pipe.tokenizer.added_tokens_encoder = {}
+ pipe.tokenizer.added_tokens_decoder = {}
+ pipe.setup_text_encoder(clip_skip, local_te)'''
+ torch.cuda.empty_cache()
+ gc.collect()
+ return pipe
+
+
+def error_str(error, title="Error"):
+ return (
+ f"""#### {title}
+ {error}"""
+ if error
+ else ""
+ )
+
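+# Textual-inversion helpers: each embedding vector is mapped to placeholder tokens "emb-<name>-<i>" that get substituted into the prompt.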
+def make_token_names(embs):
+ all_tokens = []
+ for name, vec in embs.items():
+ tokens = [f'emb-{name}-{i}' for i in range(len(vec))]
+ all_tokens.append(tokens)
+ return all_tokens
+
+def setup_tokenizer(tokenizer, embs):
+ reg_match = [re.compile(fr"(?:^|(?<=\s|,)){k}(?=,|\s|$)") for k in embs.keys()]
+ clip_keywords = [' '.join(s) for s in make_token_names(embs)]
+
+ def parse_prompt(prompt: str):
+ for m, v in zip(reg_match, clip_keywords):
+ prompt = m.sub(v, prompt)
+ return prompt
+
+ def prepare_for_tokenization(self, text: str, is_split_into_words: bool = False, **kwargs):
+ text = parse_prompt(text)
+ r = original_prepare_for_tokenization(text, is_split_into_words, **kwargs)
+ return r
+ tokenizer.prepare_for_tokenization = prepare_for_tokenization.__get__(tokenizer, CLIPTokenizer)
+ return [t for sublist in make_token_names(embs) for t in sublist]
+
+
+def convert_size(size_bytes):
+ if size_bytes == 0:
+ return "0B"
+ size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
+ i = int(math.floor(math.log(size_bytes, 1024)))
+ p = math.pow(1024, i)
+ s = round(size_bytes / p, 2)
+ return "%s %s" % (s, size_name[i])
+
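+# Merge LoRA weights directly into the UNet / text-encoder layers (kohya "lora_unet"/"lora_te" key layout).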
+def load_lora_control_pipeline(pipeline_control,file_path,lora_scale,device):
+ state_dict = load_file(file_path,device=device)
+
+ LORA_PREFIX_UNET = 'lora_unet'
+ LORA_PREFIX_TEXT_ENCODER = 'lora_te'
+ alpha = lora_scale
+
+ visited = []
+
+ # directly update weight in diffusers model
+ for key in state_dict:
+
+        # keys usually look like:
+        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
+
+        # alpha is applied through lora_scale, so skip the per-layer alpha entries
+ if '.alpha' in key or key in visited:
+ continue
+
+ if 'text' in key:
+ layer_infos = key.split('.')[0].split(LORA_PREFIX_TEXT_ENCODER+'_')[-1].split('_')
+ curr_layer = pipeline_control.text_encoder
+ else:
+ layer_infos = key.split('.')[0].split(LORA_PREFIX_UNET+'_')[-1].split('_')
+ curr_layer = pipeline_control.unet
+
+ # find the target layer
+ temp_name = layer_infos.pop(0)
+ while len(layer_infos) > -1:
+ try:
+ curr_layer = curr_layer.__getattr__(temp_name)
+ if len(layer_infos) > 0:
+ temp_name = layer_infos.pop(0)
+ elif len(layer_infos) == 0:
+ break
+ except Exception:
+ if len(temp_name) > 0:
+ temp_name += '_'+layer_infos.pop(0)
+ else:
+ temp_name = layer_infos.pop(0)
+
+ # org_forward(x) + lora_up(lora_down(x)) * multiplier
+ pair_keys = []
+ if 'lora_down' in key:
+ pair_keys.append(key.replace('lora_down', 'lora_up'))
+ pair_keys.append(key)
+ else:
+ pair_keys.append(key)
+ pair_keys.append(key.replace('lora_up', 'lora_down'))
+
+ # update weight
+ if len(state_dict[pair_keys[0]].shape) == 4:
+ weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
+ weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
+ curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
+ else:
+ weight_up = state_dict[pair_keys[0]].to(torch.float32)
+ weight_down = state_dict[pair_keys[1]].to(torch.float32)
+ curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)
+
+ # update visited list
+ for item in pair_keys:
+ visited.append(item)
+ torch.cuda.empty_cache()
+ gc.collect()
+ return pipeline_control
+
+
+def colorize(value, vmin=None, vmax=None, cmap='gray_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None):
+ """Converts a depth map to a color image.
+
+ Args:
+        value (torch.Tensor, numpy.ndarray): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed
+ vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, value.min() is used. Defaults to None.
+ vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, value.max() is used. Defaults to None.
+        cmap (str, optional): matplotlib colormap to use. Defaults to 'gray_r'.
+ invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99.
+ invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
+ background_color (tuple[int], optional): 4-tuple RGB color to give to invalid pixels. Defaults to (128, 128, 128, 255).
+ gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
+ value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None.
+
+ Returns:
+ numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4)
+ """
+ if isinstance(value, torch.Tensor):
+ value = value.detach().cpu().numpy()
+
+ value = value.squeeze()
+ if invalid_mask is None:
+ invalid_mask = value == invalid_val
+ mask = np.logical_not(invalid_mask)
+
+ # normalize
+ vmin = np.percentile(value[mask],2) if vmin is None else vmin
+ vmax = np.percentile(value[mask],85) if vmax is None else vmax
+ if vmin != vmax:
+ value = (value - vmin) / (vmax - vmin) # vmin..vmax
+ else:
+ # Avoid 0-division
+ value = value * 0.
+
+ # squeeze last dim if it exists
+ # grey out the invalid values
+
+ value[invalid_mask] = np.nan
+ cmapper = matplotlib.cm.get_cmap(cmap)
+ if value_transform:
+ value = value_transform(value)
+ # value = value / value.max()
+ value = cmapper(value, bytes=True) # (nxmx4)
+
+ img = value[...]
+ img[invalid_mask] = background_color
+
+ if gamma_corrected:
+ img = img / 255
+ img = np.power(img, 2.2)
+ img = img * 255
+ img = img.astype(np.uint8)
+ return img
+
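+# Run the selected T2I-Adapter preprocessor (Canny, Openpose, depth, color, ...) on the control image, unless preprocessing is disabled.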
+def adapter_preprocessing(model_adapter,img_control,low_threshold_adapter = None,high_threshold_adapter=None,has_body=False,has_hand=False,has_face=False,preprocessor_adapter=None,disable_preprocessing_adapter=False):
+ if disable_preprocessing_adapter == True :
+ return img_control.copy()
+ device = 'cpu'
+ if torch.cuda.is_available():
+ device = 'cuda'
+ if model_adapter == 'Canny':
+ img_control = np.array(img_control)
+ img_control = cv2.Canny(img_control, low_threshold_adapter, high_threshold_adapter)
+ img_control = Image.fromarray(img_control)
+ elif model_adapter == 'Openpose':
+ #model_openpose = OpenposeDetector()
+ processor = OpenposeDetector.from_pretrained('lllyasviel/ControlNet').to(device)
+ img_control = processor(img_control, include_body=has_body, include_hand=has_hand, include_face=has_face)
+ #img_control = model_openpose(img_control, has_hand)[0]
+ elif model_adapter == 'Depth':
+ #model_midas = MidasDetector()
+ #img_control = model_midas(resize_image(img_control))[0]
+ if preprocessor_adapter == 'DPT':
+ processor = pipeline('depth-estimation')
+ img_control = processor(img_control)['depth']
+ img_control = np.array(img_control)
+ img_control = img_control[:, :, None]
+ img_control = np.concatenate([img_control, img_control, img_control], axis=2)
+ img_control = Image.fromarray(img_control)
+ else:
+ processor = MidasDetector.from_pretrained("lllyasviel/Annotators").to(device)
+ img_control = processor(img_control)
+ elif model_adapter == 'Semantic Segmentation':
+ img_control = preprocessing_segmentation(preprocessor_adapter,img_control)
+ elif model_adapter == 'Color':
+ img_control = img_control.resize((8, 8))
+ img_control = img_control.resize((512, 512), resample=Image.Resampling.NEAREST)
+ elif model_adapter == 'Zoedepth':
+ '''processor = torch.hub.load("isl-org/ZoeDepth", "ZoeD_N", pretrained=True).to(device)
+ img_control = processor.infer_pil(img_control)
+ img_control = Image.fromarray(colorize(img_control)).convert('RGB')'''
+ '''processor = ZoeDetector.from_pretrained("lllyasviel/Annotators").to(device)
+ img_control = processor(img_control)'''
+ processor = ZoeDetector.from_pretrained("valhalla/t2iadapter-aux-models", filename="zoed_nk.pth", model_type="zoedepth_nk").to(device)
+ img_control = processor(img_control, gamma_corrected=True)
+ else:
+ active_model = False
+ if model_adapter == 'Sketch':
+ active_model = True
+        if preprocessor_adapter == 'HED':
+ processor = HEDdetector.from_pretrained('lllyasviel/Annotators').to(device)
+ else:
+ processor = PidiNetDetector.from_pretrained('lllyasviel/Annotators').to(device)
+ img_control = processor(img_control,scribble=active_model)
+ #img_control = np.array(img_control)
+ #img = cv2.resize(img_control,(width, height))
+ #img_input = img_input.resize((width, height), Image.LANCZOS)
+ #img_control = img_control.resize((width, height), Image.LANCZOS)
+ if model_adapter != 'Canny' and model_adapter != 'Semantic Segmentation' and model_adapter != 'Color':
+ del processor
+ torch.cuda.empty_cache()
+ gc.collect()
+ return img_control
+
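+# Same idea as adapter_preprocessing, but for the ControlNet preprocessors (Canny, Openpose, Lineart, MLSD, ...).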
+def control_net_preprocessing(control_net_model,img_control,low_threshold = None,high_threshold=None,has_body=False,has_hand=False,has_face=False,preprocessor_name=None,disable_preprocessing=False):
+ if disable_preprocessing == True or control_net_model == 'Instruct Pix2Pix':
+ return img_control.copy()
+ device = 'cpu'
+ if torch.cuda.is_available():
+ device = 'cuda'
+ if control_net_model == 'Canny':
+ img_control = np.array(img_control)
+ img_control = cv2.Canny(img_control, low_threshold, high_threshold)
+ img_control = img_control[:, :, None]
+ img_control = np.concatenate([img_control, img_control, img_control], axis=2)
+ img_control = Image.fromarray(img_control)
+ elif control_net_model == 'Openpose':
+ #model_openpose = OpenposeDetector()
+ processor = OpenposeDetector.from_pretrained('lllyasviel/ControlNet').to(device)
+ img_control = processor(img_control, include_body=has_body, include_hand=has_hand, include_face=has_face)
+ #img_control = model_openpose(img_control, has_hand)[0]
+ elif control_net_model == 'Depth':
+ #model_midas = MidasDetector()
+ #img_control = model_midas(resize_image(img_control))[0]
+ if preprocessor_name == 'DPT':
+ processor = pipeline('depth-estimation')
+ img_control = processor(img_control)['depth']
+ img_control = np.array(img_control)
+ img_control = img_control[:, :, None]
+ img_control = np.concatenate([img_control, img_control, img_control], axis=2)
+ img_control = Image.fromarray(img_control)
+ else:
+ processor = MidasDetector.from_pretrained("lllyasviel/Annotators").to(device)
+ img_control = processor(img_control)
+ elif control_net_model == 'Lineart (anime)':
+ processor = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators").to(device)
+ img_control = processor(img_control)
+ #img_control = np.array(img_control)
+ elif control_net_model == 'Lineart':
+ processor = LineartDetector.from_pretrained("lllyasviel/Annotators").to(device)
+ img_control = processor(img_control)
+ #img_control = np.array(img_control)
+ elif control_net_model == 'MLSD':
+ processor = MLSDdetector.from_pretrained("lllyasviel/ControlNet").to(device)
+ img_control = processor(img_control)
+ #img_control = np.array(img_control)
+ elif control_net_model == 'Semantic Segmentation':
+ img_control = preprocessing_segmentation(preprocessor_name,img_control)
+ elif control_net_model == 'Normal Map':
+ processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators").to(device)
+ img_control = processor(img_control)
+ elif control_net_model == 'Shuffle':
+ processor = ContentShuffleDetector()
+ img_control = processor(img_control)
+ else:
+ active_model = False
+ if control_net_model == 'Scribble':
+ active_model = True
+ if preprocessor_name == 'HED':
+ processor = HEDdetector.from_pretrained('lllyasviel/Annotators').to(device)
+ else:
+ processor = PidiNetDetector.from_pretrained('lllyasviel/Annotators').to(device)
+ img_control = processor(img_control,scribble=active_model)
+ #img_control = np.array(img_control)
+ #img = cv2.resize(img_control,(width, height))
+ #img_input = img_input.resize((width, height), Image.LANCZOS)
+ #img_control = img_control.resize((width, height), Image.LANCZOS)
+ if control_net_model != 'Canny' and control_net_model != 'Semantic Segmentation':
+ del processor
+ torch.cuda.empty_cache()
+ gc.collect()
+ return img_control
+
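+# Load textual-inversion embeddings into the tokenizer / text encoder of the k-diffusion pipeline.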
+def add_embedding(pipe_model,embs):
+ tokenizer, text_encoder = pipe_model.tokenizer, pipe_model.text_encoder
+ if embs is not None and len(embs) > 0:
+ ti_embs = {}
+ for name, file in embs.items():
+ if str(file).endswith(".pt"):
+ loaded_learned_embeds = torch.load(file, map_location="cpu")
+ else:
+ loaded_learned_embeds = load_file(file, device="cpu")
+ loaded_learned_embeds = loaded_learned_embeds["string_to_param"]["*"] if "string_to_param" in loaded_learned_embeds else loaded_learned_embeds
+ if isinstance(loaded_learned_embeds, dict):
+ #loaded_learned_embeds = list(loaded_learned_embeds.values())[-1]
+ ti_embs.update(loaded_learned_embeds)
+ else:
+ ti_embs[name] = loaded_learned_embeds
+
+ if len(ti_embs) > 0:
+ '''for key, value in ti_embs.items():
+ if isinstance(value, dict):
+ ti_embs.pop(key)
+ ti_embs.update(value)'''
+ tokens = setup_tokenizer(tokenizer, ti_embs)
+ added_tokens = tokenizer.add_tokens(tokens)
+ delta_weight = torch.cat([val for val in ti_embs.values()], dim=0)
+
+ assert added_tokens == delta_weight.shape[0]
+ text_encoder.resize_token_embeddings(len(tokenizer))
+ token_embeds = text_encoder.get_input_embeddings().weight.data
+ token_embeds[-delta_weight.shape[0]:] = delta_weight
+ torch.cuda.empty_cache()
+ gc.collect()
+ return pipe_model
+
+def add_embedding_with_diffusers(pipe,embs):
+ if embs is not None and len(embs) > 0:
+ for name, file in embs.items():
+ pipe.load_textual_inversion(file)
+ torch.cuda.empty_cache()
+ gc.collect()
+ return pipe
+
+
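+# Convert the given masks into IP-Adapter attention masks via IPAdapterMaskProcessor (optionally inverted).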
+def mask_region_apply_ip_adapter(mask,invert_ip_adapter_mask_mode):
+ if mask is None:
+ return None
+    # black marks the masked region
+ if not isinstance(mask,List):
+ mask = [mask]
+ if len(mask) == 0:
+ return None
+ if invert_ip_adapter_mask_mode:
+ mask = [ImageOps.invert(i).convert('RGB') for i in mask]
+ processor = IPAdapterMaskProcessor()
+ masks = processor.preprocess(mask)
+ '''mask = mask.resize((width, height), Image.BICUBIC)
+ mask = np.array(mask).astype(np.float32) / 255.0
+ #If the region is black apply ( 0 = black)
+ mask = np.expand_dims(np.where(mask==0, 1,0)[:, :, 0], axis=0)
+ if mask.ndim == 3:
+ mask = mask[..., None]
+
+ mask = torch.from_numpy(mask.transpose(0, 3, 1, 2))
+ return mask[0]'''
+ return masks
+
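+# Extract InsightFace FaceID embeddings (and aligned face crops for the "plus" variants) from the reference images.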
+def ip_adapter_face_id_embedding(lst_img_face_id_embed,device,dtype,guidance_scale,plus_faceid = False):
+ ref_images_embeds = []
+ ref_unc_images_embeds = []
+ ip_adapter_images = []
+ app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
+ app.prepare(ctx_id=0, det_size=(640, 640))
+ if not isinstance(lst_img_face_id_embed,list):
+ lst_img_face_id_embed = [lst_img_face_id_embed]
+ for im in lst_img_face_id_embed:
+ #im = load_image(im)
+ image = cv2.cvtColor(np.asarray(im), cv2.COLOR_BGR2RGB)
+ faces = app.get(image) #faces is a list
+ if len(faces) == 0:
+ raise ValueError(
+                    "Cannot find any faces in the image."
+ )
+ if plus_faceid:
+ ip_adapter_images.append(face_align.norm_crop(image, landmark=faces[0].kps, image_size=224)) #For plus faceid
+ image = torch.from_numpy(faces[0].normed_embedding)
+ image_embeds = image.unsqueeze(0)
+ uncond_image_embeds = torch.zeros_like(image_embeds)
+ ref_images_embeds.append(image_embeds)
+ ref_unc_images_embeds.append(uncond_image_embeds)
+ ref_images_embeds = torch.stack(ref_images_embeds, dim=0)
+ if guidance_scale > 1 :
+ ref_unc_images_embeds = torch.stack(ref_unc_images_embeds, dim=0)
+ single_image_embeds = torch.cat([ref_unc_images_embeds, ref_images_embeds], dim=0).to(device,dtype=dtype)
+ else:
+ single_image_embeds = ref_images_embeds.to(device,dtype=dtype)
+ return single_image_embeds,ip_adapter_images
+
+
+lst_control = []
+lst_adapter =[]
+lst_ip_adapter = []
+current_number_ip_adapter = 0
+current_number_control = 0
+current_number_adapter = 0
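+# Main generation entry point wired to the Gradio UI: assembles the pipeline plus all ControlNet / T2I-Adapter / IP-Adapter conditioning, then runs sampling.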
+def inference(
+ prompt,
+ guidance,
+ steps,
+ width=512,
+ height=512,
+ clip_skip =2,
+ seed=0,
+ neg_prompt="",
+ state=None,
+ img_input=None,
+ i2i_scale=0.5,
+ hr_enabled=False,
+ hr_method="Latent",
+ hr_scale=1.5,
+ hr_denoise=0.8,
+ sampler="DPM++ 2M Karras",
+ embs=None,
+ model=None,
+ lora_group = None,
+ #lora_state=None,
+ #lora_scale=None,
+ formula_setting = None,
+ controlnet_enabled = False,
+ control_net_model = None,
+ low_threshold = None,
+ high_threshold = None,
+ has_body = False,
+ has_hand = False,
+ has_face = False,
+ img_control = None,
+ image_condition = None,
+ controlnet_scale = 0,
+ preprocessor_name = None,
+ diffuser_pipeline = False,
+ sampler_hires="DPM++ 2M Karras",
+ latent_processing = 0,
+ control_guidance_start = 0.0,
+ control_guidance_end = 1.0,
+ multi_controlnet = False,
+ disable_preprocessing = False,
+ region_condition = False,
+ hr_process_enabled = False,
+ ip_adapter = False,
+ model_ip_adapter = None,
+ inf_adapt_image = None,
+ inf_adapt_image_strength = 1.0,
+ hr_region_condition = False,
+ adapter_enabled = False,
+ model_adapter = None,
+ low_threshold_adapter = None,
+ high_threshold_adapter = None,
+ has_body_openpose_adapter = False,
+ has_hand_openpose_adapter = False,
+ has_face_openpose_adapter = False,
+ adapter_img = None,
+ image_condition_adapter = None,
+ preprocessor_adapter = None,
+ adapter_conditioning_scale = 0,
+ adapter_conditioning_factor = None,
+ multi_adapter = False,
+ disable_preprocessing_adapter = False,
+ ip_adapter_multi = False,
+ guidance_rescale = 0,
+ inf_control_adapt_image = None,
+ long_encode = 0,
+ inpaiting_mode = False,
+ invert_mask_mode = False,
+ mask_upload = None,
+ inf_image_inpaiting = None,
+ invert_ip_adapter_mask_mode = True,
+ vae_used = "Default",
+):
+ global formula,controlnet_type,lst_control,lst_adapter,model_ip_adapter_type,adapter_type,lst_ip_adapter,current_number_ip_adapter,encoding_type
+ img_control_input = None
+ device = "cpu"
+ if torch.cuda.is_available():
+ device = "cuda"
+ if region_condition == False:
+ state = None
+
+ mask_inpaiting = None
+ if inpaiting_mode and isinstance(inf_image_inpaiting,dict):
+ mask_inpaiting = inf_image_inpaiting["mask"]
+ img_input = inf_image_inpaiting["image"]
+ diff = ImageChops.difference(mask_inpaiting, img_input)
+ if diff.getbbox() is None:
+ mask_inpaiting = None
+ if inpaiting_mode and mask_upload:
+ mask_inpaiting = mask_upload
+ if mask_inpaiting and invert_mask_mode:
+ mask_inpaiting = ImageOps.invert(mask_inpaiting).convert('RGB')
+
+ if adapter_enabled:
+ if len(lst_adapter) > 0 and multi_adapter:
+ adapter_img = []
+ model_adapter = []
+ adapter_conditioning_scale = []
+ adapter_conditioning_factor = []
+ for i in range( len(lst_adapter)):
+ setting_processing = list(lst_adapter[i].items())
+ setting_processing = setting_processing[:-2]
+ setting_processing = dict(setting_processing)
+ image_sp_adapter = adapter_preprocessing(**setting_processing)
+ adapter_img.append(image_sp_adapter)
+ adapter_sp = adapter_type[lst_adapter[i]["model_adapter"]]
+ model_adapter.append(setup_adapter(adapter_sp,device))
+ adapter_conditioning_scale.append(float(lst_adapter[i]["adapter_conditioning_scale"]))
+ adapter_conditioning_factor.append(float(lst_adapter[i]["adapter_conditioning_factor"]))
+ adapter_conditioning_factor = adapter_conditioning_factor[-1]
+ torch.cuda.empty_cache()
+ gc.collect()
+ elif adapter_img is not None and multi_adapter ==False:
+ adapter_img = adapter_preprocessing(model_adapter,adapter_img,low_threshold_adapter,high_threshold_adapter,has_body_openpose_adapter,has_hand_openpose_adapter,has_face_openpose_adapter,preprocessor_adapter,disable_preprocessing_adapter)
+ model_adapter = adapter_type[model_adapter]
+ adapter_conditioning_scale = float(adapter_conditioning_scale)
+ adapter_conditioning_factor = float(adapter_conditioning_factor)
+ torch.cuda.empty_cache()
+ gc.collect()
+ model_adapter=setup_adapter(model_adapter,device)
+ torch.cuda.empty_cache()
+ gc.collect()
+ else:
+ model_adapter = None
+ adapter_img = None
+ else:
+ model_adapter = None
+ adapter_img = None
+
+
+ if controlnet_enabled:
+ if len(lst_control) > 0 and multi_controlnet:
+ img_control = []
+ control_net_model = []
+ controlnet_scale = []
+ control_guidance_start = []
+ control_guidance_end = []
+ for i in range( len(lst_control)):
+ setting_processing = list(lst_control[i].items())
+ setting_processing = setting_processing[:-3]
+ setting_processing = dict(setting_processing)
+ image_sp_control = control_net_preprocessing(**setting_processing)
+ img_control.append(image_sp_control)
+ conrol_net_sp = controlnet_type[lst_control[i]["control_net_model"]]
+ control_net_model.append(setup_controlnet(conrol_net_sp,device))
+ controlnet_scale.append(float(lst_control[i]["controlnet_scale"]))
+ control_guidance_start.append(float(lst_control[i]["control_guidance_start"]))
+ control_guidance_end.append(float(lst_control[i]["control_guidance_end"]))
+ torch.cuda.empty_cache()
+ gc.collect()
+ elif img_control is not None and multi_controlnet ==False:
+ img_control = control_net_preprocessing(control_net_model,img_control,low_threshold,high_threshold,has_body,has_hand,has_face,preprocessor_name,disable_preprocessing)
+ control_net_model = controlnet_type[control_net_model]
+ controlnet_scale = float(controlnet_scale)
+ control_guidance_start = float(control_guidance_start)
+ control_guidance_end = float(control_guidance_end)
+ torch.cuda.empty_cache()
+ gc.collect()
+ control_net_model=setup_controlnet(control_net_model,device)
+ torch.cuda.empty_cache()
+ gc.collect()
+ else:
+ control_net_model = None
+ img_control = None
+ else:
+ control_net_model = None
+ img_control = None
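+    # Resolve the selected attention-weight formula and pick a random seed when none was provided.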
+ keys_f = [k[0] for k in formula]
+ formula_setting = formula[keys_f.index(formula_setting)][1]
+ if seed is None or seed < 0:
+ seed = random.randint(0, sys.maxsize)
+
+ #lora_state = lora_dict[lora_state]
+ pipe = setup_model(model,clip_skip, lora_group,diffuser_pipeline,control_net_model,img_input,device,mask_inpaiting,vae_used)
+ generator = torch.Generator(device).manual_seed(int(seed))
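+    # weight_func scales the region-prompt attention weights as a function of the weight w, the noise level sigma, and the attention scores qk.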
+ if formula_setting == 0:
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std()
+ elif formula_setting == 1:
+ weight_func = lambda w, sigma, qk: w * math.log(1 + sigma) * qk.max()
+ elif formula_setting == 2:
+ weight_func = lambda w, sigma, qk: w * math.log(1 + sigma) * qk.std()
+ else:
+ weight_func = lambda w, sigma, qk: w * math.log(1 + sigma**2) * qk.std()
+ start_time = time.time()
+
+ sampler_name, sampler_opt = None, None
+ '''for label, funcname, options in samplers_k_diffusion:
+ if label == sampler_hires:
+ sampler_name_hires, sampler_opt_hires = funcname, options'''
+
+    # Add Textual Inversion / text embeddings
+ pipe = add_embedding(pipe,embs)
+ width_resize_mask_ipadapter = width
+ height_resize_mask_ipadapter = height
+ if img_input is not None:
+ width_resize_mask_ipadapter = img_input.width
+ height_resize_mask_ipadapter = img_input.height
+ setup_model_t2i_adapter(pipe,model_adapter)
+ cross_attention_kwargs = {}
+
+    # Map the selected prompt-encoding option to its internal id
+ long_encode = encoding_type[long_encode]
+ ip_adapter_image_embeds = None
+ faceid_plus_v2 = False
+ #clip_embeds = None #Support for faceid_plus
+
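+    # IP-Adapter setup: group the selected images by encoder family (ViT-H, ViT-G, FaceID, FaceID Plus), collect their scales and region masks, and precompute image embeddings where needed.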
+ if ip_adapter == True:
+ #inf_adapt_image = None
+ ip_adapter_images_faceid_plus = []
+ if ip_adapter_multi and len(lst_ip_adapter) > 0:
+
+ ip_adapter_image_lst =[]
+ model_ip_adapter_lst = []
+ scale_ip_adapter_lst = []
+ region_aplly_lst = []
+
+ ip_adapter_image_vitg_lst =[]
+ model_ip_adapter_vitg_lst = []
+ scale_ip_adapter_vitg_lst = []
+ region_aplly_vitg_lst = []
+
+ ip_adapter_faceid_image_lst =[]
+ model_ip_adapter_faceid_lst = []
+ scale_ip_adapter_faceid_lst = []
+ region_aplly_lst_faceid = []
+
+ ip_adapter_faceid_plus_image_lst =[]
+ model_ip_adapter_faceid_plus_lst = []
+ scale_ip_adapter_faceid_plus_lst = []
+ region_aplly_lst_faceid_plus = []
+
+            # Fallback masks for entries without a region image: full black when masks will be inverted, full white otherwise
+ img_full_black = Image.new('RGB', (width, height), (0, 0, 0))
+ img_full_white = Image.new('RGB', (width, height), (255, 255, 255))
+
+ for i in lst_ip_adapter:
+ if 'VIT-G' in i["model"]:
+ ip_adapter_image_vitg_lst.append(i["image"])
+ model_ip_adapter_vitg_lst.append(model_ip_adapter_type[i["model"]])
+ scale_ip_adapter_vitg_lst.append(float(i["scale"]))
+ if i["region_apply"] is not None:
+ region_aplly_vitg_lst.append(i["region_apply"])
+ else:
+ if invert_ip_adapter_mask_mode:
+ region_aplly_vitg_lst.append(img_full_black)
+ else:
+ region_aplly_vitg_lst.append(img_full_white)
+ elif 'FaceID' not in i["model"]:
+ ip_adapter_image_lst.append(i["image"])
+ model_ip_adapter_lst.append(model_ip_adapter_type[i["model"]])
+ scale_ip_adapter_lst.append(float(i["scale"]))
+ if i["region_apply"] is not None:
+ region_aplly_lst.append(i["region_apply"])
+ else:
+ if invert_ip_adapter_mask_mode:
+ region_aplly_lst.append(img_full_black)
+ else:
+ region_aplly_lst.append(img_full_white)
+ elif 'Plus FaceID' in i["model"]:
+ if 'Plus FaceIDv2' in i["model"]:
+ faceid_plus_v2 = True
+ ip_adapter_faceid_plus_image_lst.append(i["image"])
+ model_ip_adapter_faceid_plus_lst.append(model_ip_adapter_type[i["model"]])
+ scale_ip_adapter_faceid_plus_lst.append(float(i["scale"]))
+ if i["region_apply"] is not None:
+ region_aplly_lst_faceid_plus.append(i["region_apply"])
+ else:
+ if invert_ip_adapter_mask_mode:
+ region_aplly_lst_faceid_plus.append(img_full_black)
+ else:
+ region_aplly_lst_faceid_plus.append(img_full_white)
+ else:
+ ip_adapter_faceid_image_lst.append(i["image"])
+ model_ip_adapter_faceid_lst.append(model_ip_adapter_type[i["model"]])
+ scale_ip_adapter_faceid_lst.append(float(i["scale"]))
+ if i["region_apply"] is not None:
+ region_aplly_lst_faceid.append(i["region_apply"])
+ else:
+ if invert_ip_adapter_mask_mode:
+ region_aplly_lst_faceid.append(img_full_black)
+ else:
+ region_aplly_lst_faceid.append(img_full_white)
+
+            # Concatenate the FaceID and IP-Adapter inputs so they can be loaded together
+ none_img_encoder = False
+ # if len(model_ip_adapter_lst) == 0:
+ # only_face_id = 1
+
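+            # When FaceID / ViT-G / FaceID Plus entries exist, their image embeddings are precomputed per group and concatenated, so the adapters are later loaded without an image encoder.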
+ if len(ip_adapter_faceid_image_lst) > 0 or len(ip_adapter_image_vitg_lst) > 0 or len(ip_adapter_faceid_plus_image_lst) > 0:
+                # Image embeddings for models that use the ViT-H image encoder
+ ip_adapter_embeds = []
+ ip_adapter_vitg_embeds = []
+ ip_adapter_image_embeds_faceid = []
+ ip_adapter_image_embeds_faceid_plus = []
+ if len(model_ip_adapter_lst) > 0:
+ pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name=model_ip_adapter_lst)
+ pipe.set_ip_adapter_scale(scale_ip_adapter_lst)
+ ip_adapter_embeds = pipe.prepare_ip_adapter_image_embeds(ip_adapter_image_lst,None,device,1, guidance>1)
+ pipe.unload_ip_adapter()
+
+ if len(ip_adapter_faceid_image_lst) > 0:
+ ip_adapter_image_embeds_faceid,_ = ip_adapter_face_id_embedding(ip_adapter_faceid_image_lst,device,pipe.unet.dtype,guidance,False)
+ ip_adapter_image_embeds_faceid = [ip_adapter_image_embeds_faceid]
+ if len(ip_adapter_faceid_plus_image_lst) >0:
+ ip_adapter_image_embeds_faceid_plus,ip_adapter_images_faceid_plus = ip_adapter_face_id_embedding(ip_adapter_faceid_plus_image_lst,device,pipe.unet.dtype,guidance,True)
+ ip_adapter_image_embeds_faceid_plus = [ip_adapter_image_embeds_faceid_plus]
+                # Image embeddings for models that use the ViT-G image encoder
+ if len(ip_adapter_image_vitg_lst) > 0:
+ pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name=model_ip_adapter_vitg_lst,image_encoder_folder=None)
+ pipe.set_ip_adapter_scale(scale_ip_adapter_vitg_lst)
+ pipe.image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+ "h94/IP-Adapter", subfolder="sdxl_models/image_encoder",
+ ).to(device, dtype=pipe.unet.dtype)
+ ip_adapter_vitg_embeds = pipe.prepare_ip_adapter_image_embeds(ip_adapter_image_vitg_lst,None,device,1, guidance>1)
+ pipe.unload_ip_adapter()
+
+ ip_adapter_image_embeds = ip_adapter_embeds + ip_adapter_image_embeds_faceid + ip_adapter_vitg_embeds + ip_adapter_image_embeds_faceid_plus
+
+ inf_adapt_image = None
+ none_img_encoder = True
+ if not isinstance(ip_adapter_image_embeds, list):
+ ip_adapter_image_embeds = [ip_adapter_image_embeds]
+ else:
+ inf_adapt_image = ip_adapter_image_lst
+ ip_adapter_image_embeds = None
+
+ region_aplly_lst = region_aplly_lst + region_aplly_lst_faceid + region_aplly_vitg_lst + region_aplly_lst_faceid_plus
+ load_model = ["h94/IP-Adapter"]*len(model_ip_adapter_lst) + ["h94/IP-Adapter-FaceID"]*len(model_ip_adapter_faceid_lst) + ["h94/IP-Adapter"]*len(model_ip_adapter_vitg_lst) + ["h94/IP-Adapter-FaceID"]*len(model_ip_adapter_faceid_plus_lst)
+ subfolder = ["models"]*len(model_ip_adapter_lst) + [None]*len(model_ip_adapter_faceid_lst) + ["models"] * len(model_ip_adapter_vitg_lst) + [None]*len(model_ip_adapter_faceid_plus_lst)
+ model_ip_adapter_lst = model_ip_adapter_lst + model_ip_adapter_faceid_lst + model_ip_adapter_vitg_lst + model_ip_adapter_faceid_plus_lst
+ scale_ip_adapter_lst = scale_ip_adapter_lst + scale_ip_adapter_faceid_lst + scale_ip_adapter_vitg_lst + scale_ip_adapter_faceid_plus_lst
+
+ clip_embeds = None
+ if len(ip_adapter_images_faceid_plus) > 0:
+ pipe.load_ip_adapter("h94/IP-Adapter-FaceID", subfolder=None, weight_name=model_ip_adapter_faceid_plus_lst,image_encoder_folder=None)
+ pipe.image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+ "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
+ ).to(device, dtype=pipe.unet.dtype)
+ # Extract CLIP embeddings
+ clip_embeds = pipe.prepare_ip_adapter_image_embeds([ip_adapter_images_faceid_plus], None, device, 1, guidance>1)[0] #num_images = 1
+ pipe.unload_ip_adapter()
+
+ if none_img_encoder:
+ pipe.load_ip_adapter(load_model, subfolder=subfolder, weight_name=model_ip_adapter_lst,image_encoder_folder=None)
+ else:
+ pipe.load_ip_adapter(load_model, subfolder=subfolder, weight_name=model_ip_adapter_lst)
+ pipe.set_ip_adapter_scale(scale_ip_adapter_lst)
+
+ if len(ip_adapter_images_faceid_plus) > 0:
+ pipe.image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+ "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
+ ).to(device, dtype=pipe.unet.dtype)
+
+ # Set CLIP embeddings as class parameter
+ pipe.unet.encoder_hid_proj.image_projection_layers[0].clip_embeds = clip_embeds.to(dtype=pipe.unet.dtype)
+ pipe.unet.encoder_hid_proj.image_projection_layers[0].shortcut = faceid_plus_v2
+
+ cross_attention_kwargs = {"ip_adapter_masks":mask_region_apply_ip_adapter(region_aplly_lst,invert_ip_adapter_mask_mode)}
+ elif inf_adapt_image is not None and ip_adapter_multi == False:
+ if 'VIT-G' in model_ip_adapter:
+ model_ip_adapter = model_ip_adapter_type[model_ip_adapter]
+ pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name=model_ip_adapter,image_encoder_folder=None)
+ pipe.set_ip_adapter_scale(float(inf_adapt_image_strength))
+ pipe.image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+ "h94/IP-Adapter", subfolder="sdxl_models/image_encoder",
+ ).to(device, dtype=pipe.unet.dtype)
+ cross_attention_kwargs = {"ip_adapter_masks":mask_region_apply_ip_adapter(inf_control_adapt_image,invert_ip_adapter_mask_mode)}
+ elif 'FaceID' not in model_ip_adapter:
+ model_ip_adapter = model_ip_adapter_type[model_ip_adapter]
+ pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name=model_ip_adapter)
+ pipe.set_ip_adapter_scale(float(inf_adapt_image_strength))
+ cross_attention_kwargs = {"ip_adapter_masks":mask_region_apply_ip_adapter(inf_control_adapt_image,invert_ip_adapter_mask_mode)}
+ elif 'Plus FaceID' in model_ip_adapter:
+ if 'Plus FaceIDv2' in model_ip_adapter:
+ faceid_plus_v2 = True
+ model_ip_adapter = model_ip_adapter_type[model_ip_adapter]
+ pipe.load_ip_adapter("h94/IP-Adapter-FaceID", subfolder=None, weight_name=model_ip_adapter,image_encoder_folder=None)
+ pipe.set_ip_adapter_scale(float(inf_adapt_image_strength))
+ ip_adapter_image_embeds,ip_adapter_images_faceid_plus = ip_adapter_face_id_embedding([inf_adapt_image],device,pipe.unet.dtype,guidance,True)
+ if not isinstance(ip_adapter_image_embeds, list):
+ ip_adapter_image_embeds = [ip_adapter_image_embeds]
+ cross_attention_kwargs = {"ip_adapter_masks":mask_region_apply_ip_adapter(inf_control_adapt_image,invert_ip_adapter_mask_mode)}
+ if len(ip_adapter_images_faceid_plus) > 0:
+ pipe.image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+ "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
+ ).to(device, dtype=pipe.unet.dtype)
+ # Extract CLIP embeddings
+ clip_embeds = pipe.prepare_ip_adapter_image_embeds([ip_adapter_images_faceid_plus], None, device, 1, guidance>1)[0] #num_images = 1
+
+ # Set CLIP embeddings as class parameter
+ pipe.unet.encoder_hid_proj.image_projection_layers[0].clip_embeds = clip_embeds.to(dtype=pipe.unet.dtype)
+ pipe.unet.encoder_hid_proj.image_projection_layers[0].shortcut = faceid_plus_v2
+ #pipe.unload_ip_adapter()
+ inf_adapt_image = None
+ else:
+ model_ip_adapter = model_ip_adapter_type[model_ip_adapter]
+ pipe.load_ip_adapter("h94/IP-Adapter-FaceID", subfolder=None, weight_name=model_ip_adapter,image_encoder_folder=None)
+ pipe.set_ip_adapter_scale(float(inf_adapt_image_strength))
+ ip_adapter_image_embeds,_ = ip_adapter_face_id_embedding([inf_adapt_image],device,pipe.unet.dtype,guidance,False)
+ if not isinstance(ip_adapter_image_embeds, list):
+ ip_adapter_image_embeds = [ip_adapter_image_embeds]
+ cross_attention_kwargs = {"ip_adapter_masks":mask_region_apply_ip_adapter(inf_control_adapt_image,invert_ip_adapter_mask_mode)}
+ inf_adapt_image = None
+ else:
+ inf_adapt_image = None
+ else:
+ inf_adapt_image = None
+
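+    # Diffusers pipeline path: swap in the selected scheduler, build the shared call config, and dispatch to inpainting / ControlNet / img2img / txt2img.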
+ if diffuser_pipeline:
+ for label, funcname, options in samplers_diffusers:
+ if label == sampler:
+ sampler_name, sampler_opt = funcname, options
+ if label == sampler_hires:
+ sampler_name_hires, sampler_opt_hires = funcname, options
+ pipe.scheduler = sampler_name(pipe.scheduler.config)
+ output_type = 'pil'
+ if hr_enabled and img_input is None:
+ output_type = 'latent'
+        # clip_skip needs to be reduced by 1 because encode_prompt increases the value when clip_skip is used
+ config = {
+ "prompt": prompt,
+ "negative_prompt": neg_prompt,
+ "num_inference_steps": int(steps),
+ "guidance_scale": guidance,
+ "generator": generator,
+ "region_map_state": state,
+ #"region_map_attn_weight": g_strength,
+ "latent_processing": latent_processing,
+ 'weight_func':weight_func,
+ 'clip_skip' :int(clip_skip),
+ "output_type" : output_type,
+ "image_t2i_adapter":adapter_img,
+ "adapter_conditioning_scale":adapter_conditioning_scale,
+ "adapter_conditioning_factor":adapter_conditioning_factor,
+ "guidance_rescale":guidance_rescale,
+ "long_encode" : int(long_encode),
+ "ip_adapter_image_embeds": ip_adapter_image_embeds,
+ "cross_attention_kwargs": cross_attention_kwargs
+ }
+ '''if ip_adapter == False:
+ inf_adapt_image = None'''
+
+ if mask_inpaiting and img_input and inpaiting_mode and control_net_model:
+ result = pipe(mask_image = mask_inpaiting,width=img_input.width,height=img_input.height, controlnet_conditioning_scale = controlnet_scale,inf_adapt_image=inf_adapt_image,image =img_input , control_image=img_control,strength = i2i_scale,control_guidance_start=control_guidance_start,control_guidance_end=control_guidance_end,**config)
+ elif control_net_model is not None and img_input is not None:
+ result = pipe(controlnet_conditioning_scale = controlnet_scale,inf_adapt_image=inf_adapt_image,image =img_input , control_image=img_control,strength = i2i_scale,control_guidance_start=control_guidance_start,control_guidance_end=control_guidance_end,**config)
+ elif control_net_model is not None:
+ result = pipe(width = width,height = height,controlnet_conditioning_scale = controlnet_scale, image=img_control,control_guidance_start=control_guidance_start,control_guidance_end=control_guidance_end,ip_adapter_image=inf_adapt_image,**config)
+ elif mask_inpaiting and img_input and inpaiting_mode:
+ result = pipe(image =img_input,ip_adapter_image=inf_adapt_image,mask_image = mask_inpaiting,strength=i2i_scale,width=img_input.width,height=img_input.height,**config)
+ elif img_input is not None:
+ result = pipe(image =img_input,strength = i2i_scale,ip_adapter_image=inf_adapt_image,**config)
+ else:
+ result = pipe(height = height, width = width,ip_adapter_image=inf_adapt_image,**config)
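+        # Hires fix: rebuild the pipeline for img2img, upscale the final latents to the target resolution, and denoise them again.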
+ if hr_enabled and img_input is None:
+ del pipe
+ torch.cuda.empty_cache()
+ gc.collect()
+ pipe = setup_model(model,clip_skip, lora_group,diffuser_pipeline,control_net_model,True,device,vae_used)
+            # Add Textual Inversion / text embeddings
+ pipe = add_embedding(pipe,embs)
+ pipe.scheduler = sampler_name_hires(pipe.scheduler.config)
+ vae_scale_factor = 2 ** (len(pipe.vae.config.block_out_channels) - 1)
+ target_height = int(height * upscale_x // vae_scale_factor )* 8
+ target_width = int(width * upscale_x // vae_scale_factor)*8
+ latents = result[-1].unsqueeze(0)
+ #print(latents.shape)
+ latents = torch.nn.functional.interpolate(
+ latents,
+ size=(
+ int(target_height // vae_scale_factor),
+ int(target_width // vae_scale_factor),
+ ),
+ mode=latent_upscale_modes[hr_method]["upscale_method"],
+ antialias=latent_upscale_modes[hr_method]["upscale_antialias"],
+ )
+
+ config = {
+ "prompt": prompt,
+ "negative_prompt": neg_prompt,
+ "num_inference_steps": int(steps),
+ "guidance_scale": guidance,
+ "generator": generator,
+ "region_map_state": state,
+ #"region_map_attn_weight": g_strength,
+ "latent_processing": hr_process_enabled,
+ 'weight_func':weight_func,
+ 'clip_skip' :int(clip_skip),
+ "image_t2i_adapter":adapter_img,
+ "adapter_conditioning_scale":adapter_conditioning_scale,
+ "adapter_conditioning_factor":adapter_conditioning_factor,
+ "guidance_rescale":guidance_rescale,
+ "long_encode" : int(long_encode),
+ "ip_adapter_image_embeds": ip_adapter_image_embeds,
+ "cross_attention_kwargs":cross_attention_kwargs,
+ }
+ if control_net_model is not None:
+ upscale_result = pipe(width=int(target_width),height=int(target_height),controlnet_conditioning_scale = controlnet_scale,image = latents, control_image=img_control,strength = hr_denoise,control_guidance_start=control_guidance_start,control_guidance_end=control_guidance_end,**config)
+ else:
+ upscale_result = pipe(width=int(target_width),height=int(target_height),image = latents,strength = hr_denoise,**config)
+ #print(type(upscale_result[-1]))
+ #print(upscale_result)
+ result = result[:-1] + upscale_result
+ else:
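+        # k-diffusion path: look up the sampler (and hires sampler), then dispatch to inpainting / img2img / txt2img with optional latent upscaling.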
+ for label, funcname, options in samplers_k_diffusion:
+ if label == sampler:
+ sampler_name, sampler_opt = funcname, options
+ if label == sampler_hires:
+ sampler_name_hires, sampler_opt_hires = funcname, options
+ config = {
+ "negative_prompt": neg_prompt,
+ "num_inference_steps": int(steps),
+ "guidance_scale": guidance,
+ "generator": generator,
+ "sampler_name": sampler_name,
+ "sampler_opt": sampler_opt,
+ "region_map_state": state,
+ #"region_map_attn_weight": g_strength,
+ "start_time": start_time,
+ "timeout": timeout,
+ "latent_processing": latent_processing,
+ 'weight_func':weight_func,
+ 'seed': int(seed),
+ 'sampler_name_hires': sampler_name_hires,
+ 'sampler_opt_hires': sampler_opt_hires,
+ "latent_upscale_processing": hr_process_enabled,
+ "ip_adapter_image":inf_adapt_image,
+ "controlnet_conditioning_scale":controlnet_scale,
+ "control_img": img_control,
+ "control_guidance_start":control_guidance_start,
+ "control_guidance_end":control_guidance_end,
+ "image_t2i_adapter":adapter_img,
+ "adapter_conditioning_scale":adapter_conditioning_scale,
+ "adapter_conditioning_factor":adapter_conditioning_factor,
+ "guidance_rescale":guidance_rescale,
+ 'clip_skip' :int(clip_skip),
+ "long_encode" : int(long_encode),
+ "ip_adapter_image_embeds": ip_adapter_image_embeds,
+ "cross_attention_kwargs":cross_attention_kwargs,
+ }
+ #if control_net_model is not None:
+ pipe.setup_controlnet(control_net_model)
+ if mask_inpaiting and img_input and inpaiting_mode:
+ result = pipe.inpaiting(prompt, image=img_input,mask_image = mask_inpaiting,strength=i2i_scale,width=img_input.width,height=img_input.height, **config)
+ elif img_input is not None:
+ result = pipe.img2img(prompt, image=img_input, strength=i2i_scale,width=img_input.width,height=img_input.height, **config)
+ elif hr_enabled:
+ result = pipe.txt2img(
+ prompt,
+ width=width,
+ height=height,
+ upscale=True,
+ upscale_x=hr_scale,
+ upscale_denoising_strength=hr_denoise,
+ **config,
+ **latent_upscale_modes[hr_method],
+ )
+ else:
+ result = pipe.txt2img(prompt, width=width, height=height, **config)
+
+
+ end_time = time.time()
+
+    # torch.cuda.mem_get_info requires CUDA; report zeros when running on CPU
+    vram_free, vram_total = torch.cuda.mem_get_info() if torch.cuda.is_available() else (0, 0)
+ if ip_adapter :
+ pipe.unload_ip_adapter()
+ if lora_group is not None and len(lora_group) > 0:
+ #pipe.unfuse_lora()#Unload lora
+ pipe.unload_lora_weights()
+ #if embs is not None and len(embs) > 0:
+ #pipe.unload_textual_inversion()
+ del pipe
+ torch.cuda.empty_cache()
+ gc.collect()
+ print(f"done: model={model}, res={result[-1].width}x{result[-1].height}, step={steps}, time={round(end_time-start_time, 2)}s, vram_alloc={convert_size(vram_total-vram_free)}/{convert_size(vram_total)}")
+ return gr.Image.update(result[-1], label=f"Initial Seed: {seed}"),result
+
+
+
+color_list = []
+
+def get_color(n):
+ for _ in range(n - len(color_list)):
+ color_list.append(tuple(np.random.random(size=3) * 256))
+ return color_list
+
+
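+# Compose an RGBA preview in which every region mask is tinted with its own color; the currently selected region is drawn more opaque.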
+def create_mixed_img(current, state, w=512, h=512):
+ w, h = int(w), int(h)
+ image_np = np.full([h, w, 4], 255)
+ if state is None:
+ state = {}
+
+ colors = get_color(len(state))
+ idx = 0
+
+ for key, item in state.items():
+ if item["map"] is not None:
+ m = item["map"] < 255
+ alpha = 150
+ if current == key:
+ alpha = 200
+ image_np[m] = colors[idx] + (alpha,)
+ idx += 1
+
+ return image_np
+
+def apply_size_sketch(width,height,state,inf_image,inpaiting_mode,inf_image_inpaiting):
+ if inpaiting_mode and inf_image_inpaiting:
+ w_change = inf_image_inpaiting["image"].width
+ h_change = inf_image_inpaiting["image"].height
+ elif inf_image is not None:
+ w_change = inf_image.width
+ h_change = inf_image.height
+ #update_img = gr.Image.update(value=create_mixed_img("", state, w_change, h_change))
+ #return state, update_img,gr.Image.update(width=w_change,height = h_change)
+ else:
+ w_change = int(width)
+ h_change = int(height)
+
+ if state is not None:
+ for key, item in state.items():
+ if item["map"] is not None:
+ #inverted_image = PIL.ImageOps.invert(item["map"].convert('RGB'))
+ item["map"] = resize(item["map"], w_change, h_change)
+
+ update_img = gr.Image.update(value=create_mixed_img("", state, w_change, h_change))
+ return state, update_img,gr.Image.update(width=w_change,height = h_change)
+
+
+# width.change(apply_new_res, inputs=[width, height, global_stats], outputs=[global_stats, sp, rendered])
+'''def apply_new_res(w, h, state,inf_image,rendered):
+ if inf_image is not None:
+ return state, rendered
+ w, h = int(w), int(h)
+
+ if state is not None:
+ for key, item in state.items():
+ if item["map"] is not None:
+ item["map"] = resize(item["map"], w, h)
+
+ update_img = gr.Image.update(value=create_mixed_img("", state, w, h))
+ return state, update_img'''
+
+
+def detect_text(text, state, width, height,formula_button,inf_image,inpaiting_mode,inf_image_inpaiting):
+ global formula
+ if text is None or text == "":
+ return None, None, gr.Radio.update(value=None,visible = False), None,gr.Dropdown.update(value = formula_button)
+
+ if inpaiting_mode and inf_image_inpaiting:
+ w_change = inf_image_inpaiting["image"].width
+ h_change = inf_image_inpaiting["image"].height
+ elif inf_image is not None:
+ w_change = inf_image.width
+ h_change = inf_image.height
+ else:
+ w_change = int(width)
+ h_change = int(height)
+
+
+ t = text.split(",")
+ new_state = {}
+
+ for item in t:
+ item = item.strip()
+ if item == "":
+ continue
+ if state is not None and item in state:
+ new_state[item] = {
+ "map": state[item]["map"],
+ "weight": state[item]["weight"],
+ "mask_outsides": state[item]["mask_outsides"],
+ }
+ else:
+ new_state[item] = {
+ "map": None,
+ "weight": 0.5,
+ "mask_outsides": 0
+ }
+ update = gr.Radio.update(choices=[key for key in new_state.keys()], value=None,visible = True)
+ update_img = gr.update(value=create_mixed_img("", new_state, w_change, h_change))
+ update_sketch = gr.update(value=None, interactive=False)
+ return new_state, update_sketch, update, update_img,gr.Dropdown.update(value = formula_button)
+
+def detect_text1(text, state, width, height,formula_button,inf_image,inpaiting_mode,inf_image_inpaiting):
+ global formula
+ if text is None or text == "":
+        return None, gr.Radio.update(value=None,visible = False), None,gr.Dropdown.update(value = formula_button)
+
+ if inpaiting_mode and inf_image_inpaiting:
+ w_change = inf_image_inpaiting["image"].width
+ h_change = inf_image_inpaiting["image"].height
+ elif inf_image is not None:
+ w_change = inf_image.width
+ h_change = inf_image.height
+ else:
+ w_change = int(width)
+ h_change = int(height)
+
+ t = text.split(",")
+ new_state = {}
+
+ for item in t:
+ item = item.strip()
+ if item == "":
+ continue
+ if state is not None and item in state:
+ new_state[item] = {
+ "map": state[item]["map"],
+ "weight": state[item]["weight"],
+ "mask_outsides": state[item]["mask_outsides"],
+ }
+ else:
+ new_state[item] = {
+ "map": None,
+ "weight": 0.5,
+ "mask_outsides": False
+ }
+ update = gr.Radio.update(choices=[key for key in new_state.keys()], value=None,visible = True)
+ update_img = gr.update(value=create_mixed_img("", new_state, w_change, h_change))
+ return new_state, update, update_img,gr.Dropdown.update(value = formula_button)
+
+
+def resize(img, w, h):
+ trs = transforms.Compose(
+ [
+ transforms.ToPILImage(),
+ #transforms.Resize(min(h, w)),
+ transforms.Resize((h, w),interpolation=transforms.InterpolationMode.BICUBIC),
+ transforms.CenterCrop((h, w)),
+ ]
+ )
+ result = np.array(trs(img), dtype=np.uint8)
+ return result
+
+
+def switch_canvas(entry, state, width, height,inf_image,inpaiting_mode,inf_image_inpaiting):
+ if inpaiting_mode and inf_image_inpaiting:
+ w_change = inf_image_inpaiting["image"].width
+ h_change = inf_image_inpaiting["image"].height
+ elif inf_image is not None:
+ w_change = inf_image.width
+ h_change = inf_image.height
+ else:
+ w_change = int(width)
+ h_change = int(height)
+
+ if entry is None or state is None:
+ return None, 0.5, False, create_mixed_img("", state, w_change, h_change)
+
+ return (
+ gr.update(value=None, interactive=True),
+ gr.update(value=state[entry]["weight"] if entry in state else 0.5),
+ gr.update(value=state[entry]["mask_outsides"] if entry in state else False),
+ create_mixed_img(entry, state, w_change, h_change),
+ )
+
+
+def apply_canvas(selected, draw, state, w, h,inf_image,inpaiting_mode,inf_image_inpaiting):
+ if inpaiting_mode and inf_image_inpaiting:
+ w_change = inf_image_inpaiting["image"].width
+ h_change = inf_image_inpaiting["image"].height
+ elif inf_image is not None:
+ w_change = inf_image.width
+ h_change = inf_image.height
+ else:
+ w_change = int(w)
+ h_change = int(h)
+
+
+ if state is not None and selected in state and draw is not None:
+ w, h = int(w_change), int(h_change)
+ state[selected]["map"] = resize(draw, w, h)
+ return state, gr.Image.update(value=create_mixed_img(selected, state, w, h))
+
+
+def apply_weight(selected, weight, state):
+ if state is not None and selected in state:
+ state[selected]["weight"] = weight
+ return state
+
+
+def apply_option(selected, mask, state):
+ if state is not None and selected in state:
+ state[selected]["mask_outsides"] = mask
+ return state
+
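+# Helpers for splitting an uploaded color map into per-region grayscale masks.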
+clustering_image =[]
+number_clustering = 0
+def is_image_black(image):
+
+ average_intensity = image.mean()
+
+    return average_intensity < 10
+def change_diferent_black_to_white(image):
+
+ width, height = image.size
+
+ for x in range(width):
+ for y in range(height):
+ r, g, b = image.getpixel((x, y))
+
+            if (r, g, b) != (0, 0, 0):
+ image.putpixel((x, y), (255, 255, 255))
+ return image
+
+def change_black_to_other_color(image,color_list):
+
+ width, height = image.size
+ new_pixel = (random.randrange(1,256), random.randrange(1,256), random.randrange(1,256))
+ while new_pixel in color_list:
+ new_pixel = (random.randrange(1,256), random.randrange(1,256), random.randrange(1,256))
+ for x in range(width):
+ for y in range(height):
+ pixel = image.getpixel((x, y))
+
+ if pixel == (0, 0, 0):
+ image.putpixel((x, y), new_pixel)
+ return image
+
+def get_color_mask(color, image, threshold=30):
+ """
+ Returns a color mask for the given color in the given image.
+ """
+ img_array = np.array(image, dtype=np.uint8)
+ color_diff = np.sum((img_array - color) ** 2, axis=-1)
+ img_array[color_diff > threshold] = img_array[color_diff > threshold] * 0
+ return Image.fromarray(img_array)
+
+def unique_colors(image, threshold=0.01):
+ colors = image.getcolors(image.size[0] * image.size[1])
+ total_pixels = image.size[0] * image.size[1]
+ unique_colors = []
+ for count, color in colors:
+ if count / total_pixels > threshold:
+ unique_colors.append(color)
+ return unique_colors
+
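+# Split a color-map image into one grayscale mask per dominant color; filler masks pad the list up to MAX_NUM_COLORS and are removed again after inversion.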
+def extract_color_textboxes(color_map_image,MAX_NUM_COLORS):
+ #color_map_image = Image.open(color_map_image)
+ #color_map_image = cv2.imread(color_map_image)
+ color_map_image= Image.fromarray(color_map_image.astype('uint8'), 'RGB')
+ # Get unique colors in color_map_image
+ colors = unique_colors(color_map_image)
+ color_map_image = change_black_to_other_color(color_map_image,colors)
+ colors = unique_colors(color_map_image)
+ color_masks = [get_color_mask(color, color_map_image) for color in colors]
+    # Pad color_masks with filler blocks up to MAX_NUM_COLORS (these end up black after inversion and are filtered out below)
+ num_missing_masks = MAX_NUM_COLORS - len(color_masks)
+ white_mask = Image.new("RGB", color_map_image.size, color=(32, 32, 32))
+ color_masks += [white_mask] * num_missing_masks
+ color_output =[]
+    for i in range(len(color_masks)):
+ #color_masks[i] = color_masks[i].convert('L')
+ color_masks[i] = change_diferent_black_to_white(color_masks[i])
+ color_masks[i] = np.array(color_masks[i])
+ color_masks[i] = cv2.cvtColor(color_masks[i], cv2.COLOR_RGB2GRAY)
+ color_masks[i] = 255.0 - color_masks[i]
+        if not is_image_black(color_masks[i]):
+ color_masks[i] = color_masks[i].astype(np.uint8)
+ color_output.append(color_masks[i])
+ return color_output
+
+
+
+def apply_image_clustering(image, selected, w, h, strength, mask, state,inf_image,inpaiting_mode,inf_image_inpaiting):
+ if inpaiting_mode and inf_image_inpaiting:
+ w_change = inf_image_inpaiting["image"].width
+ h_change = inf_image_inpaiting["image"].height
+ elif inf_image is not None:
+ w_change = inf_image.width
+ h_change = inf_image.height
+ else:
+ w_change = int(w)
+ h_change = int(h)
+
+ if state is not None and selected in state:
+ state[selected] = {
+ "map": resize(image, w_change, h_change),
+ "weight": strength,
+ "mask_outsides": mask
+ }
+ return state, gr.Image.update(value=create_mixed_img(selected, state, w_change, h_change))
+
+
+# sp2, radio, width, height, global_stats
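+# Apply an uploaded sketch to the selected region; when no region is selected, cluster the image by color and fill all regions from the resulting masks.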
+def apply_image(image, selected, w, h, strength, mask, state,inf_image,inpaiting_mode,inf_image_inpaiting):
+ if inpaiting_mode and inf_image_inpaiting:
+ w_change = inf_image_inpaiting["image"].width
+ h_change = inf_image_inpaiting["image"].height
+ elif inf_image is not None:
+ w_change = inf_image.width
+ h_change = inf_image.height
+ else:
+ w_change = int(w)
+ h_change = int(h)
+
+
+ if state is not None and selected in state:
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+ state[selected] = {
+ "map": resize(image, w_change, h_change),
+ "weight": strength,
+ "mask_outsides": mask
+ }
+ elif state is not None:
+ key_state = list(state.keys())
+ global number_clustering,clustering_image
+ number_clustering = 0
+ clustering_image = []
+ clustering_image = extract_color_textboxes(image,len(state)+1)
+ number_clustering = len(clustering_image)
+ if len(state) > len(clustering_image):
+ amount_add = len(clustering_image)
+ else:
+ amount_add = len(state)
+ for i in range(0,amount_add):
+ state[key_state[i]] = {
+ "map": resize(clustering_image[i], w_change, h_change),
+ "weight": strength,
+ "mask_outsides": mask
+ }
+ return state, gr.Image.update(value=create_mixed_img(selected, state, w_change, h_change))
+#rendered, apply_style, apply_clustering_style,Previous,Next,Completed,sp2,sp3
+def apply_base_on_color(sp2,state, width, height,inf_image,inpaiting_mode,inf_image_inpaiting):
+ global number_clustering,clustering_image
+ if inpaiting_mode and inf_image_inpaiting:
+ w_change = inf_image_inpaiting["image"].width
+ h_change = inf_image_inpaiting["image"].height
+ elif inf_image is not None:
+ w_change = inf_image.width
+ h_change = inf_image.height
+ else:
+ w_change = int(width)
+ h_change = int(height)
+
+ number_clustering = 0
+ clustering_image = []
+ clustering_image = extract_color_textboxes(sp2,len(state)+1)
+ new_state = {}
+ for i in state:
+ new_state[i] = {
+ "map": None,
+ "weight": 0.5,
+ "mask_outsides": False
+ }
+ return gr.Image.update(value = create_mixed_img("", new_state, w_change, h_change)),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = True),gr.Button.update(visible = True),gr.Button.update(visible = True),gr.Image.update(visible = False),gr.Image.update(value=clustering_image[0],visible = True),gr.Button.update(visible = True),new_state
+def completing_clustering(sp2):
+ return gr.Button.update(visible = True),gr.Button.update(visible = True),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Image.update(visible = True),gr.Image.update(visible = False),gr.Button.update(visible = False)
+def previous_image_page(sp3):
+ global clustering_image,number_clustering
+ number_clustering = number_clustering - 1
+ if number_clustering < 0:
+ number_clustering = len(clustering_image)-1
+ return gr.Image.update(value = clustering_image[number_clustering])
+
+def next_image_page(sp3):
+ global clustering_image,number_clustering
+ number_clustering = number_clustering + 1
+ if number_clustering >= len(clustering_image):
+ number_clustering = 0
+ return gr.Image.update(value = clustering_image[number_clustering])
+# [ti_state, lora_state, ti_vals, lora_vals, uploads]
+
+
+def get_file_link_sp(link):
+ import requests
+ import os
+ from urllib.parse import unquote
+
+ file_name = None
+ absolute_path = None
+
+ try:
+ response = requests.get(link)
+ response.raise_for_status()
+ except requests.exceptions.HTTPError as err:
+ print(f"There was an error downloading: {err}")
+ else:
+ content_disposition = response.headers.get("content-disposition")
+ if content_disposition:
+ file_name = content_disposition.split("filename=")[1]
+ file_name = unquote(file_name)
+ # remove quotation marks
+ file_name = file_name.strip('"')
+ else:
+ file_name = "downloaded_file"
+
+ with open(file_name, "wb") as f:
+ f.write(response.content)
+
+        #Get absolute_path
+        absolute_path = os.path.abspath(file_name)
+        #Derive a display name from the file name
+        file_name = file_name.split('.')[0]
+        file_name = file_name.replace('_',' ')
+        file_name = file_name.replace('-',' ')
+        file_name = file_name.title()
+
+ return absolute_path, file_name
+
+
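+# Streaming variant of get_file_link_sp: downloads in chunks with progress reporting, then derives a display name from the file name.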
+def get_file_link(link):
+ import requests
+ import os
+ from urllib.parse import unquote
+
+ file_name = None
+ absolute_path = None
+
+ try:
+ with requests.get(link, stream=True) as response:
+ response.raise_for_status()
+
+ # Get file size from headers
+ total_size = int(response.headers.get('content-length', 0))
+ content_disposition = response.headers.get("content-disposition")
+ if content_disposition:
+ file_name = content_disposition.split("filename=")[1]
+ file_name = unquote(file_name)
+ # remove quotation marks
+ file_name = file_name.strip('"')
+ else:
+ file_name = "downloaded_file"
+
+            # Stream the download to disk, reporting progress roughly every 10%
+            chunk_size = 1024
+            downloaded_size = 0
+            last_reported = -1
+            with open(file_name, "wb") as f:
+                for chunk in response.iter_content(chunk_size=chunk_size):
+                    if chunk:
+                        f.write(chunk)
+                        downloaded_size += len(chunk)
+                        if total_size > 0:
+                            progress = (downloaded_size / total_size) * 100
+                            if int(progress) // 10 > last_reported:
+                                last_reported = int(progress) // 10
+                                print(f"Download progress: {progress:.2f}% ({downloaded_size / 1024:.2f} KB / {total_size / 1024:.2f} KB)")
+
+ # Get absolute_path
+ absolute_path = os.path.abspath(file_name)
+ # Change format file_name
+ file_name = file_name.split('.')[0]
+ file_name = file_name.replace('_', ' ')
+ file_name = file_name.replace('-', ' ')
+ file_name = file_name.title()
+
+ except requests.exceptions.HTTPError as err:
+ print(f"There was an error downloading: {err}")
+
+ return absolute_path, file_name
+
+
+
+
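+# Register uploaded files or a downloaded link as either a LoRA (state dict contains 'lora' keys) or a Textual Inversion embedding.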
+def add_net(files,link_download):
+ global lora_scale_dict, lora_lst, lora_dict, embeddings_dict
+ if files is None and (link_download is None or link_download == ''):
+ return gr.CheckboxGroup.update(choices=list(embeddings_dict.keys())),gr.CheckboxGroup.update(choices=list(lora_dict.keys())),gr.Dropdown.update(choices=[k for k in lora_lst],value=lora_lst[0],),gr.File.update(value=None),gr.Textbox.update(value = ''),
+ if link_download is not None and link_download != '':
+ path_file, file_name_download = get_file_link(link_download)
+ if file_name_download:
+ items_dl = Path(path_file)
+ if items_dl.suffix == ".pt":
+ state_dict = torch.load(path_file, map_location="cpu")
+ else:
+ state_dict = load_file(path_file, device="cpu")
+ if any("lora" in k for k in state_dict.keys()):
+ #lora_state = file.name
+ if file_name_download not in lora_dict:
+ lora_lst.append(file_name_download)
+ lora_dict[file_name_download] = path_file
+ lora_scale_dict[file_name_download] = 1.0
+ else:
+ if file_name_download not in embeddings_dict:
+ embeddings_dict[file_name_download] = path_file
+ if files is not None:
+ for file in files:
+ item = Path(file.name)
+ stripedname = str(item.stem).strip()
+ stripedname = stripedname.replace('_',' ')
+ stripedname = stripedname.replace('-',' ')
+ stripedname = stripedname.title()
+ if item.suffix == ".pt":
+ state_dict = torch.load(file.name, map_location="cpu")
+ else:
+ state_dict = load_file(file.name, device="cpu")
+ if any("lora" in k for k in state_dict.keys()):
+ #lora_state = file.name
+ if stripedname not in lora_dict:
+ lora_lst.append(stripedname)
+ lora_dict[stripedname] = file.name
+ lora_scale_dict[stripedname] = 1.0
+ else:
+ #ti_state[stripedname] = file.name
+ if stripedname not in embeddings_dict:
+ embeddings_dict[stripedname] = file.name
+ return gr.CheckboxGroup.update(choices=list(embeddings_dict.keys())), gr.CheckboxGroup.update(choices=list(lora_dict.keys())),gr.Dropdown.update(choices=[k for k in lora_lst],value=lora_lst[0],),gr.File.update(value=None),gr.Textbox.update(value = ''),
+
+def change_lora_value(lora_vals):
+ global lora_scale_dict
+ if len(lora_scale_dict) == 0 or lora_vals == 'Not using Lora':
+ return gr.Slider.update(value = 1.0)
+ return gr.Slider.update(value = lora_scale_dict[lora_vals])
+def update_lora_value(lora_scale,lora_vals):
+ global lora_scale_dict
+ if len(lora_scale_dict) and lora_vals != 'Not using Lora':
+ lora_scale_dict[lora_vals] = float(lora_scale)
+
+
+# [ti_state, lora_state, ti_vals, lora_vals, uploads]
+def clean_states(ti_state,lora_group):
+ global lora_dict,embeddings_dict,lora_lst,lora_scale_dict
+ delete_lora = list(lora_dict.values())
+ for i in delete_lora:
+ os.remove(i)
+ delete_embed_lst = list(embeddings_dict.values())
+ for i in delete_embed_lst:
+ os.remove(i)
+ embeddings_dict = dict()
+ lora_dict = dict()
+ lora_scale_dict = dict()
+ lora_lst = ['Not using Lora']
+ return dict(),dict(),gr.CheckboxGroup.update(choices=list(embeddings_dict.keys()),value = None),gr.CheckboxGroup.update(choices=list(lora_dict.keys()),value = None),gr.Dropdown.update(choices=[k for k in lora_lst],value=lora_lst[0],),gr.File.update(value=None),gr.Text.update(f""),gr.Text.update(f""),gr.Textbox.update(value = ''),
+
+def add_model(insert_model):
+ global models,keep_vram,models_single_file
+ insert_model=insert_model.replace(" ", "")
+ if len(insert_model) == 0:
+ return gr.Dropdown.update(choices=[k[0] for k in get_model_list()],value=base_name),gr.Textbox.update(value = '')
+ if 'https' in insert_model:
+ path_file, file_name_download = get_file_link(insert_model)
+ for i in models:
+ if file_name_download in i:
+ return gr.Dropdown.update(choices=[k[0] for k in get_model_list()],value=base_name),gr.Textbox.update(value = '')
+ models.append((file_name_download,path_file))
+ keep_vram.append(path_file)
+ models_single_file.append(file_name_download)
+ else:
+ author,name = insert_model.split('/')
+ name = name.replace('_',' ')
+ name = name.replace('-',' ')
+ name = name.title()
+ for i in models:
+ if name in i or insert_model in i:
+ return gr.Dropdown.update(choices=[k[0] for k in get_model_list()],value=base_name),gr.Textbox.update(value = '')
+ models.append((name,insert_model))
+ keep_vram.append(insert_model)
+ return gr.Dropdown.update(choices=[k[0] for k in get_model_list()],value=base_name),gr.Textbox.update(value = '')
+
+def add_vae(insert_vae,single_load_file):
+ global vae_link,vae_single_file,vae_lst
+ insert_vae=insert_vae.replace(" ", "")
+ if len(insert_vae) == 0:
+ return gr.Dropdown.update(choices=[k for k in vae_lst],value=vae_lst[0]),gr.Textbox.update(value = ''),gr.Checkbox.update(value = False),
+ if 'https' in insert_vae:
+ path_file, file_name_download = get_file_link(insert_vae)
+ if file_name_download not in vae_lst:
+ vae_lst.append(file_name_download)
+ vae_link[file_name_download] = path_file
+ vae_single_file[file_name_download] = True
+ else:
+ name = insert_vae.split('/')[-1]
+ name = name.split('.')[0]
+ name = name.replace('_',' ')
+ name = name.replace('-',' ')
+ name = name.title()
+ if name not in vae_lst:
+ vae_lst.append(name)
+ vae_link[name] = insert_vae
+ vae_single_file[name] = single_load_file
+ return gr.Dropdown.update(choices=[k for k in vae_lst],value=vae_lst[0]),gr.Textbox.update(value = ''),gr.Checkbox.update(value = False),
+
+def reset_model_button(insert_model):
+ return gr.Textbox.update(value = '')
+
+def choose_tistate(ti_vals):
+ if len(ti_vals) == 0:
+ return dict(),gr.Text.update(""),gr.CheckboxGroup.update(choices=list(embeddings_dict.keys()),value = None)
+ dict_copy = dict()
+ for key, value in embeddings_dict.items():
+ if key in ti_vals:
+ dict_copy[key] = value
+ lst_key = [key for key in dict_copy.keys()]
+ lst_key = '; '.join(map(str, lst_key))
+ return dict_copy,gr.Text.update(lst_key),gr.CheckboxGroup.update(choices=list(embeddings_dict.keys()),value = None)
+
+def choose_lora_function(lora_list):
+ global lora_dict
+ if len(lora_list) == 0:
+ return dict(),gr.Text.update(""),gr.CheckboxGroup.update(choices=list(lora_dict.keys()),value = None),gr.Dropdown.update(choices=[k for k in lora_lst],value=lora_lst[0],)
+ dict_copy = dict()
+ for key, value in lora_dict.items():
+ if key in lora_list:
+ dict_copy[key] = value
+ lst_key = [key for key in dict_copy.keys()]
+ lst_key = '; '.join(map(str, lst_key))
+ return dict_copy,gr.Text.update(lst_key),gr.CheckboxGroup.update(choices=list(lora_dict.keys()),value = None),gr.Dropdown.update(choices=[k for k in lora_lst],value=lora_lst[0],)
+
+
+
+def delete_embed(ti_vals,ti_state,embs_choose):
+ if len(ti_vals) == 0:
+ return gr.CheckboxGroup.update(choices=list(embeddings_dict.keys())),ti_state,gr.Text.update(embs_choose)
+ for key in ti_vals:
+ if key in ti_state:
+ ti_state.pop(key)
+ if key in embeddings_dict:
+ os.remove(embeddings_dict[key])
+ embeddings_dict.pop(key)
+ if len(ti_state) >= 1:
+ lst_key = [key for key in ti_state.keys()]
+ lst_key = '; '.join(map(str, lst_key))
+ else:
+ lst_key =""
+ return gr.CheckboxGroup.update(choices=list(embeddings_dict.keys()),value = None),ti_state,gr.Text.update(lst_key)
+
+def delete_lora_function(lora_list,lora_group,lora_choose):
+ global lora_dict,lora_lst,lora_scale_dict
+ if len(lora_list) == 0:
+ return gr.CheckboxGroup.update(choices=list(lora_dict.keys())),lora_group,gr.Text.update(lora_choose),gr.Dropdown.update()
+ for key in lora_list:
+ if key in lora_group:
+ lora_group.pop(key)
+ if key in lora_scale_dict:
+ lora_scale_dict.pop(key)
+ if key in lora_dict:
+ os.remove(lora_dict[key])
+ lora_dict.pop(key)
+ if len(lora_group) >= 1:
+ lst_key = [key for key in lora_group.keys()]
+ lst_key = '; '.join(map(str, lst_key))
+ else:
+ lst_key =""
+ lora_lst = ["Not using Lora"]+[key for key in lora_dict.keys()]
+ return gr.CheckboxGroup.update(choices=list(lora_dict.keys()),value = None),lora_group,gr.Text.update(lst_key),gr.Dropdown.update(choices=[k for k in lora_lst],value=lora_lst[0],)
+
+def lora_delete(lora_vals):
+ global lora_dict
+ global lora_lst
+ if lora_vals == 'Not using Lora':
+ return gr.Dropdown.update(choices=[k for k in lora_lst],value=lora_lst[0],)
+ os.remove(lora_dict[lora_vals])
+ lora_dict.pop(lora_vals)
+ lora_lst.remove(lora_vals)
+ return gr.Dropdown.update(choices=[k for k in lora_lst],value=lora_lst[0],)
+#diffuser_pipeline,sampler,gallery,hr_enabled
+def mode_diffuser_pipeline( controlnet_enabled):
+ if controlnet_enabled == True:
+ return gr.Checkbox.update(value = True),gr.Checkbox.update()
+ return gr.Checkbox.update(value = False),gr.Checkbox.update(value = False)
+'''def mode_diffuser_pipeline1(diffuser_pipeline, controlnet_enabled):
+ assert diffuser_pipeline == False, "Please enable diffusers pipeline to use this option"
+ return gr.Checkbox.update(value = True)'''
+
+def res_cap(g, w, h, x):
+ if g:
+ return f"Enable upscaler: {w}x{h} to {int(w*x)//8 *8}x{int(h*x)//8 *8}"
+ else:
+ return "Enable upscaler"
+#diffuser_pipeline,hr_enabled,sampler,gallery,controlnet_enabled
+def mode_upscale(diffuser_pipeline, hr_scale, width, height,hr_enabled):
+ if hr_enabled == True:
+ return gr.Checkbox.update(value = False),gr.Checkbox.update(value = True,label=res_cap(True, width, height, hr_scale)),gr.Dropdown.update(value="DPM++ 2M Karras",choices=[s[0] for s in samplers_k_diffusion]),gr.Checkbox.update(value = False)
+ return gr.Checkbox.update(value = False),gr.Checkbox.update(value = False,label=res_cap(False, width, height, hr_scale)),gr.Dropdown.update(value="DPM++ 2M Karras",choices=[s[0] for s in samplers_k_diffusion]),gr.Checkbox.update()
+
+def change_control_net(model_control_net, low_threshold, high_threshold,has_body_openpose,has_hand_openpose,has_face_openpose):
+ if model_control_net == 'Canny':
+ return gr.Slider.update(visible = True),gr.Slider.update(visible = True),gr.Checkbox.update(visible = False),gr.Checkbox.update(visible = False),gr.Checkbox.update(visible = False),gr.Radio.update(visible = False)
+ if model_control_net == 'Depth':
+ return gr.Slider.update(visible = False),gr.Slider.update(visible = False),gr.Checkbox.update(visible = False),gr.Checkbox.update(visible = False),gr.Checkbox.update(visible = False),gr.Radio.update(visible = True,choices=["Midas","DPT"])
+ if model_control_net == 'Openpose':
+ return gr.Slider.update(visible = False),gr.Slider.update(visible = False),gr.Checkbox.update(visible = True),gr.Checkbox.update(visible = True),gr.Checkbox.update(visible = True),gr.Radio.update(visible = False)
+ if model_control_net == 'Semantic Segmentation':
+ return gr.Slider.update(visible = False),gr.Slider.update(visible = False),gr.Checkbox.update(visible = False),gr.Checkbox.update(visible = False),gr.Checkbox.update(visible = False),gr.Radio.update(visible = True,choices=["Convnet tiny","Convnet small","Convnet base","Convnet large","Convnet xlarge","Swin tiny","Swin small","Swin base","Swin large"])
+ if model_control_net =='Soft Edge' or model_control_net == 'Scribble' or model_control_net == 'Sketch':
+ return gr.Slider.update(visible = False),gr.Slider.update(visible = False),gr.Checkbox.update(visible = False),gr.Checkbox.update(visible = False),gr.Checkbox.update(visible = False),gr.Radio.update(visible = True,choices=["HED","PidiNet"])
+ return gr.Slider.update(visible = False),gr.Slider.update(visible = False),gr.Checkbox.update(visible = False),gr.Checkbox.update(visible = False),gr.Checkbox.update(visible = False),gr.Radio.update(visible = False)
+
+previous_sampler = 'DPM++ 2M Karras'
+previous_sampler_hires = 'DPM++ 2M Karras'
+#sampler,gallery,hr_enabled,controlnet_enabled
+def mode_diffuser_pipeline_sampler(diffuser_pipeline, sampler,sampler_hires):
+ global previous_sampler, previous_sampler_hires
+ sample_now = previous_sampler
+ sampler_hires_now = previous_sampler_hires
+ previous_sampler = sampler
+ previous_sampler_hires = sampler_hires
+ if diffuser_pipeline == False:
+ return gr.Checkbox.update(value = False), gr.Dropdown.update(value=sample_now,choices=[s[0] for s in samplers_k_diffusion]),gr.Dropdown.update(value=sampler_hires_now,choices=[s[0] for s in samplers_k_diffusion])
+ return gr.Checkbox.update(value = True),gr.Dropdown.update(value=sample_now,choices=[s[0] for s in samplers_diffusers]),gr.Dropdown.update(value=sampler_hires_now,choices=[s[0] for s in samplers_diffusers])
+
+def change_gallery(latent_processing,hr_process_enabled):
+ if latent_processing or hr_process_enabled:
+ return gr.Gallery.update(visible = True)
+ return gr.Gallery.update(visible = False)
+
+
+in_edit_mode = False
+in_edit_mode_adapter = False
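+# Preview the ControlNet preprocessing result for the current settings (or for the stored entry when editing a multi-ControlNet list).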
+def preview_image(model_control_net,low_threshold,high_threshold,has_body_openpose,has_hand_openpose,has_face_openpose,img_control,preprocessor_name,multi_controlnet,disable_preprocessing):
+ global in_edit_mode
+ if multi_controlnet == True and in_edit_mode == True:
+ global lst_control,current_number_control
+ if model_control_net == lst_control[current_number_control]["control_net_model"]:
+ setting_processing = list(lst_control[current_number_control].items())
+ setting_processing = setting_processing[:-3]
+ setting_processing = dict(setting_processing)
+ else:
+ setting_processing = {
+ "control_net_model": model_control_net,
+ "img_control": img_control,
+ "low_threshold": low_threshold,
+ "high_threshold": high_threshold,
+ "has_body": has_body_openpose,
+ "has_face": has_face_openpose,
+ "has_hand": has_hand_openpose,
+ "preprocessor_name": preprocessor_name,
+ "disable_preprocessing":disable_preprocessing,
+ }
+ image_sp_control = control_net_preprocessing(**setting_processing)
+ return gr.Image.update(image_sp_control)
+ elif img_control is not None:
+ image_show = control_net_preprocessing(model_control_net,img_control,low_threshold,high_threshold,has_body_openpose,has_hand_openpose,has_face_openpose,preprocessor_name,disable_preprocessing)
+ return gr.Image.update(image_show)
+ return gr.Image.update(value = None)
+
+
+
+def change_image_condition(image_condition):
+ if image_condition is None:
+ return gr.Image.update()
+ return gr.Image.update(value= None)
+
+
+#control_net_model,img_control,low_threshold = None,high_threshold=None,has_hand=None,preprocessor_name=None
+def control_net_muti(control_net_model,img_control,low_threshold ,high_threshold,has_body,has_hand,has_face,preprocessor_name,controlnet_scale,control_guidance_start,control_guidance_end,disable_preprocessing):
+ global lst_control
+ if img_control is not None:
+ config = {
+ "control_net_model": control_net_model,
+ "img_control": img_control,
+ "low_threshold": low_threshold,
+ "high_threshold": high_threshold,
+ "has_body": has_body,
+ "has_face": has_face,
+ "has_hand": has_hand,
+ "preprocessor_name": preprocessor_name,
+ "disable_preprocessing":disable_preprocessing,
+ "controlnet_scale": controlnet_scale,
+ "control_guidance_start": control_guidance_start,
+ "control_guidance_end": control_guidance_end,
+ }
+ lst_control.append(config)
+ return gr.Image.update(value = None)
+
+def previous_view_control():
+ global lst_control,current_number_control
+ if current_number_control <= 0:
+ current_number_control = len(lst_control)-1
+ else:
+ current_number_control -= 1
+ return gr.Dropdown.update(value = lst_control[current_number_control]["control_net_model"]),gr.Image.update(value = lst_control[current_number_control]["img_control"]),gr.Slider.update(value = lst_control[current_number_control]["low_threshold"]),gr.Slider.update(value = lst_control[current_number_control]["high_threshold"]),gr.Checkbox.update(value = lst_control[current_number_control]["has_body"]),gr.Checkbox.update(value = lst_control[current_number_control]["has_hand"]),gr.Checkbox.update(value = lst_control[current_number_control]["has_face"]),gr.Radio.update(value = lst_control[current_number_control]["preprocessor_name"]),gr.Slider.update(value= lst_control[current_number_control]["controlnet_scale"]),gr.Slider.update(value= lst_control[current_number_control]["control_guidance_start"]),gr.Slider.update(value= lst_control[current_number_control]["control_guidance_end"]),gr.Checkbox.update(value = lst_control[current_number_control]["disable_preprocessing"])
+
+def next_view_control():
+ global lst_control,current_number_control
+ if current_number_control >= len(lst_control)-1:
+ current_number_control = 0
+ else:
+ current_number_control += 1
+ return gr.Dropdown.update(value = lst_control[current_number_control]["control_net_model"]),gr.Image.update(value = lst_control[current_number_control]["img_control"]),gr.Slider.update(value = lst_control[current_number_control]["low_threshold"]),gr.Slider.update(value = lst_control[current_number_control]["high_threshold"]),gr.Checkbox.update(value = lst_control[current_number_control]["has_body"]),gr.Checkbox.update(value = lst_control[current_number_control]["has_hand"]),gr.Checkbox.update(value = lst_control[current_number_control]["has_face"]),gr.Radio.update(value = lst_control[current_number_control]["preprocessor_name"]),gr.Slider.update(value= lst_control[current_number_control]["controlnet_scale"]),gr.Slider.update(value= lst_control[current_number_control]["control_guidance_start"]),gr.Slider.update(value= lst_control[current_number_control]["control_guidance_end"]),gr.Checkbox.update(value = lst_control[current_number_control]["disable_preprocessing"])
+
+def apply_edit_control_net(control_net_model,img_control,low_threshold ,high_threshold,has_body,has_hand,has_face,preprocessor_name,controlnet_scale,control_guidance_start,control_guidance_end,disable_preprocessing):
+ global lst_control,current_number_control,in_edit_mode
+ if img_control is not None:
+ config = {
+ "control_net_model": control_net_model,
+ "img_control": img_control,
+ "low_threshold": low_threshold,
+ "high_threshold": high_threshold,
+ "has_body": has_body,
+ "has_face": has_face,
+ "has_hand": has_hand,
+ "preprocessor_name": preprocessor_name,
+ "disable_preprocessing":disable_preprocessing,
+ "controlnet_scale": controlnet_scale,
+ "control_guidance_start": control_guidance_start,
+ "control_guidance_end": control_guidance_end,
+ }
+ lst_control[current_number_control] = config
+ return gr.Dropdown.update(),gr.Image.update(),gr.Slider.update(),gr.Slider.update(),gr.Checkbox.update(),gr.Checkbox.update(),gr.Checkbox.update(),gr.Radio.update(),gr.Checkbox.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Slider.update(),gr.Slider.update(),gr.Slider.update(),gr.Checkbox.update()
+ else:
+ lst_control.pop(current_number_control)
+ current_number_control -=1
+ if current_number_control == -1:
+ current_number_control = len(lst_control)-1
+ if len(lst_control) == 0:
+ in_edit_mode = False
+ return gr.Dropdown.update(),gr.Image.update(value = None),gr.Slider.update(),gr.Slider.update(),gr.Checkbox.update(),gr.Checkbox.update(),gr.Checkbox.update(),gr.Radio.update(),gr.Checkbox.update(value = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Slider.update(),gr.Slider.update(),gr.Slider.update(),gr.Checkbox.update()
+ return gr.Dropdown.update(value = lst_control[current_number_control]["control_net_model"]),gr.Image.update(value = lst_control[current_number_control]["img_control"]),gr.Slider.update(value = lst_control[current_number_control]["low_threshold"]),gr.Slider.update(value = lst_control[current_number_control]["high_threshold"]),gr.Checkbox.update(value = lst_control[current_number_control]["has_body"]),gr.Checkbox.update(value = lst_control[current_number_control]["has_hand"]),gr.Checkbox.update(value = lst_control[current_number_control]["has_face"]),gr.Radio.update(value = lst_control[current_number_control]["preprocessor_name"]),gr.Checkbox.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Slider.update(value= lst_control[current_number_control]["controlnet_scale"]),gr.Slider.update(value= lst_control[current_number_control]["control_guidance_start"]),gr.Slider.update(value= lst_control[current_number_control]["control_guidance_end"]),gr.Checkbox.update(value = lst_control[current_number_control]["disable_preprocessing"])
+
+def complete_edit_multi():
+ global current_number_control,in_edit_mode
+ current_number_control = 0
+ in_edit_mode = False
+ return gr.Button.update(visible = True),gr.Button.update(visible = True),gr.Image.update(value= None),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False)
+
+def multi_controlnet_function(multi_controlnet):
+ if multi_controlnet:
+ return gr.Checkbox.update(value = True),gr.Button.update(visible = True),gr.Button.update(visible = True),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Button.update()
+ return gr.Checkbox.update(),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False)
+
+def edit_multi_control_image_function():
+ global lst_control,current_number_control,in_edit_mode
+ if len(lst_control) > 0:
+ in_edit_mode = True
+ return gr.Button.update(visible = True),gr.Button.update(visible = True),gr.Button.update(visible = True),gr.Button.update(visible = True),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Dropdown.update(value = lst_control[current_number_control]["control_net_model"]),gr.Image.update(value = lst_control[current_number_control]["img_control"]),gr.Slider.update(value = lst_control[current_number_control]["low_threshold"]),gr.Slider.update(value = lst_control[current_number_control]["high_threshold"]),gr.Checkbox.update(value = lst_control[current_number_control]["has_body"]),gr.Checkbox.update(value = lst_control[current_number_control]["has_hand"]),gr.Checkbox.update(value = lst_control[current_number_control]["has_face"]),gr.Radio.update(value = lst_control[current_number_control]["preprocessor_name"]),gr.Slider.update(value= lst_control[current_number_control]["controlnet_scale"]),gr.Slider.update(value= lst_control[current_number_control]["control_guidance_start"]),gr.Slider.update(value= lst_control[current_number_control]["control_guidance_end"]),gr.Checkbox.update(value = lst_control[current_number_control]["disable_preprocessing"])
+ in_edit_mode = False
+ return gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Dropdown.update(),gr.Image.update(),gr.Slider.update(),gr.Slider.update(),gr.Checkbox.update(),gr.Checkbox.update(),gr.Checkbox.update(),gr.Radio.update(),gr.Slider.update(),gr.Slider.update(),gr.Slider.update(),gr.Checkbox.update()
+
+def ip_adapter_work(ip_adapter):
+ if ip_adapter:
+ return gr.Checkbox.update(value = True)
+ return gr.Checkbox.update()
+
+
+def preview_image_adapter(model_adapter,low_threshold_adapter,high_threshold_adapter,has_body_openpose_adapter,has_hand_openpose_adapter,has_face_openpose_adapter,img_control,preprocessor_adapter,multi_adapter,disable_preprocessing_adapter):
+ global in_edit_mode_adapter
+    if multi_adapter and in_edit_mode_adapter:
+ global lst_adapter,current_number_adapter
+ if model_adapter == lst_adapter[current_number_adapter]["model_adapter"]:
+ setting_processing = list(lst_adapter[current_number_adapter].items())
+ setting_processing = setting_processing[:-3]
+ setting_processing = dict(setting_processing)
+ else:
+ setting_processing = {
+ "model_adapter": model_adapter,
+ "img_control": img_control,
+ "low_threshold_adapter": low_threshold_adapter,
+ "high_threshold_adapter": high_threshold_adapter,
+ "has_body": has_body_openpose_adapter,
+ "has_face": has_face_openpose_adapter,
+ "has_hand": has_hand_openpose_adapter,
+ "preprocessor_adapter": preprocessor_adapter,
+ "disable_preprocessing_adapter":disable_preprocessing_adapter,
+ }
+ image_sp_control = adapter_preprocessing(**setting_processing)
+ return gr.Image.update(image_sp_control)
+ elif img_control is not None:
+ image_show = adapter_preprocessing(model_adapter,img_control,low_threshold_adapter,high_threshold_adapter,has_body_openpose_adapter,has_hand_openpose_adapter,has_face_openpose_adapter,preprocessor_adapter,disable_preprocessing_adapter)
+ return gr.Image.update(image_show)
+ return gr.Image.update(value = None)
+
+
+
+def change_image_condition_adapter(image_condition_adapter):
+ if image_condition_adapter is None:
+ return gr.Image.update()
+ return gr.Image.update(value= None)
+
+
+#control_net_model,img_control,low_threshold_adapter = None,high_threshold_adapter=None,has_hand=None,preprocessor_adapter=None
+def adapter_muti(model_adapter,img_control,low_threshold_adapter ,high_threshold_adapter,has_body,has_hand,has_face,preprocessor_adapter,adapter_conditioning_scale,adapter_conditioning_factor,disable_preprocessing_adapter):
+ global lst_adapter
+ if img_control is not None:
+ config = {
+ "model_adapter": model_adapter,
+ "img_control": img_control,
+ "low_threshold_adapter": low_threshold_adapter,
+ "high_threshold_adapter": high_threshold_adapter,
+ "has_body": has_body,
+ "has_face": has_face,
+ "has_hand": has_hand,
+ "preprocessor_adapter": preprocessor_adapter,
+ "disable_preprocessing_adapter":disable_preprocessing_adapter,
+ "adapter_conditioning_scale": adapter_conditioning_scale,
+ "adapter_conditioning_factor": adapter_conditioning_factor,
+ }
+ lst_adapter.append(config)
+ return gr.Image.update(value = None)
+
+def previous_view_adapter():
+ global lst_adapter,current_number_adapter
+ if current_number_adapter <= 0:
+ current_number_adapter = len(lst_adapter)-1
+ else:
+ current_number_adapter -= 1
+ return gr.Dropdown.update(value = lst_adapter[current_number_adapter]["model_adapter"]),gr.Image.update(value = lst_adapter[current_number_adapter]["img_control"]),gr.Slider.update(value = lst_adapter[current_number_adapter]["low_threshold_adapter"]),gr.Slider.update(value = lst_adapter[current_number_adapter]["high_threshold_adapter"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["has_body"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["has_hand"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["has_face"]),gr.Radio.update(value = lst_adapter[current_number_adapter]["preprocessor_adapter"]),gr.Slider.update(value= lst_adapter[current_number_adapter]["adapter_conditioning_scale"]),gr.Slider.update(value= lst_adapter[current_number_adapter]["adapter_conditioning_factor"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["disable_preprocessing_adapter"])
+
+def next_view_adapter():
+ global lst_adapter,current_number_adapter
+ if current_number_adapter >= len(lst_adapter)-1:
+ current_number_adapter = 0
+ else:
+ current_number_adapter += 1
+ return gr.Dropdown.update(value = lst_adapter[current_number_adapter]["model_adapter"]),gr.Image.update(value = lst_adapter[current_number_adapter]["img_control"]),gr.Slider.update(value = lst_adapter[current_number_adapter]["low_threshold_adapter"]),gr.Slider.update(value = lst_adapter[current_number_adapter]["high_threshold_adapter"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["has_body"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["has_hand"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["has_face"]),gr.Radio.update(value = lst_adapter[current_number_adapter]["preprocessor_adapter"]),gr.Slider.update(value= lst_adapter[current_number_adapter]["adapter_conditioning_scale"]),gr.Slider.update(value= lst_adapter[current_number_adapter]["adapter_conditioning_factor"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["disable_preprocessing_adapter"])
+
+def apply_edit_adapter(model_adapter,img_control,low_threshold_adapter ,high_threshold_adapter,has_body,has_hand,has_face,preprocessor_adapter,adapter_conditioning_scale,adapter_conditioning_factor,disable_preprocessing_adapter):
+ global lst_adapter,current_number_adapter,in_edit_mode_adapter
+ if img_control is not None:
+ config = {
+ "model_adapter": model_adapter,
+ "img_control": img_control,
+ "low_threshold_adapter": low_threshold_adapter,
+ "high_threshold_adapter": high_threshold_adapter,
+ "has_body": has_body,
+ "has_face": has_face,
+ "has_hand": has_hand,
+ "preprocessor_adapter": preprocessor_adapter,
+ "disable_preprocessing_adapter":disable_preprocessing_adapter,
+ "adapter_conditioning_scale": adapter_conditioning_scale,
+ "adapter_conditioning_factor": adapter_conditioning_factor,
+ }
+ lst_adapter[current_number_adapter] = config
+ return gr.Dropdown.update(),gr.Image.update(),gr.Slider.update(),gr.Slider.update(),gr.Checkbox.update(),gr.Checkbox.update(),gr.Checkbox.update(),gr.Radio.update(),gr.Checkbox.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Slider.update(),gr.Slider.update(),gr.Checkbox.update()
+ else:
+ lst_adapter.pop(current_number_adapter)
+ current_number_adapter -=1
+ if current_number_adapter == -1:
+ current_number_adapter = len(lst_adapter)-1
+ if len(lst_adapter) == 0:
+ in_edit_mode_adapter = False
+ return gr.Dropdown.update(),gr.Image.update(value = None),gr.Slider.update(),gr.Slider.update(),gr.Checkbox.update(),gr.Checkbox.update(),gr.Checkbox.update(),gr.Radio.update(),gr.Checkbox.update(value = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Slider.update(),gr.Slider.update(),gr.Checkbox.update()
+ return gr.Dropdown.update(value = lst_adapter[current_number_adapter]["model_adapter"]),gr.Image.update(value = lst_adapter[current_number_adapter]["img_control"]),gr.Slider.update(value = lst_adapter[current_number_adapter]["low_threshold_adapter"]),gr.Slider.update(value = lst_adapter[current_number_adapter]["high_threshold_adapter"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["has_body"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["has_hand"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["has_face"]),gr.Radio.update(value = lst_adapter[current_number_adapter]["preprocessor_adapter"]),gr.Checkbox.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Slider.update(value= lst_adapter[current_number_adapter]["adapter_conditioning_scale"]),gr.Slider.update(value= lst_adapter[current_number_adapter]["adapter_conditioning_factor"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["disable_preprocessing_adapter"])
+
+def complete_edit_multi_adapter():
+ global current_number_adapter,in_edit_mode_adapter
+ current_number_adapter = 0
+ in_edit_mode_adapter = False
+ return gr.Button.update(visible = True),gr.Button.update(visible = True),gr.Image.update(value= None),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False)
+
+def multi_adapter_function(multi_adapter):
+ if multi_adapter:
+ return gr.Checkbox.update(value = True),gr.Button.update(visible = True),gr.Button.update(visible = True),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Button.update()
+ return gr.Checkbox.update(),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False)
+
+def edit_multi_adapter_image_function():
+ global lst_adapter,current_number_adapter,in_edit_mode_adapter
+ if len(lst_adapter) > 0:
+ in_edit_mode_adapter = True
+ return gr.Button.update(visible = True),gr.Button.update(visible = True),gr.Button.update(visible = True),gr.Button.update(visible = True),gr.Button.update(visible = False),gr.Button.update(visible = False),gr.Dropdown.update(value = lst_adapter[current_number_adapter]["model_adapter"]),gr.Image.update(value = lst_adapter[current_number_adapter]["img_control"]),gr.Slider.update(value = lst_adapter[current_number_adapter]["low_threshold_adapter"]),gr.Slider.update(value = lst_adapter[current_number_adapter]["high_threshold_adapter"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["has_body"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["has_hand"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["has_face"]),gr.Radio.update(value = lst_adapter[current_number_adapter]["preprocessor_adapter"]),gr.Slider.update(value= lst_adapter[current_number_adapter]["adapter_conditioning_scale"]),gr.Slider.update(value= lst_adapter[current_number_adapter]["adapter_conditioning_factor"]),gr.Checkbox.update(value = lst_adapter[current_number_adapter]["disable_preprocessing_adapter"])
+ in_edit_mode_adapter = False
+ return gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Button.update(),gr.Dropdown.update(),gr.Image.update(),gr.Slider.update(),gr.Slider.update(),gr.Checkbox.update(),gr.Checkbox.update(),gr.Checkbox.update(),gr.Radio.update(),gr.Slider.update(),gr.Slider.update(),gr.Checkbox.update()
+
+
+def ip_adpater_function(ip_adapter):
+ if ip_adapter:
+ return gr.Checkbox.update()
+ return gr.Checkbox.update(value = False)
+
+#ip_adapter,inf_adapt_image,inf_adapt_image_multi,inf_adapt_image_strength,inf_adapt_image_strength_multi,edit_ip_adapter_setting,apply_ip_adapter_setting
+def ip_adpater_multi_function(ip_adapter_multi):
+ if ip_adapter_multi:
+ return gr.Dropdown.update(choices=[k for k in model_ip_adapter_lst[:-2]],value=model_ip_adapter_lst[0]),gr.Checkbox.update(value = True), gr.Image.update(visible = False), gr.Image.update(visible = True), gr.Slider.update(visible = False), gr.Slider.update(visible = True),gr.Button.update(visible = True),gr.Button.update(visible = True), gr.Image.update(visible = False), gr.Image.update(visible = True)
+ return gr.Dropdown.update(choices=[k for k in model_ip_adapter_lst],value=model_ip_adapter_lst[0]),gr.Checkbox.update(), gr.Image.update(visible = True), gr.Image.update(visible = False), gr.Slider.update(visible = True), gr.Slider.update(visible = False),gr.Button.update(visible = False),gr.Button.update(visible = False), gr.Image.update(visible = True), gr.Image.update(visible = False)
+
+def apply_ip_adapter_setting_function(model_ip_adapter,inf_adapt_image_multi,inf_adapt_image_strength_multi,inf_control_adapt_image_multi):
+ global lst_ip_adapter,current_number_ip_adapter
+ if inf_adapt_image_multi is not None:
+ config ={
+ "model" : model_ip_adapter,
+ "image" : inf_adapt_image_multi,
+ "region_apply": inf_control_adapt_image_multi,
+ "scale" : float(inf_adapt_image_strength_multi),
+ }
+ lst_ip_adapter.append(config)
+ return gr.Image.update(value = None),gr.Image.update(value = None)
+ return gr.Image.update(value = None),gr.Image.update(value = None)
+
+#model_ip_adapter,inf_adapt_image_multi,inf_adapt_image_strength_multi,previous_ip_adapter_setting,next_ip_adapter_setting,apply_edit_ip_adapter_setting,complete_cip_adapter_setting,edit_ip_adapter_setting,apply_ip_adapter_setting
+def edit_ip_adapter_setting_function():
+ global lst_ip_adapter,current_number_ip_adapter
+ if len(lst_ip_adapter) == 0:
+ return (
+ gr.Dropdown.update(),
+ gr.Image.update(),
+ gr.Slider.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Image.update(),
+ )
+ return (
+ gr.Dropdown.update(value = lst_ip_adapter[current_number_ip_adapter]["model"]),
+ gr.Image.update(value = lst_ip_adapter[current_number_ip_adapter]["image"]),
+ gr.Slider.update(value = lst_ip_adapter[current_number_ip_adapter]["scale"]),
+ gr.Button.update(visible = True),
+ gr.Button.update(visible = True),
+ gr.Button.update(visible = True),
+ gr.Button.update(visible = True),
+ gr.Button.update(visible = False),
+ gr.Button.update(visible = False),
+ gr.Image.update(value = lst_ip_adapter[current_number_ip_adapter]["region_apply"]),
+ )
+
+def previous_ip_adapter_setting_function():
+ global lst_ip_adapter,current_number_ip_adapter
+ current_number_ip_adapter -= 1
+ if current_number_ip_adapter < 0:
+ current_number_ip_adapter = len(lst_ip_adapter) -1
+ return (
+ gr.Dropdown.update(value = lst_ip_adapter[current_number_ip_adapter]["model"]),
+ gr.Image.update(value = lst_ip_adapter[current_number_ip_adapter]["image"]),
+ gr.Slider.update(value = lst_ip_adapter[current_number_ip_adapter]["scale"]),
+ gr.Image.update(value = lst_ip_adapter[current_number_ip_adapter]["region_apply"]),
+ )
+
+def next_ip_adapter_setting_function():
+ global lst_ip_adapter,current_number_ip_adapter
+ current_number_ip_adapter += 1
+ if current_number_ip_adapter == len(lst_ip_adapter):
+ current_number_ip_adapter = 0
+ return (
+ gr.Dropdown.update(value = lst_ip_adapter[current_number_ip_adapter]["model"]),
+ gr.Image.update(value = lst_ip_adapter[current_number_ip_adapter]["image"]),
+ gr.Slider.update(value = lst_ip_adapter[current_number_ip_adapter]["scale"]),
+ gr.Image.update(value = lst_ip_adapter[current_number_ip_adapter]["region_apply"]),
+ )
+
+#inf_adapt_image_multi,previous_ip_adapter_setting,next_ip_adapter_setting,edit_ip_adapter_setting,apply_ip_adapter_setting,apply_edit_ip_adapter_setting,complete_cip_adapter_setting
+def complete_cip_adapter_setting_function():
+ return (
+ gr.Image.update(value = None),
+ gr.Button.update(visible = False),
+ gr.Button.update(visible = False),
+ gr.Button.update(visible = True),
+ gr.Button.update(visible = True),
+ gr.Button.update(visible = False),
+ gr.Button.update(visible = False),
+ gr.Image.update(value = None),
+ )
+
+
+#model_ip_adapter,inf_adapt_image_multi,inf_adapt_image_strength_multi,previous_ip_adapter_setting,next_ip_adapter_setting,edit_ip_adapter_setting,apply_ip_adapter_setting,apply_edit_ip_adapter_setting,complete_cip_adapter_setting
+def apply_edit_ip_adapter_setting_function(model_ip_adapter,inf_adapt_image_multi,inf_adapt_image_strength_multi,inf_control_adapt_image_multi):
+ global lst_ip_adapter,current_number_ip_adapter
+ if inf_adapt_image_multi is not None:
+ config_change = lst_ip_adapter[current_number_ip_adapter]
+ config_change["model"] = model_ip_adapter
+ config_change["image"] = inf_adapt_image_multi
+ config_change["scale"] = float(inf_adapt_image_strength_multi)
+ config_change["region_apply"] = inf_control_adapt_image_multi
+ return (
+ gr.Dropdown.update(),
+ gr.Image.update(),
+ gr.Slider.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Image.update(),
+ )
+ #Delete
+ lst_ip_adapter.pop(current_number_ip_adapter)
+ current_number_ip_adapter -= 1
+ if len(lst_ip_adapter) == 0:
+ return (
+ gr.Dropdown.update(),
+ gr.Image.update(value = None),
+ gr.Slider.update(),
+ gr.Button.update(visible = False),
+ gr.Button.update(visible = False),
+ gr.Button.update(visible = True),
+ gr.Button.update(visible = True),
+ gr.Button.update(visible = False),
+ gr.Button.update(visible = False),
+ gr.Image.update(value = None),
+ )
+ if current_number_ip_adapter == -1:
+ current_number_ip_adapter = len(lst_ip_adapter)-1
+ return (
+ gr.Dropdown.update(value = lst_ip_adapter[current_number_ip_adapter]["model"]),
+ gr.Image.update(value = lst_ip_adapter[current_number_ip_adapter]["image"]),
+ gr.Slider.update(value = lst_ip_adapter[current_number_ip_adapter]["scale"]),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Button.update(),
+ gr.Image.update(value = lst_ip_adapter[current_number_ip_adapter]["region_apply"]),
+ )
+
+def inpaiting_mode_fuction(inpaiting_mode):
+ if inpaiting_mode:
+ return gr.Image.update(visible = False),gr.Image.update(visible = True), gr.Image.update(visible = True),gr.Checkbox.update(visible = True),gr.Button.update(visible = True),gr.Slider.update(value = 1.0)
+ return gr.Image.update(visible = True),gr.Image.update(visible = False), gr.Image.update(visible = False),gr.Checkbox.update(visible = False),gr.Button.update(visible = False),gr.Slider.update(value = 0.5)
+
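+# Note: with the gradio 3.x sketch tool, the Image component returns a dict that
+# carries both the edited picture and the drawn mask (keys "image" and "mask"),
+# which is why the helper below checks for a dict before pulling out the mask.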
+def get_mask_fuction(inf_image_inpaiting):
+ img_mask = None
+ if isinstance(inf_image_inpaiting,dict):
+ img_mask = inf_image_inpaiting["mask"].copy()
+ return gr.Image.update(img_mask)
+
+latent_upscale_modes = {
+ "Latent (bilinear)": {"upscale_method": "bilinear", "upscale_antialias": False},
+ "Latent (bilinear antialiased)": {"upscale_method": "bilinear", "upscale_antialias": True},
+ "Latent (bicubic)": {"upscale_method": "bicubic", "upscale_antialias": False},
+ "Latent (bicubic antialiased)": {
+ "upscale_method": "bicubic",
+ "upscale_antialias": True,
+ },
+ "Latent (nearest)": {"upscale_method": "nearest", "upscale_antialias": False},
+ "Latent (nearest-exact)": {
+ "upscale_method": "nearest-exact",
+ "upscale_antialias": False,
+ },
+ #"Latent (linear)": {"upscale_method": "linear", "upscale_antialias": False},
+ #"Latent (trilinear)": {"upscale_method": "trilinear", "upscale_antialias": False},
+ "Latent (area)": {"upscale_method": "area", "upscale_antialias": False},
+}
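+
+# Hedged sketch (not wired into the pipeline above): one way the table could be
+# consumed for the hires-fix pass. `_example_upscale_latents` is a hypothetical
+# helper for illustration only; the real upscaling lives in the pipeline/module
+# code and may compute an explicit target size instead of a scale factor.
+def _example_upscale_latents(latents, mode_name, scale_factor):
+    opts = latent_upscale_modes[mode_name]
+    # latents: a (batch, channels, height, width) latent tensor
+    return F.interpolate(
+        latents,
+        scale_factor=scale_factor,
+        mode=opts["upscale_method"],
+        antialias=opts["upscale_antialias"],
+    )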
+
+css = """
+.finetuned-diffusion-div div{
+ display:inline-flex;
+ align-items:center;
+ gap:.8rem;
+ font-size:1.75rem;
+ padding-top:2rem;
+}
+.finetuned-diffusion-div div h1{
+ font-weight:900;
+ margin-bottom:7px
+}
+.finetuned-diffusion-div p{
+ margin-bottom:10px;
+ font-size:94%
+}
+.box {
+ float: left;
+ height: 20px;
+ width: 20px;
+ margin-bottom: 15px;
+ border: 1px solid black;
+ clear: both;
+}
+a{
+ text-decoration:underline
+}
+.tabs{
+ margin-top:0;
+ margin-bottom:0
+}
+#gallery{
+ min-height:20rem
+}
+.no-border {
+ border: none !important;
+}
+ """
+with gr.Blocks(css=css) as demo:
+    gr.HTML(
+        f"""
+            <div class="finetuned-diffusion-div">
+              <div>
+                <h1>Demo for diffusion models</h1>
+              </div>
+              <p>Running on CPU 🥶 This demo does not work on CPU.</p>
+            </div>
+            """
+    )
+ global_stats = gr.State(value={})
+
+ with gr.Row():
+
+ with gr.Column(scale=55):
+ model = gr.Dropdown(
+ choices=[k[0] for k in get_model_list()],
+ label="Model",
+ value=base_name,
+ )
+ with gr.Row():
+ image_out = gr.Image()
+ gallery = gr.Gallery(label="Generated images", show_label=True, elem_id="gallery",visible = False).style(grid=[1], height="auto")
+
+ with gr.Column(scale=45):
+
+ with gr.Group():
+
+ with gr.Row():
+ with gr.Column(scale=70):
+
+ prompt = gr.Textbox(
+ label="Prompt",
+ value="loli cat girl, blue eyes, flat chest, solo, long messy silver hair, blue capelet, cat ears, cat tail, upper body",
+ show_label=True,
+ #max_lines=4,
+ placeholder="Enter prompt.",
+ )
+ neg_prompt = gr.Textbox(
+ label="Negative Prompt",
+ value="bad quality, low quality, jpeg artifact, cropped",
+ show_label=True,
+ #max_lines=4,
+ placeholder="Enter negative prompt.",
+ )
+
+ generate = gr.Button(value="Generate").style(
+ rounded=(False, True, True, False)
+ )
+
+ with gr.Tab("Options"):
+
+ with gr.Group():
+
+ # n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1)
+ with gr.Row():
+ diffuser_pipeline = gr.Checkbox(label="Using diffusers pipeline", value=False)
+ latent_processing = gr.Checkbox(label="Show processing", value=False)
+ region_condition = gr.Checkbox(label="Enable region condition", value=False)
+ with gr.Row():
+ guidance = gr.Slider(
+ label="Guidance scale", value=7.5, maximum=20
+ )
+ guidance_rescale = gr.Slider(
+ label="Guidance rescale", value=0, maximum=20
+ )
+ with gr.Row():
+ width = gr.Slider(
+ label="Width", value=512, minimum=64, maximum=1920, step=8
+ )
+ height = gr.Slider(
+ label="Height", value=512, minimum=64, maximum=1920, step=8
+ )
+ with gr.Row():
+ clip_skip = gr.Slider(
+ label="Clip Skip", value=2, minimum=1, maximum=12, step=1
+ )
+ steps = gr.Slider(
+ label="Steps", value=25, minimum=2, maximum=100, step=1
+ )
+ with gr.Row():
+                        long_encode = gr.Dropdown(
+ value="Automatic111 Encoding",
+ label="Encoding prompt type",
+ choices=[s for s in encoding_type],
+ )
+ sampler = gr.Dropdown(
+ value="DPM++ 2M Karras",
+ label="Sampler",
+ choices=[s[0] for s in samplers_k_diffusion],
+ )
+ with gr.Row():
+ seed = gr.Number(label="Seed (Lower than 0 = random)", value=-1)
+ Insert_model = gr.Textbox(
+ label="Insert model",
+ show_label=True,
+ placeholder="Enter a model's link.",
+ )
+ insert_model = gr.Button(value="Insert")
+ #reset_model = gr.Button(value="Reset")
+
+ insert_model.click(
+ add_model,
+ inputs=[Insert_model],
+ outputs=[model, Insert_model],
+ queue=False,
+ )
+
+
+ with gr.Tab("Image to image/Inpaiting"):
+ with gr.Group():
+ with gr.Row():
+                        inpaiting_mode = gr.Checkbox(label="Inpainting", value=False)
+ invert_mask_mode = gr.Checkbox(label="Black areas are used", value=False,visible = False)
+ with gr.Row():
+ inf_image = gr.Image(
+ label="Image", source="upload", type="pil",
+ )
+ inf_image_inpaiting = gr.Image(
+ label="Image", source="upload", type="pil", tool="sketch",visible = False
+ )
+ mask_upload = gr.Image(
+ label="Mask", source="upload", type="pil",image_mode='L',visible = False,
+ )
+ inf_strength = gr.Slider(
+ label="Transformation strength",
+ minimum=0,
+ maximum=1,
+ step=0.01,
+ value=0.5,
+ )
+ get_mask = gr.Button(value="Get mask",visible = False)
+ inpaiting_mode.change(
+ inpaiting_mode_fuction,
+ inputs=[inpaiting_mode],
+ outputs=[inf_image,inf_image_inpaiting,mask_upload,invert_mask_mode,get_mask,inf_strength],
+ queue=False,
+ )
+ get_mask.click(
+ get_mask_fuction,
+ inputs=[inf_image_inpaiting],
+ outputs=[mask_upload],
+ queue=False,
+ )
+ with gr.Tab("Hires fix"):
+ with gr.Group():
+ with gr.Row():
+ hr_enabled = gr.Checkbox(label="Enable upscaler", value=False)
+ hr_process_enabled = gr.Checkbox(label="Show processing upscaler", value=False)
+ hr_region_condition = gr.Checkbox(label="Enable region condition upscaler", value=False)
+ with gr.Row():
+ hr_method = gr.Dropdown(
+ [key for key in latent_upscale_modes.keys()],
+ value="Latent (bilinear)",
+ label="Upscale method",
+ )
+ sampler_hires = gr.Dropdown(
+ value="DPM++ 2M Karras",
+ label="Sampler",
+ choices=[s[0] for s in samplers_k_diffusion],
+ )
+
+ hr_scale = gr.Slider(
+ label="Upscale factor",
+ minimum=1.0,
+ maximum=2.0,
+ step=0.1,
+ value=1.2,
+ )
+ hr_denoise = gr.Slider(
+ label="Denoising strength",
+ minimum=0.0,
+ maximum=1.0,
+ step=0.1,
+ value=0.8,
+ )
+
+ hr_scale.change(
+ lambda g, x, w, h: gr.Checkbox.update(
+ label=res_cap(g, w, h, x)
+ ),
+ inputs=[hr_enabled, hr_scale, width, height],
+ outputs=hr_enabled,
+ queue=False,
+ )
+ hr_process_enabled.change(
+ change_gallery,
+ inputs=[latent_processing,hr_process_enabled],
+ outputs=[gallery],
+ queue=False,
+ )
+ latent_processing.change(
+ change_gallery,
+ inputs=[latent_processing,hr_process_enabled],
+ outputs=[gallery],
+ queue=False,
+ )
+ with gr.Tab("IP-Adapter"):
+ with gr.Group():
+ with gr.Row():
+ ip_adapter = gr.Checkbox(label="Using IP-Adapter", value=False)
+ ip_adapter_multi = gr.Checkbox(label="Using Multi IP-Adapter", value=False)
+ invert_ip_adapter_mask_mode = gr.Checkbox(label="Black areas are used", value=True)
+ model_ip_adapter = gr.Dropdown(
+ choices=[k for k in model_ip_adapter_lst],
+ label="Model IP-Adapter",
+ value=model_ip_adapter_lst[0],
+ )
+
+ with gr.Row():
+ inf_adapt_image = gr.Image(
+ label="IP-Adapter", source="upload", type="pil"
+ )
+ inf_control_adapt_image = gr.Image(
+ label="Region apply", source="upload", type="pil",image_mode='L'
+ )
+ inf_adapt_image_multi = gr.Image(
+ label="IP-Adapter", source="upload", type="pil",visible= False
+ )
+ inf_control_adapt_image_multi = gr.Image(
+ label="Region apply", source="upload", type="pil",image_mode='L',visible= False
+ )
+ inf_adapt_image_strength = gr.Slider(
+ label="IP-Adapter scale",
+ minimum=0,
+ maximum=2,
+ step=0.01,
+ value=1,
+ )
+ inf_adapt_image_strength_multi = gr.Slider(
+ label="IP-Adapter scale",
+ minimum=0,
+ maximum=2,
+ step=0.01,
+ value=1,
+ visible= False,
+ )
+ with gr.Row():
+ previous_ip_adapter_setting = gr.Button(value="Previous setting",visible = False)
+ next_ip_adapter_setting = gr.Button(value="Next setting",visible = False)
+ with gr.Row():
+ edit_ip_adapter_setting = gr.Button(value="Edit previous setting",visible = False)
+ apply_ip_adapter_setting = gr.Button(value="Apply setting",visible = False)
+ with gr.Row():
+ apply_edit_ip_adapter_setting = gr.Button(value="Apply change",visible = False)
+ complete_cip_adapter_setting = gr.Button(value="Complete change",visible = False)
+ ip_adapter.change(
+ ip_adpater_function,
+ inputs=[ip_adapter],
+ outputs=[ip_adapter_multi],
+ queue=False,
+ )
+ ip_adapter_multi.change(
+ ip_adpater_multi_function,
+ inputs=[ip_adapter_multi],
+ outputs=[model_ip_adapter,ip_adapter,inf_adapt_image,inf_adapt_image_multi,inf_adapt_image_strength,inf_adapt_image_strength_multi,edit_ip_adapter_setting,apply_ip_adapter_setting,inf_control_adapt_image,inf_control_adapt_image_multi],
+ queue=False,
+ )
+ apply_ip_adapter_setting.click(
+ apply_ip_adapter_setting_function,
+ inputs = [model_ip_adapter,inf_adapt_image_multi,inf_adapt_image_strength_multi,inf_control_adapt_image_multi],
+ outputs = [inf_adapt_image_multi,inf_control_adapt_image_multi],
+ )
+ edit_ip_adapter_setting.click(
+ edit_ip_adapter_setting_function,
+ inputs = [],
+ outputs =[model_ip_adapter,inf_adapt_image_multi,inf_adapt_image_strength_multi,previous_ip_adapter_setting,next_ip_adapter_setting,apply_edit_ip_adapter_setting,complete_cip_adapter_setting,edit_ip_adapter_setting,apply_ip_adapter_setting,inf_control_adapt_image_multi],
+ queue =False,
+ )
+ previous_ip_adapter_setting.click(
+ previous_ip_adapter_setting_function,
+ inputs = [],
+ outputs = [model_ip_adapter,inf_adapt_image_multi,inf_adapt_image_strength_multi,inf_control_adapt_image_multi],
+ queue = False,
+ )
+ next_ip_adapter_setting.click(
+ next_ip_adapter_setting_function,
+ inputs = [],
+ outputs = [model_ip_adapter,inf_adapt_image_multi,inf_adapt_image_strength_multi,inf_control_adapt_image_multi],
+ queue = False,
+ )
+ apply_edit_ip_adapter_setting.click(
+ apply_edit_ip_adapter_setting_function,
+ inputs = [model_ip_adapter,inf_adapt_image_multi,inf_adapt_image_strength_multi,inf_control_adapt_image_multi],
+ outputs =[model_ip_adapter,inf_adapt_image_multi,inf_adapt_image_strength_multi,previous_ip_adapter_setting,next_ip_adapter_setting,edit_ip_adapter_setting,apply_ip_adapter_setting,apply_edit_ip_adapter_setting,complete_cip_adapter_setting,inf_control_adapt_image_multi],
+ queue = False,
+ )
+ complete_cip_adapter_setting.click(
+ complete_cip_adapter_setting_function,
+ inputs = [],
+ outputs = [inf_adapt_image_multi,previous_ip_adapter_setting,next_ip_adapter_setting,edit_ip_adapter_setting,apply_ip_adapter_setting,apply_edit_ip_adapter_setting,complete_cip_adapter_setting,inf_control_adapt_image_multi],
+ queue = False,
+ )
+ with gr.Tab("Controlnet"):
+ with gr.Group():
+ with gr.Row():
+ controlnet_enabled = gr.Checkbox(label="Enable Controlnet", value=False)
+ disable_preprocessing = gr.Checkbox(label="Disable preprocessing", value=False)
+ multi_controlnet = gr.Checkbox(label="Enable Multi Controlnet", value=False)
+ #sketch_enabled = gr.Checkbox(label="Sketch image", value=False)
+ model_control_net = gr.Dropdown(
+ choices=[k for k in controlnet_lst],
+ label="Model Controlnet",
+ value=controlnet_lst[0],
+ )
+ with gr.Row():
+ low_threshold = gr.Slider(
+ label="Canny low threshold", value=100, minimum=1, maximum=255, step=1
+ )
+ high_threshold = gr.Slider(
+ label="Canny high threshold", value=200, minimum=1, maximum=255, step=1
+ )
+ with gr.Row():
+ has_body_openpose = gr.Checkbox(label="Has body", value=True,visible= False)
+ has_hand_openpose = gr.Checkbox(label="Has hand", value=False,visible= False)
+ has_face_openpose = gr.Checkbox(label="Has face", value=False,visible= False)
+ preprocessor_name = gr.Radio(
+ label="Preprocessor",
+ type="value",
+ visible= False,
+ )
+ with gr.Row():
+ control_guidance_start = gr.Slider(
+ label="Control guidance start", value=0, minimum=0, maximum=1, step=0.01
+ )
+ control_guidance_end = gr.Slider(
+ label="Control guidance end", value=1, minimum=0, maximum=1, step=0.01
+ )
+ controlnet_scale = gr.Slider(
+ label="Controlnet scale", value=1, minimum=0, maximum=2, step=0.01
+ )
+ with gr.Row():
+ controlnet_img = gr.Image(
+ image_mode="RGB",
+ source="upload",
+ label = "Image",
+ type = 'pil',
+ )
+ image_condition = gr.Image(interactive=False,image_mode="RGB",label = "Preprocessor Preview",type = 'pil')
+ control_image_click = gr.Button(value="Preview")
+ with gr.Row():
+ previous_multi_control_image = gr.Button(value="Previous control setting",visible = False)
+ next_multi_control_image = gr.Button(value="Next control setting",visible = False)
+ with gr.Row():
+ edit_multi_control_image = gr.Button(value="Edit previous setting",visible = False)
+ apply_multi_control_image = gr.Button(value="Apply setting",visible = False)
+ with gr.Row():
+ apply_edit_multi = gr.Button(value="Apply change",visible = False)
+ complete_change_multi = gr.Button(value="Complete change",visible = False)
+
+ control_image_click.click(
+ preview_image,
+ inputs=[model_control_net,low_threshold,high_threshold,has_body_openpose,has_hand_openpose,has_face_openpose,controlnet_img,preprocessor_name,multi_controlnet,disable_preprocessing],
+ outputs=[image_condition],
+ queue=False,
+ )
+ multi_controlnet.change(
+ multi_controlnet_function,
+ inputs=[multi_controlnet],
+ outputs=[controlnet_enabled,edit_multi_control_image,apply_multi_control_image,previous_multi_control_image,next_multi_control_image,apply_edit_multi,complete_change_multi],
+ queue=False,
+ )
+ edit_multi_control_image.click(
+ edit_multi_control_image_function,
+ inputs=[],
+ outputs=[previous_multi_control_image,next_multi_control_image,apply_edit_multi,complete_change_multi,edit_multi_control_image,apply_multi_control_image,model_control_net,controlnet_img,low_threshold,high_threshold,has_body_openpose,has_hand_openpose,has_face_openpose,preprocessor_name,controlnet_scale,control_guidance_start,control_guidance_end,disable_preprocessing],
+ queue=False,
+ )
+
+ previous_multi_control_image.click(
+ previous_view_control,
+ inputs=[],
+ outputs=[model_control_net,controlnet_img,low_threshold,high_threshold,has_body_openpose,has_hand_openpose,has_face_openpose,preprocessor_name,controlnet_scale,control_guidance_start,control_guidance_end,disable_preprocessing],
+ queue=False,
+ )
+
+ next_multi_control_image.click(
+ next_view_control,
+ inputs=[],
+ outputs=[model_control_net,controlnet_img,low_threshold,high_threshold,has_body_openpose,has_hand_openpose,has_face_openpose,preprocessor_name,controlnet_scale,control_guidance_start,control_guidance_end,disable_preprocessing],
+ queue=False,
+ )
+
+ apply_multi_control_image.click(
+ control_net_muti,
+ inputs=[model_control_net,controlnet_img,low_threshold,high_threshold,has_body_openpose,has_hand_openpose,has_face_openpose,preprocessor_name,controlnet_scale,control_guidance_start,control_guidance_end,disable_preprocessing],
+ outputs=[controlnet_img],
+ queue=False,
+ )
+ apply_edit_multi.click(
+ apply_edit_control_net,
+ inputs=[model_control_net,controlnet_img,low_threshold,high_threshold,has_body_openpose,has_hand_openpose,has_face_openpose,preprocessor_name,controlnet_scale,control_guidance_start,control_guidance_end,disable_preprocessing],
+ outputs=[model_control_net,controlnet_img,low_threshold,high_threshold,has_body_openpose,has_hand_openpose,has_face_openpose,preprocessor_name,multi_controlnet,previous_multi_control_image,next_multi_control_image,apply_edit_multi,complete_change_multi,controlnet_scale,control_guidance_start,control_guidance_end,disable_preprocessing],
+ queue=False,
+ )
+
+ complete_change_multi.click(
+ complete_edit_multi,
+ inputs=[],
+ outputs=[edit_multi_control_image,apply_multi_control_image,controlnet_img,apply_edit_multi,complete_change_multi,next_multi_control_image,previous_multi_control_image],
+ queue=False,
+ )
+
+ controlnet_img.change(
+ change_image_condition,
+ inputs=[image_condition],
+ outputs=[image_condition],
+ queue=False,
+ )
+
+ model_control_net.change(
+ change_control_net,
+ inputs=[model_control_net, low_threshold, high_threshold,has_body_openpose,has_hand_openpose,has_face_openpose],
+ outputs=[low_threshold, high_threshold,has_body_openpose,has_hand_openpose,has_face_openpose,preprocessor_name],
+ queue=False,
+ )
+
+ with gr.Tab("T2I Adapter"):
+ with gr.Group():
+ with gr.Row():
+ adapter_enabled = gr.Checkbox(label="Enable T2I Adapter", value=False)
+ disable_preprocessing_adapter = gr.Checkbox(label="Disable preprocessing", value=False)
+ multi_adapter = gr.Checkbox(label="Enable Multi T2I Adapter", value=False)
+ #sketch_enabled = gr.Checkbox(label="Sketch image", value=False)
+ model_adapter = gr.Dropdown(
+ choices=[k for k in adapter_lst],
+ label="Model Controlnet",
+ value=adapter_lst[0],
+ )
+ with gr.Row():
+ low_threshold_adapter = gr.Slider(
+ label="Canny low threshold", value=100, minimum=1, maximum=255, step=1
+ )
+ high_threshold_adapter = gr.Slider(
+ label="Canny high threshold", value=200, minimum=1, maximum=255, step=1
+ )
+ with gr.Row():
+ has_body_openpose_adapter = gr.Checkbox(label="Has body", value=True,visible= False)
+ has_hand_openpose_adapter = gr.Checkbox(label="Has hand", value=False,visible= False)
+ has_face_openpose_adapter = gr.Checkbox(label="Has face", value=False,visible= False)
+ preprocessor_adapter = gr.Radio(
+ label="Preprocessor",
+ type="value",
+ visible= False,
+ )
+ with gr.Row():
+ adapter_conditioning_scale = gr.Slider(
+ label="Conditioning scale", value=1, minimum=0, maximum=2, step=0.01
+ )
+ adapter_conditioning_factor = gr.Slider(
+ label="Conditioning factor", value=1, minimum=0, maximum=1, step=0.01
+ )
+ '''controlnet_scale = gr.Slider(
+ label="Controlnet scale", value=1, minimum=0, maximum=2, step=0.01
+ )'''
+ with gr.Row():
+ adapter_img = gr.Image(
+ image_mode="RGB",
+ source="upload",
+ label = "Image",
+ type = 'pil',
+ )
+ image_condition_adapter = gr.Image(interactive=False,image_mode="RGB",label = "Preprocessor Preview",type = 'pil')
+ adapter_image_click = gr.Button(value="Preview")
+ with gr.Row():
+ previous_multi_adapter_image = gr.Button(value="Previous adapter setting",visible = False)
+ next_multi_adapter_image = gr.Button(value="Next adapter setting",visible = False)
+ with gr.Row():
+ edit_multi_adapter_image = gr.Button(value="Edit previous setting",visible = False)
+ apply_multi_adapter_image = gr.Button(value="Apply setting",visible = False)
+ with gr.Row():
+ apply_edit_multi_adapter = gr.Button(value="Apply change",visible = False)
+ complete_change_multi_adapter = gr.Button(value="Complete change",visible = False)
+
+ adapter_image_click.click(
+ preview_image_adapter,
+ inputs=[model_adapter,low_threshold_adapter,high_threshold_adapter,has_body_openpose_adapter,has_hand_openpose_adapter,has_face_openpose_adapter,adapter_img,preprocessor_adapter,multi_adapter,disable_preprocessing_adapter],
+ outputs=[image_condition_adapter],
+ queue=False,
+ )
+ multi_adapter.change(
+ multi_adapter_function,
+ inputs=[multi_adapter],
+ outputs=[adapter_enabled,edit_multi_adapter_image,apply_multi_adapter_image,previous_multi_adapter_image,next_multi_adapter_image,apply_edit_multi_adapter,complete_change_multi_adapter],
+ queue=False,
+ )
+ edit_multi_adapter_image.click(
+ edit_multi_adapter_image_function,
+ inputs=[],
+ outputs=[previous_multi_adapter_image,next_multi_adapter_image,apply_edit_multi_adapter,complete_change_multi_adapter,edit_multi_adapter_image,apply_multi_adapter_image,model_adapter,adapter_img,low_threshold_adapter,high_threshold_adapter,has_body_openpose_adapter,has_hand_openpose_adapter,has_face_openpose_adapter,preprocessor_adapter,adapter_conditioning_scale,adapter_conditioning_factor,disable_preprocessing_adapter],
+ queue=False,
+ )
+
+ previous_multi_adapter_image.click(
+ previous_view_adapter,
+ inputs=[],
+ outputs=[model_adapter,adapter_img,low_threshold_adapter,high_threshold_adapter,has_body_openpose_adapter,has_hand_openpose_adapter,has_face_openpose_adapter,preprocessor_adapter,adapter_conditioning_scale,adapter_conditioning_factor,disable_preprocessing_adapter],
+ queue=False,
+ )
+
+ next_multi_adapter_image.click(
+ next_view_adapter,
+ inputs=[],
+ outputs=[model_adapter,adapter_img,low_threshold_adapter,high_threshold_adapter,has_body_openpose_adapter,has_hand_openpose_adapter,has_face_openpose_adapter,preprocessor_adapter,adapter_conditioning_scale,adapter_conditioning_factor,disable_preprocessing_adapter],
+ queue=False,
+ )
+
+ apply_multi_adapter_image.click(
+ adapter_muti,
+ inputs=[model_adapter,adapter_img,low_threshold_adapter,high_threshold_adapter,has_body_openpose_adapter,has_hand_openpose_adapter,has_face_openpose_adapter,preprocessor_adapter,adapter_conditioning_scale,adapter_conditioning_factor,disable_preprocessing_adapter],
+ outputs=[adapter_img],
+ queue=False,
+ )
+ apply_edit_multi_adapter.click(
+ apply_edit_adapter,
+ inputs=[model_adapter,adapter_img,low_threshold_adapter,high_threshold_adapter,has_body_openpose_adapter,has_hand_openpose_adapter,has_face_openpose_adapter,preprocessor_adapter,adapter_conditioning_scale,adapter_conditioning_factor,disable_preprocessing_adapter],
+ outputs=[model_adapter,adapter_img,low_threshold_adapter,high_threshold_adapter,has_body_openpose_adapter,has_hand_openpose_adapter,has_face_openpose_adapter,preprocessor_adapter,multi_adapter,previous_multi_adapter_image,next_multi_adapter_image,apply_edit_multi_adapter,complete_change_multi_adapter,adapter_conditioning_scale,adapter_conditioning_factor,disable_preprocessing_adapter],
+ queue=False,
+ )
+
+ complete_change_multi_adapter.click(
+ complete_edit_multi_adapter,
+ inputs=[],
+ outputs=[edit_multi_adapter_image,apply_multi_adapter_image,adapter_img,apply_edit_multi_adapter,complete_change_multi_adapter,next_multi_adapter_image,previous_multi_adapter_image],
+ queue=False,
+ )
+
+ adapter_img.change(
+ change_image_condition_adapter,
+ inputs=[image_condition_adapter],
+ outputs=[image_condition_adapter],
+ queue=False,
+ )
+
+ model_adapter.change(
+ change_control_net,
+ inputs=[model_adapter, low_threshold_adapter, high_threshold_adapter,has_body_openpose_adapter,has_hand_openpose_adapter,has_face_openpose_adapter],
+ outputs=[low_threshold_adapter, high_threshold_adapter,has_body_openpose_adapter,has_hand_openpose_adapter,has_face_openpose_adapter,preprocessor_adapter],
+ queue=False,
+ )
+
+ diffuser_pipeline.change(
+ mode_diffuser_pipeline_sampler,
+ inputs=[diffuser_pipeline, sampler,sampler_hires],
+ outputs=[diffuser_pipeline,sampler,sampler_hires],
+ queue=False,
+ )
+ hr_enabled.change(
+ lambda g, x, w, h: gr.Checkbox.update(
+ label=res_cap(g, w, h, x)
+ ),
+ inputs=[hr_enabled, hr_scale, width, height],
+ outputs=hr_enabled,
+ queue=False,
+ )
+
+ adapter_enabled.change(
+ mode_diffuser_pipeline,
+ inputs=[adapter_enabled],
+ outputs=[adapter_enabled,multi_adapter],
+ queue=False,
+ )
+
+ controlnet_enabled.change(
+ mode_diffuser_pipeline,
+ inputs=[controlnet_enabled],
+ outputs=[controlnet_enabled,multi_controlnet],
+ queue=False,
+ )
+
+ '''controlnet_enabled.change(
+ mode_diffuser_pipeline1,
+ inputs=[diffuser_pipeline, controlnet_enabled],
+ outputs=[controlnet_enabled],
+ queue=False,
+ )'''
+
+ with gr.Tab("Vae Setting"):
+ with gr.Group():
+ vae_used = gr.Dropdown(
+ choices=[k for k in vae_lst],
+ label="Chosing Vae",
+ value=vae_lst[0],
+ )
+ with gr.Row():
+ with gr.Column():
+ Insert_vae = gr.Textbox(
+ label="Insert Vae's link",
+ show_label=True,
+ placeholder="Enter a Vae's link.",
+ )
+ single_load_file = gr.Checkbox(label="Is Single File", value=False)
+ insert_vae = gr.Button(value="Insert")
+
+ insert_vae.click(
+ add_vae,
+ inputs=[Insert_vae,single_load_file],
+ outputs=[vae_used, Insert_vae,single_load_file],
+ queue=False,
+ )
+
+
+ with gr.Tab("Embeddings/Loras"):
+
+ ti_state = gr.State(dict())
+ lora_group = gr.State(dict())
+
+ with gr.Group():
+ with gr.Row():
+ with gr.Column():
+                    ti_vals = gr.CheckboxGroup(label="Choosing embeddings")
+ embs_choose = gr.Text(label="Embeddings chosen")
+ with gr.Row():
+ choose_em = gr.Button(value="Select Embeddings")
+ delete_em = gr.Button(value="Delete Embeddings")
+ choose_em.click(choose_tistate,inputs=[ti_vals],outputs=[ti_state,embs_choose,ti_vals],queue=False,)
+ delete_em.click(delete_embed,inputs=[ti_vals,ti_state,embs_choose],outputs=[ti_vals,ti_state,embs_choose],queue=False,)
+
+ with gr.Row():
+ with gr.Column():
+                    lora_list = gr.CheckboxGroup(label="Choosing LoRAs")
+                    lora_choose = gr.Text(label="LoRAs chosen")
+ with gr.Row():
+ choose_lora = gr.Button(value="Select Loras")
+ delete_lora = gr.Button(value="Delete Loras")
+ lora_vals = gr.Dropdown(choices=[k for k in lora_lst],label="Loras Scale",value=lora_lst[0],)
+ choose_lora.click(choose_lora_function,inputs=[lora_list],outputs=[lora_group,lora_choose,lora_list,lora_vals],queue=False,)
+ delete_lora.click(delete_lora_function,inputs=[lora_list,lora_group,lora_choose],outputs=[lora_list,lora_group,lora_choose,lora_vals],queue=False,)
+ # delete_lora_but = gr.Button(value="Delete Lora")
+ link_download = gr.Textbox(
+ label="Insert lora's/embedding's link",
+ show_label=True,
+ placeholder="Enter a link download.",
+ )
+ #delete_lora_but.click(lora_delete,inputs=[lora_vals],outputs=[lora_vals],queue=False,)
+ with gr.Row():
+
+ uploads = gr.Files(label="Upload new embeddings/lora")
+
+ with gr.Column():
+ lora_scale = gr.Slider(
+ label="Lora scale",
+ minimum=0,
+ maximum=2,
+ step=0.01,
+ value=1.0,
+ )
+ btn = gr.Button(value="Upload/Download")
+ btn_del = gr.Button(value="Reset")
+ lora_vals.change(
+ change_lora_value,
+ inputs=[lora_vals],
+ outputs=[lora_scale],
+ queue=False,
+ )
+
+ lora_scale.change(
+ update_lora_value,
+ inputs=[lora_scale,lora_vals],
+ outputs=[],
+ queue=False,
+ )
+
+ btn.click(
+ add_net,
+ inputs=[uploads,link_download],
+ outputs=[ti_vals,lora_list, lora_vals, uploads,link_download],
+ queue=False,
+ )
+ btn_del.click(
+ clean_states,
+ inputs=[ti_state,lora_group],
+ outputs=[ti_state,lora_group, ti_vals,lora_list, lora_vals, uploads,embs_choose,lora_choose,link_download],
+ queue=False,
+ )
+
+ # error_output = gr.Markdown()
+
+    gr.HTML(
+        f"""
+            <div class="finetuned-diffusion-div">
+              <div>
+                <h1>Define the object's region.</h1>
+              </div>
+              <p>Using the following formula as default: w = scale * token_weight_matrix * sigma * std(qk).</p>
+            </div>
+            """
+    )
+
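+    # The default formula above is computed by a caller-supplied weight function over
+    # the drawn region map and added to the cross-attention logits before softmax
+    # (see scaled_dot_product_attention_regionstate in modules/attention_modify.py).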
+ with gr.Row():
+
+ with gr.Column(scale=55):
+ formula_button = gr.Dropdown(
+ choices=[k[0] for k in formula],
+ label="Formual",
+ value=formula[0][0],
+ )
+
+ rendered = gr.Image(
+ invert_colors=True,
+ source="canvas",
+ interactive=False,
+ image_mode="RGBA",
+ )
+
+ with gr.Column(scale=45):
+
+ with gr.Group():
+ with gr.Row():
+ with gr.Column(scale=70):
+ # g_strength = gr.Slider(
+ # label="Compliance rate",
+ # minimum=0,
+ # maximum=2,
+ # step=0.01,
+ # value=0.4,
+ # )
+
+ text = gr.Textbox(
+ lines=2,
+ interactive=True,
+ label="Token to Draw: (Separate by comma)",
+ )
+
+ radio = gr.Radio([], label="Tokens",visible = False)
+
+ sk_update = gr.Button(value="Update").style(
+ rounded=(False, True, True, False)
+ )
+
+ # g_strength.change(lambda b: gr.update(f"Scaled additional attn: $w = {b} \log (1 + \sigma) \std (Q^T K)$."), inputs=g_strength, outputs=[g_output])
+
+ with gr.Tab("SketchPad"):
+
+ sp = gr.Image(
+ width = 512,
+ height = 512,
+ image_mode="L",
+ tool="sketch",
+ source="canvas",
+ interactive=False
+ )
+
+ '''mask_outsides = gr.Checkbox(
+ label="Mask other areas",
+ value=False
+ )'''
+ with gr.Row():
+ mask_outsides = gr.Slider(
+ label="Decrease unmarked region weight",
+ minimum=0,
+ maximum=3,
+ step=0.01,
+ value=0,
+ )
+
+ strength = gr.Slider(
+ label="Token-Region strength",
+ minimum=0,
+ maximum=3,
+ step=0.01,
+ value=0.5,
+ )
+
+ width.change(
+ apply_size_sketch,
+ inputs=[width, height,global_stats,inf_image,inpaiting_mode,inf_image_inpaiting],
+ outputs=[global_stats, rendered,sp],
+ queue=False,
+ )
+
+ height.change(
+ apply_size_sketch,
+ inputs=[width, height,global_stats,inf_image,inpaiting_mode,inf_image_inpaiting],
+ outputs=[global_stats, rendered,sp],
+ queue=False,
+ )
+
+ inf_image.change(
+ apply_size_sketch,
+ inputs=[width, height,global_stats,inf_image,inpaiting_mode,inf_image_inpaiting],
+ outputs=[global_stats, rendered,sp],
+ queue=False,
+ )
+
+
+ sk_update.click(
+ detect_text,
+ inputs=[text, global_stats, width, height,formula_button,inf_image,inpaiting_mode,inf_image_inpaiting],
+ outputs=[global_stats, sp, radio, rendered,formula_button],
+ queue=False,
+ )
+ radio.change(
+ switch_canvas,
+ inputs=[radio, global_stats, width, height,inf_image,inpaiting_mode,inf_image_inpaiting],
+ outputs=[sp, strength, mask_outsides, rendered],
+ queue=False,
+ )
+ sp.edit(
+ apply_canvas,
+ inputs=[radio, sp, global_stats, width, height,inf_image,inpaiting_mode,inf_image_inpaiting],
+ outputs=[global_stats, rendered],
+ queue=False,
+ )
+ strength.change(
+ apply_weight,
+ inputs=[radio, strength, global_stats],
+ outputs=[global_stats],
+ queue=False,
+ )
+ mask_outsides.change(
+ apply_option,
+ inputs=[radio, mask_outsides, global_stats],
+ outputs=[global_stats],
+ queue=False,
+ )
+
+ with gr.Tab("UploadFile"):
+
+ sp2 = gr.Image(
+ image_mode="RGB",
+ source="upload",
+ )
+
+ sp3 = gr.Image(
+ image_mode="L",
+ source="canvas",
+ visible = False,
+ interactive = False,
+ )
+ with gr.Row():
+ previous_page = gr.Button(value="Previous",visible = False,)
+ next_page = gr.Button(value="Next",visible = False,)
+
+ '''mask_outsides2 = gr.Checkbox(
+ label="Mask other areas",
+ value=False,
+ )'''
+
+ with gr.Row():
+ mask_outsides2 = gr.Slider(
+ label="Decrease unmarked region weight",
+ minimum=0,
+ maximum=3,
+ step=0.01,
+ value=0,
+ )
+
+ strength2 = gr.Slider(
+ label="Token-Region strength",
+ minimum=0,
+ maximum=3,
+ step=0.01,
+ value=0.5,
+ )
+ '''sk_update.click(
+ detect_text1,
+ inputs=[text, global_stats, width, height,formula_button,inf_image],
+ outputs=[global_stats, radio, rendered,formula_button],
+ queue=False,
+ )'''
+
+
+ with gr.Row():
+ apply_style = gr.Button(value="Apply")
+ apply_clustering_style = gr.Button(value="Extracting color regions")
+
+ with gr.Row():
+ add_style = gr.Button(value="Apply",visible = False)
+ complete_clustering = gr.Button(value="Complete",visible = False)
+
+ apply_style.click(
+ apply_image,
+ inputs=[sp2, radio, width, height, strength2, mask_outsides2, global_stats,inf_image,inpaiting_mode,inf_image_inpaiting],
+ outputs=[global_stats, rendered],
+ queue=False,
+ )
+ apply_clustering_style.click(
+ apply_base_on_color,
+ inputs=[sp2,global_stats,width, height,inf_image,inpaiting_mode,inf_image_inpaiting],
+ outputs=[rendered,apply_style,apply_clustering_style,previous_page,next_page,complete_clustering,sp2,sp3,add_style,global_stats],
+ queue=False,
+ )
+ previous_page.click(
+ previous_image_page,
+ inputs=[sp3],
+ outputs=[sp3],
+ queue=False,
+ )
+ next_page.click(
+ next_image_page,
+ inputs=[sp3],
+ outputs=[sp3],
+ queue=False,
+ )
+ add_style.click(
+ apply_image_clustering,
+ inputs=[sp3, radio, width, height, strength2, mask_outsides2, global_stats,inf_image,inpaiting_mode,inf_image_inpaiting],
+ outputs=[global_stats,rendered],
+ queue=False,
+ )
+ complete_clustering.click(
+ completing_clustering,
+ inputs=[sp2],
+ outputs=[apply_style,apply_clustering_style,previous_page,next_page,complete_clustering,sp2,sp3,add_style],
+ queue=False,
+ )
+
+ '''width.change(
+ apply_new_res,
+ inputs=[width, height, global_stats,inf_image,rendered],
+ outputs=[global_stats, rendered],
+ queue=False,
+ )
+ height.change(
+ apply_new_res,
+ inputs=[width, height, global_stats,inf_image,rendered],
+ outputs=[global_stats, rendered],
+ queue=False,
+ )'''
+
+ # color_stats = gr.State(value={})
+ # text.change(detect_color, inputs=[sp, text, color_stats], outputs=[color_stats, rendered])
+ # sp.change(detect_color, inputs=[sp, text, color_stats], outputs=[color_stats, rendered])
+
+ inputs = [
+ prompt,
+ guidance,
+ steps,
+ width,
+ height,
+ clip_skip,
+ seed,
+ neg_prompt,
+ global_stats,
+ #g_strength,
+ inf_image,
+ inf_strength,
+ hr_enabled,
+ hr_method,
+ hr_scale,
+ hr_denoise,
+ sampler,
+ ti_state,
+ model,
+ lora_group,
+ #lora_vals,
+ #lora_scale,
+ formula_button,
+ controlnet_enabled,
+ model_control_net,
+ low_threshold,
+ high_threshold,
+ has_body_openpose,
+ has_hand_openpose,
+ has_face_openpose,
+ controlnet_img,
+ image_condition,
+ controlnet_scale,
+ preprocessor_name,
+ diffuser_pipeline,
+ sampler_hires,
+ latent_processing,
+ control_guidance_start,
+ control_guidance_end,
+ multi_controlnet,
+ disable_preprocessing,
+ region_condition,
+ hr_process_enabled,
+ ip_adapter,
+ model_ip_adapter,
+ inf_adapt_image,
+ inf_adapt_image_strength,
+ hr_region_condition,
+ adapter_enabled,
+ model_adapter,
+ low_threshold_adapter,
+ high_threshold_adapter,
+ has_body_openpose_adapter,
+ has_hand_openpose_adapter,
+ has_face_openpose_adapter,
+ adapter_img,
+ image_condition_adapter,
+ preprocessor_adapter,
+ adapter_conditioning_scale,
+ adapter_conditioning_factor,
+ multi_adapter,
+ disable_preprocessing_adapter,
+ ip_adapter_multi,
+ guidance_rescale,
+ inf_control_adapt_image,
+ long_encode,
+ inpaiting_mode,
+ invert_mask_mode,
+ mask_upload,
+ inf_image_inpaiting,
+ invert_ip_adapter_mask_mode,
+ vae_used,
+ ]
+ outputs = [image_out,gallery]
+ prompt.submit(inference, inputs=inputs, outputs=outputs)
+ generate.click(inference, inputs=inputs, outputs=outputs)
+
+print(f"Space built in {time.time() - start_time:.2f} seconds")
+demo.queue().launch(server_name="0.0.0.0", server_port=7860)
diff --git a/modules/attention_modify.py b/modules/attention_modify.py
new file mode 100644
index 0000000000000000000000000000000000000000..77c0a82e95dc2386063f95321b83f0e37c565379
--- /dev/null
+++ b/modules/attention_modify.py
@@ -0,0 +1,1044 @@
+from diffusers.utils import (
+ USE_PEFT_BACKEND,
+ _get_model_file,
+ delete_adapter_layers,
+ is_accelerate_available,
+ logging,
+ set_adapter_layers,
+ set_weights_and_activate_adapters,
+)
+
+import torch
+import torch.nn.functional as F
+from torch.autograd.function import Function
+import torch.nn as nn
+from torch import einsum
+import os
+from collections import defaultdict
+from contextlib import nullcontext
+from typing import Callable, Dict, List, Optional, Union
+from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
+from diffusers.models.embeddings import ImageProjection
+from diffusers.models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta
+import math
+from einops import rearrange
+from diffusers.image_processor import IPAdapterMaskProcessor
+
+xformers_available = False
+try:
+ import xformers
+
+ xformers_available = True
+except ImportError:
+ pass
+
+EPSILON = 1e-6
+exists = lambda val: val is not None
+default = lambda val, d: val if exists(val) else d
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+def get_attention_scores(attn, query, key, attention_mask=None):
+
+ if attn.upcast_attention:
+ query = query.float()
+ key = key.float()
+ if attention_mask is None:
+ baddbmm_input = torch.empty(
+ query.shape[0],
+ query.shape[1],
+ key.shape[1],
+ dtype=query.dtype,
+ device=query.device,
+ )
+ beta = 0
+ else:
+ baddbmm_input = attention_mask
+ beta = 1
+
+ attention_scores = torch.baddbmm(
+ baddbmm_input,
+ query,
+ key.transpose(-1, -2),
+ beta=beta,
+ alpha=attn.scale,
+ )
+
+ del baddbmm_input
+
+ if attn.upcast_softmax:
+ attention_scores = attention_scores.float()
+
+ return attention_scores.to(query.dtype)
+
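+# Hedged usage note (assumes `attn` is a diffusers Attention module exposing
+# `scale`, `upcast_attention` and `upcast_softmax`, as read above): the helper
+# returns *pre-softmax* logits, so a caller is expected to finish the attention
+# computation itself, e.g.
+#
+#     scores = get_attention_scores(attn, query, key, attention_mask)
+#     probs = scores.softmax(dim=-1)      # attention probabilities
+#     hidden = torch.bmm(probs, value)    # weighted sum over the value vectors
+#
+# which mirrors softmax(Q @ K^T * scale + mask) @ V in the usual formulation.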
+
+# Region-aware scaled dot-product attention: region-conditioned weights are added to the logits before softmax.
+def scaled_dot_product_attention_regionstate(query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None,weight_func =None, region_state = None, sigma = None) -> torch.Tensor:
+ # Efficient implementation equivalent to the following:
+ L, S = query.size(-2), key.size(-2)
+ scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale
+ attn_bias = torch.zeros(L, S, dtype=query.dtype,device = query.device)
+ if is_causal:
+ assert attn_mask is None
+ temp_mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0)
+ attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf"))
+ attn_bias.to(query.dtype)
+
+ if attn_mask is not None:
+ if attn_mask.dtype == torch.bool:
+            attn_bias.masked_fill_(attn_mask.logical_not(), float("-inf"))
+ else:
+ attn_bias += attn_mask
+ attn_weight = query @ key.transpose(-2, -1) * scale_factor
+ attn_weight += attn_bias
+
+ batch_size, num_heads, sequence_length, embed_dim = attn_weight.shape
+ attn_weight = attn_weight.reshape((-1,sequence_length,embed_dim))
+ cross_attention_weight = weight_func(region_state, sigma, attn_weight)
+ repeat_time = attn_weight.shape[0]//cross_attention_weight.shape[0]
+ attn_weight += torch.repeat_interleave(
+ cross_attention_weight, repeats=repeat_time, dim=0
+ )
+ attn_weight = attn_weight.reshape((-1,num_heads,sequence_length,embed_dim))
+ attn_weight = torch.softmax(attn_weight, dim=-1)
+ attn_weight = torch.dropout(attn_weight, dropout_p, train=True)
+ return attn_weight @ value
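+
+# Hedged sketch of a `weight_func` compatible with the call above. It mirrors the
+# default formula advertised in the UI, w = scale * token_weight_matrix * sigma * std(qk);
+# the real function is supplied by the caller and may differ in detail.
+#
+#     def example_weight_func(region_state, sigma, qk):
+#         # region_state: per-token region map, broadcastable against qk
+#         # sigma: current noise level; qk: raw attention logits
+#         scale = 0.5  # illustrative strength only, not a value taken from this repo
+#         return scale * region_state * sigma * qk.std()
+#
+# The returned tensor is repeat-interleaved across heads and added onto the logits
+# before the softmax above.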
+
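+# FlashAttention-style memory-efficient attention: queries and keys/values are
+# processed in buckets so the full (L x S) score matrix is never materialised,
+# while running row maxima and row sums implement an online softmax. The
+# docstrings below refer to Algorithms 2 and 4 of the FlashAttention paper.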
+class FlashAttentionFunction(Function):
+ @staticmethod
+ @torch.no_grad()
+ def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):
+ """Algorithm 2 in the paper"""
+
+ device = q.device
+ max_neg_value = -torch.finfo(q.dtype).max
+ qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
+
+ o = torch.zeros_like(q)
+ all_row_sums = torch.zeros((*q.shape[:-1], 1), device=device)
+ all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, device=device)
+
+ scale = q.shape[-1] ** -0.5
+
+ if not exists(mask):
+ mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)
+ else:
+ mask = rearrange(mask, "b n -> b 1 1 n")
+ mask = mask.split(q_bucket_size, dim=-1)
+
+ row_splits = zip(
+ q.split(q_bucket_size, dim=-2),
+ o.split(q_bucket_size, dim=-2),
+ mask,
+ all_row_sums.split(q_bucket_size, dim=-2),
+ all_row_maxes.split(q_bucket_size, dim=-2),
+ )
+
+ for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):
+ q_start_index = ind * q_bucket_size - qk_len_diff
+
+ col_splits = zip(
+ k.split(k_bucket_size, dim=-2),
+ v.split(k_bucket_size, dim=-2),
+ )
+
+ for k_ind, (kc, vc) in enumerate(col_splits):
+ k_start_index = k_ind * k_bucket_size
+
+ attn_weights = einsum("... i d, ... j d -> ... i j", qc, kc) * scale
+
+ if exists(row_mask):
+ attn_weights.masked_fill_(~row_mask, max_neg_value)
+
+ if causal and q_start_index < (k_start_index + k_bucket_size - 1):
+ causal_mask = torch.ones(
+ (qc.shape[-2], kc.shape[-2]), dtype=torch.bool, device=device
+ ).triu(q_start_index - k_start_index + 1)
+ attn_weights.masked_fill_(causal_mask, max_neg_value)
+
+ block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)
+ attn_weights -= block_row_maxes
+ exp_weights = torch.exp(attn_weights)
+
+ if exists(row_mask):
+ exp_weights.masked_fill_(~row_mask, 0.0)
+
+ block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(
+ min=EPSILON
+ )
+
+ new_row_maxes = torch.maximum(block_row_maxes, row_maxes)
+
+ exp_values = einsum("... i j, ... j d -> ... i d", exp_weights, vc)
+
+ exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)
+ exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)
+
+ new_row_sums = (
+ exp_row_max_diff * row_sums
+ + exp_block_row_max_diff * block_row_sums
+ )
+
+ oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_(
+ (exp_block_row_max_diff / new_row_sums) * exp_values
+ )
+
+ row_maxes.copy_(new_row_maxes)
+ row_sums.copy_(new_row_sums)
+
+ lse = all_row_sums.log() + all_row_maxes
+
+ ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)
+ ctx.save_for_backward(q, k, v, o, lse)
+
+ return o
+
+ @staticmethod
+ @torch.no_grad()
+ def backward(ctx, do):
+ """Algorithm 4 in the paper"""
+
+ causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args
+ q, k, v, o, lse = ctx.saved_tensors
+
+ device = q.device
+
+ max_neg_value = -torch.finfo(q.dtype).max
+ qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
+
+ dq = torch.zeros_like(q)
+ dk = torch.zeros_like(k)
+ dv = torch.zeros_like(v)
+
+ row_splits = zip(
+ q.split(q_bucket_size, dim=-2),
+ o.split(q_bucket_size, dim=-2),
+ do.split(q_bucket_size, dim=-2),
+ mask,
+ lse.split(q_bucket_size, dim=-2),
+ dq.split(q_bucket_size, dim=-2),
+ )
+
+ for ind, (qc, oc, doc, row_mask, lsec, dqc) in enumerate(row_splits):
+ q_start_index = ind * q_bucket_size - qk_len_diff
+
+ col_splits = zip(
+ k.split(k_bucket_size, dim=-2),
+ v.split(k_bucket_size, dim=-2),
+ dk.split(k_bucket_size, dim=-2),
+ dv.split(k_bucket_size, dim=-2),
+ )
+
+ for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):
+ k_start_index = k_ind * k_bucket_size
+
+ attn_weights = einsum("... i d, ... j d -> ... i j", qc, kc) * scale
+
+ if causal and q_start_index < (k_start_index + k_bucket_size - 1):
+ causal_mask = torch.ones(
+ (qc.shape[-2], kc.shape[-2]), dtype=torch.bool, device=device
+ ).triu(q_start_index - k_start_index + 1)
+ attn_weights.masked_fill_(causal_mask, max_neg_value)
+
+ p = torch.exp(attn_weights - lsec)
+
+ if exists(row_mask):
+ p.masked_fill_(~row_mask, 0.0)
+
+ dv_chunk = einsum("... i j, ... i d -> ... j d", p, doc)
+ dp = einsum("... i d, ... j d -> ... i j", doc, vc)
+
+ D = (doc * oc).sum(dim=-1, keepdims=True)
+ ds = p * scale * (dp - D)
+
+ dq_chunk = einsum("... i j, ... j d -> ... i d", ds, kc)
+ dk_chunk = einsum("... i j, ... i d -> ... j d", ds, qc)
+
+ dqc.add_(dq_chunk)
+ dkc.add_(dk_chunk)
+ dvc.add_(dv_chunk)
+
+ return dq, dk, dv, None, None, None, None
+
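+# Default attention processor with optional region-prompt weighting: for cross-attention with a
+# region_state dict, raw scores are computed via baddbmm so per-region weights can be added before the
+# softmax; otherwise xformers (if available) or the standard diffusers score computation is used.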
+class AttnProcessor(nn.Module):
+ def __call__(
+ self,
+ attn,
+ hidden_states,
+ encoder_hidden_states=None,
+ attention_mask=None,
+ temb: Optional[torch.Tensor] = None,
+ region_prompt = None,
+ ip_adapter_masks = None,
+ *args,
+ **kwargs,
+ ):
+ if len(args) > 0 or kwargs.get("scale", None) is not None:
+ deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
+ deprecate("scale", "1.0.0", deprecation_message)
+
+ residual = hidden_states
+
+
+ #_,img_sequence_length,_ = hidden_states.shape
+ img_sequence_length = hidden_states.shape[1]
+
+ if attn.spatial_norm is not None:
+ hidden_states = attn.spatial_norm(hidden_states, temb)
+
+ input_ndim = hidden_states.ndim
+
+ if input_ndim == 4:
+ batch_size, channel, height, width = hidden_states.shape
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+
+ is_xattn = False
+ if encoder_hidden_states is not None and region_prompt is not None:
+ is_xattn = True
+ region_state = region_prompt["region_state"]
+ weight_func = region_prompt["weight_func"]
+ sigma = region_prompt["sigma"]
+
+ batch_size, sequence_length, _ = (
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+ )
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length,batch_size)
+
+ if attn.group_norm is not None:
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ query = attn.to_q(hidden_states)
+
+ if encoder_hidden_states is None:
+ encoder_hidden_states = hidden_states
+ elif attn.norm_cross:
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+
+ key = attn.to_k(encoder_hidden_states)
+ value = attn.to_v(encoder_hidden_states)
+
+ query = attn.head_to_batch_dim(query)
+ key = attn.head_to_batch_dim(key)
+ value = attn.head_to_batch_dim(value)
+
+ if is_xattn and isinstance(region_state, dict):
+ # use torch.baddbmm method (slow)
+ attention_scores = get_attention_scores(attn, query, key, attention_mask)
+ cross_attention_weight = weight_func(region_state[img_sequence_length].to(query.device), sigma, attention_scores)
+ attention_scores += torch.repeat_interleave(
+ cross_attention_weight, repeats=attention_scores.shape[0] // cross_attention_weight.shape[0], dim=0
+ )
+
+ # calc probs
+ attention_probs = attention_scores.softmax(dim=-1)
+ attention_probs = attention_probs.to(query.dtype)
+ hidden_states = torch.bmm(attention_probs, value)
+
+ elif xformers_available:
+ hidden_states = xformers.ops.memory_efficient_attention(
+ query.contiguous(),
+ key.contiguous(),
+ value.contiguous(),
+ attn_bias=attention_mask,
+ )
+ hidden_states = hidden_states.to(query.dtype)
+
+ else:
+ '''q_bucket_size = 512
+ k_bucket_size = 1024
+
+ # use flash-attention
+ hidden_states = FlashAttentionFunction.apply(
+ query.contiguous(),
+ key.contiguous(),
+ value.contiguous(),
+ attention_mask,
+ False,
+ q_bucket_size,
+ k_bucket_size,
+ )'''
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
+ hidden_states = torch.bmm(attention_probs, value)
+ hidden_states = hidden_states.to(query.dtype)
+
+ hidden_states = attn.batch_to_head_dim(hidden_states)
+
+ # linear proj
+ hidden_states = attn.to_out[0](hidden_states)
+
+ # dropout
+ hidden_states = attn.to_out[1](hidden_states)
+
+ if input_ndim == 4:
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+ if attn.residual_connection:
+ hidden_states = hidden_states + residual
+
+ hidden_states = hidden_states / attn.rescale_output_factor
+
+ return hidden_states
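+
+
+# IP-Adapter variant of the processor above: adds per-adapter key/value projections for the image-prompt
+# tokens and sums their attention output into the text cross-attention result, optionally restricted to
+# spatial regions via `ip_adapter_masks`.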
+class IPAdapterAttnProcessor(nn.Module):
+ r"""
+ Attention processor for Multiple IP-Adapters.
+
+ Args:
+ hidden_size (`int`):
+ The hidden size of the attention layer.
+ cross_attention_dim (`int`):
+ The number of channels in the `encoder_hidden_states`.
+ num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`):
+ The context length of the image features.
+ scale (`float` or List[`float`], defaults to 1.0):
+ the weight scale of image prompt.
+ """
+
+ def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0):
+ super().__init__()
+
+ self.hidden_size = hidden_size
+ self.cross_attention_dim = cross_attention_dim
+
+ if not isinstance(num_tokens, (tuple, list)):
+ num_tokens = [num_tokens]
+ self.num_tokens = num_tokens
+
+ if not isinstance(scale, list):
+ scale = [scale] * len(num_tokens)
+ if len(scale) != len(num_tokens):
+            raise ValueError("`scale` should be a list of floats with the same length as `num_tokens`.")
+ self.scale = scale
+
+ self.to_k_ip = nn.ModuleList(
+ [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))]
+ )
+ self.to_v_ip = nn.ModuleList(
+ [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))]
+ )
+
+ def __call__(
+ self,
+ attn,
+ hidden_states,
+ encoder_hidden_states=None,
+ attention_mask=None,
+ temb=None,
+ scale=1.0,
+ region_prompt = None,
+ ip_adapter_masks = None,
+ ):
+
+ #_,img_sequence_length,_ = hidden_states.shape
+        img_sequence_length = hidden_states.shape[1]
+ residual = hidden_states
+
+ is_xattn = False
+ if encoder_hidden_states is not None and region_prompt is not None:
+ is_xattn = True
+ region_state = region_prompt["region_state"]
+ weight_func = region_prompt["weight_func"]
+ sigma = region_prompt["sigma"]
+
+ # separate ip_hidden_states from encoder_hidden_states
+ if encoder_hidden_states is not None:
+ if isinstance(encoder_hidden_states, tuple):
+ encoder_hidden_states, ip_hidden_states = encoder_hidden_states
+ else:
+ deprecation_message = (
+ "You have passed a tensor as `encoder_hidden_states`. This is deprecated and will be removed in a future release."
+ " Please make sure to update your script to pass `encoder_hidden_states` as a tuple to suppress this warning."
+ )
+ deprecate("encoder_hidden_states not a tuple", "1.0.0", deprecation_message, standard_warn=False)
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0]
+ encoder_hidden_states, ip_hidden_states = (
+ encoder_hidden_states[:, :end_pos, :],
+ [encoder_hidden_states[:, end_pos:, :]],
+ )
+
+
+ if attn.spatial_norm is not None:
+ hidden_states = attn.spatial_norm(hidden_states, temb)
+
+ input_ndim = hidden_states.ndim
+
+ if input_ndim == 4:
+ batch_size, channel, height, width = hidden_states.shape
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+
+
+ batch_size, sequence_length, _ = (
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+ )
+
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+
+ if attn.group_norm is not None:
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ query = attn.to_q(hidden_states)
+
+ if encoder_hidden_states is None:
+ encoder_hidden_states = hidden_states
+ elif attn.norm_cross:
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+ key = attn.to_k(encoder_hidden_states)
+ value = attn.to_v(encoder_hidden_states)
+
+
+ query = attn.head_to_batch_dim(query)
+ key = attn.head_to_batch_dim(key)
+ value = attn.head_to_batch_dim(value)
+
+ if is_xattn and isinstance(region_state, dict):
+ # use torch.baddbmm method (slow)
+ attention_scores = get_attention_scores(attn, query, key, attention_mask)
+ cross_attention_weight = weight_func(region_state[img_sequence_length].to(query.device), sigma, attention_scores)
+ attention_scores += torch.repeat_interleave(
+ cross_attention_weight, repeats=attention_scores.shape[0] // cross_attention_weight.shape[0], dim=0
+ )
+
+ # calc probs
+ attention_probs = attention_scores.softmax(dim=-1)
+ attention_probs = attention_probs.to(query.dtype)
+ hidden_states = torch.bmm(attention_probs, value)
+
+ elif xformers_available:
+ hidden_states = xformers.ops.memory_efficient_attention(
+ query.contiguous(),
+ key.contiguous(),
+ value.contiguous(),
+ attn_bias=attention_mask,
+ )
+ hidden_states = hidden_states.to(query.dtype)
+
+ else:
+ '''q_bucket_size = 512
+ k_bucket_size = 1024
+
+ # use flash-attention
+ hidden_states = FlashAttentionFunction.apply(
+ query.contiguous(),
+ key.contiguous(),
+ value.contiguous(),
+ attention_mask,
+ False,
+ q_bucket_size,
+ k_bucket_size,
+ )'''
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
+ hidden_states = torch.bmm(attention_probs, value)
+ hidden_states = hidden_states.to(query.dtype)
+
+ hidden_states = attn.batch_to_head_dim(hidden_states)
+
+
+ '''# for ip-adapter
+ for current_ip_hidden_states, scale, to_k_ip, to_v_ip in zip(
+ ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip
+ ):
+ ip_key = to_k_ip(current_ip_hidden_states)
+ ip_value = to_v_ip(current_ip_hidden_states)
+
+ ip_key = attn.head_to_batch_dim(ip_key)
+ ip_value = attn.head_to_batch_dim(ip_value)
+
+ if xformers_available:
+ current_ip_hidden_states = xformers.ops.memory_efficient_attention(
+ query.contiguous(),
+ ip_key.contiguous(),
+ ip_value.contiguous(),
+ attn_bias=None,
+ )
+ current_ip_hidden_states = current_ip_hidden_states.to(query.dtype)
+ else:
+ ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
+ current_ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
+ current_ip_hidden_states = current_ip_hidden_states.to(query.dtype)
+
+ current_ip_hidden_states = attn.batch_to_head_dim(current_ip_hidden_states)
+ hidden_states = hidden_states + scale * current_ip_hidden_states'''
+
+        # Restrict each IP-Adapter's contribution to its spatial region when `ip_adapter_masks` is provided.
+ if ip_adapter_masks is not None:
+ if not isinstance(ip_adapter_masks, List):
+ # for backward compatibility, we accept `ip_adapter_mask` as a tensor of shape [num_ip_adapter, 1, height, width]
+ ip_adapter_masks = list(ip_adapter_masks.unsqueeze(1))
+ if not (len(ip_adapter_masks) == len(self.scale) == len(ip_hidden_states)):
+ raise ValueError(
+ f"Length of ip_adapter_masks array ({len(ip_adapter_masks)}) must match "
+ f"length of self.scale array ({len(self.scale)}) and number of ip_hidden_states "
+ f"({len(ip_hidden_states)})"
+ )
+ else:
+ for index, (mask, scale, ip_state) in enumerate(zip(ip_adapter_masks, self.scale, ip_hidden_states)):
+ if not isinstance(mask, torch.Tensor) or mask.ndim != 4:
+ raise ValueError(
+ "Each element of the ip_adapter_masks array should be a tensor with shape "
+ "[1, num_images_for_ip_adapter, height, width]."
+ " Please use `IPAdapterMaskProcessor` to preprocess your mask"
+ )
+ if mask.shape[1] != ip_state.shape[1]:
+ raise ValueError(
+ f"Number of masks ({mask.shape[1]}) does not match "
+ f"number of ip images ({ip_state.shape[1]}) at index {index}"
+ )
+ if isinstance(scale, list) and not len(scale) == mask.shape[1]:
+ raise ValueError(
+ f"Number of masks ({mask.shape[1]}) does not match "
+ f"number of scales ({len(scale)}) at index {index}"
+ )
+ else:
+ ip_adapter_masks = [None] * len(self.scale)
+
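+        # Attend over each adapter's image tokens separately and add the (optionally masked) result,
+        # weighted by that adapter's scale, to the text-attention output.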
+ # for ip-adapter
+ for current_ip_hidden_states, scale, to_k_ip, to_v_ip, mask in zip(
+ ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip, ip_adapter_masks
+ ):
+ skip = False
+ if isinstance(scale, list):
+ if all(s == 0 for s in scale):
+ skip = True
+ elif scale == 0:
+ skip = True
+ if not skip:
+ if mask is not None:
+ if not isinstance(scale, list):
+ scale = [scale] * mask.shape[1]
+
+ current_num_images = mask.shape[1]
+ for i in range(current_num_images):
+ ip_key = to_k_ip(current_ip_hidden_states[:, i, :, :])
+ ip_value = to_v_ip(current_ip_hidden_states[:, i, :, :])
+
+ ip_key = attn.head_to_batch_dim(ip_key)
+ ip_value = attn.head_to_batch_dim(ip_value)
+
+ ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
+ _current_ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
+ _current_ip_hidden_states = attn.batch_to_head_dim(_current_ip_hidden_states)
+
+ mask_downsample = IPAdapterMaskProcessor.downsample(
+ mask[:, i, :, :],
+ batch_size,
+ _current_ip_hidden_states.shape[1],
+ _current_ip_hidden_states.shape[2],
+ )
+
+ mask_downsample = mask_downsample.to(dtype=query.dtype, device=query.device)
+
+ hidden_states = hidden_states + scale[i] * (_current_ip_hidden_states * mask_downsample)
+ else:
+ ip_key = to_k_ip(current_ip_hidden_states)
+ ip_value = to_v_ip(current_ip_hidden_states)
+
+ ip_key = attn.head_to_batch_dim(ip_key)
+ ip_value = attn.head_to_batch_dim(ip_value)
+
+ ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
+ current_ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
+ current_ip_hidden_states = attn.batch_to_head_dim(current_ip_hidden_states)
+
+ hidden_states = hidden_states + scale * current_ip_hidden_states
+
+ # linear proj
+ hidden_states = attn.to_out[0](hidden_states)
+
+ # dropout
+ hidden_states = attn.to_out[1](hidden_states)
+
+ if input_ndim == 4:
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+ if attn.residual_connection:
+ hidden_states = hidden_states + residual
+
+ hidden_states = hidden_states / attn.rescale_output_factor
+
+ return hidden_states
+
+
+
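+# PyTorch 2.0 counterpart of AttnProcessor: uses F.scaled_dot_product_attention, switching to
+# scaled_dot_product_attention_regionstate when region-prompt weighting is requested.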
+class AttnProcessor2_0:
+ r"""
+ Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
+ """
+
+ def __init__(self):
+ if not hasattr(F, "scaled_dot_product_attention"):
+ raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
+
+ def __call__(
+ self,
+ attn,
+ hidden_states: torch.Tensor,
+ encoder_hidden_states = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ temb: Optional[torch.Tensor] = None,
+ region_prompt = None,
+ ip_adapter_masks = None,
+ *args,
+ **kwargs,
+ ) -> torch.Tensor:
+
+ if len(args) > 0 or kwargs.get("scale", None) is not None:
+
+ deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
+
+ deprecate("scale", "1.0.0", deprecation_message)
+
+ residual = hidden_states
+
+ #_,img_sequence_length,_ = hidden_states.shape
+        img_sequence_length = hidden_states.shape[1]
+ if attn.spatial_norm is not None:
+ hidden_states = attn.spatial_norm(hidden_states, temb)
+
+ input_ndim = hidden_states.ndim
+
+ if input_ndim == 4:
+ batch_size, channel, height, width = hidden_states.shape
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+ is_xattn = False
+ if encoder_hidden_states is not None and region_prompt is not None:
+ is_xattn = True
+ region_state = region_prompt["region_state"]
+ weight_func = region_prompt["weight_func"]
+ sigma = region_prompt["sigma"]
+
+ batch_size, sequence_length, _ = (
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+ )
+
+ if attention_mask is not None:
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+ # scaled_dot_product_attention expects attention_mask shape to be
+ # (batch, heads, source_length, target_length)
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
+
+ if attn.group_norm is not None:
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ query = attn.to_q(hidden_states)
+
+ if encoder_hidden_states is None:
+ encoder_hidden_states = hidden_states
+ elif attn.norm_cross:
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+ key = attn.to_k(encoder_hidden_states)
+ value = attn.to_v(encoder_hidden_states)
+
+ inner_dim = key.shape[-1]
+ head_dim = inner_dim // attn.heads
+
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
+ # TODO: add support for attn.scale when we move to Torch 2.1
+
+ if is_xattn and isinstance(region_state, dict):
+ #w = attn.head_to_batch_dim(w,out_dim = 4).transpose(1, 2)
+            hidden_states = scaled_dot_product_attention_regionstate(
+                query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False,
+                weight_func=weight_func, region_state=region_state[img_sequence_length].to(query.device), sigma=sigma,
+            )
+ else:
+ hidden_states = F.scaled_dot_product_attention(
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
+ )
+
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
+ hidden_states = hidden_states.to(query.dtype)
+
+ # linear proj
+ hidden_states = attn.to_out[0](hidden_states)
+ # dropout
+ hidden_states = attn.to_out[1](hidden_states)
+
+ if input_ndim == 4:
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+ if attn.residual_connection:
+ hidden_states = hidden_states + residual
+
+ hidden_states = hidden_states / attn.rescale_output_factor
+
+ return hidden_states
+
+
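+# PyTorch 2.0 counterpart of IPAdapterAttnProcessor: same image-prompt key/value projections and optional
+# spatial masks, with attention computed via F.scaled_dot_product_attention (or the region-state variant above).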
+class IPAdapterAttnProcessor2_0(torch.nn.Module):
+ r"""
+ Attention processor for IP-Adapter for PyTorch 2.0.
+
+ Args:
+ hidden_size (`int`):
+ The hidden size of the attention layer.
+ cross_attention_dim (`int`):
+ The number of channels in the `encoder_hidden_states`.
+ num_tokens (`int`, `Tuple[int]` or `List[int]`, defaults to `(4,)`):
+ The context length of the image features.
+ scale (`float` or `List[float]`, defaults to 1.0):
+ the weight scale of image prompt.
+ """
+
+ def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0):
+ super().__init__()
+
+ if not hasattr(F, "scaled_dot_product_attention"):
+ raise ImportError(
+ f"{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
+ )
+
+ self.hidden_size = hidden_size
+ self.cross_attention_dim = cross_attention_dim
+
+ if not isinstance(num_tokens, (tuple, list)):
+ num_tokens = [num_tokens]
+ self.num_tokens = num_tokens
+
+ if not isinstance(scale, list):
+ scale = [scale] * len(num_tokens)
+ if len(scale) != len(num_tokens):
+            raise ValueError("`scale` should be a list of floats with the same length as `num_tokens`.")
+ self.scale = scale
+
+ self.to_k_ip = nn.ModuleList(
+ [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))]
+ )
+ self.to_v_ip = nn.ModuleList(
+ [nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))]
+ )
+
+ def __call__(
+ self,
+ attn,
+ hidden_states,
+ encoder_hidden_states=None,
+ attention_mask=None,
+ temb=None,
+ scale=1.0,
+ region_prompt = None,
+ ip_adapter_masks = None,
+ ):
+ residual = hidden_states
+
+ #_,img_sequence_length,_ = hidden_states.shape
+        img_sequence_length = hidden_states.shape[1]
+
+ is_xattn = False
+ if encoder_hidden_states is not None and region_prompt is not None:
+ is_xattn = True
+ region_state = region_prompt["region_state"]
+ weight_func = region_prompt["weight_func"]
+ sigma = region_prompt["sigma"]
+
+ # separate ip_hidden_states from encoder_hidden_states
+ if encoder_hidden_states is not None:
+ if isinstance(encoder_hidden_states, tuple):
+ encoder_hidden_states, ip_hidden_states = encoder_hidden_states
+ else:
+ deprecation_message = (
+ "You have passed a tensor as `encoder_hidden_states`. This is deprecated and will be removed in a future release."
+ " Please make sure to update your script to pass `encoder_hidden_states` as a tuple to suppress this warning."
+ )
+ deprecate("encoder_hidden_states not a tuple", "1.0.0", deprecation_message, standard_warn=False)
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0]
+ encoder_hidden_states, ip_hidden_states = (
+ encoder_hidden_states[:, :end_pos, :],
+ [encoder_hidden_states[:, end_pos:, :]],
+ )
+
+ if attn.spatial_norm is not None:
+ hidden_states = attn.spatial_norm(hidden_states, temb)
+
+ input_ndim = hidden_states.ndim
+
+ if input_ndim == 4:
+ batch_size, channel, height, width = hidden_states.shape
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+
+
+
+ batch_size, sequence_length, _ = (
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+ )
+
+ if attention_mask is not None:
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+ # scaled_dot_product_attention expects attention_mask shape to be
+ # (batch, heads, source_length, target_length)
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
+
+ if attn.group_norm is not None:
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ query = attn.to_q(hidden_states)
+
+ if encoder_hidden_states is None:
+ encoder_hidden_states = hidden_states
+ elif attn.norm_cross:
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+
+ key = attn.to_k(encoder_hidden_states)
+ value = attn.to_v(encoder_hidden_states)
+
+ inner_dim = key.shape[-1]
+ head_dim = inner_dim // attn.heads
+
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
+ # TODO: add support for attn.scale when we move to Torch 2.1
+
+ if is_xattn and isinstance(region_state, dict):
+ #w = attn.head_to_batch_dim(w,out_dim = 4).transpose(1, 2)
+            hidden_states = scaled_dot_product_attention_regionstate(
+                query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False,
+                weight_func=weight_func, region_state=region_state[img_sequence_length].to(query.device), sigma=sigma,
+            )
+ else:
+ hidden_states = F.scaled_dot_product_attention(
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
+ )
+
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
+ hidden_states = hidden_states.to(query.dtype)
+
+        '''# for ip-adapter
+ for current_ip_hidden_states, scale, to_k_ip, to_v_ip in zip(
+ ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip
+ ):
+ ip_key = to_k_ip(current_ip_hidden_states)
+ ip_value = to_v_ip(current_ip_hidden_states)
+
+ ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+ ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
+ # TODO: add support for attn.scale when we move to Torch 2.1
+ current_ip_hidden_states = F.scaled_dot_product_attention(
+ query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
+ )
+
+ current_ip_hidden_states = current_ip_hidden_states.transpose(1, 2).reshape(
+ batch_size, -1, attn.heads * head_dim
+ )
+ current_ip_hidden_states = current_ip_hidden_states.to(query.dtype)
+
+ hidden_states = hidden_states + scale * current_ip_hidden_states'''
+
+
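+        # Validate `ip_adapter_masks` so each adapter's contribution can be limited to its spatial region.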
+ if ip_adapter_masks is not None:
+ if not isinstance(ip_adapter_masks, List):
+ # for backward compatibility, we accept `ip_adapter_mask` as a tensor of shape [num_ip_adapter, 1, height, width]
+ ip_adapter_masks = list(ip_adapter_masks.unsqueeze(1))
+ if not (len(ip_adapter_masks) == len(self.scale) == len(ip_hidden_states)):
+ raise ValueError(
+ f"Length of ip_adapter_masks array ({len(ip_adapter_masks)}) must match "
+ f"length of self.scale array ({len(self.scale)}) and number of ip_hidden_states "
+ f"({len(ip_hidden_states)})"
+ )
+ else:
+ for index, (mask, scale, ip_state) in enumerate(zip(ip_adapter_masks, self.scale, ip_hidden_states)):
+ if not isinstance(mask, torch.Tensor) or mask.ndim != 4:
+ raise ValueError(
+ "Each element of the ip_adapter_masks array should be a tensor with shape "
+ "[1, num_images_for_ip_adapter, height, width]."
+ " Please use `IPAdapterMaskProcessor` to preprocess your mask"
+ )
+ if mask.shape[1] != ip_state.shape[1]:
+ raise ValueError(
+ f"Number of masks ({mask.shape[1]}) does not match "
+ f"number of ip images ({ip_state.shape[1]}) at index {index}"
+ )
+ if isinstance(scale, list) and not len(scale) == mask.shape[1]:
+ raise ValueError(
+ f"Number of masks ({mask.shape[1]}) does not match "
+ f"number of scales ({len(scale)}) at index {index}"
+ )
+ else:
+ ip_adapter_masks = [None] * len(self.scale)
+
+ # for ip-adapter
+ for current_ip_hidden_states, scale, to_k_ip, to_v_ip, mask in zip(
+ ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip, ip_adapter_masks
+ ):
+ skip = False
+ if isinstance(scale, list):
+ if all(s == 0 for s in scale):
+ skip = True
+ elif scale == 0:
+ skip = True
+ if not skip:
+ if mask is not None:
+ if not isinstance(scale, list):
+ scale = [scale] * mask.shape[1]
+
+ current_num_images = mask.shape[1]
+ for i in range(current_num_images):
+ ip_key = to_k_ip(current_ip_hidden_states[:, i, :, :])
+ ip_value = to_v_ip(current_ip_hidden_states[:, i, :, :])
+
+ ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+ ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
+ # TODO: add support for attn.scale when we move to Torch 2.1
+ _current_ip_hidden_states = F.scaled_dot_product_attention(
+ query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
+ )
+
+ _current_ip_hidden_states = _current_ip_hidden_states.transpose(1, 2).reshape(
+ batch_size, -1, attn.heads * head_dim
+ )
+ _current_ip_hidden_states = _current_ip_hidden_states.to(query.dtype)
+
+ mask_downsample = IPAdapterMaskProcessor.downsample(
+ mask[:, i, :, :],
+ batch_size,
+ _current_ip_hidden_states.shape[1],
+ _current_ip_hidden_states.shape[2],
+ )
+
+ mask_downsample = mask_downsample.to(dtype=query.dtype, device=query.device)
+ hidden_states = hidden_states + scale[i] * (_current_ip_hidden_states * mask_downsample)
+ else:
+ ip_key = to_k_ip(current_ip_hidden_states)
+ ip_value = to_v_ip(current_ip_hidden_states)
+
+ ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+ ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
+ # TODO: add support for attn.scale when we move to Torch 2.1
+ current_ip_hidden_states = F.scaled_dot_product_attention(
+ query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
+ )
+
+ current_ip_hidden_states = current_ip_hidden_states.transpose(1, 2).reshape(
+ batch_size, -1, attn.heads * head_dim
+ )
+ current_ip_hidden_states = current_ip_hidden_states.to(query.dtype)
+
+ hidden_states = hidden_states + scale * current_ip_hidden_states
+
+ # linear proj
+ hidden_states = attn.to_out[0](hidden_states)
+ # dropout
+ hidden_states = attn.to_out[1](hidden_states)
+
+ if input_ndim == 4:
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+ if attn.residual_connection:
+ hidden_states = hidden_states + residual
+
+ hidden_states = hidden_states / attn.rescale_output_factor
+
+ return hidden_states
+
diff --git a/modules/controlnetxs/controlnetxs.py b/modules/controlnetxs/controlnetxs.py
new file mode 100644
index 0000000000000000000000000000000000000000..08328318147c8df19918abcb47284c4b7c0964ef
--- /dev/null
+++ b/modules/controlnetxs/controlnetxs.py
@@ -0,0 +1,1017 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import math
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import functional as F
+from torch.nn.modules.normalization import GroupNorm
+
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers.models.attention_processor import USE_PEFT_BACKEND, AttentionProcessor
+from diffusers.models.autoencoders import AutoencoderKL
+from diffusers.models.lora import LoRACompatibleConv
+from diffusers.models.modeling_utils import ModelMixin
+from diffusers.models.unet_2d_blocks import (
+ CrossAttnDownBlock2D,
+ CrossAttnUpBlock2D,
+ DownBlock2D,
+ Downsample2D,
+ ResnetBlock2D,
+ Transformer2DModel,
+ UpBlock2D,
+ Upsample2D,
+)
+from diffusers.models.unet_2d_condition import UNet2DConditionModel
+from diffusers.utils import BaseOutput, logging
+from modules.attention_modify import CrossAttnProcessor,IPAdapterAttnProcessor
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+@dataclass
+class ControlNetXSOutput(BaseOutput):
+ """
+ The output of [`ControlNetXSModel`].
+
+ Args:
+ sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ The output of the `ControlNetXSModel`. Unlike `ControlNetOutput` this is NOT to be added to the base model
+ output, but is already the final output.
+ """
+
+ sample: torch.FloatTensor = None
+
+
+# copied from diffusers.models.controlnet.ControlNetConditioningEmbedding
+class ControlNetConditioningEmbedding(nn.Module):
+ """
+ Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
+    [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
+    training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
+    convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
+ (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
+ model) to encode image-space conditions ... into feature maps ..."
+ """
+
+ def __init__(
+ self,
+ conditioning_embedding_channels: int,
+ conditioning_channels: int = 3,
+ block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
+ ):
+ super().__init__()
+
+ self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
+
+ self.blocks = nn.ModuleList([])
+
+ for i in range(len(block_out_channels) - 1):
+ channel_in = block_out_channels[i]
+ channel_out = block_out_channels[i + 1]
+ self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
+ self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
+
+ self.conv_out = zero_module(
+ nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
+ )
+
+ def forward(self, conditioning):
+ embedding = self.conv_in(conditioning)
+ embedding = F.silu(embedding)
+
+ for block in self.blocks:
+ embedding = block(embedding)
+ embedding = F.silu(embedding)
+
+ embedding = self.conv_out(embedding)
+
+ return embedding
+
+
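+# Typical usage (sketch): wrap an existing Stable Diffusion UNet, e.g.
+#     controlnet = ControlNetXSModel.init_original(base_unet, is_sdxl=False)
+# or size the control model explicitly with
+#     controlnet = ControlNetXSModel.from_unet(base_unet, size_ratio=0.1, learn_embedding=True)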
+class ControlNetXSModel(ModelMixin, ConfigMixin):
+ r"""
+ A ControlNet-XS model
+
+    This model inherits from [`ModelMixin`] and [`ConfigMixin`]. Check the superclass documentation for its generic
+ methods implemented for all models (such as downloading or saving).
+
+    Most of the parameters for this model are passed into the [`UNet2DConditionModel`] it creates. Check the documentation
+ of [`UNet2DConditionModel`] for them.
+
+ Parameters:
+ conditioning_channels (`int`, defaults to 3):
+ Number of channels of conditioning input (e.g. an image)
+ controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
+ The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
+ conditioning_embedding_out_channels (`tuple[int]`, defaults to `(16, 32, 96, 256)`):
+ The tuple of output channel for each block in the `controlnet_cond_embedding` layer.
+ time_embedding_input_dim (`int`, defaults to 320):
+ Dimension of input into time embedding. Needs to be same as in the base model.
+ time_embedding_dim (`int`, defaults to 1280):
+ Dimension of output from time embedding. Needs to be same as in the base model.
+ learn_embedding (`bool`, defaults to `False`):
+ Whether to use time embedding of the control model. If yes, the time embedding is a linear interpolation of
+ the time embeddings of the control and base model with interpolation parameter `time_embedding_mix**3`.
+ time_embedding_mix (`float`, defaults to 1.0):
+ Linear interpolation parameter used if `learn_embedding` is `True`. A value of 1.0 means only the
+ control model's time embedding will be used. A value of 0.0 means only the base model's time embedding will be used.
+ base_model_channel_sizes (`Dict[str, List[Tuple[int]]]`):
+ Channel sizes of each subblock of base model. Use `gather_subblock_sizes` on your base model to compute it.
+ """
+
+ @classmethod
+ def init_original(cls, base_model: UNet2DConditionModel, is_sdxl=True):
+ """
+ Create a ControlNetXS model with the same parameters as in the original paper (https://github.com/vislearn/ControlNet-XS).
+
+ Parameters:
+ base_model (`UNet2DConditionModel`):
+ Base UNet model. Needs to be either StableDiffusion or StableDiffusion-XL.
+ is_sdxl (`bool`, defaults to `True`):
+ Whether passed `base_model` is a StableDiffusion-XL model.
+ """
+
+ def get_dim_attn_heads(base_model: UNet2DConditionModel, size_ratio: float, num_attn_heads: int):
+ """
+ Currently, diffusers can only set the dimension of attention heads (see https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why).
+            The original ControlNet-XS model, however, defines the number of attention heads.
+            That's why we compute the head dimensions needed to obtain the correct number of attention heads.
+ """
+ block_out_channels = [int(size_ratio * c) for c in base_model.config.block_out_channels]
+ dim_attn_heads = [math.ceil(c / num_attn_heads) for c in block_out_channels]
+ return dim_attn_heads
+
+ if is_sdxl:
+ return ControlNetXSModel.from_unet(
+ base_model,
+ time_embedding_mix=0.95,
+ learn_embedding=True,
+ size_ratio=0.1,
+ conditioning_embedding_out_channels=(16, 32, 96, 256),
+ num_attention_heads=get_dim_attn_heads(base_model, 0.1, 64),
+ )
+ else:
+ return ControlNetXSModel.from_unet(
+ base_model,
+ time_embedding_mix=1.0,
+ learn_embedding=True,
+ size_ratio=0.0125,
+ conditioning_embedding_out_channels=(16, 32, 96, 256),
+ num_attention_heads=get_dim_attn_heads(base_model, 0.0125, 8),
+ )
+
+ @classmethod
+ def _gather_subblock_sizes(cls, unet: UNet2DConditionModel, base_or_control: str):
+ """To create correctly sized connections between base and control model, we need to know
+ the input and output channels of each subblock.
+
+ Parameters:
+ unet (`UNet2DConditionModel`):
+ Unet of which the subblock channels sizes are to be gathered.
+ base_or_control (`str`):
+ Needs to be either "base" or "control". If "base", decoder is also considered.
+ """
+ if base_or_control not in ["base", "control"]:
+ raise ValueError("`base_or_control` needs to be either `base` or `control`")
+
+ channel_sizes = {"down": [], "mid": [], "up": []}
+
+ # input convolution
+ channel_sizes["down"].append((unet.conv_in.in_channels, unet.conv_in.out_channels))
+
+ # encoder blocks
+ for module in unet.down_blocks:
+ if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):
+ for r in module.resnets:
+ channel_sizes["down"].append((r.in_channels, r.out_channels))
+ if module.downsamplers:
+ channel_sizes["down"].append(
+ (module.downsamplers[0].channels, module.downsamplers[0].out_channels)
+ )
+ else:
+ raise ValueError(f"Encountered unknown module of type {type(module)} while creating ControlNet-XS.")
+
+ # middle block
+ channel_sizes["mid"].append((unet.mid_block.resnets[0].in_channels, unet.mid_block.resnets[0].out_channels))
+
+ # decoder blocks
+ if base_or_control == "base":
+ for module in unet.up_blocks:
+ if isinstance(module, (CrossAttnUpBlock2D, UpBlock2D)):
+ for r in module.resnets:
+ channel_sizes["up"].append((r.in_channels, r.out_channels))
+ else:
+ raise ValueError(
+ f"Encountered unknown module of type {type(module)} while creating ControlNet-XS."
+ )
+
+ return channel_sizes
+
+ @register_to_config
+ def __init__(
+ self,
+ conditioning_channels: int = 3,
+ conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256),
+ controlnet_conditioning_channel_order: str = "rgb",
+ time_embedding_input_dim: int = 320,
+ time_embedding_dim: int = 1280,
+ time_embedding_mix: float = 1.0,
+ learn_embedding: bool = False,
+ base_model_channel_sizes: Dict[str, List[Tuple[int]]] = {
+ "down": [
+ (4, 320),
+ (320, 320),
+ (320, 320),
+ (320, 320),
+ (320, 640),
+ (640, 640),
+ (640, 640),
+ (640, 1280),
+ (1280, 1280),
+ ],
+ "mid": [(1280, 1280)],
+ "up": [
+ (2560, 1280),
+ (2560, 1280),
+ (1920, 1280),
+ (1920, 640),
+ (1280, 640),
+ (960, 640),
+ (960, 320),
+ (640, 320),
+ (640, 320),
+ ],
+ },
+ sample_size: Optional[int] = None,
+ down_block_types: Tuple[str] = (
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "DownBlock2D",
+ ),
+ up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
+ block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
+ norm_num_groups: Optional[int] = 32,
+ cross_attention_dim: Union[int, Tuple[int]] = 1280,
+ transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
+ num_attention_heads: Optional[Union[int, Tuple[int]]] = 8,
+ upcast_attention: bool = False,
+ ):
+ super().__init__()
+
+ # 1 - Create control unet
+ self.control_model = UNet2DConditionModel(
+ sample_size=sample_size,
+ down_block_types=down_block_types,
+ up_block_types=up_block_types,
+ block_out_channels=block_out_channels,
+ norm_num_groups=norm_num_groups,
+ cross_attention_dim=cross_attention_dim,
+ transformer_layers_per_block=transformer_layers_per_block,
+ attention_head_dim=num_attention_heads,
+ use_linear_projection=True,
+ upcast_attention=upcast_attention,
+ time_embedding_dim=time_embedding_dim,
+ )
+
+ # 2 - Do model surgery on control model
+ # 2.1 - Allow to use the same time information as the base model
+ adjust_time_dims(self.control_model, time_embedding_input_dim, time_embedding_dim)
+
+ # 2.2 - Allow for information infusion from base model
+
+ # We concat the output of each base encoder subblocks to the input of the next control encoder subblock
+ # (We ignore the 1st element, as it represents the `conv_in`.)
+ extra_input_channels = [input_channels for input_channels, _ in base_model_channel_sizes["down"][1:]]
+ it_extra_input_channels = iter(extra_input_channels)
+
+ for b, block in enumerate(self.control_model.down_blocks):
+ for r in range(len(block.resnets)):
+ increase_block_input_in_encoder_resnet(
+ self.control_model, block_no=b, resnet_idx=r, by=next(it_extra_input_channels)
+ )
+
+ if block.downsamplers:
+ increase_block_input_in_encoder_downsampler(
+ self.control_model, block_no=b, by=next(it_extra_input_channels)
+ )
+
+ increase_block_input_in_mid_resnet(self.control_model, by=extra_input_channels[-1])
+
+ # 2.3 - Make group norms work with modified channel sizes
+ adjust_group_norms(self.control_model)
+
+ # 3 - Gather Channel Sizes
+ self.ch_inout_ctrl = ControlNetXSModel._gather_subblock_sizes(self.control_model, base_or_control="control")
+ self.ch_inout_base = base_model_channel_sizes
+
+ # 4 - Build connections between base and control model
+ self.down_zero_convs_out = nn.ModuleList([])
+ self.down_zero_convs_in = nn.ModuleList([])
+ self.middle_block_out = nn.ModuleList([])
+ self.middle_block_in = nn.ModuleList([])
+ self.up_zero_convs_out = nn.ModuleList([])
+ self.up_zero_convs_in = nn.ModuleList([])
+
+ for ch_io_base in self.ch_inout_base["down"]:
+ self.down_zero_convs_in.append(self._make_zero_conv(in_channels=ch_io_base[1], out_channels=ch_io_base[1]))
+ for i in range(len(self.ch_inout_ctrl["down"])):
+ self.down_zero_convs_out.append(
+ self._make_zero_conv(self.ch_inout_ctrl["down"][i][1], self.ch_inout_base["down"][i][1])
+ )
+
+ self.middle_block_out = self._make_zero_conv(
+ self.ch_inout_ctrl["mid"][-1][1], self.ch_inout_base["mid"][-1][1]
+ )
+
+ self.up_zero_convs_out.append(
+ self._make_zero_conv(self.ch_inout_ctrl["down"][-1][1], self.ch_inout_base["mid"][-1][1])
+ )
+ for i in range(1, len(self.ch_inout_ctrl["down"])):
+ self.up_zero_convs_out.append(
+ self._make_zero_conv(self.ch_inout_ctrl["down"][-(i + 1)][1], self.ch_inout_base["up"][i - 1][1])
+ )
+
+ # 5 - Create conditioning hint embedding
+ self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
+ conditioning_embedding_channels=block_out_channels[0],
+ block_out_channels=conditioning_embedding_out_channels,
+ conditioning_channels=conditioning_channels,
+ )
+
+        # In the minimal implementation setting, we only need the control model up to the mid block
+ del self.control_model.up_blocks
+ del self.control_model.conv_norm_out
+ del self.control_model.conv_out
+
+ @classmethod
+ def from_unet(
+ cls,
+ unet: UNet2DConditionModel,
+ conditioning_channels: int = 3,
+ conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256),
+ controlnet_conditioning_channel_order: str = "rgb",
+ learn_embedding: bool = False,
+ time_embedding_mix: float = 1.0,
+ block_out_channels: Optional[Tuple[int]] = None,
+ size_ratio: Optional[float] = None,
+ num_attention_heads: Optional[Union[int, Tuple[int]]] = 8,
+ norm_num_groups: Optional[int] = None,
+ ):
+ r"""
+ Instantiate a [`ControlNetXSModel`] from [`UNet2DConditionModel`].
+
+ Parameters:
+ unet (`UNet2DConditionModel`):
+ The UNet model we want to control. The dimensions of the ControlNetXSModel will be adapted to it.
+ conditioning_channels (`int`, defaults to 3):
+ Number of channels of conditioning input (e.g. an image)
+ conditioning_embedding_out_channels (`tuple[int]`, defaults to `(16, 32, 96, 256)`):
+ The tuple of output channel for each block in the `controlnet_cond_embedding` layer.
+ controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
+ The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
+ learn_embedding (`bool`, defaults to `False`):
+                Whether to use time embedding of the control model. If yes, the time embedding is a linear interpolation
+ of the time embeddings of the control and base model with interpolation parameter
+ `time_embedding_mix**3`.
+ time_embedding_mix (`float`, defaults to 1.0):
+ Linear interpolation parameter used if `learn_embedding` is `True`.
+ block_out_channels (`Tuple[int]`, *optional*):
+ Down blocks output channels in control model. Either this or `size_ratio` must be given.
+ size_ratio (float, *optional*):
+ When given, block_out_channels is set to a relative fraction of the base model's block_out_channels.
+ Either this or `block_out_channels` must be given.
+ num_attention_heads (`Union[int, Tuple[int]]`, *optional*):
+ The dimension of the attention heads. The naming seems a bit confusing and it is, see https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why.
+ norm_num_groups (int, *optional*, defaults to `None`):
+ The number of groups to use for the normalization of the control unet. If `None`,
+ `int(unet.config.norm_num_groups * size_ratio)` is taken.
+ """
+
+ # Check input
+ fixed_size = block_out_channels is not None
+ relative_size = size_ratio is not None
+ if not (fixed_size ^ relative_size):
+ raise ValueError(
+ "Pass exactly one of `block_out_channels` (for absolute sizing) or `control_model_ratio` (for relative sizing)."
+ )
+
+ # Create model
+ if block_out_channels is None:
+ block_out_channels = [int(size_ratio * c) for c in unet.config.block_out_channels]
+
+ # Check that attention heads and group norms match channel sizes
+ # - attention heads
+ def attn_heads_match_channel_sizes(attn_heads, channel_sizes):
+ if isinstance(attn_heads, (tuple, list)):
+ return all(c % a == 0 for a, c in zip(attn_heads, channel_sizes))
+ else:
+ return all(c % attn_heads == 0 for c in channel_sizes)
+
+ num_attention_heads = num_attention_heads or unet.config.attention_head_dim
+ if not attn_heads_match_channel_sizes(num_attention_heads, block_out_channels):
+ raise ValueError(
+ f"The dimension of attention heads ({num_attention_heads}) must divide `block_out_channels` ({block_out_channels}). If you didn't set `num_attention_heads` the default settings don't match your model. Set `num_attention_heads` manually."
+ )
+
+ # - group norms
+ def group_norms_match_channel_sizes(num_groups, channel_sizes):
+ return all(c % num_groups == 0 for c in channel_sizes)
+
+ if norm_num_groups is None:
+ if group_norms_match_channel_sizes(unet.config.norm_num_groups, block_out_channels):
+ norm_num_groups = unet.config.norm_num_groups
+ else:
+ norm_num_groups = min(block_out_channels)
+
+ if group_norms_match_channel_sizes(norm_num_groups, block_out_channels):
+ print(
+                f"`norm_num_groups` was set to `min(block_out_channels)` (={norm_num_groups}) so it divides all `block_out_channels` ({block_out_channels}). Set it explicitly to silence this message."
+ )
+ else:
+ raise ValueError(
+ f"`block_out_channels` ({block_out_channels}) don't match the base models `norm_num_groups` ({unet.config.norm_num_groups}). Setting `norm_num_groups` to `min(block_out_channels)` ({norm_num_groups}) didn't fix this. Pass `norm_num_groups` explicitly so it divides all block_out_channels."
+ )
+
+ def get_time_emb_input_dim(unet: UNet2DConditionModel):
+ return unet.time_embedding.linear_1.in_features
+
+ def get_time_emb_dim(unet: UNet2DConditionModel):
+ return unet.time_embedding.linear_2.out_features
+
+ # Clone params from base unet if
+ # (i) it's required to build SD or SDXL, and
+ # (ii) it's not used for the time embedding (as time embedding of control model is never used), and
+ # (iii) it's not set further below anyway
+ to_keep = [
+ "cross_attention_dim",
+ "down_block_types",
+ "sample_size",
+ "transformer_layers_per_block",
+ "up_block_types",
+ "upcast_attention",
+ ]
+ kwargs = {k: v for k, v in dict(unet.config).items() if k in to_keep}
+ kwargs.update(block_out_channels=block_out_channels)
+ kwargs.update(num_attention_heads=num_attention_heads)
+ kwargs.update(norm_num_groups=norm_num_groups)
+
+ # Add controlnetxs-specific params
+ kwargs.update(
+ conditioning_channels=conditioning_channels,
+ controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
+ time_embedding_input_dim=get_time_emb_input_dim(unet),
+ time_embedding_dim=get_time_emb_dim(unet),
+ time_embedding_mix=time_embedding_mix,
+ learn_embedding=learn_embedding,
+ base_model_channel_sizes=ControlNetXSModel._gather_subblock_sizes(unet, base_or_control="base"),
+ conditioning_embedding_out_channels=conditioning_embedding_out_channels,
+ )
+
+ return cls(**kwargs)
+
+ @property
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
+ r"""
+ Returns:
+            `dict` of attention processors: A dictionary containing all attention processors used in the model,
+            indexed by their weight names.
+ """
+ return self.control_model.attn_processors
+
+ def set_attn_processor(
+ self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
+ ):
+ r"""
+ Sets the attention processor to use to compute attention.
+
+ Parameters:
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
+ for **all** `Attention` layers.
+
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
+ processor. This is strongly recommended when setting trainable attention processors.
+
+ """
+ self.control_model.set_attn_processor(processor, _remove_lora)
+
+ def set_default_attn_processor(self):
+ """
+ Disables custom attention processors and sets the default attention implementation.
+ """
+ self.control_model.set_default_attn_processor()
+
+ def set_attention_slice(self, slice_size):
+ r"""
+ Enable sliced attention computation.
+
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
+
+ Args:
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
+ must be a multiple of `slice_size`.
+ """
+ self.control_model.set_attention_slice(slice_size)
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ if isinstance(module, (UNet2DConditionModel)):
+ if value:
+ module.enable_gradient_checkpointing()
+ else:
+ module.disable_gradient_checkpointing()
+
+ def forward(
+ self,
+ base_model: UNet2DConditionModel,
+ sample: torch.FloatTensor,
+ timestep: Union[torch.Tensor, float, int],
+ encoder_hidden_states: Dict,
+ controlnet_cond: torch.Tensor,
+ conditioning_scale: float = 1.0,
+ class_labels: Optional[torch.Tensor] = None,
+ timestep_cond: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
+ return_dict: bool = True,
+ ) -> Union[ControlNetXSOutput, Tuple]:
+ """
+ The [`ControlNetModel`] forward method.
+
+ Args:
+ base_model (`UNet2DConditionModel`):
+ The base unet model we want to control.
+ sample (`torch.FloatTensor`):
+ The noisy input tensor.
+ timestep (`Union[torch.Tensor, float, int]`):
+ The number of timesteps to denoise an input.
+ encoder_hidden_states (`torch.Tensor`):
+ The encoder hidden states.
+ controlnet_cond (`torch.FloatTensor`):
+ The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
+ conditioning_scale (`float`, defaults to `1.0`):
+ How much the control model affects the base model outputs.
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
+ timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
+ Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
+ timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
+ embeddings.
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
+ negative values to the attention scores corresponding to "discard" tokens.
+ added_cond_kwargs (`dict`):
+ Additional conditions for the Stable Diffusion XL UNet.
+ cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
+ A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
+ return_dict (`bool`, defaults to `True`):
+ Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.
+
+ Returns:
+ [`~models.controlnetxs.ControlNetXSOutput`] **or** `tuple`:
+ If `return_dict` is `True`, a [`~models.controlnetxs.ControlNetXSOutput`] is returned, otherwise a
+ tuple is returned where the first element is the sample tensor.
+ """
+ # check channel order
+ channel_order = self.config.controlnet_conditioning_channel_order
+
+ if channel_order == "rgb":
+ # in rgb order by default
+ ...
+ elif channel_order == "bgr":
+ controlnet_cond = torch.flip(controlnet_cond, dims=[1])
+ else:
+ raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
+
+ # scale control strength
+ n_connections = len(self.down_zero_convs_out) + 1 + len(self.up_zero_convs_out)
+ scale_list = torch.full((n_connections,), conditioning_scale)
+
+ # prepare attention_mask
+ if attention_mask is not None:
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
+ attention_mask = attention_mask.unsqueeze(1)
+
+ # 1. time
+ timesteps = timestep
+ if not torch.is_tensor(timesteps):
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
+ # This would be a good case for the `match` statement (Python 3.10+)
+ is_mps = sample.device.type == "mps"
+ if isinstance(timestep, float):
+ dtype = torch.float32 if is_mps else torch.float64
+ else:
+ dtype = torch.int32 if is_mps else torch.int64
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
+ elif len(timesteps.shape) == 0:
+ timesteps = timesteps[None].to(sample.device)
+
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+ timesteps = timesteps.expand(sample.shape[0])
+
+ t_emb = base_model.time_proj(timesteps)
+
+ # timesteps does not contain any weights and will always return f32 tensors
+ # but time_embedding might actually be running in fp16. so we need to cast here.
+ # there might be better ways to encapsulate this.
+ t_emb = t_emb.to(dtype=sample.dtype)
+
+ if self.config.learn_embedding:
+ ctrl_temb = self.control_model.time_embedding(t_emb, timestep_cond)
+ base_temb = base_model.time_embedding(t_emb, timestep_cond)
+ interpolation_param = self.config.time_embedding_mix**0.3
+
+ temb = ctrl_temb * interpolation_param + base_temb * (1 - interpolation_param)
+ else:
+ temb = base_model.time_embedding(t_emb)
+
+ # added time & text embeddings
+ aug_emb = None
+
+ if base_model.class_embedding is not None:
+ if class_labels is None:
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
+
+ if base_model.config.class_embed_type == "timestep":
+ class_labels = base_model.time_proj(class_labels)
+
+ class_emb = base_model.class_embedding(class_labels).to(dtype=self.dtype)
+ temb = temb + class_emb
+
+ if base_model.config.addition_embed_type is not None:
+ if base_model.config.addition_embed_type == "text":
+ aug_emb = base_model.add_embedding(encoder_hidden_states["states"])
+ elif base_model.config.addition_embed_type == "text_image":
+ raise NotImplementedError()
+ elif base_model.config.addition_embed_type == "text_time":
+ # SDXL - style
+ if "text_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
+ )
+ text_embeds = added_cond_kwargs.get("text_embeds")
+ if "time_ids" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
+ )
+ time_ids = added_cond_kwargs.get("time_ids")
+ time_embeds = base_model.add_time_proj(time_ids.flatten())
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
+ add_embeds = add_embeds.to(temb.dtype)
+ aug_emb = base_model.add_embedding(add_embeds)
+ elif base_model.config.addition_embed_type == "image":
+ raise NotImplementedError()
+ elif base_model.config.addition_embed_type == "image_hint":
+ raise NotImplementedError()
+
+ temb = temb + aug_emb if aug_emb is not None else temb
+
+ # text embeddings
+ cemb = encoder_hidden_states["states"]
+
+ # Preparation
+ guided_hint = self.controlnet_cond_embedding(controlnet_cond)
+
+ h_ctrl = h_base = sample
+ hs_base, hs_ctrl = [], []
+ it_down_convs_in, it_down_convs_out, it_dec_convs_in, it_up_convs_out = map(
+ iter, (self.down_zero_convs_in, self.down_zero_convs_out, self.up_zero_convs_in, self.up_zero_convs_out)
+ )
+ scales = iter(scale_list)
+
+ base_down_subblocks = to_sub_blocks(base_model.down_blocks)
+ ctrl_down_subblocks = to_sub_blocks(self.control_model.down_blocks)
+ base_mid_subblocks = to_sub_blocks([base_model.mid_block])
+ ctrl_mid_subblocks = to_sub_blocks([self.control_model.mid_block])
+ base_up_subblocks = to_sub_blocks(base_model.up_blocks)
+
+ # Cross Control
+ # 0 - conv in
+ h_base = base_model.conv_in(h_base)
+ h_ctrl = self.control_model.conv_in(h_ctrl)
+ if guided_hint is not None:
+ h_ctrl += guided_hint
+ h_base = h_base + next(it_down_convs_out)(h_ctrl) * next(scales) # D - add ctrl -> base
+
+ hs_base.append(h_base)
+ hs_ctrl.append(h_ctrl)
+
+ # 1 - down
+ for m_base, m_ctrl in zip(base_down_subblocks, ctrl_down_subblocks):
+ h_ctrl = torch.cat([h_ctrl, next(it_down_convs_in)(h_base)], dim=1) # A - concat base -> ctrl
+ h_base = m_base(h_base, temb, cemb, attention_mask, cross_attention_kwargs) # B - apply base subblock
+ h_ctrl = m_ctrl(h_ctrl, temb, cemb, attention_mask, cross_attention_kwargs) # C - apply ctrl subblock
+ h_base = h_base + next(it_down_convs_out)(h_ctrl) * next(scales) # D - add ctrl -> base
+ hs_base.append(h_base)
+ hs_ctrl.append(h_ctrl)
+
+ # 2 - mid
+ h_ctrl = torch.cat([h_ctrl, next(it_down_convs_in)(h_base)], dim=1) # A - concat base -> ctrl
+ for m_base, m_ctrl in zip(base_mid_subblocks, ctrl_mid_subblocks):
+ h_base = m_base(h_base, temb, cemb, attention_mask, cross_attention_kwargs) # B - apply base subblock
+ h_ctrl = m_ctrl(h_ctrl, temb, cemb, attention_mask, cross_attention_kwargs) # C - apply ctrl subblock
+ h_base = h_base + self.middle_block_out(h_ctrl) * next(scales) # D - add ctrl -> base
+
+ # 3 - up
+ for i, m_base in enumerate(base_up_subblocks):
+ h_base = h_base + next(it_up_convs_out)(hs_ctrl.pop()) * next(scales) # add info from ctrl encoder
+ h_base = torch.cat([h_base, hs_base.pop()], dim=1) # concat info from base encoder+ctrl encoder
+ h_base = m_base(h_base, temb, cemb, attention_mask, cross_attention_kwargs)
+
+ h_base = base_model.conv_norm_out(h_base)
+ h_base = base_model.conv_act(h_base)
+ h_base = base_model.conv_out(h_base)
+
+ if not return_dict:
+ return h_base
+
+ return ControlNetXSOutput(sample=h_base)
+
+ def _make_zero_conv(self, in_channels, out_channels=None):
+ # keep running track of channels sizes
+ self.in_channels = in_channels
+ self.out_channels = out_channels or in_channels
+
+ return zero_module(nn.Conv2d(in_channels, out_channels, 1, padding=0))
+
+ @torch.no_grad()
+ def _check_if_vae_compatible(self, vae: AutoencoderKL):
+ condition_downscale_factor = 2 ** (len(self.config.conditioning_embedding_out_channels) - 1)
+ vae_downscale_factor = 2 ** (len(vae.config.block_out_channels) - 1)
+ compatible = condition_downscale_factor == vae_downscale_factor
+ return compatible, condition_downscale_factor, vae_downscale_factor
+
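+# Illustrative sketch (stage counts assumed, not read from this repo's configs): `_check_if_vae_compatible`
+# only compares total downscale factors. A conditioning embedding with four channel stages downscales by
+# 2**(4-1) = 8, which matches a standard SD VAE whose four `block_out_channels` stages also give 2**(4-1) = 8.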
+
+class SubBlock(nn.ModuleList):
+ """A SubBlock is the largest piece of either base or control model, that is executed independently of the other model respectively.
+ Before each subblock, information is concatted from base to control. And after each subblock, information is added from control to base.
+ """
+
+ def __init__(self, ms, *args, **kwargs):
+ if not is_iterable(ms):
+ ms = [ms]
+ super().__init__(ms, *args, **kwargs)
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ temb: torch.Tensor,
+ cemb: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ ):
+ """Iterate through children and pass correct information to each."""
+ for m in self:
+ if isinstance(m, ResnetBlock2D):
+ x = m(x, temb)
+ elif isinstance(m, Transformer2DModel):
+ x = m(x, cemb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs).sample
+ elif isinstance(m, Downsample2D):
+ x = m(x)
+ elif isinstance(m, Upsample2D):
+ x = m(x)
+ else:
+ raise ValueError(
+ f"Type of m is {type(m)} but should be `ResnetBlock2D`, `Transformer2DModel`, `Downsample2D` or `Upsample2D`"
+ )
+
+ return x
+
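+# Illustrative sketch (names are placeholders, not objects defined here): a typical SubBlock built by
+# `to_sub_blocks` pairs one ResnetBlock2D with one Transformer2DModel, e.g. `SubBlock([resnet, attn])`;
+# calling `subblock(h, temb, cemb, attention_mask, cross_attention_kwargs)` runs the resnet with the time
+# embedding and the transformer with the text embedding, in that order.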
+
+def adjust_time_dims(unet: UNet2DConditionModel, in_dim: int, out_dim: int):
+ unet.time_embedding.linear_1 = nn.Linear(in_dim, out_dim)
+
+
+def increase_block_input_in_encoder_resnet(unet: UNet2DConditionModel, block_no, resnet_idx, by):
+ """Increase channels sizes to allow for additional concatted information from base model"""
+ r = unet.down_blocks[block_no].resnets[resnet_idx]
+ old_norm1, old_conv1 = r.norm1, r.conv1
+ # norm
+ norm_args = "num_groups num_channels eps affine".split(" ")
+ for a in norm_args:
+ assert hasattr(old_norm1, a)
+ norm_kwargs = {a: getattr(old_norm1, a) for a in norm_args}
+ norm_kwargs["num_channels"] += by # surgery done here
+ # conv1
+ conv1_args = [
+ "in_channels",
+ "out_channels",
+ "kernel_size",
+ "stride",
+ "padding",
+ "dilation",
+ "groups",
+ "bias",
+ "padding_mode",
+ ]
+ if not USE_PEFT_BACKEND:
+ conv1_args.append("lora_layer")
+
+ for a in conv1_args:
+ assert hasattr(old_conv1, a)
+
+ conv1_kwargs = {a: getattr(old_conv1, a) for a in conv1_args}
+ conv1_kwargs["bias"] = "bias" in conv1_kwargs # as param, bias is a boolean, but as attr, it's a tensor.
+ conv1_kwargs["in_channels"] += by # surgery done here
+ # conv_shortcut
+ # as we changed the input size of the block, the input and output sizes are likely different,
+ # therefore we need a conv_shortcut (simply adding won't work)
+ conv_shortcut_args_kwargs = {
+ "in_channels": conv1_kwargs["in_channels"],
+ "out_channels": conv1_kwargs["out_channels"],
+ # default arguments from resnet.__init__
+ "kernel_size": 1,
+ "stride": 1,
+ "padding": 0,
+ "bias": True,
+ }
+ # swap old with new modules
+ unet.down_blocks[block_no].resnets[resnet_idx].norm1 = GroupNorm(**norm_kwargs)
+ unet.down_blocks[block_no].resnets[resnet_idx].conv1 = (
+ nn.Conv2d(**conv1_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv1_kwargs)
+ )
+ unet.down_blocks[block_no].resnets[resnet_idx].conv_shortcut = (
+ nn.Conv2d(**conv_shortcut_args_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv_shortcut_args_kwargs)
+ )
+ unet.down_blocks[block_no].resnets[resnet_idx].in_channels += by # surgery done here
+
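+# Illustrative sketch (channel counts assumed, not taken from a real config): if the resnet originally had
+# in_channels=320 and out_channels=320 and `by=320` base-model channels are concatenated, norm1.num_channels
+# and conv1.in_channels become 640 while out_channels stays 320, so the 1x1 conv_shortcut (640 -> 320) is
+# what keeps the residual addition shape-compatible.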
+
+def increase_block_input_in_encoder_downsampler(unet: UNet2DConditionModel, block_no, by):
+ """Increase channels sizes to allow for additional concatted information from base model"""
+ old_down = unet.down_blocks[block_no].downsamplers[0].conv
+
+ args = [
+ "in_channels",
+ "out_channels",
+ "kernel_size",
+ "stride",
+ "padding",
+ "dilation",
+ "groups",
+ "bias",
+ "padding_mode",
+ ]
+ if not USE_PEFT_BACKEND:
+ args.append("lora_layer")
+
+ for a in args:
+ assert hasattr(old_down, a)
+ kwargs = {a: getattr(old_down, a) for a in args}
+ kwargs["bias"] = "bias" in kwargs # as param, bias is a boolean, but as attr, it's a tensor.
+ kwargs["in_channels"] += by # surgery done here
+ # swap old with new modules
+ unet.down_blocks[block_no].downsamplers[0].conv = (
+ nn.Conv2d(**kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**kwargs)
+ )
+ unet.down_blocks[block_no].downsamplers[0].channels += by # surgery done here
+
+
+def increase_block_input_in_mid_resnet(unet: UNet2DConditionModel, by):
+ """Increase channels sizes to allow for additional concatted information from base model"""
+ m = unet.mid_block.resnets[0]
+ old_norm1, old_conv1 = m.norm1, m.conv1
+ # norm
+ norm_args = "num_groups num_channels eps affine".split(" ")
+ for a in norm_args:
+ assert hasattr(old_norm1, a)
+ norm_kwargs = {a: getattr(old_norm1, a) for a in norm_args}
+ norm_kwargs["num_channels"] += by # surgery done here
+ conv1_args = [
+ "in_channels",
+ "out_channels",
+ "kernel_size",
+ "stride",
+ "padding",
+ "dilation",
+ "groups",
+ "bias",
+ "padding_mode",
+ ]
+ if not USE_PEFT_BACKEND:
+ conv1_args.append("lora_layer")
+
+ conv1_kwargs = {a: getattr(old_conv1, a) for a in conv1_args}
+ conv1_kwargs["bias"] = "bias" in conv1_kwargs # as param, bias is a boolean, but as attr, it's a tensor.
+ conv1_kwargs["in_channels"] += by # surgery done here
+ # conv_shortcut
+ # as we changed the input size of the block, the input and output sizes are likely different,
+ # therefore we need a conv_shortcut (simply adding won't work)
+ conv_shortcut_args_kwargs = {
+ "in_channels": conv1_kwargs["in_channels"],
+ "out_channels": conv1_kwargs["out_channels"],
+ # default arguments from resnet.__init__
+ "kernel_size": 1,
+ "stride": 1,
+ "padding": 0,
+ "bias": True,
+ }
+ # swap old with new modules
+ unet.mid_block.resnets[0].norm1 = GroupNorm(**norm_kwargs)
+ unet.mid_block.resnets[0].conv1 = (
+ nn.Conv2d(**conv1_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv1_kwargs)
+ )
+ unet.mid_block.resnets[0].conv_shortcut = (
+ nn.Conv2d(**conv_shortcut_args_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv_shortcut_args_kwargs)
+ )
+ unet.mid_block.resnets[0].in_channels += by # surgery done here
+
+
+def adjust_group_norms(unet: UNet2DConditionModel, max_num_group: int = 32):
+ def find_denominator(number, start):
+ if start >= number:
+ return number
+ while start != 0:
+ residual = number % start
+ if residual == 0:
+ return start
+ start -= 1
+
+ for block in [*unet.down_blocks, unet.mid_block]:
+ # resnets
+ for r in block.resnets:
+ if r.norm1.num_groups < max_num_group:
+ r.norm1.num_groups = find_denominator(r.norm1.num_channels, start=max_num_group)
+
+ if r.norm2.num_groups < max_num_group:
+ r.norm2.num_groups = find_denominator(r.norm2.num_channels, start=max_num_group)
+
+ # transformers
+ if hasattr(block, "attentions"):
+ for a in block.attentions:
+ if a.norm.num_groups < max_num_group:
+ a.norm.num_groups = find_denominator(a.norm.num_channels, start=max_num_group)
+
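+# Illustrative sketch: `find_denominator(number, start)` returns the largest divisor of `number` that is
+# <= `start`, e.g. find_denominator(320, 32) == 32 and find_denominator(136, 32) == 17, so every adjusted
+# GroupNorm keeps num_channels % num_groups == 0.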
+
+def is_iterable(o):
+ if isinstance(o, str):
+ return False
+ try:
+ iter(o)
+ return True
+ except TypeError:
+ return False
+
+
+def to_sub_blocks(blocks):
+ if not is_iterable(blocks):
+ blocks = [blocks]
+
+ sub_blocks = []
+
+ for b in blocks:
+ if hasattr(b, "resnets"):
+ if hasattr(b, "attentions") and b.attentions is not None:
+ for r, a in zip(b.resnets, b.attentions):
+ sub_blocks.append([r, a])
+
+ num_resnets = len(b.resnets)
+ num_attns = len(b.attentions)
+
+ if num_resnets > num_attns:
+ # we can have more resnets than attentions, so add each resnet as separate subblock
+ for i in range(num_attns, num_resnets):
+ sub_blocks.append([b.resnets[i]])
+ else:
+ for r in b.resnets:
+ sub_blocks.append([r])
+
+ # upsamplers are part of the same subblock
+ if hasattr(b, "upsamplers") and b.upsamplers is not None:
+ for u in b.upsamplers:
+ sub_blocks[-1].extend([u])
+
+ # downsamplers are own subblock
+ if hasattr(b, "downsamplers") and b.downsamplers is not None:
+ for d in b.downsamplers:
+ sub_blocks.append([d])
+
+ return list(map(SubBlock, sub_blocks))
+
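+# Note: `to_sub_blocks` splits each UNet block into the smallest units that can run between two
+# base<->control exchanges: (resnet, attention) pairs, lone resnets, and downsamplers as their own
+# sub-blocks, with upsamplers appended to the preceding sub-block. This is what lets `forward` above
+# interleave the base and control models at matching resolutions.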
+
+def zero_module(module):
+ for p in module.parameters():
+ nn.init.zeros_(p)
+ return module
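+
+# Minimal check (illustrative, not executed by this module): because `zero_module` zeroes every parameter,
+# each zero conv initially contributes nothing, so at initialization the combined model behaves exactly
+# like the base UNet:
+#   conv = zero_module(nn.Conv2d(4, 4, 1))
+#   assert torch.all(conv(torch.randn(1, 4, 8, 8)) == 0)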
diff --git a/modules/controlnetxs/pipeline_controlnet_xs.py b/modules/controlnetxs/pipeline_controlnet_xs.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee5d83b1994320ce90c5dd49be87abe9b1b9f220
--- /dev/null
+++ b/modules/controlnetxs/pipeline_controlnet_xs.py
@@ -0,0 +1,1022 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from typing import Any, Callable, Dict, List, Optional, Union
+
+import numpy as np
+import PIL.Image
+import torch
+import torch.nn.functional as F
+from controlnetxs import ControlNetXSModel
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+
+from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
+from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from diffusers.models import AutoencoderKL, UNet2DConditionModel
+from diffusers.models.lora import adjust_lora_scale_text_encoder
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+from diffusers.schedulers import KarrasDiffusionSchedulers
+from diffusers.utils import (
+ USE_PEFT_BACKEND,
+ deprecate,
+ logging,
+ scale_lora_layers,
+ unscale_lora_layers,
+)
+from diffusers.utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor
+from modules.prompt_parser import FrozenCLIPEmbedderWithCustomWords
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+
+# Support for finding the region of an object in the prompt
+def encode_sketchs(state,tokenizer,unet, scale_ratio=8, g_strength=1.0, text_ids=None):
+ uncond, cond = text_ids[0], text_ids[1]
+
+ img_state = []
+ if state is None:
+ return torch.FloatTensor(0)
+
+ for k, v in state.items():
+ if v["map"] is None:
+ continue
+
+ v_input = tokenizer(
+ k,
+ max_length=tokenizer.model_max_length,
+ truncation=True,
+ add_special_tokens=False,
+ ).input_ids
+
+ dotmap = v["map"] < 255
+ out = dotmap.astype(float)
+ if v["mask_outsides"]:
+ out[out==0] = -1
+
+ arr = torch.from_numpy(
+ out * float(v["weight"]) * g_strength
+ )
+ img_state.append((v_input, arr))
+
+ if len(img_state) == 0:
+ return torch.FloatTensor(0)
+
+ w_tensors = dict()
+ cond = cond.tolist()
+ uncond = uncond.tolist()
+ for layer in unet.down_blocks:
+ c = int(len(cond))
+ w, h = img_state[0][1].shape
+ w_r, h_r = w // scale_ratio, h // scale_ratio
+
+ ret_cond_tensor = torch.zeros((1, int(w_r * h_r), c), dtype=torch.float32)
+ ret_uncond_tensor = torch.zeros((1, int(w_r * h_r), c), dtype=torch.float32)
+
+ for v_as_tokens, img_where_color in img_state:
+ is_in = 0
+
+ ret = (
+ F.interpolate(
+ img_where_color.unsqueeze(0).unsqueeze(1),
+ scale_factor=1 / scale_ratio,
+ mode="bilinear",
+ align_corners=True,
+ )
+ .squeeze()
+ .reshape(-1, 1)
+ .repeat(1, len(v_as_tokens))
+ )
+
+ for idx, tok in enumerate(cond):
+ if cond[idx : idx + len(v_as_tokens)] == v_as_tokens:
+ is_in = 1
+ ret_cond_tensor[0, :, idx : idx + len(v_as_tokens)] += ret
+
+ for idx, tok in enumerate(uncond):
+ if uncond[idx : idx + len(v_as_tokens)] == v_as_tokens:
+ is_in = 1
+ ret_uncond_tensor[0, :, idx : idx + len(v_as_tokens)] += ret
+
+ if is_in != 1:
+ print(f"tokens {v_as_tokens} not found in text")
+
+ w_tensors[w_r * h_r] = torch.cat([ret_uncond_tensor, ret_cond_tensor])
+ scale_ratio *= 2
+
+ return w_tensors
+
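+# Note: `encode_sketchs` returns a dict keyed by the flattened spatial size (w_r * h_r) at each UNet
+# down-block resolution; each value stacks the unconditional and conditional region-weight maps with shape
+# (1, w_r * h_r, num_text_tokens). The `img_state` entry passed through `encoder_hidden_states` in
+# `__call__` below is presumably consumed by the modified attention processors to bias cross-attention.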
+
+class StableDiffusionControlNetXSPipeline(
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
+):
+ r"""
+ Pipeline for text-to-image generation using Stable Diffusion with ControlNet-XS guidance.
+
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
+
+ The pipeline also inherits the following loading methods:
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
+
+ Args:
+ vae ([`AutoencoderKL`]):
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
+ text_encoder ([`~transformers.CLIPTextModel`]):
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
+ tokenizer ([`~transformers.CLIPTokenizer`]):
+ A `CLIPTokenizer` to tokenize text.
+ unet ([`UNet2DConditionModel`]):
+ A `UNet2DConditionModel` to denoise the encoded image latents.
+ controlnet ([`ControlNetXSModel`]):
+ Provides additional conditioning to the `unet` during the denoising process.
+ scheduler ([`SchedulerMixin`]):
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
+ safety_checker ([`StableDiffusionSafetyChecker`]):
+ Classification module that estimates whether generated images could be considered offensive or harmful.
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
+ about a model's potential harms.
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
+ """
+
+ model_cpu_offload_seq = "text_encoder->unet->vae->controlnet"
+ _optional_components = ["safety_checker", "feature_extractor"]
+ _exclude_from_cpu_offload = ["safety_checker"]
+
+ def __init__(
+ self,
+ vae: AutoencoderKL,
+ text_encoder: CLIPTextModel,
+ tokenizer: CLIPTokenizer,
+ unet: UNet2DConditionModel,
+ controlnet: ControlNetXSModel,
+ scheduler: KarrasDiffusionSchedulers,
+ safety_checker: StableDiffusionSafetyChecker,
+ feature_extractor: CLIPImageProcessor,
+ requires_safety_checker: bool = True,
+ ):
+ super().__init__()
+
+ if safety_checker is None and requires_safety_checker:
+ logger.warning(
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
+ )
+
+ if safety_checker is not None and feature_extractor is None:
+ raise ValueError(
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
+ )
+
+ vae_compatible, cnxs_condition_downsample_factor, vae_downsample_factor = controlnet._check_if_vae_compatible(
+ vae
+ )
+ if not vae_compatible:
+ raise ValueError(
+ f"The downsampling factors of the VAE ({vae_downsample_factor}) and the conditioning part of ControlNetXS model {cnxs_condition_downsample_factor} need to be equal. Consider building the ControlNetXS model with different `conditioning_block_sizes`."
+ )
+
+ self.register_modules(
+ vae=vae,
+ text_encoder=text_encoder,
+ tokenizer=tokenizer,
+ unet=unet,
+ controlnet=controlnet,
+ scheduler=scheduler,
+ safety_checker=safety_checker,
+ feature_extractor=feature_extractor,
+ )
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
+ self.control_image_processor = VaeImageProcessor(
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
+ )
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
+ def enable_vae_slicing(self):
+ r"""
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
+ """
+ self.vae.enable_slicing()
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
+ def disable_vae_slicing(self):
+ r"""
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
+ computing decoding in one step.
+ """
+ self.vae.disable_slicing()
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
+ def enable_vae_tiling(self):
+ r"""
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
+ compute decoding and encoding in several steps. This is useful to save a large amount of memory and to allow
+ processing larger images.
+ """
+ self.vae.enable_tiling()
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
+ def disable_vae_tiling(self):
+ r"""
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
+ computing decoding in one step.
+ """
+ self.vae.disable_tiling()
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
+ def _encode_prompt(
+ self,
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt=None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ lora_scale: Optional[float] = None,
+ **kwargs,
+ ):
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
+
+ prompt_embeds_tuple = self.encode_prompt(
+ prompt=prompt,
+ device=device,
+ num_images_per_prompt=num_images_per_prompt,
+ do_classifier_free_guidance=do_classifier_free_guidance,
+ negative_prompt=negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ lora_scale=lora_scale,
+ **kwargs,
+ )
+
+ # concatenate for backwards comp
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
+
+ return prompt_embeds
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
+ def encode_prompt(
+ self,
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt=None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ lora_scale: Optional[float] = None,
+ clip_skip: Optional[int] = None,
+ ):
+ r"""
+ Encodes the prompt into text encoder hidden states.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+ prompt to be encoded
+ device: (`torch.device`):
+ torch device
+ num_images_per_prompt (`int`):
+ number of images that should be generated per prompt
+ do_classifier_free_guidance (`bool`):
+ whether to use classifier free guidance or not
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+ less than `1`).
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ lora_scale (`float`, *optional*):
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
+ clip_skip (`int`, *optional*):
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+ the output of the pre-final layer will be used for computing the prompt embeddings.
+ """
+ # set lora scale so that monkey patched LoRA
+ # function of text encoder can correctly access it
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
+ self._lora_scale = lora_scale
+
+ # dynamically adjust the LoRA scale
+ if not USE_PEFT_BACKEND:
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
+ else:
+ scale_lora_layers(self.text_encoder, lora_scale)
+
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ if prompt_embeds is None:
+ # textual inversion: process multi-vector tokens if necessary
+ if isinstance(self, TextualInversionLoaderMixin):
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
+
+ text_inputs = self.tokenizer(
+ prompt,
+ padding="max_length",
+ max_length=self.tokenizer.model_max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+ text_input_ids = text_inputs.input_ids
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
+ text_input_ids, untruncated_ids
+ ):
+ removed_text = self.tokenizer.batch_decode(
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
+ )
+ logger.warning(
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
+ )
+
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
+ attention_mask = text_inputs.attention_mask.to(device)
+ else:
+ attention_mask = None
+
+ if clip_skip is None:
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
+ prompt_embeds = prompt_embeds[0]
+ else:
+ prompt_embeds = self.text_encoder(
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
+ )
+ # Access the `hidden_states` first, that contains a tuple of
+ # all the hidden states from the encoder layers. Then index into
+ # the tuple to access the hidden states from the desired layer.
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
+ # We also need to apply the final LayerNorm here to not mess with the
+ # representations. The `last_hidden_states` that we typically use for
+ # obtaining the final prompt representations passes through the LayerNorm
+ # layer.
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
+
+ if self.text_encoder is not None:
+ prompt_embeds_dtype = self.text_encoder.dtype
+ elif self.unet is not None:
+ prompt_embeds_dtype = self.unet.dtype
+ else:
+ prompt_embeds_dtype = prompt_embeds.dtype
+
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
+
+ bs_embed, seq_len, _ = prompt_embeds.shape
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
+
+ # get unconditional embeddings for classifier free guidance
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
+ uncond_tokens: List[str]
+ if negative_prompt is None:
+ uncond_tokens = [""] * batch_size
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
+ raise TypeError(
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
+ f" {type(prompt)}."
+ )
+ elif isinstance(negative_prompt, str):
+ uncond_tokens = [negative_prompt]
+ elif batch_size != len(negative_prompt):
+ raise ValueError(
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+ " the batch size of `prompt`."
+ )
+ else:
+ uncond_tokens = negative_prompt
+
+ # textual inversion: process multi-vector tokens if necessary
+ if isinstance(self, TextualInversionLoaderMixin):
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
+
+ max_length = prompt_embeds.shape[1]
+ uncond_input = self.tokenizer(
+ uncond_tokens,
+ padding="max_length",
+ max_length=max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
+ attention_mask = uncond_input.attention_mask.to(device)
+ else:
+ attention_mask = None
+
+ negative_prompt_embeds = self.text_encoder(
+ uncond_input.input_ids.to(device),
+ attention_mask=attention_mask,
+ )
+ negative_prompt_embeds = negative_prompt_embeds[0]
+
+ if do_classifier_free_guidance:
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+ seq_len = negative_prompt_embeds.shape[1]
+
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
+
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
+ # Retrieve the original scale by scaling back the LoRA layers
+ unscale_lora_layers(self.text_encoder, lora_scale)
+
+ return prompt_embeds, negative_prompt_embeds
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
+ def run_safety_checker(self, image, device, dtype):
+ if self.safety_checker is None:
+ has_nsfw_concept = None
+ else:
+ if torch.is_tensor(image):
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
+ else:
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
+ image, has_nsfw_concept = self.safety_checker(
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
+ )
+ return image, has_nsfw_concept
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
+ def decode_latents(self, latents):
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
+
+ latents = 1 / self.vae.config.scaling_factor * latents
+ image = self.vae.decode(latents, return_dict=False)[0]
+ image = (image / 2 + 0.5).clamp(0, 1)
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+ return image
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
+ def prepare_extra_step_kwargs(self, generator, eta):
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+ # and should be between [0, 1]
+
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+ extra_step_kwargs = {}
+ if accepts_eta:
+ extra_step_kwargs["eta"] = eta
+
+ # check if the scheduler accepts generator
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
+ if accepts_generator:
+ extra_step_kwargs["generator"] = generator
+ return extra_step_kwargs
+
+ def check_inputs(
+ self,
+ prompt,
+ image,
+ callback_steps,
+ negative_prompt=None,
+ prompt_embeds=None,
+ negative_prompt_embeds=None,
+ controlnet_conditioning_scale=1.0,
+ control_guidance_start=0.0,
+ control_guidance_end=1.0,
+ ):
+ if (callback_steps is None) or (
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
+ ):
+ raise ValueError(
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
+ f" {type(callback_steps)}."
+ )
+
+ if prompt is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt is None and prompt_embeds is None:
+ raise ValueError(
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+ )
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+
+ if negative_prompt is not None and negative_prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+ )
+
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
+ raise ValueError(
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
+ f" {negative_prompt_embeds.shape}."
+ )
+
+ # Check `image`
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
+ )
+ if (
+ isinstance(self.controlnet, ControlNetXSModel)
+ or is_compiled
+ and isinstance(self.controlnet._orig_mod, ControlNetXSModel)
+ ):
+ self.check_image(image, prompt, prompt_embeds)
+ else:
+ assert False
+
+ # Check `controlnet_conditioning_scale`
+ if (
+ isinstance(self.controlnet, ControlNetXSModel)
+ or is_compiled
+ and isinstance(self.controlnet._orig_mod, ControlNetXSModel)
+ ):
+ if not isinstance(controlnet_conditioning_scale, float):
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
+ else:
+ assert False
+
+ start, end = control_guidance_start, control_guidance_end
+ if start >= end:
+ raise ValueError(
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
+ )
+ if start < 0.0:
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
+ if end > 1.0:
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
+
+ def check_image(self, image, prompt, prompt_embeds):
+ image_is_pil = isinstance(image, PIL.Image.Image)
+ image_is_tensor = isinstance(image, torch.Tensor)
+ image_is_np = isinstance(image, np.ndarray)
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
+
+ if (
+ not image_is_pil
+ and not image_is_tensor
+ and not image_is_np
+ and not image_is_pil_list
+ and not image_is_tensor_list
+ and not image_is_np_list
+ ):
+ raise TypeError(
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
+ )
+
+ if image_is_pil:
+ image_batch_size = 1
+ else:
+ image_batch_size = len(image)
+
+ if prompt is not None and isinstance(prompt, str):
+ prompt_batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ prompt_batch_size = len(prompt)
+ elif prompt_embeds is not None:
+ prompt_batch_size = prompt_embeds.shape[0]
+
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
+ raise ValueError(
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
+ )
+
+ def prepare_image(
+ self,
+ image,
+ width,
+ height,
+ batch_size,
+ num_images_per_prompt,
+ device,
+ dtype,
+ do_classifier_free_guidance=False,
+ ):
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
+ image_batch_size = image.shape[0]
+
+ if image_batch_size == 1:
+ repeat_by = batch_size
+ else:
+ # image batch size is the same as prompt batch size
+ repeat_by = num_images_per_prompt
+
+ image = image.repeat_interleave(repeat_by, dim=0)
+
+ image = image.to(device=device, dtype=dtype)
+
+ if do_classifier_free_guidance:
+ image = torch.cat([image] * 2)
+
+ return image
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
+ if isinstance(generator, list) and len(generator) != batch_size:
+ raise ValueError(
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ )
+
+ if latents is None:
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+ else:
+ latents = latents.to(device)
+
+ # scale the initial noise by the standard deviation required by the scheduler
+ latents = latents * self.scheduler.init_noise_sigma
+ return latents
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
+
+ The suffixes after the scaling factors represent the stages where they are being applied.
+
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
+
+ Args:
+ s1 (`float`):
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
+ mitigate "oversmoothing effect" in the enhanced denoising process.
+ s2 (`float`):
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
+ mitigate "oversmoothing effect" in the enhanced denoising process.
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
+ """
+ if not hasattr(self, "unet"):
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
+ def disable_freeu(self):
+ """Disables the FreeU mechanism if enabled."""
+ self.unet.disable_freeu()
+
+ def type_output(self,output_type,device,d_type,return_dict,latents,generator):
+ if not output_type == "latent":
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False,generator=generator)[0]
+ image, has_nsfw_concept = self.run_safety_checker(image, device, d_type)
+ else:
+ image = latents
+ has_nsfw_concept = None
+
+ if has_nsfw_concept is None:
+ do_denormalize = [True] * image.shape[0]
+ else:
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
+
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image, has_nsfw_concept)
+
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
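+
+ # Note: `type_output` bundles VAE decoding, the optional safety check and image post-processing so that
+ # `__call__` below can emit either finished images or raw latents at any point during denoising.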
+
+ @torch.no_grad()
+ def __call__(
+ self,
+ prompt: Union[str, List[str]] = None,
+ image: PipelineImageInput = None,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ num_inference_steps: int = 50,
+ guidance_scale: float = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_images_per_prompt: Optional[int] = 1,
+ eta: float = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ callback_steps: int = 1,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
+ control_guidance_start: float = 0.0,
+ control_guidance_end: float = 1.0,
+ clip_skip: Optional[int] = 0,
+ pww_state=None,
+ pww_attn_weight=1.0,
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
+ latent_processing = 0,
+ ):
+ r"""
+ The call function to the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
+ accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
+ input to a single ControlNet.
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+ The height in pixels of the generated image.
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+ The width in pixels of the generated image.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ guidance_scale (`float`, *optional*, defaults to 7.5):
+ A higher guidance scale value encourages the model to generate images closely linked to the text
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ eta (`float`, *optional*, defaults to 0.0):
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+ generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor is generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+ provided, text embeddings are generated from the `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+ plain tuple.
+ callback (`Callable`, *optional*):
+ A function that calls every `callback_steps` steps during inference. The function is called with the
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
+ every step.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
+ the corresponding scale as a list.
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
+ The percentage of total steps at which the ControlNet starts applying.
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
+ The percentage of total steps at which the ControlNet stops applying.
+ clip_skip (`int`, *optional*):
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+ the output of the pre-final layer will be used for computing the prompt embeddings.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
+ "not-safe-for-work" (nsfw) content.
+ """
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
+
+ if height is None:
+ height = image.height
+ if width is None:
+ width = image.width
+
+ self.prompt_parser = FrozenCLIPEmbedderWithCustomWords(self.tokenizer, self.text_encoder,clip_skip+1)
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ image,
+ callback_steps,
+ negative_prompt,
+ prompt_embeds,
+ negative_prompt_embeds,
+ controlnet_conditioning_scale,
+ control_guidance_start,
+ control_guidance_end,
+ )
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ do_classifier_free_guidance = guidance_scale > 1.0
+
+ text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt])
+ text_embeddings = text_embeddings.to(self.unet.dtype)
+
+ # 3. Encode input prompt
+ text_encoder_lora_scale = (
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
+ )
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ lora_scale=text_encoder_lora_scale,
+ clip_skip=clip_skip,
+ )
+ # For classifier free guidance, we need to do two forward passes.
+ # Here we concatenate the unconditional and text embeddings into a single batch
+ # to avoid doing two forward passes
+ if do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+ # 4. Prepare image
+ if isinstance(controlnet, ControlNetXSModel):
+ image = self.prepare_image(
+ image=image,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=controlnet.dtype,
+ do_classifier_free_guidance=do_classifier_free_guidance,
+ )
+ height, width = image.shape[-2:]
+ else:
+ assert False
+
+ # 5. Prepare timesteps
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps = self.scheduler.timesteps
+
+ # 6. Prepare latent variables
+ img_state = encode_sketchs(
+ pww_state,
+ tokenizer = self.tokenizer,
+ unet = self.unet,
+ g_strength=pww_attn_weight,
+ text_ids=text_ids,
+ )
+
+ num_channels_latents = self.unet.config.in_channels
+ latents = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ )
+
+ if latent_processing == 1:
+ lst_latent = [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+ # 8. Denoising loop
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+ is_unet_compiled = is_compiled_module(self.unet)
+ is_controlnet_compiled = is_compiled_module(self.controlnet)
+ is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
+
+ if pww_state is not None:
+ prompt_embeds = text_embeddings.clone().detach()
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ step_x = 0
+ for i, t in enumerate(timesteps):
+ # Relevant thread:
+ # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
+ if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
+ torch._inductor.cudagraph_mark_step_begin()
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ # predict the noise residual
+ dont_control = (
+ i / len(timesteps) < control_guidance_start or (i + 1) / len(timesteps) > control_guidance_end
+ )
+ encoder_state = {
+ "img_state": img_state,
+ "states": prompt_embeds,
+ "sigma": self.scheduler.sigmas[step_x],
+ "weight_func": weight_func,
+ }
+ step_x=step_x+1
+ if dont_control:
+ noise_pred = self.unet(
+ sample=latent_model_input,
+ timestep=t,
+ encoder_hidden_states=encoder_state,
+ cross_attention_kwargs=cross_attention_kwargs,
+ return_dict=True,
+ ).sample
+ else:
+ noise_pred = self.controlnet(
+ base_model=self.unet,
+ sample=latent_model_input,
+ timestep=t,
+ encoder_hidden_states=encoder_state,
+ controlnet_cond=image,
+ conditioning_scale=controlnet_conditioning_scale,
+ cross_attention_kwargs=cross_attention_kwargs,
+ return_dict=True,
+ ).sample
+
+ # perform guidance
+ if do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ step_idx = i // getattr(self.scheduler, "order", 1)
+ callback(step_idx, t, latents)
+
+ # If we do sequential model offloading, let's offload unet and controlnet
+ # manually for max memory savings
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.unet.to("cpu")
+ self.controlnet.to("cpu")
+ torch.cuda.empty_cache()
+ if latent_processing == 1:
+ if output_type == 'latent':
+ lst_latent.append(self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0])
+ return lst_latent
+ if output_type == 'latent':
+ return [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0],self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+ return [self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+
diff --git a/modules/encode_region_map_function.py b/modules/encode_region_map_function.py
new file mode 100644
index 0000000000000000000000000000000000000000..aeda59e17bb649891054664e79b5c0d9262fa290
--- /dev/null
+++ b/modules/encode_region_map_function.py
@@ -0,0 +1,168 @@
+from typing import Any, Callable, Dict, List, Optional, Union
+import importlib
+import inspect
+import math
+from pathlib import Path
+import re
+from collections import defaultdict
+import cv2
+import time
+import numpy as np
+import PIL
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch import einsum
+from torch.autograd.function import Function
+from diffusers import DiffusionPipeline
+
+
+# Support for finding the region of each prompted object
+def encode_region_map_sp(state,tokenizer,unet,width,height, scale_ratio=8, text_ids=None,do_classifier_free_guidance = True):
+ if text_ids is None:
+ return torch.Tensor(0)
+ uncond, cond = text_ids[0], text_ids[1]
+
+ '''img_state = []
+
+
+ for k, v in state.items():
+ if v["map"] is None:
+ continue
+
+ v_input = tokenizer(
+ k,
+ max_length=tokenizer.model_max_length,
+ truncation=True,
+ add_special_tokens=False,
+ ).input_ids
+
+ dotmap = v["map"] < 255
+ out = dotmap.astype(float)
+ out = out * float(v["weight"]) * g_strength
+ #if v["mask_outsides"]:
+ out[out==0] = -1 * float(v["mask_outsides"])
+
+ arr = torch.from_numpy(
+ out
+ )
+ img_state.append((v_input, arr))
+
+ if len(img_state) == 0:
+ return torch.Tensor(0)'''
+
+ w_tensors = dict()
+ cond = cond.reshape(-1,).tolist() if isinstance(cond,np.ndarray) or isinstance(cond, torch.Tensor) else None
+ uncond = uncond.reshape(-1,).tolist() if isinstance(uncond,np.ndarray) or isinstance(uncond, torch.Tensor) else None
+ for layer in unet.down_blocks:
+ c = int(len(cond))
+ #w, h = img_state[0][1].shape
+ w_r, h_r = int(math.ceil(width / scale_ratio)), int(math.ceil(height / scale_ratio))
+
+ ret_cond_tensor = torch.zeros((1, int(w_r * h_r), c), dtype=torch.float32)
+ ret_uncond_tensor = torch.zeros((1, int(w_r * h_r), c), dtype=torch.float32)
+
+ #for v_as_tokens, img_where_color in img_state:
+ if state is not None:
+ for k, v in state.items():
+ if v["map"] is None:
+ continue
+ is_in = 0
+
+ k_as_tokens = tokenizer(
+ k,
+ max_length=tokenizer.model_max_length,
+ truncation=True,
+ add_special_tokens=False,
+ ).input_ids
+
+ region_map_resize = np.array(v["map"] < 255 ,dtype = np.uint8)
+ region_map_resize = cv2.resize(region_map_resize,(w_r,h_r),interpolation = cv2.INTER_CUBIC)
+ region_map_resize = (region_map_resize == np.max(region_map_resize)).astype(float)
+ region_map_resize = region_map_resize * float(v["weight"])
+ region_map_resize[region_map_resize==0] = -1 * float(v["mask_outsides"])
+ ret = torch.from_numpy(
+ region_map_resize
+ )
+ ret = ret.reshape(-1, 1).repeat(1, len(k_as_tokens))
+
+ '''ret = (
+ F.interpolate(
+ img_where_color.unsqueeze(0).unsqueeze(1),
+ scale_factor=1 / scale_ratio,
+ mode="bilinear",
+ align_corners=True,
+ )
+ .squeeze()
+ .reshape(-1, 1)
+ .repeat(1, len(v_as_tokens))
+ )'''
+
+ if cond is not None:
+ for idx, tok in enumerate(cond):
+ if cond[idx : idx + len(k_as_tokens)] == k_as_tokens:
+ is_in = 1
+ ret_cond_tensor[0, :, idx : idx + len(k_as_tokens)] += ret
+
+ if uncond is not None:
+ for idx, tok in enumerate(uncond):
+ if uncond[idx : idx + len(k_as_tokens)] == k_as_tokens:
+ is_in = 1
+ ret_uncond_tensor[0, :, idx : idx + len(k_as_tokens)] += ret
+
+ if is_in != 1:
+ print(f"tokens {k_as_tokens} not found in text")
+
+ w_tensors[w_r * h_r] = torch.cat([ret_uncond_tensor, ret_cond_tensor]) if do_classifier_free_guidance else ret_cond_tensor
+ scale_ratio *= 2
+
+ return w_tensors
+
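+# encode_region_map aggregates the per-resolution maps built above: for each UNet down-block resolution
+# it returns a tensor of shape (batch, h*w, seq_len) (doubled along dim 0 under classifier-free guidance)
+# whose entries weight the cross-attention between spatial positions and the tokens of each region's
+# sub-prompt (positive inside the region, negative outside when mask_outsides is set).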
+def encode_region_map(
+ pipe : DiffusionPipeline,
+ state,
+ width,
+ height,
+ num_images_per_prompt,
+ text_ids = None,
+):
+ negative_prompt_tokens_id, prompt_tokens_id = text_ids[0] , text_ids[1]
+ if prompt_tokens_id is None:
+ return torch.Tensor(0)
+ prompt_tokens_id = np.array(prompt_tokens_id)
+ negative_prompt_tokens_id = np.array(negative_prompt_tokens_id) if negative_prompt_tokens_id is not None else None
+
+ # Split into one set of token ids per prompt
+ number_prompt = prompt_tokens_id.shape[0]
+ prompt_tokens_id = np.split(prompt_tokens_id,number_prompt)
+ negative_prompt_tokens_id = np.split(negative_prompt_tokens_id,number_prompt) if negative_prompt_tokens_id is not None else None
+ lst_prompt_map = []
+ if not isinstance(state,list):
+ state = [state]
+ if len(state) < number_prompt:
+ state = state + [None] * int(number_prompt - len(state))
+ for i in range(0,number_prompt):
+ text_ids = [negative_prompt_tokens_id[i],prompt_tokens_id[i]] if negative_prompt_tokens_id is not None else [None,prompt_tokens_id[i]]
+ region_map = encode_region_map_sp(state[i],pipe.tokenizer,pipe.unet,width,height,scale_ratio = pipe.vae_scale_factor,text_ids = text_ids,do_classifier_free_guidance = pipe.do_classifier_free_guidance)
+ lst_prompt_map.append(region_map)
+
+ region_state_sp = {}
+ for d in lst_prompt_map:
+ for key, tensor in d.items():
+ if key in region_state_sp:
+ # If the key already exists, concatenate along the batch dimension
+ region_state_sp[key] = torch.cat((region_state_sp[key], tensor))
+ else:
+ # Otherwise, add the new key
+ region_state_sp[key] = tensor
+
+ # Expand for num_images_per_prompt
+ region_state = {}
+
+ for key, tensor in region_state_sp.items():
+ # Repeat along axis 0
+ region_state[key] = tensor.repeat(num_images_per_prompt,1,1)
+
+ return region_state
+
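+# Usage sketch (hypothetical variable names): given a region dict such as
+# {"cat": {"map": mask_array, "weight": 1.2, "mask_outsides": 0.0}}, calling
+# encode_region_map(pipe, state, width, height, num_images_per_prompt, text_ids=text_ids)
+# yields a dict keyed by h*w per resolution, which the pipelines appear to feed to the UNet wrapper
+# together with `weight_func` when building the encoder state.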
+
diff --git a/modules/encoder_prompt_modify.py b/modules/encoder_prompt_modify.py
new file mode 100644
index 0000000000000000000000000000000000000000..51d0f2cba1420f344fe976382fc056c586103e96
--- /dev/null
+++ b/modules/encoder_prompt_modify.py
@@ -0,0 +1,831 @@
+import re
+import math
+import numpy as np
+import torch
+from diffusers import DiffusionPipeline
+from typing import Any, Callable, Dict, List, Optional, Union
+from diffusers.models.lora import adjust_lora_scale_text_encoder
+from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from diffusers.utils import (
+ USE_PEFT_BACKEND,
+ deprecate,
+ logging,
+ replace_example_docstring,
+ scale_lora_layers,
+ unscale_lora_layers,
+)
+from .prompt_parser import FrozenCLIPEmbedderWithCustomWords
+
+logger = logging.get_logger(__name__)
+
+re_attention = re.compile(
+ r"""
+\\\(|
+\\\)|
+\\\[|
+\\]|
+\\\\|
+\\|
+\(|
+\[|
+:([+-]?[.\d]+)\)|
+\)|
+]|
+[^\\()\[\]:]+|
+:
+""",
+ re.X,
+)
+
+
+def parse_prompt_attention(text):
+ """
+ Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
+ Accepted tokens are:
+ (abc) - increases attention to abc by a multiplier of 1.1
+ (abc:3.12) - increases attention to abc by a multiplier of 3.12
+ [abc] - decreases attention to abc by a multiplier of 1.1
+ \\( - literal character '('
+ \\[ - literal character '['
+ \\) - literal character ')'
+ \\] - literal character ']'
+ \\ - literal character '\'
+ anything else - just text
+ >>> parse_prompt_attention('normal text')
+ [['normal text', 1.0]]
+ >>> parse_prompt_attention('an (important) word')
+ [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
+ >>> parse_prompt_attention('(unbalanced')
+ [['unbalanced', 1.1]]
+ >>> parse_prompt_attention('\\(literal\\]')
+ [['(literal]', 1.0]]
+ >>> parse_prompt_attention('(unnecessary)(parens)')
+ [['unnecessaryparens', 1.1]]
+ >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
+ [['a ', 1.0],
+ ['house', 1.5730000000000004],
+ [' ', 1.1],
+ ['on', 1.0],
+ [' a ', 1.1],
+ ['hill', 0.55],
+ [', sun, ', 1.1],
+ ['sky', 1.4641000000000006],
+ ['.', 1.1]]
+ """
+
+ res = []
+ round_brackets = []
+ square_brackets = []
+
+ round_bracket_multiplier = 1.1
+ square_bracket_multiplier = 1 / 1.1
+
+ def multiply_range(start_position, multiplier):
+ for p in range(start_position, len(res)):
+ res[p][1] *= multiplier
+
+ for m in re_attention.finditer(text):
+ text = m.group(0)
+ weight = m.group(1)
+
+ if text.startswith("\\"):
+ res.append([text[1:], 1.0])
+ elif text == "(":
+ round_brackets.append(len(res))
+ elif text == "[":
+ square_brackets.append(len(res))
+ elif weight is not None and len(round_brackets) > 0:
+ multiply_range(round_brackets.pop(), float(weight))
+ elif text == ")" and len(round_brackets) > 0:
+ multiply_range(round_brackets.pop(), round_bracket_multiplier)
+ elif text == "]" and len(square_brackets) > 0:
+ multiply_range(square_brackets.pop(), square_bracket_multiplier)
+ else:
+ res.append([text, 1.0])
+
+ for pos in round_brackets:
+ multiply_range(pos, round_bracket_multiplier)
+
+ for pos in square_brackets:
+ multiply_range(pos, square_bracket_multiplier)
+
+ if len(res) == 0:
+ res = [["", 1.0]]
+
+ # merge runs of identical weights
+ i = 0
+ while i + 1 < len(res):
+ if res[i][1] == res[i + 1][1]:
+ res[i][0] += res[i + 1][0]
+ res.pop(i + 1)
+ else:
+ i += 1
+
+ return res
+
+
+def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
+ r"""
+ Tokenize a list of prompts and return its tokens with weights of each token.
+
+ No padding, starting or ending token is included.
+ """
+ tokens = []
+ weights = []
+ truncated = False
+ for text in prompt:
+ texts_and_weights = parse_prompt_attention(text)
+ text_token = []
+ text_weight = []
+ for word, weight in texts_and_weights:
+ # tokenize and discard the starting and the ending token
+ token = pipe.tokenizer(word).input_ids[1:-1]
+ text_token += token
+ # copy the weight by length of token
+ text_weight += [weight] * len(token)
+ # stop if the text is too long (longer than truncation limit)
+ if len(text_token) > max_length:
+ truncated = True
+ break
+ # truncate
+ if len(text_token) > max_length:
+ truncated = True
+ text_token = text_token[:max_length]
+ text_weight = text_weight[:max_length]
+ tokens.append(text_token)
+ weights.append(text_weight)
+ if truncated:
+ logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
+ return tokens, weights
+
+
+def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
+ r"""
+ Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
+ """
+ max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
+ weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
+ for i in range(len(tokens)):
+ tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
+ if no_boseos_middle:
+ weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
+ else:
+ w = []
+ if len(weights[i]) == 0:
+ w = [1.0] * weights_length
+ else:
+ for j in range(max_embeddings_multiples):
+ w.append(1.0) # weight for starting token in this chunk
+ w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
+ w.append(1.0) # weight for ending token in this chunk
+ w += [1.0] * (weights_length - len(w))
+ weights[i] = w[:]
+
+ return tokens, weights
+
+def clip_skip_prompt(
+ pipe,
+ text_input,
+ clip_skip = None,
+):
+ if hasattr(pipe.text_encoder.config, "use_attention_mask") and pipe.text_encoder.config.use_attention_mask:
+ # `text_input` is an already padded tensor of token ids, so build a full attention mask on its device.
+ attention_mask = torch.ones_like(text_input)
+ else:
+ attention_mask = None
+ if clip_skip is not None and clip_skip > 1:
+ text_embedding = pipe.text_encoder(text_input, attention_mask=attention_mask, output_hidden_states=True)
+ # Access the `hidden_states` first, that contains a tuple of
+ # all the hidden states from the encoder layers. Then index into
+ # the tuple to access the hidden states from the desired layer.
+ text_embedding = text_embedding[-1][-clip_skip]
+ # We also need to apply the final LayerNorm here to not mess with the
+ # representations. The `last_hidden_states` that we typically use for
+ # obtaining the final prompt representations passes through the LayerNorm
+ # layer.
+ text_embedding = pipe.text_encoder.text_model.final_layer_norm(text_embedding)
+ else:
+ text_embedding = pipe.text_encoder(text_input, attention_mask=attention_mask)
+ text_embedding = text_embedding[0]
+
+ return text_embedding
+
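+# Note: clip_skip follows the A1111 convention here: clip_skip=1 (or None) uses the final text-encoder
+# layer, while clip_skip=2 takes the penultimate hidden state and re-applies final_layer_norm.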
+def get_unweighted_text_embeddings(
+ pipe: DiffusionPipeline,
+ text_input: torch.Tensor,
+ chunk_length: int,
+ no_boseos_middle: Optional[bool] = True,
+ clip_skip : Optional[int] = None,
+):
+ """
+ When the length of tokens is a multiple of the capacity of the text encoder,
+ it should be split into chunks and sent to the text encoder individually.
+ """
+ max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
+ if max_embeddings_multiples > 1:
+ text_embeddings = []
+ for i in range(max_embeddings_multiples):
+ # extract the i-th chunk
+ text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()
+
+ # cover the head and the tail by the starting and the ending tokens
+ text_input_chunk[:, 0] = text_input[0, 0]
+ text_input_chunk[:, -1] = text_input[0, -1]
+
+ text_embedding = clip_skip_prompt(pipe,text_input_chunk,clip_skip)
+
+ if no_boseos_middle:
+ if i == 0:
+ # discard the ending token
+ text_embedding = text_embedding[:, :-1]
+ elif i == max_embeddings_multiples - 1:
+ # discard the starting token
+ text_embedding = text_embedding[:, 1:]
+ else:
+ # discard both starting and ending tokens
+ text_embedding = text_embedding[:, 1:-1]
+
+ text_embeddings.append(text_embedding)
+ text_embeddings = torch.concat(text_embeddings, axis=1)
+ else:
+ text_embeddings = clip_skip_prompt(pipe,text_input,clip_skip)
+ return text_embeddings
+
+
+def get_weighted_text_embeddings(
+ pipe: DiffusionPipeline,
+ prompt: Union[str, List[str]],
+ uncond_prompt: Optional[Union[str, List[str]]] = None,
+ max_embeddings_multiples: Optional[int] = 3,
+ no_boseos_middle: Optional[bool] = False,
+ skip_parsing: Optional[bool] = False,
+ skip_weighting: Optional[bool] = False,
+ clip_skip : Optional[int] = None,
+):
+ r"""
+ Prompts can be assigned with local weights using brackets. For example,
+ prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
+ and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
+
+ Also, to regularize the embedding, the weighted embedding is rescaled to preserve the original mean.
+
+ Args:
+ pipe (`DiffusionPipeline`):
+ Pipe to provide access to the tokenizer and the text encoder.
+ prompt (`str` or `List[str]`):
+ The prompt or prompts to guide the image generation.
+ uncond_prompt (`str` or `List[str]`):
+ The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
+ is provided, the embeddings of prompt and uncond_prompt are concatenated.
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
+ no_boseos_middle (`bool`, *optional*, defaults to `False`):
+ If the token length is a multiple of the text encoder capacity, whether to keep the starting and
+ ending tokens of each chunk in the middle.
+ skip_parsing (`bool`, *optional*, defaults to `False`):
+ Skip the parsing of brackets.
+ skip_weighting (`bool`, *optional*, defaults to `False`):
+ Skip the weighting. When the parsing is skipped, it is forced True.
+ """
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
+ prompt_tokens_id = None
+ negative_prompt_tokens_id = None
+ if isinstance(prompt, str):
+ prompt = [prompt]
+
+ if not skip_parsing:
+ prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
+ if uncond_prompt is not None:
+ if isinstance(uncond_prompt, str):
+ uncond_prompt = [uncond_prompt]
+ uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
+ else:
+ prompt_tokens = [
+ token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
+ ]
+ prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
+ if uncond_prompt is not None:
+ if isinstance(uncond_prompt, str):
+ uncond_prompt = [uncond_prompt]
+ uncond_tokens = [
+ token[1:-1]
+ for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
+ ]
+ uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
+
+ # round up the longest length of tokens to a multiple of (model_max_length - 2)
+ max_length = max([len(token) for token in prompt_tokens])
+ if uncond_prompt is not None:
+ max_length = max(max_length, max([len(token) for token in uncond_tokens]))
+
+ max_embeddings_multiples = min(
+ max_embeddings_multiples,
+ (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
+ )
+ max_embeddings_multiples = max(1, max_embeddings_multiples)
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
+
+ # pad the length of tokens and weights
+ bos = pipe.tokenizer.bos_token_id
+ eos = pipe.tokenizer.eos_token_id
+ pad = getattr(pipe.tokenizer, "pad_token_id", eos)
+ prompt_tokens, prompt_weights = pad_tokens_and_weights(
+ prompt_tokens,
+ prompt_weights,
+ max_length,
+ bos,
+ eos,
+ pad,
+ no_boseos_middle=no_boseos_middle,
+ chunk_length=pipe.tokenizer.model_max_length,
+ )
+
+ prompt_tokens_id = np.array(prompt_tokens, dtype=np.int64)
+ prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
+ if uncond_prompt is not None:
+ uncond_tokens, uncond_weights = pad_tokens_and_weights(
+ uncond_tokens,
+ uncond_weights,
+ max_length,
+ bos,
+ eos,
+ pad,
+ no_boseos_middle=no_boseos_middle,
+ chunk_length=pipe.tokenizer.model_max_length,
+ )
+ negative_prompt_tokens_id = np.array(uncond_tokens, dtype=np.int64)
+ uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)
+
+ # get the embeddings
+ text_embeddings = get_unweighted_text_embeddings(
+ pipe,
+ prompt_tokens,
+ pipe.tokenizer.model_max_length,
+ no_boseos_middle=no_boseos_middle,
+ clip_skip = clip_skip,
+ )
+ prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device)
+ if uncond_prompt is not None:
+ uncond_embeddings = get_unweighted_text_embeddings(
+ pipe,
+ uncond_tokens,
+ pipe.tokenizer.model_max_length,
+ no_boseos_middle=no_boseos_middle,
+ clip_skip = clip_skip,
+ )
+ uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device)
+
+ # assign weights to the prompts and normalize in the sense of mean
+ # TODO: should we normalize by chunk or in a whole (current implementation)?
+ if (not skip_parsing) and (not skip_weighting):
+ previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
+ text_embeddings *= prompt_weights.unsqueeze(-1)
+ current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
+ text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
+ if uncond_prompt is not None:
+ previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
+ uncond_embeddings *= uncond_weights.unsqueeze(-1)
+ current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
+ uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
+
+ if uncond_prompt is not None:
+ return text_embeddings, uncond_embeddings, negative_prompt_tokens_id, prompt_tokens_id
+ return text_embeddings, None, None, prompt_tokens_id
+
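+# Usage sketch (assuming `pipe` is one of the Stable Diffusion pipelines in this repo):
+# embeds, uncond_embeds, neg_ids, pos_ids = get_weighted_text_embeddings(
+# pipe, "a (very beautiful:1.3) landscape", uncond_prompt="blurry", clip_skip=2)
+# Bracket weights scale the matching token embeddings, and the result is rescaled so its mean
+# matches that of the unweighted embedding.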
+
+def encoder_long_prompt(
+ pipe,
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt=None,
+ prompt_embeds: Optional[torch.Tensor] = None,
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
+ lora_scale: Optional[float] = None,
+ clip_skip : Optional[int] = None,
+ max_embeddings_multiples: Optional[int] = 3,
+):
+ r"""
+ Encodes the prompt into text encoder hidden states.
+
+ Args:
+ prompt (`str` or `list(int)`):
+ prompt to be encoded
+ device: (`torch.device`):
+ torch device
+ num_images_per_prompt (`int`):
+ number of images that should be generated per prompt
+ do_classifier_free_guidance (`bool`):
+ whether to use classifier free guidance or not
+ negative_prompt (`str` or `List[str]`):
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+ if `guidance_scale` is less than `1`).
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
+ """
+
+ # set lora scale so that monkey patched LoRA
+ # function of text encoder can correctly access it
+ if lora_scale is not None and isinstance(pipe, LoraLoaderMixin):
+ pipe._lora_scale = lora_scale
+ # dynamically adjust the LoRA scale
+ if not USE_PEFT_BACKEND:
+ adjust_lora_scale_text_encoder(pipe.text_encoder, lora_scale)
+ else:
+ scale_lora_layers(pipe.text_encoder, lora_scale)
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ negative_prompt_tokens_id, prompt_tokens_id = None, None
+ if negative_prompt_embeds is None:
+ if negative_prompt is None:
+ negative_prompt = [""] * batch_size
+ elif isinstance(negative_prompt, str):
+ negative_prompt = [negative_prompt] * batch_size
+ if batch_size != len(negative_prompt):
+ raise ValueError(
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+ " the batch size of `prompt`."
+ )
+ if prompt_embeds is None or negative_prompt_embeds is None:
+ if isinstance(pipe, TextualInversionLoaderMixin):
+ prompt = pipe.maybe_convert_prompt(prompt, pipe.tokenizer)
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
+ negative_prompt = pipe.maybe_convert_prompt(negative_prompt, pipe.tokenizer)
+
+ prompt_embeds1, negative_prompt_embeds1, negative_prompt_tokens_id, prompt_tokens_id = get_weighted_text_embeddings(
+ pipe=pipe,
+ prompt=prompt,
+ uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
+ max_embeddings_multiples=int(max_embeddings_multiples),
+ clip_skip = clip_skip,
+ )
+ if prompt_embeds is None:
+ prompt_embeds = prompt_embeds1
+ if negative_prompt_embeds is None:
+ negative_prompt_embeds = negative_prompt_embeds1
+
+ bs_embed, seq_len, _ = prompt_embeds.shape
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
+
+ if do_classifier_free_guidance:
+ bs_embed, seq_len, _ = negative_prompt_embeds.shape
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
+ negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
+
+ if isinstance(pipe, LoraLoaderMixin) and USE_PEFT_BACKEND:
+ # Retrieve the original scale by scaling back the LoRA layers
+ unscale_lora_layers(pipe.text_encoder, lora_scale)
+
+ return prompt_embeds, negative_prompt_embeds, [negative_prompt_tokens_id, prompt_tokens_id]
+
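+# The final return value keeps the (negative) prompt token ids as numpy arrays so that
+# encode_region_map can later match each region's sub-prompt tokens against the full prompt.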
+
+
+
+def encode_short_prompt(
+ pipe,
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt=None,
+ prompt_embeds: Optional[torch.Tensor] = None,
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
+ lora_scale: Optional[float] = None,
+ clip_skip: Optional[int] = None,
+):
+ r"""
+ Encodes the prompt into text encoder hidden states.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+ prompt to be encoded
+ device: (`torch.device`):
+ torch device
+ num_images_per_prompt (`int`):
+ number of images that should be generated per prompt
+ do_classifier_free_guidance (`bool`):
+ whether to use classifier free guidance or not
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+ less than `1`).
+ prompt_embeds (`torch.Tensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ lora_scale (`float`, *optional*):
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
+ clip_skip (`int`, *optional*):
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+ the output of the pre-final layer will be used for computing the prompt embeddings.
+ """
+ # set lora scale so that monkey patched LoRA
+ # function of text encoder can correctly access it
+ if lora_scale is not None and isinstance(pipe, LoraLoaderMixin):
+ pipe._lora_scale = lora_scale
+
+ # dynamically adjust the LoRA scale
+ if not USE_PEFT_BACKEND:
+ adjust_lora_scale_text_encoder(pipe.text_encoder, lora_scale)
+ else:
+ scale_lora_layers(pipe.text_encoder, lora_scale)
+
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ prompt_tokens_id = None
+ negative_prompt_tokens_id = None
+
+ if prompt_embeds is None:
+ # textual inversion: process multi-vector tokens if necessary
+ if isinstance(pipe, TextualInversionLoaderMixin):
+ prompt = pipe.maybe_convert_prompt(prompt, pipe.tokenizer)
+
+ text_inputs = pipe.tokenizer(
+ prompt,
+ padding="max_length",
+ max_length=pipe.tokenizer.model_max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+ text_input_ids = text_inputs.input_ids
+ prompt_tokens_id = text_inputs.input_ids.detach().cpu().numpy()
+ untruncated_ids = pipe.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
+ text_input_ids, untruncated_ids
+ ):
+ removed_text = pipe.tokenizer.batch_decode(
+ untruncated_ids[:, pipe.tokenizer.model_max_length - 1 : -1]
+ )
+ logger.warning(
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
+ f" {pipe.tokenizer.model_max_length} tokens: {removed_text}"
+ )
+
+ if hasattr(pipe.text_encoder.config, "use_attention_mask") and pipe.text_encoder.config.use_attention_mask:
+ attention_mask = text_inputs.attention_mask.to(device)
+ else:
+ attention_mask = None
+
+ if clip_skip is not None and clip_skip > 1:
+ prompt_embeds = pipe.text_encoder(
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
+ )
+ # Access the `hidden_states` first, that contains a tuple of
+ # all the hidden states from the encoder layers. Then index into
+ # the tuple to access the hidden states from the desired layer.
+ prompt_embeds = prompt_embeds[-1][-clip_skip]
+ # We also need to apply the final LayerNorm here to not mess with the
+ # representations. The `last_hidden_states` that we typically use for
+ # obtaining the final prompt representations passes through the LayerNorm
+ # layer.
+ prompt_embeds = pipe.text_encoder.text_model.final_layer_norm(prompt_embeds)
+ else:
+ prompt_embeds = pipe.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
+ prompt_embeds = prompt_embeds[0]
+
+ if pipe.text_encoder is not None:
+ prompt_embeds_dtype = pipe.text_encoder.dtype
+ elif pipe.unet is not None:
+ prompt_embeds_dtype = pipe.unet.dtype
+ else:
+ prompt_embeds_dtype = prompt_embeds.dtype
+
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
+
+ bs_embed, seq_len, _ = prompt_embeds.shape
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
+
+ # get unconditional embeddings for classifier free guidance
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
+ uncond_tokens: List[str]
+ if negative_prompt is None:
+ uncond_tokens = [""] * batch_size
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
+ raise TypeError(
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
+ f" {type(prompt)}."
+ )
+ elif isinstance(negative_prompt, str):
+ uncond_tokens = [negative_prompt]
+ elif batch_size != len(negative_prompt):
+ raise ValueError(
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+ " the batch size of `prompt`."
+ )
+ else:
+ uncond_tokens = negative_prompt
+
+ # textual inversion: process multi-vector tokens if necessary
+ if isinstance(pipe, TextualInversionLoaderMixin):
+ uncond_tokens = pipe.maybe_convert_prompt(uncond_tokens, pipe.tokenizer)
+
+ max_length = prompt_embeds.shape[1]
+ uncond_input = pipe.tokenizer(
+ uncond_tokens,
+ padding="max_length",
+ max_length=max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+ negative_prompt_tokens_id = uncond_input.input_ids.detach().cpu().numpy()
+
+ if hasattr(pipe.text_encoder.config, "use_attention_mask") and pipe.text_encoder.config.use_attention_mask:
+ attention_mask = uncond_input.attention_mask.to(device)
+ else:
+ attention_mask = None
+
+ if clip_skip is not None and clip_skip > 1:
+ negative_prompt_embeds = pipe.text_encoder(
+ uncond_input.input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
+ )
+ # Access the `hidden_states` first, that contains a tuple of
+ # all the hidden states from the encoder layers. Then index into
+ # the tuple to access the hidden states from the desired layer.
+ negative_prompt_embeds = negative_prompt_embeds[-1][-clip_skip]
+ # We also need to apply the final LayerNorm here to not mess with the
+ # representations. The `last_hidden_states` that we typically use for
+ # obtaining the final prompt representations passes through the LayerNorm
+ # layer.
+ negative_prompt_embeds = pipe.text_encoder.text_model.final_layer_norm(negative_prompt_embeds)
+ else:
+ negative_prompt_embeds = pipe.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask)
+ negative_prompt_embeds = negative_prompt_embeds[0]
+
+ if do_classifier_free_guidance:
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+ seq_len = negative_prompt_embeds.shape[1]
+
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
+
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+ if isinstance(pipe, LoraLoaderMixin) and USE_PEFT_BACKEND:
+ # Retrieve the original scale by scaling back the LoRA layers
+ unscale_lora_layers(pipe.text_encoder, lora_scale)
+
+ return prompt_embeds, negative_prompt_embeds, [negative_prompt_tokens_id, prompt_tokens_id]
+
+
+
+def encode_prompt_automatic1111(
+ pipe,
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt=None,
+ prompt_embeds: Optional[torch.Tensor] = None,
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
+ lora_scale: Optional[float] = None,
+ clip_skip: Optional[int] = None,
+):
+ if lora_scale is not None and isinstance(pipe, LoraLoaderMixin):
+ pipe._lora_scale = lora_scale
+
+ # dynamically adjust the LoRA scale
+ if not USE_PEFT_BACKEND:
+ adjust_lora_scale_text_encoder(pipe.text_encoder, lora_scale)
+ else:
+ scale_lora_layers(pipe.text_encoder, lora_scale)
+
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ prompt_tokens_id = None
+ negative_prompt_tokens_id = None
+
+
+ # get unconditional embeddings for classifier free guidance
+ uncond_tokens = []
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
+ if negative_prompt is None:
+ uncond_tokens = [""] * batch_size
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
+ raise TypeError(
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
+ f" {type(prompt)}."
+ )
+ elif isinstance(negative_prompt, str):
+ uncond_tokens = [negative_prompt] + [""] * (batch_size - 1)
+ elif batch_size != len(negative_prompt):
+ raise ValueError(
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+ " the batch size of `prompt`."
+ )
+ else:
+ uncond_tokens = negative_prompt
+
+ # textual inversion: process multi-vector tokens if necessary
+ if isinstance(pipe, TextualInversionLoaderMixin):
+ uncond_tokens = pipe.maybe_convert_prompt(uncond_tokens, pipe.tokenizer)
+ if len(uncond_tokens) == 0:
+ uncond_tokens = [""]* batch_size
+ # textual inversion: process multi-vector tokens if necessary
+ if isinstance(pipe, TextualInversionLoaderMixin):
+ uncond_tokens = pipe.maybe_convert_prompt(uncond_tokens, pipe.tokenizer)
+
+ if prompt_embeds is None:
+ if not isinstance(prompt,list):
+ prompt = [prompt]
+ # textual inversion: process multi-vector tokens if necessary
+ if isinstance(pipe, TextualInversionLoaderMixin):
+ prompt = pipe.maybe_convert_prompt(prompt, pipe.tokenizer)
+
+ prompt_parser = FrozenCLIPEmbedderWithCustomWords(pipe.tokenizer, pipe.text_encoder,clip_skip)
+ prompt_embeds_lst = []
+ negative_prompt_embeds_lst =[]
+ negative_prompt_tokens_id_lst =[]
+ prompt_tokens_id_lst =[]
+ for i in range(0,batch_size):
+ text_ids, text_embeddings = prompt_parser([uncond_tokens[i], prompt[i]])
+ negative_prompt_embeddings, prompt_embeddings = torch.chunk(text_embeddings, 2, dim=0)
+ text_ids = np.split(text_ids,text_ids.shape[0])
+ negative_prompt_embeddings_id, prompt_embeddings_id = text_ids[0], text_ids[1]
+ prompt_embeds_lst.append(prompt_embeddings)
+ negative_prompt_embeds_lst.append(negative_prompt_embeddings)
+ negative_prompt_tokens_id_lst.append(negative_prompt_embeddings_id)
+ prompt_tokens_id_lst.append(prompt_embeddings_id)
+
+ if prompt_embeds is None:
+ prompt_embeds = torch.cat(prompt_embeds_lst)
+ prompt_tokens_id = np.concatenate(prompt_tokens_id_lst)
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
+ negative_prompt_embeds = torch.cat(negative_prompt_embeds_lst)
+ negative_prompt_tokens_id = np.concatenate(negative_prompt_tokens_id_lst)
+
+ if pipe.text_encoder is not None:
+ prompt_embeds_dtype = pipe.text_encoder.dtype
+ elif pipe.unet is not None:
+ prompt_embeds_dtype = pipe.unet.dtype
+ else:
+ prompt_embeds_dtype = prompt_embeds.dtype
+
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
+
+ bs_embed, seq_len, _ = prompt_embeds.shape
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
+
+ if do_classifier_free_guidance:
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+ seq_len = negative_prompt_embeds.shape[1]
+
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
+
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+ if isinstance(pipe, LoraLoaderMixin) and USE_PEFT_BACKEND:
+ # Retrieve the original scale by scaling back the LoRA layers
+ unscale_lora_layers(pipe.text_encoder, lora_scale)
+
+ return prompt_embeds, negative_prompt_embeds, [negative_prompt_tokens_id, prompt_tokens_id]
+
+
+
+
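+# Dispatcher: `long_encode` selects the prompt-encoding strategy. 0 uses the A1111-style parser
+# (FrozenCLIPEmbedderWithCustomWords), 1 uses the weighted long-prompt encoder, and any other value
+# falls back to the standard truncating diffusers-style encoding.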
+def encode_prompt_function(
+ pipe,
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt=None,
+ prompt_embeds: Optional[torch.Tensor] = None,
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
+ lora_scale: Optional[float] = None,
+ clip_skip: Optional[int] = None,
+ long_encode: Optional[int] = 0,
+):
+ if long_encode == 0:
+ return encode_prompt_automatic1111(pipe, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds, negative_prompt_embeds, lora_scale, clip_skip)
+ elif long_encode == 1:
+ return encoder_long_prompt(pipe, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds, negative_prompt_embeds, lora_scale, clip_skip)
+ return encode_short_prompt(pipe, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds, negative_prompt_embeds, lora_scale, clip_skip)
\ No newline at end of file
diff --git a/modules/external_k_diffusion.py b/modules/external_k_diffusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..cca401694d39c8c43edc7403d02f8f69add86fe7
--- /dev/null
+++ b/modules/external_k_diffusion.py
@@ -0,0 +1,182 @@
+import math
+
+import torch
+from torch import nn
+import k_diffusion
+from k_diffusion import sampling, utils
+
+class VDenoiser(nn.Module):
+ """A v-diffusion-pytorch model wrapper for k-diffusion."""
+
+ def __init__(self, inner_model):
+ super().__init__()
+ self.inner_model = inner_model
+ self.sigma_data = 1.
+
+ def get_scalings(self, sigma):
+ c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2)
+ c_out = -sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
+ c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
+ return c_skip, c_out, c_in
+
+ def sigma_to_t(self, sigma):
+ return sigma.atan() / math.pi * 2
+
+ def t_to_sigma(self, t):
+ return (t * math.pi / 2).tan()
+
+ def loss(self, input, noise, sigma, **kwargs):
+ c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
+ noised_input = input + noise * utils.append_dims(sigma, input.ndim)
+ model_output = self.inner_model(noised_input * c_in, self.sigma_to_t(sigma), **kwargs)
+ target = (input - c_skip * noised_input) / c_out
+ return (model_output - target).pow(2).flatten(1).mean(1)
+
+ def forward(self, input, sigma, **kwargs):
+ c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
+ return self.inner_model(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out + input * c_skip
+
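+# With sigma_data = 1 the v-prediction scalings above reduce to c_skip = 1 / (sigma^2 + 1),
+# c_out = -sigma / sqrt(sigma^2 + 1), c_in = 1 / sqrt(sigma^2 + 1), and the sigma <-> t mapping
+# is t = atan(sigma) * 2 / pi.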
+
+class DiscreteSchedule(nn.Module):
+ """A mapping between continuous noise levels (sigmas) and a list of discrete noise
+ levels."""
+
+ def __init__(self, sigmas, quantize):
+ super().__init__()
+ self.register_buffer('sigmas', sigmas)
+ self.register_buffer('log_sigmas', sigmas.log())
+ self.quantize = quantize
+
+ @property
+ def sigma_min(self):
+ return self.sigmas[0]
+
+ @property
+ def sigma_max(self):
+ return self.sigmas[-1]
+
+ def get_sigmas(self, n=None):
+ if n is None:
+ return sampling.append_zero(self.sigmas.flip(0))
+ t_max = len(self.sigmas) - 1
+ t = torch.linspace(t_max, 0, n, device=self.sigmas.device)
+ return sampling.append_zero(self.t_to_sigma(t))
+
+ def sigma_to_t(self, sigma, quantize=None):
+ quantize = self.quantize if quantize is None else quantize
+ log_sigma = sigma.log()
+ dists = log_sigma - self.log_sigmas[:, None]
+ if quantize:
+ return dists.abs().argmin(dim=0).view(sigma.shape)
+ low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
+ high_idx = low_idx + 1
+ low, high = self.log_sigmas[low_idx], self.log_sigmas[high_idx]
+ w = (low - log_sigma) / (low - high)
+ w = w.clamp(0, 1)
+ t = (1 - w) * low_idx + w * high_idx
+ return t.view(sigma.shape)
+
+ def t_to_sigma(self, t):
+ t = t.float()
+ low_idx, high_idx, w = t.floor().long(), t.ceil().long(), t.frac()
+ log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx]
+ return log_sigma.exp()
+
+
+class DiscreteEpsDDPMDenoiser(DiscreteSchedule):
+ """A wrapper for discrete schedule DDPM models that output eps (the predicted
+ noise)."""
+
+ def __init__(self, model, alphas_cumprod, quantize):
+ super().__init__(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, quantize)
+ self.inner_model = model
+ self.sigma_data = 1.
+
+ def get_scalings(self, sigma):
+ c_out = -sigma
+ c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
+ return c_out, c_in
+
+ def get_eps(self, *args, **kwargs):
+ return self.inner_model(*args, **kwargs)
+
+ def loss(self, input, noise, sigma, **kwargs):
+ c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
+ noised_input = input + noise * utils.append_dims(sigma, input.ndim)
+ eps = self.get_eps(noised_input * c_in, self.sigma_to_t(sigma), **kwargs)
+ return (eps - noise).pow(2).flatten(1).mean(1)
+
+ def forward(self, input, sigma, **kwargs):
+ c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
+ eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
+# !!! fix for special models (controlnet, inpaint, depth, ..)
+ input = input[:, :eps.shape[1],...]
+ return input + eps * c_out
+
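+# For eps-prediction models the denoised estimate is therefore x - sigma * eps(x / sqrt(sigma^2 + 1), t),
+# the usual k-diffusion wrapping; the channel slice above lets variants with extra conditioning channels
+# (inpainting, depth, ControlNet) still return a latent-sized prediction.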
+
+class OpenAIDenoiser(DiscreteEpsDDPMDenoiser):
+ """A wrapper for OpenAI diffusion models."""
+
+ def __init__(self, model, diffusion, quantize=False, has_learned_sigmas=True, device='cpu'):
+ alphas_cumprod = torch.tensor(diffusion.alphas_cumprod, device=device, dtype=torch.float32)
+ super().__init__(model, alphas_cumprod, quantize=quantize)
+ self.has_learned_sigmas = has_learned_sigmas
+
+ def get_eps(self, *args, **kwargs):
+ model_output = self.inner_model(*args, **kwargs)
+ if self.has_learned_sigmas:
+ return model_output.chunk(2, dim=1)[0]
+ return model_output
+
+
+class CompVisDenoiser(DiscreteEpsDDPMDenoiser):
+ """A wrapper for CompVis diffusion models."""
+
+ def __init__(self, model, quantize=False, device='cpu'):
+ super().__init__(model, model.alphas_cumprod, quantize=quantize)
+
+ def get_eps(self, *args, **kwargs):
+ return self.inner_model.apply_model(*args, **kwargs)
+
+
+class DiscreteVDDPMDenoiser(DiscreteSchedule):
+ """A wrapper for discrete schedule DDPM models that output v."""
+
+ def __init__(self, model, alphas_cumprod, quantize):
+ super().__init__(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, quantize)
+ self.inner_model = model
+ self.sigma_data = 1.
+
+ def get_scalings(self, sigma):
+ c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2)
+ c_out = -sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
+ c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
+ return c_skip, c_out, c_in
+
+ def get_v(self, *args, **kwargs):
+ return self.inner_model(*args, **kwargs)
+
+ def loss(self, input, noise, sigma, **kwargs):
+ c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
+ noised_input = input + noise * utils.append_dims(sigma, input.ndim)
+ model_output = self.get_v(noised_input * c_in, self.sigma_to_t(sigma), **kwargs)
+ target = (input - c_skip * noised_input) / c_out
+ return (model_output - target).pow(2).flatten(1).mean(1)
+
+ def forward(self, input, sigma, **kwargs):
+ c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
+ vout = self.get_v(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out
+ # !!! fix for special models (controlnet, upscale, ..)
+ input = input[:, :vout.shape[1],...]
+ return vout + input * c_skip
+ #return self.get_v(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out + input * c_skip
+
+
+class CompVisVDenoiser(DiscreteVDDPMDenoiser):
+ """A wrapper for CompVis diffusion models that output v."""
+
+ def __init__(self, model, quantize=False, device='cpu'):
+ super().__init__(model, model.alphas_cumprod, quantize=quantize)
+
+ def get_v(self, x, t, cond, **kwargs):
+ return self.inner_model.apply_model(x, t, cond)
\ No newline at end of file
diff --git a/modules/ip_adapter.py b/modules/ip_adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..b919cd9db0f58f634fa81d7dabe95e0b5d90a759
--- /dev/null
+++ b/modules/ip_adapter.py
@@ -0,0 +1,343 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pathlib import Path
+from typing import Callable, Dict, List, Optional, Union
+
+import torch
+import torch.nn.functional as F
+from huggingface_hub.utils import validate_hf_hub_args
+from safetensors import safe_open
+
+from diffusers.models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_state_dict
+
+
+from diffusers.utils import (
+ USE_PEFT_BACKEND,
+ _get_model_file,
+ is_accelerate_available,
+ is_torch_version,
+ is_transformers_available,
+ logging,
+)
+
+from diffusers.loaders.unet_loader_utils import _maybe_expand_lora_scales
+
+
+
+if is_transformers_available():
+ from transformers import (
+ CLIPImageProcessor,
+ CLIPVisionModelWithProjection,
+ )
+
+from .attention_modify import (
+ AttnProcessor,
+ IPAdapterAttnProcessor,
+ AttnProcessor2_0,
+ IPAdapterAttnProcessor2_0
+ )
+
+logger = logging.get_logger(__name__)
+
+
+class IPAdapterMixin:
+ """Mixin for handling IP Adapters."""
+
+ @validate_hf_hub_args
+ def load_ip_adapter(
+ self,
+ pretrained_model_name_or_path_or_dict: Union[str, List[str], Dict[str, torch.Tensor]],
+ subfolder: Union[str, List[str]],
+ weight_name: Union[str, List[str]],
+ image_encoder_folder: Optional[str] = "image_encoder",
+ **kwargs,
+ ):
+ """
+ Parameters:
+ pretrained_model_name_or_path_or_dict (`str` or `List[str]` or `os.PathLike` or `List[os.PathLike]` or `dict` or `List[dict]`):
+ Can be either:
+
+ - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
+ the Hub.
+ - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
+ with [`ModelMixin.save_pretrained`].
+ - A [torch state
+ dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
+ subfolder (`str` or `List[str]`):
+ The subfolder location of a model file within a larger model repository on the Hub or locally. If a
+ list is passed, it should have the same length as `weight_name`.
+ weight_name (`str` or `List[str]`):
+ The name of the weight file to load. If a list is passed, it should have the same length as
+ `subfolder`.
+ image_encoder_folder (`str`, *optional*, defaults to `image_encoder`):
+ The subfolder location of the image encoder within a larger model repository on the Hub or locally.
+ Pass `None` to not load the image encoder. If the image encoder is located in a folder inside
+ `subfolder`, you only need to pass the name of the folder that contains image encoder weights, e.g.
+ `image_encoder_folder="image_encoder"`. If the image encoder is located in a folder other than
+ `subfolder`, you should pass the path to the folder that contains image encoder weights, for example,
+ `image_encoder_folder="different_subfolder/image_encoder"`.
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
+ is not used.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+ cached versions if they exist.
+ resume_download:
+ Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1
+ of Diffusers.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ local_files_only (`bool`, *optional*, defaults to `False`):
+ Whether to only load local model weights and configuration files or not. If set to `True`, the model
+ won't be downloaded from the Hub.
+ token (`str` or *bool*, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
+ allowed by Git.
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
+ Speed up model loading only loading the pretrained weights and not initializing the weights. This also
+ tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
+ Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
+ argument to `True` will raise an error.
+ """
+
+ # handle the list inputs for multiple IP Adapters
+ if not isinstance(weight_name, list):
+ weight_name = [weight_name]
+
+ if not isinstance(pretrained_model_name_or_path_or_dict, list):
+ pretrained_model_name_or_path_or_dict = [pretrained_model_name_or_path_or_dict]
+ if len(pretrained_model_name_or_path_or_dict) == 1:
+ pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict * len(weight_name)
+
+ if not isinstance(subfolder, list):
+ subfolder = [subfolder]
+ if len(subfolder) == 1:
+ subfolder = subfolder * len(weight_name)
+
+ if len(weight_name) != len(pretrained_model_name_or_path_or_dict):
+ raise ValueError("`weight_name` and `pretrained_model_name_or_path_or_dict` must have the same length.")
+
+ if len(weight_name) != len(subfolder):
+ raise ValueError("`weight_name` and `subfolder` must have the same length.")
+
+ # Load the main state dict first.
+ cache_dir = kwargs.pop("cache_dir", None)
+ force_download = kwargs.pop("force_download", False)
+ resume_download = kwargs.pop("resume_download", None)
+ proxies = kwargs.pop("proxies", None)
+ local_files_only = kwargs.pop("local_files_only", None)
+ token = kwargs.pop("token", None)
+ revision = kwargs.pop("revision", None)
+ low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
+
+ if low_cpu_mem_usage and not is_accelerate_available():
+ low_cpu_mem_usage = False
+ logger.warning(
+ "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
+ " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
+ " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
+ " install accelerate\n```\n."
+ )
+
+ if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
+ raise NotImplementedError(
+ "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
+ " `low_cpu_mem_usage=False`."
+ )
+
+ user_agent = {
+ "file_type": "attn_procs_weights",
+ "framework": "pytorch",
+ }
+ state_dicts = []
+ for pretrained_model_name_or_path_or_dict, weight_name, subfolder in zip(
+ pretrained_model_name_or_path_or_dict, weight_name, subfolder
+ ):
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
+ model_file = _get_model_file(
+ pretrained_model_name_or_path_or_dict,
+ weights_name=weight_name,
+ cache_dir=cache_dir,
+ force_download=force_download,
+ resume_download=resume_download,
+ proxies=proxies,
+ local_files_only=local_files_only,
+ token=token,
+ revision=revision,
+ subfolder=subfolder,
+ user_agent=user_agent,
+ )
+ if weight_name.endswith(".safetensors"):
+ state_dict = {"image_proj": {}, "ip_adapter": {}}
+ with safe_open(model_file, framework="pt", device="cpu") as f:
+ for key in f.keys():
+ if key.startswith("image_proj."):
+ state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
+ elif key.startswith("ip_adapter."):
+ state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
+ else:
+ state_dict = load_state_dict(model_file)
+ else:
+ state_dict = pretrained_model_name_or_path_or_dict
+
+ keys = list(state_dict.keys())
+ if keys != ["image_proj", "ip_adapter"]:
+ raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
+
+ state_dicts.append(state_dict)
+
+ # load CLIP image encoder here if it has not been registered to the pipeline yet
+ if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
+ if image_encoder_folder is not None:
+ if not isinstance(pretrained_model_name_or_path_or_dict, dict):
+ logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
+ if image_encoder_folder.count("/") == 0:
+ image_encoder_subfolder = Path(subfolder, image_encoder_folder).as_posix()
+ else:
+ image_encoder_subfolder = Path(image_encoder_folder).as_posix()
+
+ image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+ pretrained_model_name_or_path_or_dict,
+ subfolder=image_encoder_subfolder,
+ low_cpu_mem_usage=low_cpu_mem_usage,
+ ).to(self.device, dtype=self.dtype)
+ self.register_modules(image_encoder=image_encoder)
+ else:
+ raise ValueError(
+ "`image_encoder` cannot be loaded because `pretrained_model_name_or_path_or_dict` is a state dict."
+ )
+ else:
+ logger.warning(
+ "image_encoder is not loaded since `image_encoder_folder=None` passed. You will not be able to use `ip_adapter_image` when calling the pipeline with IP-Adapter."
+ "Use `ip_adapter_image_embeds` to pass pre-generated image embedding instead."
+ )
+
+ # create feature extractor if it has not been registered to the pipeline yet
+ if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
+ feature_extractor = CLIPImageProcessor()
+ self.register_modules(feature_extractor=feature_extractor)
+
+ # load ip-adapter into unet
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
+ unet._load_ip_adapter_weights(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage)
+
+ extra_loras = unet._load_ip_adapter_loras(state_dicts)
+ if extra_loras != {}:
+ if not USE_PEFT_BACKEND:
+ logger.warning("PEFT backend is required to load these weights.")
+ else:
+ # apply the IP Adapter Face ID LoRA weights
+ peft_config = getattr(unet, "peft_config", {})
+ for k, lora in extra_loras.items():
+ if f"faceid_{k}" not in peft_config:
+ self.load_lora_weights(lora, adapter_name=f"faceid_{k}")
+ self.set_adapters([f"faceid_{k}"], adapter_weights=[1.0])
+
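+ # Usage sketch (assumes a Stable Diffusion pipeline that mixes this class in, plus the public
+ # h94/IP-Adapter weights):
+ # pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
+ # pipe.set_ip_adapter_scale(0.6)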
+ def set_ip_adapter_scale(self, scale):
+ """
+ Set IP-Adapter scales per-transformer block. Input `scale` could be a single config or a list of configs for
+ granular control over each IP-Adapter behavior. A config can be a float or a dictionary.
+
+ Example:
+
+ ```py
+ # To use original IP-Adapter
+ scale = 1.0
+ pipeline.set_ip_adapter_scale(scale)
+
+ # To use style block only
+ scale = {
+ "up": {"block_0": [0.0, 1.0, 0.0]},
+ }
+ pipeline.set_ip_adapter_scale(scale)
+
+ # To use style+layout blocks
+ scale = {
+ "down": {"block_2": [0.0, 1.0]},
+ "up": {"block_0": [0.0, 1.0, 0.0]},
+ }
+ pipeline.set_ip_adapter_scale(scale)
+
+ # To use style and layout from 2 reference images
+ scales = [{"down": {"block_2": [0.0, 1.0]}}, {"up": {"block_0": [0.0, 1.0, 0.0]}}]
+ pipeline.set_ip_adapter_scale(scales)
+ ```
+ """
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
+ if not isinstance(scale, list):
+ scale = [scale]
+ scale_configs = _maybe_expand_lora_scales(unet, scale, default_scale=0.0)
+
+ for attn_name, attn_processor in unet.attn_processors.items():
+ if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
+ if len(scale_configs) != len(attn_processor.scale):
+ raise ValueError(
+ f"Cannot assign {len(scale_configs)} scale_configs to "
+                        f"{len(attn_processor.scale)} IP-Adapters."
+ )
+ elif len(scale_configs) == 1:
+ scale_configs = scale_configs * len(attn_processor.scale)
+ for i, scale_config in enumerate(scale_configs):
+ if isinstance(scale_config, dict):
+ for k, s in scale_config.items():
+ if attn_name.startswith(k):
+ attn_processor.scale[i] = s
+ else:
+ attn_processor.scale[i] = scale_config
+
+ def unload_ip_adapter(self):
+ """
+ Unloads the IP Adapter weights
+
+ Examples:
+
+ ```python
+ >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
+ >>> pipeline.unload_ip_adapter()
+ >>> ...
+ ```
+ """
+ # remove CLIP image encoder
+ if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None:
+ self.image_encoder = None
+ self.register_to_config(image_encoder=[None, None])
+
+ # remove feature extractor only when safety_checker is None as safety_checker uses
+ # the feature_extractor later
+ if not hasattr(self, "safety_checker"):
+ if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None:
+ self.feature_extractor = None
+ self.register_to_config(feature_extractor=[None, None])
+
+ # remove hidden encoder
+ self.unet.encoder_hid_proj = None
+ self.config.encoder_hid_dim_type = None
+
+ # restore original Unet attention processors layers
+ attn_procs = {}
+ for name, value in self.unet.attn_processors.items():
+ attn_processor_class = (
+ AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnProcessor()
+ )
+ attn_procs[name] = (
+ attn_processor_class
+ if isinstance(value, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0))
+ else value.__class__()
+ )
+ self.unet.set_attn_processor(attn_procs)
\ No newline at end of file
diff --git a/modules/keypose/__init__.py b/modules/keypose/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..786487bf01f242df5322d6a4cc3fddbb9c6363d2
--- /dev/null
+++ b/modules/keypose/__init__.py
@@ -0,0 +1,216 @@
+import numpy as np
+import cv2
+import torch
+
+import os
+#from modules import devices
+#from annotator.annotator_path import models_path
+
+import mmcv
+from mmdet.apis import inference_detector, init_detector
+from mmpose.apis import inference_top_down_pose_model
+from mmpose.apis import init_pose_model, process_mmdet_results, vis_pose_result
+
+device = "cpu"
+if torch.cuda.is_available():
+ device = "cuda"
+
+def preprocessing(image, device):
+ # Resize
+ scale = 640 / max(image.shape[:2])
+ image = cv2.resize(image, dsize=None, fx=scale, fy=scale)
+ raw_image = image.astype(np.uint8)
+
+ # Subtract mean values
+ image = image.astype(np.float32)
+ image -= np.array(
+ [
+ float(104.008),
+ float(116.669),
+ float(122.675),
+ ]
+ )
+
+ # Convert to torch.Tensor and add "batch" axis
+ image = torch.from_numpy(image.transpose(2, 0, 1)).float().unsqueeze(0)
+ image = image.to(device)
+
+ return image, raw_image
+
+
+def imshow_keypoints(img,
+ pose_result,
+ skeleton=None,
+ kpt_score_thr=0.1,
+ pose_kpt_color=None,
+ pose_link_color=None,
+ radius=4,
+ thickness=1):
+    """Draw keypoints and links on an image.
+    The input image is only used for its shape; the pose is rendered on a
+    black canvas of the same size, and only the first two poses in
+    `pose_result` are drawn.
+    Args:
+        img (ndarray): The image whose shape defines the output canvas.
+        pose_result (list[dict]): The poses to draw. Each element carries a
+            'keypoints' entry holding a Kx3 numpy.ndarray, where each
+            keypoint is represented as (x, y, score).
+        skeleton (list[list[int]], optional): Pairs of keypoint indices to
+            connect with links. If None, no links are drawn.
+        kpt_score_thr (float, optional): Minimum score of keypoints
+            to be shown. Default: 0.1.
+        pose_kpt_color (np.ndarray[Nx3], optional): Color of N keypoints. If
+            None, the keypoints will not be drawn.
+        pose_link_color (np.ndarray[Mx3], optional): Color of M links. If
+            None, the links will not be drawn.
+        radius (int): Radius of the keypoint circles.
+        thickness (int): Thickness of lines.
+    """
+
+ img_h, img_w, _ = img.shape
+ img = np.zeros(img.shape)
+
+ for idx, kpts in enumerate(pose_result):
+ if idx > 1:
+ continue
+ kpts = kpts['keypoints']
+ # print(kpts)
+ kpts = np.array(kpts, copy=False)
+
+ # draw each point on image
+ if pose_kpt_color is not None:
+ assert len(pose_kpt_color) == len(kpts)
+
+ for kid, kpt in enumerate(kpts):
+ x_coord, y_coord, kpt_score = int(kpt[0]), int(kpt[1]), kpt[2]
+
+ if kpt_score < kpt_score_thr or pose_kpt_color[kid] is None:
+ # skip the point that should not be drawn
+ continue
+
+ color = tuple(int(c) for c in pose_kpt_color[kid])
+ cv2.circle(img, (int(x_coord), int(y_coord)),
+ radius, color, -1)
+
+ # draw links
+ if skeleton is not None and pose_link_color is not None:
+ assert len(pose_link_color) == len(skeleton)
+
+ for sk_id, sk in enumerate(skeleton):
+ pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1]))
+ pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1]))
+
+ if (pos1[0] <= 0 or pos1[0] >= img_w or pos1[1] <= 0 or pos1[1] >= img_h or pos2[0] <= 0
+ or pos2[0] >= img_w or pos2[1] <= 0 or pos2[1] >= img_h or kpts[sk[0], 2] < kpt_score_thr
+ or kpts[sk[1], 2] < kpt_score_thr or pose_link_color[sk_id] is None):
+ # skip the link that should not be drawn
+ continue
+ color = tuple(int(c) for c in pose_link_color[sk_id])
+ cv2.line(img, pos1, pos2, color, thickness=thickness)
+
+ return img
+
+
+human_det, pose_model = None, None
+det_model_path = "https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth"
+pose_model_path = "https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth"
+
+#modeldir = os.path.join(models_path, "keypose")
+modeldir = os.getcwd()
+old_modeldir = os.path.dirname(os.path.realpath(__file__))
+
+det_config = 'faster_rcnn_r50_fpn_coco.py'
+pose_config = 'hrnet_w48_coco_256x192.py'
+
+det_checkpoint = 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
+pose_checkpoint = 'hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth'
+det_cat_id = 1
+bbox_thr = 0.2
+
+skeleton = [
+ [15, 13], [13, 11], [16, 14], [14, 12], [11, 12], [5, 11], [6, 12], [5, 6], [5, 7], [6, 8],
+ [7, 9], [8, 10],
+ [1, 2], [0, 1], [0, 2], [1, 3], [2, 4], [3, 5], [4, 6]
+]
+
+pose_kpt_color = [
+ [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255],
+ [0, 255, 0],
+ [255, 128, 0], [0, 255, 0], [255, 128, 0], [0, 255, 0], [255, 128, 0], [0, 255, 0],
+ [255, 128, 0],
+ [0, 255, 0], [255, 128, 0], [0, 255, 0], [255, 128, 0]
+]
+
+pose_link_color = [
+ [0, 255, 0], [0, 255, 0], [255, 128, 0], [255, 128, 0],
+ [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255], [0, 255, 0],
+ [255, 128, 0],
+ [0, 255, 0], [255, 128, 0], [51, 153, 255], [51, 153, 255], [51, 153, 255],
+ [51, 153, 255],
+ [51, 153, 255], [51, 153, 255], [51, 153, 255]
+]
+
+def find_download_model(checkpoint, remote_path):
+ modelpath = os.path.join(modeldir, checkpoint)
+ old_modelpath = os.path.join(old_modeldir, checkpoint)
+
+ if os.path.exists(old_modelpath):
+ modelpath = old_modelpath
+ elif not os.path.exists(modelpath):
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(remote_path, model_dir=modeldir)
+
+ return modelpath
+
+def apply_keypose(input_image):
+    global human_det, pose_model, device
+    if human_det is None or pose_model is None:
+ det_model_local = find_download_model(det_checkpoint, det_model_path)
+ hrnet_model_local = find_download_model(pose_checkpoint, pose_model_path)
+ det_config_mmcv = mmcv.Config.fromfile(det_config)
+ pose_config_mmcv = mmcv.Config.fromfile(pose_config)
+ human_det = init_detector(det_config_mmcv, det_model_local, device=device)
+ pose_model = init_pose_model(pose_config_mmcv, hrnet_model_local, device=device)
+
+ assert input_image.ndim == 3
+ input_image = input_image.copy()
+ with torch.no_grad():
+ image = torch.from_numpy(input_image).float().to(device)
+ image = image / 255.0
+ mmdet_results = inference_detector(human_det, image)
+
+ # keep the person class bounding boxes.
+ person_results = process_mmdet_results(mmdet_results, det_cat_id)
+
+ return_heatmap = False
+ dataset = pose_model.cfg.data['test']['type']
+
+ # e.g. use ('backbone', ) to return backbone feature
+ output_layer_names = None
+ pose_results, _ = inference_top_down_pose_model(
+ pose_model,
+ image,
+ person_results,
+ bbox_thr=bbox_thr,
+ format='xyxy',
+ dataset=dataset,
+ dataset_info=None,
+ return_heatmap=return_heatmap,
+ outputs=output_layer_names
+ )
+
+ im_keypose_out = imshow_keypoints(
+ image,
+ pose_results,
+ skeleton=skeleton,
+ pose_kpt_color=pose_kpt_color,
+ pose_link_color=pose_link_color,
+ radius=2,
+ thickness=2
+ )
+ im_keypose_out = im_keypose_out.astype(np.uint8)
+
+ return im_keypose_out
+
+
+def unload_hed_model():
+    # Kept under its legacy name; this moves the keypose detection and pose models off the GPU.
+    global human_det, pose_model
+    if human_det is not None:
+        human_det.cpu()
+    if pose_model is not None:
+        pose_model.cpu()
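+
+
+# Hedged, self-contained usage sketch (added for illustration, not part of the original
+# annotator): render one synthetic 17-keypoint COCO pose using the skeleton and colour
+# tables defined above. The keypoint coordinates and the output filename are made up
+# purely for demonstration.
+if __name__ == "__main__":
+    rng = np.random.default_rng(0)
+    canvas = np.zeros((256, 256, 3), dtype=np.uint8)
+    # 17 keypoints, each (x, y, score); a score of 1.0 passes the default threshold.
+    demo_kpts = np.hstack([rng.uniform(20, 236, size=(17, 2)), np.ones((17, 1))])
+    rendered = imshow_keypoints(
+        canvas,
+        [{"keypoints": demo_kpts}],
+        skeleton=skeleton,
+        pose_kpt_color=pose_kpt_color,
+        pose_link_color=pose_link_color,
+        radius=2,
+        thickness=2,
+    )
+    cv2.imwrite("keypose_demo.png", rendered.astype(np.uint8))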
\ No newline at end of file
diff --git a/modules/keypose/faster_rcnn_r50_fpn_coco.py b/modules/keypose/faster_rcnn_r50_fpn_coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..67005a67ddbcf0f1c9734fd0669dd9364805473e
--- /dev/null
+++ b/modules/keypose/faster_rcnn_r50_fpn_coco.py
@@ -0,0 +1,182 @@
+checkpoint_config = dict(interval=1)
+# yapf:disable
+log_config = dict(
+ interval=50,
+ hooks=[
+ dict(type='TextLoggerHook'),
+ # dict(type='TensorboardLoggerHook')
+ ])
+# yapf:enable
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
+# optimizer
+optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='step',
+ warmup='linear',
+ warmup_iters=500,
+ warmup_ratio=0.001,
+ step=[8, 11])
+total_epochs = 12
+
+model = dict(
+ type='FasterRCNN',
+ pretrained='torchvision://resnet50',
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=1,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ norm_eval=True,
+ style='pytorch'),
+ neck=dict(
+ type='FPN',
+ in_channels=[256, 512, 1024, 2048],
+ out_channels=256,
+ num_outs=5),
+ rpn_head=dict(
+ type='RPNHead',
+ in_channels=256,
+ feat_channels=256,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ scales=[8],
+ ratios=[0.5, 1.0, 2.0],
+ strides=[4, 8, 16, 32, 64]),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[.0, .0, .0, .0],
+ target_stds=[1.0, 1.0, 1.0, 1.0]),
+ loss_cls=dict(
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
+ roi_head=dict(
+ type='StandardRoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor',
+ roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
+ out_channels=256,
+ featmap_strides=[4, 8, 16, 32]),
+ bbox_head=dict(
+ type='Shared2FCBBoxHead',
+ in_channels=256,
+ fc_out_channels=1024,
+ roi_feat_size=7,
+ num_classes=80,
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[0., 0., 0., 0.],
+ target_stds=[0.1, 0.1, 0.2, 0.2]),
+ reg_class_agnostic=False,
+ loss_cls=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
+ # model training and testing settings
+ train_cfg=dict(
+ rpn=dict(
+ assigner=dict(
+ type='MaxIoUAssigner',
+ pos_iou_thr=0.7,
+ neg_iou_thr=0.3,
+ min_pos_iou=0.3,
+ match_low_quality=True,
+ ignore_iof_thr=-1),
+ sampler=dict(
+ type='RandomSampler',
+ num=256,
+ pos_fraction=0.5,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=False),
+ allowed_border=-1,
+ pos_weight=-1,
+ debug=False),
+ rpn_proposal=dict(
+ nms_pre=2000,
+ max_per_img=1000,
+ nms=dict(type='nms', iou_threshold=0.7),
+ min_bbox_size=0),
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssigner',
+ pos_iou_thr=0.5,
+ neg_iou_thr=0.5,
+ min_pos_iou=0.5,
+ match_low_quality=False,
+ ignore_iof_thr=-1),
+ sampler=dict(
+ type='RandomSampler',
+ num=512,
+ pos_fraction=0.25,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=-1,
+ debug=False)),
+ test_cfg=dict(
+ rpn=dict(
+ nms_pre=1000,
+ max_per_img=1000,
+ nms=dict(type='nms', iou_threshold=0.7),
+ min_bbox_size=0),
+ rcnn=dict(
+ score_thr=0.05,
+ nms=dict(type='nms', iou_threshold=0.5),
+ max_per_img=100)
+ # soft-nms is also supported for rcnn testing
+ # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
+ ))
+
+dataset_type = 'CocoDataset'
+data_root = 'data/coco'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+ dict(type='RandomFlip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size_divisor=32),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(1333, 800),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size_divisor=32),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img']),
+ ])
+]
+data = dict(
+ samples_per_gpu=2,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ ann_file=f'{data_root}/annotations/instances_train2017.json',
+ img_prefix=f'{data_root}/train2017/',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=f'{data_root}/annotations/instances_val2017.json',
+ img_prefix=f'{data_root}/val2017/',
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=f'{data_root}/annotations/instances_val2017.json',
+ img_prefix=f'{data_root}/val2017/',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='bbox')
\ No newline at end of file
diff --git a/modules/keypose/hrnet_w48_coco_256x192.py b/modules/keypose/hrnet_w48_coco_256x192.py
new file mode 100644
index 0000000000000000000000000000000000000000..121c239f9be89eb2d7785eff03fef84fbef826fa
--- /dev/null
+++ b/modules/keypose/hrnet_w48_coco_256x192.py
@@ -0,0 +1,169 @@
+# _base_ = [
+# '../../../../_base_/default_runtime.py',
+# '../../../../_base_/datasets/coco.py'
+# ]
+evaluation = dict(interval=10, metric='mAP', save_best='AP')
+
+optimizer = dict(
+ type='Adam',
+ lr=5e-4,
+)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='step',
+ warmup='linear',
+ warmup_iters=500,
+ warmup_ratio=0.001,
+ step=[170, 200])
+total_epochs = 210
+channel_cfg = dict(
+ num_output_channels=17,
+ dataset_joints=17,
+ dataset_channel=[
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+ ],
+ inference_channel=[
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
+ ])
+
+# model settings
+model = dict(
+ type='TopDown',
+ pretrained='https://download.openmmlab.com/mmpose/'
+ 'pretrain_models/hrnet_w48-8ef0771d.pth',
+ backbone=dict(
+ type='HRNet',
+ in_channels=3,
+ extra=dict(
+ stage1=dict(
+ num_modules=1,
+ num_branches=1,
+ block='BOTTLENECK',
+ num_blocks=(4, ),
+ num_channels=(64, )),
+ stage2=dict(
+ num_modules=1,
+ num_branches=2,
+ block='BASIC',
+ num_blocks=(4, 4),
+ num_channels=(48, 96)),
+ stage3=dict(
+ num_modules=4,
+ num_branches=3,
+ block='BASIC',
+ num_blocks=(4, 4, 4),
+ num_channels=(48, 96, 192)),
+ stage4=dict(
+ num_modules=3,
+ num_branches=4,
+ block='BASIC',
+ num_blocks=(4, 4, 4, 4),
+ num_channels=(48, 96, 192, 384))),
+ ),
+ keypoint_head=dict(
+ type='TopdownHeatmapSimpleHead',
+ in_channels=48,
+ out_channels=channel_cfg['num_output_channels'],
+ num_deconv_layers=0,
+ extra=dict(final_conv_kernel=1, ),
+ loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
+ train_cfg=dict(),
+ test_cfg=dict(
+ flip_test=True,
+ post_process='default',
+ shift_heatmap=True,
+ modulate_kernel=11))
+
+data_cfg = dict(
+ image_size=[192, 256],
+ heatmap_size=[48, 64],
+ num_output_channels=channel_cfg['num_output_channels'],
+ num_joints=channel_cfg['dataset_joints'],
+ dataset_channel=channel_cfg['dataset_channel'],
+ inference_channel=channel_cfg['inference_channel'],
+ soft_nms=False,
+ nms_thr=1.0,
+ oks_thr=0.9,
+ vis_thr=0.2,
+ use_gt_bbox=False,
+ det_bbox_thr=0.0,
+ bbox_file='data/coco/person_detection_results/'
+ 'COCO_val2017_detections_AP_H_56_person.json',
+)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='TopDownGetBboxCenterScale', padding=1.25),
+ dict(type='TopDownRandomShiftBboxCenter', shift_factor=0.16, prob=0.3),
+ dict(type='TopDownRandomFlip', flip_prob=0.5),
+ dict(
+ type='TopDownHalfBodyTransform',
+ num_joints_half_body=8,
+ prob_half_body=0.3),
+ dict(
+ type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
+ dict(type='TopDownAffine'),
+ dict(type='ToTensor'),
+ dict(
+ type='NormalizeTensor',
+ mean=[0.485, 0.456, 0.406],
+ std=[0.229, 0.224, 0.225]),
+ dict(type='TopDownGenerateTarget', sigma=2),
+ dict(
+ type='Collect',
+ keys=['img', 'target', 'target_weight'],
+ meta_keys=[
+ 'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
+ 'rotation', 'bbox_score', 'flip_pairs'
+ ]),
+]
+
+val_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='TopDownGetBboxCenterScale', padding=1.25),
+ dict(type='TopDownAffine'),
+ dict(type='ToTensor'),
+ dict(
+ type='NormalizeTensor',
+ mean=[0.485, 0.456, 0.406],
+ std=[0.229, 0.224, 0.225]),
+ dict(
+ type='Collect',
+ keys=['img'],
+ meta_keys=[
+ 'image_file', 'center', 'scale', 'rotation', 'bbox_score',
+ 'flip_pairs'
+ ]),
+]
+
+test_pipeline = val_pipeline
+
+data_root = 'data/coco'
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ val_dataloader=dict(samples_per_gpu=32),
+ test_dataloader=dict(samples_per_gpu=32),
+ train=dict(
+ type='TopDownCocoDataset',
+ ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
+ img_prefix=f'{data_root}/train2017/',
+ data_cfg=data_cfg,
+ pipeline=train_pipeline,
+ dataset_info={{_base_.dataset_info}}),
+ val=dict(
+ type='TopDownCocoDataset',
+ ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
+ img_prefix=f'{data_root}/val2017/',
+ data_cfg=data_cfg,
+ pipeline=val_pipeline,
+ dataset_info={{_base_.dataset_info}}),
+ test=dict(
+ type='TopDownCocoDataset',
+ ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
+ img_prefix=f'{data_root}/val2017/',
+ data_cfg=data_cfg,
+ pipeline=test_pipeline,
+ dataset_info={{_base_.dataset_info}}),
+)
\ No newline at end of file
diff --git a/modules/lora.py b/modules/lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..43d08d74a042a301545e695216b880093784a831
--- /dev/null
+++ b/modules/lora.py
@@ -0,0 +1,187 @@
+# LoRA network module
+# reference:
+# https://github.com/microsoft/LoRA/blob/main/loralib/layers.py
+# https://github.com/cloneofsimo/lora/blob/master/lora_diffusion/lora.py
+# https://github.com/bmaltais/kohya_ss/blob/master/networks/lora.py#L48
+
+import math
+import os
+import torch
+import diffusers
+import modules.safe as _
+from safetensors.torch import load_file
+from packaging import version
+
+
+class LoRAModule(torch.nn.Module):
+ """
+    Replaces the forward method of the original Linear/Conv2d module instead of replacing the module itself.
+ """
+
+ def __init__(
+ self,
+ lora_name,
+ org_module: torch.nn.Module,
+ multiplier=1.0,
+ lora_dim=4,
+ alpha=1,
+ ):
+ """if alpha == 0 or None, alpha is rank (no scaling)."""
+ super().__init__()
+ self.lora_name = lora_name
+ self.lora_dim = lora_dim
+
+ if org_module.__class__.__name__ == "Conv2d":
+ in_dim = org_module.in_channels
+ out_dim = org_module.out_channels
+ self.lora_down = torch.nn.Conv2d(in_dim, lora_dim, (1, 1), bias=False)
+ self.lora_up = torch.nn.Conv2d(lora_dim, out_dim, (1, 1), bias=False)
+ else:
+ in_dim = org_module.in_features
+ out_dim = org_module.out_features
+ self.lora_down = torch.nn.Linear(in_dim, lora_dim, bias=False)
+ self.lora_up = torch.nn.Linear(lora_dim, out_dim, bias=False)
+
+ if type(alpha) == torch.Tensor:
+ alpha = alpha.detach().float().numpy() # without casting, bf16 causes error
+
+ alpha = lora_dim if alpha is None or alpha == 0 else alpha
+ self.scale = alpha / self.lora_dim
+        self.register_buffer("alpha", torch.tensor(alpha))  # stored as a buffer so it can be treated as a constant
+
+ # same as microsoft's
+ torch.nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))
+ torch.nn.init.zeros_(self.lora_up.weight)
+
+ self.multiplier = multiplier
+ self.org_module = org_module # remove in applying
+ self.enable = False
+
+ def resize(self, rank, alpha, multiplier):
+ self.alpha = alpha.clone().detach()
+ self.multiplier = multiplier
+ self.scale = alpha / rank
+ if self.lora_down.__class__.__name__ == "Conv2d":
+ in_dim = self.lora_down.in_channels
+ out_dim = self.lora_up.out_channels
+ self.lora_down = torch.nn.Conv2d(in_dim, rank, (1, 1), bias=False)
+ self.lora_up = torch.nn.Conv2d(rank, out_dim, (1, 1), bias=False)
+ else:
+ in_dim = self.lora_down.in_features
+ out_dim = self.lora_up.out_features
+ self.lora_down = torch.nn.Linear(in_dim, rank, bias=False)
+ self.lora_up = torch.nn.Linear(rank, out_dim, bias=False)
+
+ def apply(self):
+ if hasattr(self, "org_module"):
+ self.org_forward = self.org_module.forward
+ self.org_module.forward = self.forward
+ del self.org_module
+
+ def forward(self, x):
+ if self.enable:
+ return (
+ self.org_forward(x)
+ + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale
+ )
+ return self.org_forward(x)
+
+
+class LoRANetwork(torch.nn.Module):
+ UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention"]
+ TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"]
+ LORA_PREFIX_UNET = "lora_unet"
+ LORA_PREFIX_TEXT_ENCODER = "lora_te"
+
+ def __init__(self, text_encoder, unet, multiplier=1.0, lora_dim=4, alpha=1) -> None:
+ super().__init__()
+ self.multiplier = multiplier
+ self.lora_dim = lora_dim
+ self.alpha = alpha
+
+ # create module instances
+ def create_modules(prefix, root_module: torch.nn.Module, target_replace_modules):
+ loras = []
+ for name, module in root_module.named_modules():
+ if module.__class__.__name__ in target_replace_modules:
+ for child_name, child_module in module.named_modules():
+ if child_module.__class__.__name__ == "Linear" or (child_module.__class__.__name__ == "Conv2d" and child_module.kernel_size == (1, 1)):
+ lora_name = prefix + "." + name + "." + child_name
+ lora_name = lora_name.replace(".", "_")
+ lora = LoRAModule(lora_name, child_module, self.multiplier, self.lora_dim, self.alpha,)
+ loras.append(lora)
+ return loras
+
+ if isinstance(text_encoder, list):
+ self.text_encoder_loras = text_encoder
+ else:
+ self.text_encoder_loras = create_modules(LoRANetwork.LORA_PREFIX_TEXT_ENCODER, text_encoder, LoRANetwork.TEXT_ENCODER_TARGET_REPLACE_MODULE)
+ print(f"Create LoRA for Text Encoder: {len(self.text_encoder_loras)} modules.")
+
+        if version.parse(diffusers.__version__) >= version.parse("0.15.0"):
+ LoRANetwork.UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel"]
+
+ self.unet_loras = create_modules(LoRANetwork.LORA_PREFIX_UNET, unet, LoRANetwork.UNET_TARGET_REPLACE_MODULE)
+ print(f"Create LoRA for U-Net: {len(self.unet_loras)} modules.")
+
+ self.weights_sd = None
+
+ # assertion
+ names = set()
+ for lora in self.text_encoder_loras + self.unet_loras:
+ assert (lora.lora_name not in names), f"duplicated lora name: {lora.lora_name}"
+ names.add(lora.lora_name)
+
+ lora.apply()
+ self.add_module(lora.lora_name, lora)
+
+ def reset(self):
+ for lora in self.text_encoder_loras + self.unet_loras:
+ lora.enable = False
+
+ def load(self, file, scale):
+
+ weights = None
+ if os.path.splitext(file)[1] == ".safetensors":
+ weights = load_file(file)
+ else:
+ weights = torch.load(file, map_location="cpu")
+
+ if not weights:
+ return
+
+ network_alpha = None
+ network_dim = None
+ for key, value in weights.items():
+ if network_alpha is None and "alpha" in key:
+ network_alpha = value
+ if network_dim is None and "lora_down" in key and len(value.size()) == 2:
+ network_dim = value.size()[0]
+
+ if network_alpha is None:
+ network_alpha = network_dim
+
+ weights_has_text_encoder = weights_has_unet = False
+ weights_to_modify = []
+
+ for key in weights.keys():
+ if key.startswith(LoRANetwork.LORA_PREFIX_TEXT_ENCODER):
+ weights_has_text_encoder = True
+
+ if key.startswith(LoRANetwork.LORA_PREFIX_UNET):
+ weights_has_unet = True
+
+ if weights_has_text_encoder:
+ weights_to_modify += self.text_encoder_loras
+
+ if weights_has_unet:
+ weights_to_modify += self.unet_loras
+
+ for lora in self.text_encoder_loras + self.unet_loras:
+ lora.resize(network_dim, network_alpha, scale)
+ if lora in weights_to_modify:
+ lora.enable = True
+
+ info = self.load_state_dict(weights, False)
+ if len(info.unexpected_keys) > 0:
+ print(f"Weights are loaded. Unexpected keys={info.unexpected_keys}")
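+
+
+# Hedged usage sketch (added for illustration; the base model id and the LoRA path are
+# placeholder assumptions, not values taken from this repository):
+if __name__ == "__main__":
+    from diffusers import StableDiffusionPipeline
+
+    base_model_id = "runwayml/stable-diffusion-v1-5"  # placeholder: any SD 1.x diffusers checkpoint
+    pipe = StableDiffusionPipeline.from_pretrained(base_model_id)
+    # Wrap the text encoder and U-Net; LoRAModule.apply() hooks their forward passes.
+    network = LoRANetwork(pipe.text_encoder, pipe.unet)
+    lora_path = "path/to/lora.safetensors"  # placeholder
+    if os.path.exists(lora_path):
+        # Enable the LoRA weights from the file at strength 0.8.
+        network.load(lora_path, 0.8)
+    # Disable every LoRA module again without rebuilding the pipeline.
+    network.reset()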
+
\ No newline at end of file
diff --git a/modules/model_diffusers.py b/modules/model_diffusers.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2d3adba092c533cf22e74c4d42ee336bb4a94de
--- /dev/null
+++ b/modules/model_diffusers.py
@@ -0,0 +1,2644 @@
+import importlib
+import inspect
+import math
+from pathlib import Path
+import re
+from collections import defaultdict
+import cv2
+import time
+import k_diffusion
+import numpy as np
+import PIL
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from einops import rearrange
+from .external_k_diffusion import CompVisDenoiser, CompVisVDenoiser
+from torch import einsum
+from torch.autograd.function import Function
+
+from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
+
+from diffusers import DiffusionPipeline
+from diffusers.utils import PIL_INTERPOLATION, is_accelerate_available, logging
+from diffusers.utils.torch_utils import randn_tensor,is_compiled_module,is_torch_version
+from diffusers.image_processor import VaeImageProcessor,PipelineImageInput
+from safetensors.torch import load_file
+from diffusers import ControlNetModel
+from PIL import Image
+import torchvision.transforms as transforms
+from diffusers import StableDiffusionPipeline,StableDiffusionControlNetPipeline,StableDiffusionControlNetImg2ImgPipeline,StableDiffusionImg2ImgPipeline,StableDiffusionInpaintPipeline,StableDiffusionControlNetInpaintPipeline
+from typing import Any, Callable, Dict, List, Optional, Union
+from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+from diffusers import AutoencoderKL, LMSDiscreteScheduler
+from .u_net_condition_modify import UNet2DConditionModel
+from diffusers.models.lora import adjust_lora_scale_text_encoder
+from diffusers.models import AutoencoderKL, ImageProjection,AsymmetricAutoencoderKL
+from diffusers.schedulers import KarrasDiffusionSchedulers
+from diffusers.utils import (
+ USE_PEFT_BACKEND,
+ deprecate,
+ logging,
+ replace_example_docstring,
+ scale_lora_layers,
+ unscale_lora_layers,
+)
+from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+from packaging import version
+from diffusers.configuration_utils import FrozenDict
+from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
+from .ip_adapter import IPAdapterMixin
+from .t2i_adapter import preprocessing_t2i_adapter,default_height_width
+from .encoder_prompt_modify import encode_prompt_function
+from .encode_region_map_function import encode_region_map
+
+
+def get_image_size(image):
+ height, width = None, None
+ if isinstance(image, Image.Image):
+ return image.size
+ elif isinstance(image, np.ndarray):
+ height, width = image.shape[:2]
+ return (width, height)
+ elif torch.is_tensor(image):
+        # channel-first (C, H, W) image tensor
+ if len(image.shape) == 3:
+ _, height, width = image.shape
+ else:
+ height, width = image.shape
+ return (width, height)
+ else:
+ raise TypeError("The image must be an instance of PIL.Image, numpy.ndarray, or torch.Tensor.")
+
+# Get the token ids of the prompt. At present only batch_size = 1 is supported, because the
+# prompt is handled as a single string. `class_name` is the calling pipeline instance
+# (e.g. a StableDiffusion pipeline).
+def get_id_text(class_name,prompt,max_length,negative_prompt = None,prompt_embeds: Optional[torch.Tensor] = None,negative_prompt_embeds: Optional[torch.Tensor] = None):
+    # If embeddings were passed in directly, the text prompt was not tokenized here, so there are no token ids to return.
+ if prompt_embeds is not None or negative_prompt_embeds is not None :
+ return None,None
+
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ if isinstance(class_name, TextualInversionLoaderMixin):
+ prompt = class_name.maybe_convert_prompt(prompt, class_name.tokenizer)
+
+ text_inputs = class_name.tokenizer(
+ prompt,
+ padding="max_length",
+ max_length=class_name.tokenizer.model_max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+
+ text_input_ids = text_inputs.input_ids.detach().cpu().numpy()
+
+ uncond_tokens: List[str]
+ if negative_prompt is None:
+ uncond_tokens = [""] * batch_size
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
+ raise TypeError(
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
+ f" {type(prompt)}."
+ )
+ elif isinstance(negative_prompt, str):
+ uncond_tokens = [negative_prompt]
+ elif batch_size != len(negative_prompt):
+ raise ValueError(
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+ " the batch size of `prompt`."
+ )
+ else:
+ uncond_tokens = negative_prompt
+
+    # textual inversion: process multi-vector tokens if necessary
+ if isinstance(class_name, TextualInversionLoaderMixin):
+ uncond_tokens = class_name.maybe_convert_prompt(uncond_tokens, class_name.tokenizer)
+
+ uncond_input = class_name.tokenizer(
+ uncond_tokens,
+ padding="max_length",
+ max_length=max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+
+ uncond_input_ids = uncond_input.input_ids.detach().cpu().numpy()
+
+
+ if batch_size == 1:
+ return text_input_ids.reshape((1,-1)),uncond_input_ids.reshape((1,-1))
+ return text_input_ids,uncond_input_ids
+
+
+
+
+# from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
+def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
+ """
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
+ """
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
+ # rescale the results from guidance (fixes overexposure)
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
+ return noise_cfg
+
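+# Illustrative note (added): with `guidance_rescale=1.0` the rescaled prediction keeps the
+# per-sample standard deviation of `noise_pred_text`, e.g.
+#     x = torch.randn(2, 4, 8, 8)
+#     rescale_noise_cfg(7.5 * x, x, guidance_rescale=1.0)  # std matches x.std() again
+# while `guidance_rescale=0.0` returns `noise_cfg` unchanged.
+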
+
+def retrieve_timesteps(
+ scheduler,
+ num_inference_steps: Optional[int] = None,
+ device: Optional[Union[str, torch.device]] = None,
+ timesteps: Optional[List[int]] = None,
+ sigmas: Optional[List[float]] = None,
+ **kwargs,
+):
+ """
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+ Args:
+ scheduler (`SchedulerMixin`):
+ The scheduler to get timesteps from.
+ num_inference_steps (`int`):
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+ must be `None`.
+ device (`str` or `torch.device`, *optional*):
+            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
+ timesteps (`List[int]`, *optional*):
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+ `num_inference_steps` and `sigmas` must be `None`.
+ sigmas (`List[float]`, *optional*):
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+ `num_inference_steps` and `timesteps` must be `None`.
+
+ Returns:
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+ second element is the number of inference steps.
+ """
+ if timesteps is not None and sigmas is not None:
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+ if timesteps is not None:
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+ if not accepts_timesteps:
+ raise ValueError(
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+ f" timestep schedules. Please check whether you are using the correct scheduler."
+ )
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ num_inference_steps = len(timesteps)
+ elif sigmas is not None:
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+ if not accept_sigmas:
+ raise ValueError(
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
+ )
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ num_inference_steps = len(timesteps)
+ else:
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ return timesteps, num_inference_steps
+
+class StableDiffusionPipeline_finetune(IPAdapterMixin,StableDiffusionPipeline):
+ def type_output(self,output_type,device,d_type,return_dict,latents,generator):
+ if not output_type == "latent":
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False,generator=generator)[0]
+ image, has_nsfw_concept = self.run_safety_checker(image, device, d_type)
+ else:
+ image = latents
+ has_nsfw_concept = None
+
+ if has_nsfw_concept is None:
+ do_denormalize = [True] * image.shape[0]
+ else:
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
+
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image, has_nsfw_concept)
+
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
+
+ @torch.no_grad()
+ def __call__(
+ self,
+ prompt: Union[str, List[str]] = None,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ num_inference_steps: int = 50,
+ timesteps: List[int] = None,
+ sigmas: List[float] = None,
+ guidance_scale: float = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_images_per_prompt: Optional[int] = 1,
+ eta: float = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.Tensor] = None,
+ prompt_embeds: Optional[torch.Tensor] = None,
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
+ ip_adapter_image: Optional[PipelineImageInput] = None,
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ #callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
+ #callback_steps: int = 1,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ guidance_rescale: float = 0.0,
+ clip_skip: Optional[int] = 0,
+ callback_on_step_end: Optional[
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+ ] = None,
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ region_map_state=None,
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
+ latent_processing = 0,
+ image_t2i_adapter : Optional[PipelineImageInput] = None,
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
+ adapter_conditioning_factor: float = 1.0,
+ long_encode: int = 0,
+ **kwargs,
+ ):
+ callback = kwargs.pop("callback", None)
+ callback_steps = kwargs.pop("callback_steps", None)
+
+ if callback is not None:
+ deprecate(
+ "callback",
+ "1.0.0",
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+ if callback_steps is not None:
+ deprecate(
+ "callback_steps",
+ "1.0.0",
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+ # 0. Default height and width to unet
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
+ # to deal with lora scaling and other possible forward hooks
+
+
+
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ height,
+ width,
+ callback_steps,
+ negative_prompt,
+ prompt_embeds,
+ negative_prompt_embeds,
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ callback_on_step_end_tensor_inputs,
+ )
+ self._guidance_scale = guidance_scale
+ self._guidance_rescale = guidance_rescale
+ self._clip_skip = clip_skip
+ self._cross_attention_kwargs = cross_attention_kwargs
+ self._interrupt = False
+
+ adapter_state = None
+ if image_t2i_adapter is not None:
+ height, width = default_height_width(self,height, width, image_t2i_adapter)
+ adapter_state = preprocessing_t2i_adapter(self,image_t2i_adapter,width,height,adapter_conditioning_scale,num_images_per_prompt)
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ do_classifier_free_guidance = guidance_scale > 1.0
+
+ # 3. Encode input prompt
+ lora_scale = (
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
+ )
+ #print(type(negative_prompt))
+ #print(type(prompt))
+ '''if negative_prompt is None:
+ negative_prompt = ''
+ if prompt is None:
+ prompt ='''
+ #text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt])
+ #text_embeddings = text_embeddings.to(self.unet.dtype)
+ #print(text_embeddings)
+        # Keep copies of the input embeddings so the token ids can still be recovered later if needed
+ prompt_embeds_copy = None
+ negative_prompt_embeds_copy = None
+ if prompt_embeds is not None:
+ prompt_embeds_copy = prompt_embeds.clone().detach()
+ if negative_prompt_embeds is not None:
+ negative_prompt_embeds_copy = negative_prompt_embeds.clone().detach()
+ prompt_embeds, negative_prompt_embeds,text_input_ids = encode_prompt_function(
+ self,
+ prompt,
+ device,
+ num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ lora_scale=lora_scale,
+ clip_skip=self.clip_skip,
+ long_encode = long_encode,
+ )
+
+ #Get token_id
+ #text_input_ids,uncond_input_ids = get_id_text(self,prompt,max_length = prompt_embeds.shape[1],negative_prompt = negative_prompt,prompt_embeds = prompt_embeds_copy,negative_prompt_embeds = negative_prompt_embeds_copy)
+ # For classifier free guidance, we need to do two forward passes.
+ # Here we concatenate the unconditional and text embeddings into a single batch
+ # to avoid doing two forward passes
+ '''if text_input_ids is not None:
+ text_input_ids = np.concatenate([uncond_input_ids, text_input_ids])'''
+ if self.do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+ image_embeds = self.prepare_ip_adapter_image_embeds(
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ device,
+ batch_size * num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ )
+
+ # 4. Prepare timesteps
+ #print(prompt_embeds)
+ timesteps, num_inference_steps = retrieve_timesteps(
+ self.scheduler, num_inference_steps, device, timesteps, sigmas
+ )
+
+ #4.1 Prepare region
+ region_state = encode_region_map(
+ self,
+ region_map_state,
+ width = width,
+ height = height,
+ num_images_per_prompt = num_images_per_prompt,
+ text_ids=text_input_ids,
+ )
+ if self.cross_attention_kwargs is None:
+ self._cross_attention_kwargs ={}
+ # 5. Prepare latent variables
+ num_channels_latents = self.unet.config.in_channels
+ latents = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ )
+
+ lst_latent = []
+ if latent_processing == 1:
+ lst_latent = [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+ # 6.1 Add image embeds for IP-Adapter
+ added_cond_kwargs = (
+ {"image_embeds": image_embeds}
+ if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
+ else None
+ )
+
+ # 6.2 Optionally get Guidance Scale Embedding
+ timestep_cond = None
+ if self.unet.config.time_cond_proj_dim is not None:
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
+ timestep_cond = self.get_guidance_scale_embedding(
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
+ ).to(device=device, dtype=latents.dtype)
+
+ #print(self.scheduler.sigmas)
+ #print(len(self.scheduler.sigmas))
+ #values, indices = torch.sort(self.scheduler.sigmas, descending=True)
+ #print(self.scheduler.sigmas)
+ # 7. Denoising loop
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+ self._num_timesteps = len(timesteps)
+
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ #step_x = 0
+ for i, t in enumerate(timesteps):
+
+ if self.interrupt:
+ continue
+
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+ #print(self.scheduler.sigmas[step_x])
+
+ region_prompt = {
+ "region_state": region_state,
+ "sigma": self.scheduler.sigmas[i],
+ "weight_func": weight_func,
+ }
+ self._cross_attention_kwargs["region_prompt"] = region_prompt
+ #print(t)
+ #step_x=step_x+1
+
+ #tensor_data = {k: torch.Tensor(v) for k, v in encoder_state.items()}
+ # predict the noise residual
+ down_intrablock_additional_residuals = None
+ if adapter_state is not None:
+ if i < int(num_inference_steps * adapter_conditioning_factor):
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
+ else:
+ down_intrablock_additional_residuals = None
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds,
+ timestep_cond=timestep_cond,
+ cross_attention_kwargs=self.cross_attention_kwargs,
+ down_intrablock_additional_residuals = down_intrablock_additional_residuals,
+ added_cond_kwargs=added_cond_kwargs,
+ return_dict=False,
+ )[0]
+
+ # perform guidance
+ if self.do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+
+ if latent_processing == 1:
+ lst_latent.append(self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0])
+
+ if callback_on_step_end is not None:
+ callback_kwargs = {}
+ for k in callback_on_step_end_tensor_inputs:
+ callback_kwargs[k] = locals()[k]
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+ latents = callback_outputs.pop("latents", latents)
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ step_idx = i // getattr(self.scheduler, "order", 1)
+ callback(step_idx, t, latents)
+ torch.cuda.empty_cache()
+
+ if latent_processing == 1:
+ if output_type == 'latent':
+ lst_latent.append(self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0])
+ return lst_latent
+ if output_type == 'latent':
+ return [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0],self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+ return [self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+
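+# Hedged usage sketch (added for illustration; the model id and scheduler choice are
+# assumptions, not taken from this repository). Unlike the stock diffusers pipeline,
+# __call__ returns a plain Python list of PIL images: every intermediate step when
+# latent_processing=1, otherwise only the final image. A scheduler that exposes
+# `.sigmas` (e.g. LMSDiscreteScheduler, imported above) is expected, because the
+# denoising loop reads `self.scheduler.sigmas[i]` for the region-prompt weighting.
+#
+#     pipe = StableDiffusionPipeline_finetune.from_pretrained("<sd-1.5 model id>")
+#     pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
+#     images = pipe("a watercolor fox", num_inference_steps=25, guidance_scale=7.0)
+#     images[-1].save("fox.png")
+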
+
+class StableDiffusionControlNetPipeline_finetune(IPAdapterMixin,StableDiffusionControlNetPipeline):
+ def type_output(self,output_type,device,d_type,return_dict,latents,generator):
+ if not output_type == "latent":
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False,generator=generator)[0]
+ image, has_nsfw_concept = self.run_safety_checker(image, device, d_type)
+ else:
+ image = latents
+ has_nsfw_concept = None
+
+ if has_nsfw_concept is None:
+ do_denormalize = [True] * image.shape[0]
+ else:
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
+
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image, has_nsfw_concept)
+
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
+
+ @torch.no_grad()
+ def __call__(
+ self,
+ prompt: Union[str, List[str]] = None,
+ image: PipelineImageInput = None,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ num_inference_steps: int = 50,
+ timesteps: List[int] = None,
+ sigmas: List[float] = None,
+ guidance_scale: float = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_images_per_prompt: Optional[int] = 1,
+ eta: float = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.Tensor] = None,
+ prompt_embeds: Optional[torch.Tensor] = None,
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
+ ip_adapter_image: Optional[PipelineImageInput] = None,
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ guidance_rescale: float = 0.0,
+ #callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
+ #callback_steps: int = 1,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
+ guess_mode: bool = False,
+ control_guidance_start: Union[float, List[float]] = 0.0,
+ control_guidance_end: Union[float, List[float]] = 1.0,
+ clip_skip: Optional[int] = 0,
+ callback_on_step_end: Optional[
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+ ] = None,
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ region_map_state=None,
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
+ latent_processing = 0,
+ image_t2i_adapter : Optional[PipelineImageInput] = None,
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
+ adapter_conditioning_factor: float = 1.0,
+ long_encode: int = 0,
+ **kwargs,
+ ):
+ callback = kwargs.pop("callback", None)
+ callback_steps = kwargs.pop("callback_steps", None)
+
+ if callback is not None:
+ deprecate(
+ "callback",
+ "1.0.0",
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+ if callback_steps is not None:
+ deprecate(
+ "callback_steps",
+ "1.0.0",
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
+
+ # align format for control guidance
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
+ control_guidance_start, control_guidance_end = (
+ mult * [control_guidance_start],
+ mult * [control_guidance_end],
+ )
+
+ if height is None:
+ _,height = get_image_size(image)
+ height = int((height // 8)*8)
+ if width is None:
+ width,_ = get_image_size(image)
+ width = int((width // 8)*8)
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ image,
+ callback_steps,
+ negative_prompt,
+ prompt_embeds,
+ negative_prompt_embeds,
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ controlnet_conditioning_scale,
+ control_guidance_start,
+ control_guidance_end,
+ callback_on_step_end_tensor_inputs,
+ )
+
+ self._guidance_scale = guidance_scale
+ self._clip_skip = clip_skip
+ self._cross_attention_kwargs = cross_attention_kwargs
+
+ adapter_state = None
+
+ if image_t2i_adapter is not None:
+ height, width = default_height_width(self,height, width, image_t2i_adapter)
+ adapter_state = preprocessing_t2i_adapter(self,image_t2i_adapter,width,height,adapter_conditioning_scale,num_images_per_prompt)
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ #do_classifier_free_guidance = guidance_scale > 1.0
+
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
+
+ global_pool_conditions = (
+ controlnet.config.global_pool_conditions
+ if isinstance(controlnet, ControlNetModel)
+ else controlnet.nets[0].config.global_pool_conditions
+ )
+ guess_mode = guess_mode or global_pool_conditions
+
+ # 3. Encode input prompt
+ text_encoder_lora_scale = (
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
+ )
+
+ #text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt])
+ #text_embeddings = text_embeddings.to(self.unet.dtype)
+
+ #Copy input prompt_embeds and negative_prompt_embeds
+ prompt_embeds_copy = None
+ negative_prompt_embeds_copy = None
+ if prompt_embeds is not None:
+ prompt_embeds_copy = prompt_embeds.clone().detach()
+ if negative_prompt_embeds is not None:
+ negative_prompt_embeds_copy = negative_prompt_embeds.clone().detach()
+ prompt_embeds, negative_prompt_embeds,text_input_ids = encode_prompt_function(
+ self,
+ prompt,
+ device,
+ num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ lora_scale=text_encoder_lora_scale,
+ clip_skip=self.clip_skip,
+ long_encode = long_encode,
+ )
+
+ #Get token_id
+ #text_input_ids,uncond_input_ids = get_id_text(self,prompt,max_length = prompt_embeds.shape[1],negative_prompt = negative_prompt,prompt_embeds = prompt_embeds_copy,negative_prompt_embeds = negative_prompt_embeds_copy)
+ # For classifier free guidance, we need to do two forward passes.
+ # Here we concatenate the unconditional and text embeddings into a single batch
+ # to avoid doing two forward passes
+ '''if text_input_ids is not None:
+ text_input_ids = np.concatenate([uncond_input_ids, text_input_ids])'''
+ if self.do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+ image_embeds = self.prepare_ip_adapter_image_embeds(
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ device,
+ batch_size * num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ )
+
+ #if height is None and width is None:
+ #height, width = image.shape[-2:]
+
+ # 4. Prepare image
+ if isinstance(controlnet, ControlNetModel):
+ image = self.prepare_image(
+ image=image,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=controlnet.dtype,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=guess_mode,
+ )
+ elif isinstance(controlnet, MultiControlNetModel):
+ images = []
+ # Nested lists as ControlNet condition
+ if isinstance(image[0], list):
+ # Transpose the nested image list
+ image = [list(t) for t in zip(*image)]
+
+ for image_ in image:
+ image_ = self.prepare_image(
+ image=image_,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=controlnet.dtype,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=guess_mode,
+ )
+
+ images.append(image_)
+
+ image = images
+ height, width = image[0].shape[-2:]
+ else:
+ assert False
+
+ # 5. Prepare timesteps
+ timesteps, num_inference_steps = retrieve_timesteps(
+ self.scheduler, num_inference_steps, device, timesteps, sigmas
+ )
+ self._num_timesteps = len(timesteps)
+
+ # 6. Prepare latent variables
+ region_state = encode_region_map(
+ self,
+ region_map_state,
+ width = width,
+ height = height,
+ num_images_per_prompt = num_images_per_prompt,
+ text_ids=text_input_ids,
+ )
+ if self.cross_attention_kwargs is None:
+ self._cross_attention_kwargs ={}
+ num_channels_latents = self.unet.config.in_channels
+ latents = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ )
+
+ # 6.5 Optionally get Guidance Scale Embedding
+ timestep_cond = None
+ if self.unet.config.time_cond_proj_dim is not None:
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
+ timestep_cond = self.get_guidance_scale_embedding(
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
+ ).to(device=device, dtype=latents.dtype)
+
+ lst_latent = []
+ if latent_processing == 1:
+ lst_latent = [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+ # 7.1 Add image embeds for IP-Adapter
+ added_cond_kwargs = (
+ {"image_embeds": image_embeds}
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None
+ else None
+ )
+
+ # 7.2 Create tensor stating which controlnets to keep
+ controlnet_keep = []
+ for i in range(len(timesteps)):
+ keeps = [
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
+ for s, e in zip(control_guidance_start, control_guidance_end)
+ ]
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
+
+ # 8. Denoising loop
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+ is_unet_compiled = is_compiled_module(self.unet)
+ is_controlnet_compiled = is_compiled_module(self.controlnet)
+ is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ #step_x = 0
+ for i, t in enumerate(timesteps):
+ # Relevant thread:
+ # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
+ if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
+ torch._inductor.cudagraph_mark_step_begin()
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ # controlnet(s) inference
+ if guess_mode and self.do_classifier_free_guidance:
+ # Infer ControlNet only for the conditional batch.
+ control_model_input = latents
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
+ else:
+ control_model_input = latent_model_input
+ controlnet_prompt_embeds = prompt_embeds
+
+ if isinstance(controlnet_keep[i], list):
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
+ else:
+ controlnet_cond_scale = controlnet_conditioning_scale
+ if isinstance(controlnet_cond_scale, list):
+ controlnet_cond_scale = controlnet_cond_scale[0]
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
+
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
+ control_model_input,
+ t,
+ encoder_hidden_states=controlnet_prompt_embeds,
+ controlnet_cond=image,
+ conditioning_scale=cond_scale,
+ guess_mode=guess_mode,
+ return_dict=False,
+ )
+
+ if guess_mode and self.do_classifier_free_guidance:
+ # Inferred ControlNet only for the conditional batch.
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
+ # add 0 to the unconditional batch to keep it unchanged.
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
+
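+ # Regional prompting: the encoded region map, the current noise sigma and the weighting
+ # function are passed through cross_attention_kwargs so the patched attention processors
+ # can re-weight prompt tokens per image region.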
+ region_prompt = {
+ "region_state": region_state,
+ "sigma": self.scheduler.sigmas[i],
+ "weight_func": weight_func,
+ }
+ self._cross_attention_kwargs["region_prompt"] = region_prompt
+ #print(t)
+ #step_x=step_x+1
+
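+ # T2I-Adapter residuals are only injected for the first `adapter_conditioning_factor`
+ # fraction of the denoising steps.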
+ down_intrablock_additional_residuals = None
+ if adapter_state is not None:
+ if i < int(num_inference_steps * adapter_conditioning_factor):
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
+ else:
+ down_intrablock_additional_residuals = None
+
+ # predict the noise residual
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds,
+ timestep_cond=timestep_cond,
+ cross_attention_kwargs=self.cross_attention_kwargs,
+ down_block_additional_residuals=down_block_res_samples,
+ mid_block_additional_residual=mid_block_res_sample,
+ down_intrablock_additional_residuals = down_intrablock_additional_residuals,
+ added_cond_kwargs=added_cond_kwargs,
+ return_dict=False,
+ )[0]
+
+ # perform guidance
+ if self.do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
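+ # guidance_rescale (sec. 3.4 of https://arxiv.org/abs/2305.08891) rescales the guided noise
+ # prediction towards the statistics of the text-conditioned prediction to reduce over-exposure
+ # at high guidance scales.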
+ if self.do_classifier_free_guidance and guidance_rescale > 0.0:
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+
+ if latent_processing == 1:
+ lst_latent.append(self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0])
+
+ if callback_on_step_end is not None:
+ callback_kwargs = {}
+ for k in callback_on_step_end_tensor_inputs:
+ callback_kwargs[k] = locals()[k]
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+ latents = callback_outputs.pop("latents", latents)
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ step_idx = i // getattr(self.scheduler, "order", 1)
+ callback(step_idx, t, latents)
+
+ # If we do sequential model offloading, let's offload unet and controlnet
+ # manually for max memory savings
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.unet.to("cpu")
+ self.controlnet.to("cpu")
+ torch.cuda.empty_cache()
+
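+ # Return format: a list of per-step previews when latent_processing == 1, otherwise a
+ # single-element list with the final image; with output_type == 'latent' the raw latents
+ # are appended as the last element.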
+ if latent_processing == 1:
+ if output_type == 'latent':
+ lst_latent.append(self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0])
+ return lst_latent
+ if output_type == 'latent':
+ return [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0],self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+ return [self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+
+class StableDiffusionControlNetImg2ImgPipeline_finetune(IPAdapterMixin,StableDiffusionControlNetImg2ImgPipeline):
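+ # Illustrative usage sketch (names are placeholders; models and ControlNets are loaded elsewhere):
+ #   images = pipe(prompt="...", image=init_img, control_image=cond_img, strength=0.6,
+ #                 controlnet_conditioning_scale=0.8, num_inference_steps=30, latent_processing=0)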
+ def type_output(self,output_type,device,d_type,return_dict,latents,generator):
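+ # Decode the latents (unless output_type == "latent"), run the safety checker and postprocess
+ # to the requested output type, mirroring the tail of the standard pipeline __call__.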
+ if not output_type == "latent":
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False,generator=generator)[0]
+ image, has_nsfw_concept = self.run_safety_checker(image, device, d_type)
+ else:
+ image = latents
+ has_nsfw_concept = None
+
+ if has_nsfw_concept is None:
+ do_denormalize = [True] * image.shape[0]
+ else:
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
+
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image, has_nsfw_concept)
+
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
+
+ @torch.no_grad()
+ def __call__(
+ self,
+ prompt: Union[str, List[str]] = None,
+ image: PipelineImageInput = None,
+ control_image: PipelineImageInput = None,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ strength: float = 0.8,
+ num_inference_steps: int = 50,
+ guidance_scale: float = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_images_per_prompt: Optional[int] = 1,
+ eta: float = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.Tensor] = None,
+ prompt_embeds: Optional[torch.Tensor] = None,
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
+ ip_adapter_image: Optional[PipelineImageInput] = None,
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ guidance_rescale: float = 0.0,
+ #callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
+ #callback_steps: int = 1,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
+ guess_mode: bool = False,
+ control_guidance_start: Union[float, List[float]] = 0.0,
+ control_guidance_end: Union[float, List[float]] = 1.0,
+ clip_skip: Optional[int] = 0,
+ callback_on_step_end: Optional[
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+ ] = None,
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ region_map_state=None,
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
+ latent_processing = 0,
+ image_t2i_adapter : Optional[PipelineImageInput] = None,
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
+ adapter_conditioning_factor: float = 1.0,
+ long_encode: int = 0,
+ **kwargs,
+ ):
+ init_step = num_inference_steps
+ callback = kwargs.pop("callback", None)
+ callback_steps = kwargs.pop("callback_steps", None)
+
+ if callback is not None:
+ deprecate(
+ "callback",
+ "1.0.0",
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+ if callback_steps is not None:
+ deprecate(
+ "callback_steps",
+ "1.0.0",
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
+ if height is None:
+ _,height = get_image_size(image)
+ height = int((height // 8)*8)
+ if width is None:
+ width,_ = get_image_size(image)
+ width = int((width // 8)*8)
+
+ # align format for control guidance
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
+ control_guidance_start, control_guidance_end = (
+ mult * [control_guidance_start],
+ mult * [control_guidance_end],
+ )
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ control_image,
+ callback_steps,
+ negative_prompt,
+ prompt_embeds,
+ negative_prompt_embeds,
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ controlnet_conditioning_scale,
+ control_guidance_start,
+ control_guidance_end,
+ callback_on_step_end_tensor_inputs,
+ )
+
+ self._guidance_scale = guidance_scale
+ self._clip_skip = clip_skip
+ self._cross_attention_kwargs = cross_attention_kwargs
+
+ adapter_state = None
+
+ if image_t2i_adapter is not None:
+ height, width = default_height_width(self,height, width, image_t2i_adapter)
+ adapter_state = preprocessing_t2i_adapter(self,image_t2i_adapter,width,height,adapter_conditioning_scale,num_images_per_prompt)
+
+ #self.prompt_parser = FrozenCLIPEmbedderWithCustomWords(self.tokenizer, self.text_encoder,clip_skip+1)
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ #do_classifier_free_guidance = guidance_scale > 1.0
+
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
+
+ global_pool_conditions = (
+ controlnet.config.global_pool_conditions
+ if isinstance(controlnet, ControlNetModel)
+ else controlnet.nets[0].config.global_pool_conditions
+ )
+ guess_mode = guess_mode or global_pool_conditions
+
+ # 3. Encode input prompt
+ text_encoder_lora_scale = (
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
+ )
+ #text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt])
+ #text_embeddings = text_embeddings.to(self.unet.dtype)
+
+ #Copy input prompt_embeds and negative_prompt_embeds
+ prompt_embeds_copy = None
+ negative_prompt_embeds_copy = None
+ if prompt_embeds is not None:
+ prompt_embeds_copy = prompt_embeds.clone().detach()
+ if negative_prompt_embeds is not None:
+ negative_prompt_embeds_copy = negative_prompt_embeds.clone().detach()
+
+ prompt_embeds, negative_prompt_embeds,text_input_ids = encode_prompt_function(
+ self,
+ prompt,
+ device,
+ num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ lora_scale=text_encoder_lora_scale,
+ clip_skip=self.clip_skip,
+ long_encode = long_encode,
+ )
+
+ #Get token_id
+ #text_input_ids,uncond_input_ids = get_id_text(self,prompt,max_length = prompt_embeds.shape[1],negative_prompt = negative_prompt,prompt_embeds = prompt_embeds_copy,negative_prompt_embeds = negative_prompt_embeds_copy)
+ '''if text_input_ids is not None:
+ text_input_ids = np.concatenate([uncond_input_ids, text_input_ids])'''
+ # For classifier free guidance, we need to do two forward passes.
+ # Here we concatenate the unconditional and text embeddings into a single batch
+ # to avoid doing two forward passes
+ if self.do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+ image_embeds = self.prepare_ip_adapter_image_embeds(
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ device,
+ batch_size * num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ )
+ # 4. Prepare image
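+ # `image` is the img2img init image; the ControlNet condition (`control_image`) is prepared
+ # separately in step 5 below.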
+ image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
+
+
+ # 5. Prepare controlnet_conditioning_image
+ if isinstance(controlnet, ControlNetModel):
+ control_image = self.prepare_control_image(
+ image=control_image,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=controlnet.dtype,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=guess_mode,
+ )
+ elif isinstance(controlnet, MultiControlNetModel):
+ control_images = []
+ # Nested lists as ControlNet condition
+ if isinstance(control_image[0], list):
+ # Transpose the nested image list
+ control_image = [list(t) for t in zip(*control_image)]
+
+ for control_image_ in control_image:
+ control_image_ = self.prepare_control_image(
+ image=control_image_,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=controlnet.dtype,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=guess_mode,
+ )
+
+ control_images.append(control_image_)
+
+ control_image = control_images
+ else:
+ assert False
+
+ # 5. Encode region map and prepare timesteps
+ region_state = encode_region_map(
+ self,
+ region_map_state,
+ width = width,
+ height = height,
+ num_images_per_prompt = num_images_per_prompt,
+ text_ids=text_input_ids,
+ )
+ if self.cross_attention_kwargs is None:
+ self._cross_attention_kwargs ={}
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
+ self._num_timesteps = len(timesteps)
+
+ # 6. Prepare latent variables
+ if latents is None:
+ latents = self.prepare_latents(
+ image,
+ latent_timestep,
+ batch_size,
+ num_images_per_prompt,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ )
+
+ lst_latent = []
+ if latent_processing == 1:
+ lst_latent = [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+ # 7.1 Add image embeds for IP-Adapter
+ added_cond_kwargs = (
+ {"image_embeds": image_embeds}
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None
+ else None
+ )
+
+ # 7.2 Create tensor stating which controlnets to keep
+ controlnet_keep = []
+ for i in range(len(timesteps)):
+ keeps = [
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
+ for s, e in zip(control_guidance_start, control_guidance_end)
+ ]
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
+
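+ # With strength < 1 the first steps are skipped, so realign the sigma schedule to the
+ # truncated timestep list.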
+ sigmas = self.scheduler.sigmas[init_step-len(timesteps):]
+
+
+ # 8. Denoising loop
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ #step_x = 0
+ for i, t in enumerate(timesteps):
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ # controlnet(s) inference
+ if guess_mode and self.do_classifier_free_guidance:
+ # Infer ControlNet only for the conditional batch.
+ control_model_input = latents
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
+ else:
+ control_model_input = latent_model_input
+ controlnet_prompt_embeds = prompt_embeds
+
+ if isinstance(controlnet_keep[i], list):
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
+ else:
+ controlnet_cond_scale = controlnet_conditioning_scale
+ if isinstance(controlnet_cond_scale, list):
+ controlnet_cond_scale = controlnet_cond_scale[0]
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
+
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
+ control_model_input,
+ t,
+ encoder_hidden_states=controlnet_prompt_embeds,
+ controlnet_cond=control_image,
+ conditioning_scale=cond_scale,
+ guess_mode=guess_mode,
+ return_dict=False,
+ )
+
+ if guess_mode and self.do_classifier_free_guidance:
+ # Inferred ControlNet only for the conditional batch.
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
+ # add 0 to the unconditional batch to keep it unchanged.
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
+
+ region_prompt = {
+ "region_state": region_state,
+ "sigma": sigmas[i],  # sigma aligned with the strength-truncated timestep schedule
+ "weight_func": weight_func,
+ }
+ self._cross_attention_kwargs["region_prompt"] = region_prompt
+ #print(t)
+ #step_x=step_x+1
+
+ down_intrablock_additional_residuals = None
+ if adapter_state is not None:
+ if i < int(num_inference_steps * adapter_conditioning_factor):
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
+ else:
+ down_intrablock_additional_residuals = None
+
+ # predict the noise residual
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds,
+ cross_attention_kwargs=self.cross_attention_kwargs,
+ down_block_additional_residuals=down_block_res_samples,
+ mid_block_additional_residual=mid_block_res_sample,
+ down_intrablock_additional_residuals = down_intrablock_additional_residuals,
+ added_cond_kwargs=added_cond_kwargs,
+ return_dict=False,
+ )[0]
+
+ # perform guidance
+ if self.do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+ if self.do_classifier_free_guidance and guidance_rescale > 0.0:
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+
+ if latent_processing == 1:
+ lst_latent.append(self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0])
+
+ if callback_on_step_end is not None:
+ callback_kwargs = {}
+ for k in callback_on_step_end_tensor_inputs:
+ callback_kwargs[k] = locals()[k]
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+ latents = callback_outputs.pop("latents", latents)
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ step_idx = i // getattr(self.scheduler, "order", 1)
+ callback(step_idx, t, latents)
+
+ # If we do sequential model offloading, let's offload unet and controlnet
+ # manually for max memory savings
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.unet.to("cpu")
+ self.controlnet.to("cpu")
+ torch.cuda.empty_cache()
+
+ if latent_processing == 1:
+ if output_type == 'latent':
+ lst_latent.append(self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0])
+ return lst_latent
+ if output_type == 'latent':
+ return [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0],self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+ return [self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+
+class StableDiffusionImg2ImgPipeline_finetune(IPAdapterMixin,StableDiffusionImg2ImgPipeline):
+ def type_output(self,output_type,device,d_type,return_dict,latents,generator):
+ if not output_type == "latent":
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False,generator=generator)[0]
+ image, has_nsfw_concept = self.run_safety_checker(image, device, d_type)
+ else:
+ image = latents
+ has_nsfw_concept = None
+
+ if has_nsfw_concept is None:
+ do_denormalize = [True] * image.shape[0]
+ else:
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
+
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image, has_nsfw_concept)
+
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
+
+ @torch.no_grad()
+ def __call__(
+ self,
+ prompt: Union[str, List[str]] = None,
+ image: PipelineImageInput = None,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ strength: float = 0.8,
+ num_inference_steps: Optional[int] = 50,
+ timesteps: List[int] = None,
+ sigmas: List[float] = None,
+ guidance_scale: Optional[float] = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_images_per_prompt: Optional[int] = 1,
+ eta: Optional[float] = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ prompt_embeds: Optional[torch.Tensor] = None,
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
+ ip_adapter_image: Optional[PipelineImageInput] = None,
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ guidance_rescale: float = 0.0,
+ #callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
+ #callback_steps: int = 1,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ clip_skip: int = 0,
+ callback_on_step_end: Optional[
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+ ] = None,
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ region_map_state=None,
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
+ latent_processing = 0,
+ image_t2i_adapter : Optional[PipelineImageInput] = None,
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
+ adapter_conditioning_factor: float = 1.0,
+ long_encode: int = 0,
+ **kwargs,
+ ):
+ init_step = num_inference_steps
+ callback = kwargs.pop("callback", None)
+ callback_steps = kwargs.pop("callback_steps", None)
+
+ if callback is not None:
+ deprecate(
+ "callback",
+ "1.0.0",
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+ if callback_steps is not None:
+ deprecate(
+ "callback_steps",
+ "1.0.0",
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ strength,
+ callback_steps,
+ negative_prompt,
+ prompt_embeds,
+ negative_prompt_embeds,
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ callback_on_step_end_tensor_inputs,
+ )
+
+ #self.prompt_parser = FrozenCLIPEmbedderWithCustomWords(self.tokenizer, self.text_encoder,clip_skip+1)
+
+
+ self._guidance_scale = guidance_scale
+ self._clip_skip = clip_skip
+ self._cross_attention_kwargs = cross_attention_kwargs
+ self._interrupt = False
+
+ if height is None:
+ _,height = get_image_size(image)
+ height = int((height // 8)*8)
+ if width is None:
+ width,_ = get_image_size(image)
+ width = int((width // 8)*8)
+
+ adapter_state = None
+
+ if image_t2i_adapter is not None:
+ height, width = default_height_width(self,height, width, image_t2i_adapter)
+ adapter_state = preprocessing_t2i_adapter(self,image_t2i_adapter,width,height,adapter_conditioning_scale,num_images_per_prompt)
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+ device = self._execution_device
+
+
+ # 3. Encode input prompt
+ text_encoder_lora_scale = (
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
+ )
+ #Copy input prompt_embeds and negative_prompt_embeds
+ prompt_embeds_copy = None
+ negative_prompt_embeds_copy = None
+ if prompt_embeds is not None:
+ prompt_embeds_copy = prompt_embeds.clone().detach()
+ if negative_prompt_embeds is not None:
+ negative_prompt_embeds_copy = negative_prompt_embeds.clone().detach()
+
+ prompt_embeds, negative_prompt_embeds,text_input_ids = encode_prompt_function(
+ self,
+ prompt,
+ device,
+ num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ lora_scale=text_encoder_lora_scale,
+ clip_skip=self.clip_skip,
+ long_encode = long_encode,
+ )
+
+ #Get token_id
+ #text_input_ids,uncond_input_ids = get_id_text(self,prompt,max_length = prompt_embeds.shape[1],negative_prompt = negative_prompt,prompt_embeds = prompt_embeds_copy,negative_prompt_embeds = negative_prompt_embeds_copy)
+ '''if text_input_ids is not None:
+ text_input_ids = np.concatenate([uncond_input_ids, text_input_ids])'''
+ #text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt])
+ #text_embeddings = text_embeddings.to(self.unet.dtype)
+ # For classifier free guidance, we need to do two forward passes.
+ # Here we concatenate the unconditional and text embeddings into a single batch
+ # to avoid doing two forward passes
+ if self.do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+ image_embeds = self.prepare_ip_adapter_image_embeds(
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ device,
+ batch_size * num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ )
+
+ # 4. Preprocess image
+ image = self.image_processor.preprocess(image)
+
+ # 5. Encode region map and set timesteps
+ region_state = encode_region_map(
+ self,
+ region_map_state,
+ width = width,
+ height = height,
+ num_images_per_prompt = num_images_per_prompt,
+ text_ids=text_input_ids,
+ )
+ if self.cross_attention_kwargs is None:
+ self._cross_attention_kwargs ={}
+ timesteps, num_inference_steps = retrieve_timesteps(
+ self.scheduler, num_inference_steps, device, timesteps, sigmas
+ )
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
+
+ # 6. Prepare latent variables
+ latents = self.prepare_latents(
+ image,
+ latent_timestep,
+ batch_size,
+ num_images_per_prompt,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ )
+
+ lst_latent =[]
+ if latent_processing == 1:
+ lst_latent = [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+
+ # 7.1 Add image embeds for IP-Adapter
+ added_cond_kwargs = (
+ {"image_embeds": image_embeds}
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None
+ else None
+ )
+
+ # 7.2 Optionally get Guidance Scale Embedding
+ timestep_cond = None
+ if self.unet.config.time_cond_proj_dim is not None:
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
+ timestep_cond = self.get_guidance_scale_embedding(
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
+ ).to(device=device, dtype=latents.dtype)
+
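+ # With strength < 1 the first steps are skipped, so realign the sigma schedule to the
+ # truncated timestep list.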
+ sigmas = self.scheduler.sigmas[init_step-len(timesteps):]
+
+ # 8. Denoising loop
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+ self._num_timesteps = len(timesteps)
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ #step_x = 0
+ for i, t in enumerate(timesteps):
+ if self.interrupt:
+ continue
+
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ region_prompt = {
+ "region_state": region_state,
+ "sigma": sigmas[i],  # sigma aligned with the strength-truncated timestep schedule
+ "weight_func": weight_func,
+ }
+ self._cross_attention_kwargs["region_prompt"] = region_prompt
+ #print(t)
+ #step_x=step_x+1
+
+ down_intrablock_additional_residuals = None
+ if adapter_state is not None:
+ if i < int(num_inference_steps * adapter_conditioning_factor):
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
+ else:
+ down_intrablock_additional_residuals = None
+ # predict the noise residual
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds,
+ timestep_cond=timestep_cond,
+ cross_attention_kwargs=self.cross_attention_kwargs,
+ down_intrablock_additional_residuals = down_intrablock_additional_residuals,
+ added_cond_kwargs=added_cond_kwargs,
+ return_dict=False,
+ )[0]
+
+ # perform guidance
+ if self.do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+ if self.do_classifier_free_guidance and guidance_rescale > 0.0:
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+ if latent_processing == 1:
+ lst_latent.append(self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0])
+
+ if callback_on_step_end is not None:
+ callback_kwargs = {}
+ for k in callback_on_step_end_tensor_inputs:
+ callback_kwargs[k] = locals()[k]
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+ latents = callback_outputs.pop("latents", latents)
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ step_idx = i // getattr(self.scheduler, "order", 1)
+ callback(step_idx, t, latents)
+
+ if latent_processing == 1:
+ if output_type == 'latent':
+ lst_latent.append(self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0])
+ return lst_latent
+ if output_type == 'latent':
+ return [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator).images[0],self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+ return [self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator).images[0]]
+
+
+
+class StableDiffusionInpaintPipeline_finetune(IPAdapterMixin,StableDiffusionInpaintPipeline):
+ def type_output(self,output_type,device,d_type,return_dict,latents,generator,init_image,padding_mask_crop,mask_image,original_image,crops_coords):
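+ # Same decode/safety-check/postprocess helper as in the other pipelines, extended for inpainting:
+ # when padding_mask_crop is used the generated crop is pasted back onto the original image.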
+ if not output_type == "latent":
+ condition_kwargs = {}
+ if isinstance(self.vae, AsymmetricAutoencoderKL):
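+ # NOTE: `mask_condition` and `masked_image_latents` are not parameters of this helper, so this
+ # AsymmetricAutoencoderKL branch assumes they are available in the enclosing scope.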
+ init_image = init_image.to(device=device, dtype=masked_image_latents.dtype)
+ init_image_condition = init_image.clone()
+ init_image = self._encode_vae_image(init_image, generator=generator)
+ mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype)
+ condition_kwargs = {"image": init_image_condition, "mask": mask_condition}
+ image = self.vae.decode(
+ latents / self.vae.config.scaling_factor, return_dict=False, generator=generator, **condition_kwargs
+ )[0]
+ image, has_nsfw_concept = self.run_safety_checker(image, device, d_type)
+ else:
+ image = latents
+ has_nsfw_concept = None
+
+ if has_nsfw_concept is None:
+ do_denormalize = [True] * image.shape[0]
+ else:
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
+
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
+
+ if padding_mask_crop is not None:
+ image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image]
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image, has_nsfw_concept)
+
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
+
+ @torch.no_grad()
+ def __call__(
+ self,
+ prompt: Union[str, List[str]] = None,
+ image: PipelineImageInput = None,
+ mask_image: PipelineImageInput = None,
+ masked_image_latents: torch.Tensor = None,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ padding_mask_crop: Optional[int] = None,
+ strength: float = 1.0,
+ num_inference_steps: int = 50,
+ timesteps: List[int] = None,
+ sigmas: List[float] = None,
+ guidance_scale: float = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_images_per_prompt: Optional[int] = 1,
+ eta: float = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.Tensor] = None,
+ prompt_embeds: Optional[torch.Tensor] = None,
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
+ ip_adapter_image: Optional[PipelineImageInput] = None,
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ clip_skip: int = None,
+ callback_on_step_end: Optional[
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+ ] = None,
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ region_map_state=None,
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
+ latent_processing = 0,
+ image_t2i_adapter : Optional[PipelineImageInput] = None,
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
+ adapter_conditioning_factor: float = 1.0,
+ long_encode: int = 0,
+ guidance_rescale: float = 0.0,
+ **kwargs,
+ ):
+
+ callback = kwargs.pop("callback", None)
+ callback_steps = kwargs.pop("callback_steps", None)
+
+ if callback is not None:
+ deprecate(
+ "callback",
+ "1.0.0",
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+ if callback_steps is not None:
+ deprecate(
+ "callback_steps",
+ "1.0.0",
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+ # 0. Default height and width to unet
+ '''height = height or self.unet.config.sample_size * self.vae_scale_factor
+ width = width or self.unet.config.sample_size * self.vae_scale_factor'''
+
+ if height is None:
+ _,height = get_image_size(image)
+ height = int((height // 8)*8)
+ if width is None:
+ width,_ = get_image_size(image)
+ width = int((width // 8)*8)
+
+ adapter_state = None
+
+ if image_t2i_adapter is not None:
+ height, width = default_height_width(self,height, width, image_t2i_adapter)
+ adapter_state = preprocessing_t2i_adapter(self,image_t2i_adapter,width,height,adapter_conditioning_scale,num_images_per_prompt)
+
+ # 1. Check inputs
+ self.check_inputs(
+ prompt,
+ image,
+ mask_image,
+ height,
+ width,
+ strength,
+ callback_steps,
+ output_type,
+ negative_prompt,
+ prompt_embeds,
+ negative_prompt_embeds,
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ callback_on_step_end_tensor_inputs,
+ padding_mask_crop,
+ )
+
+ self._guidance_scale = guidance_scale
+ self._clip_skip = clip_skip
+ self._cross_attention_kwargs = cross_attention_kwargs
+ self._interrupt = False
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+
+ # 3. Encode input prompt
+ text_encoder_lora_scale = (
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
+ )
+
+ #Copy input prompt_embeds and negative_prompt_embeds
+ prompt_embeds_copy = None
+ negative_prompt_embeds_copy = None
+ if prompt_embeds is not None:
+ prompt_embeds_copy = prompt_embeds.clone().detach()
+ if negative_prompt_embeds is not None:
+ negative_prompt_embeds_copy = negative_prompt_embeds.clone().detach()
+
+
+ prompt_embeds, negative_prompt_embeds,text_input_ids = encode_prompt_function(
+ self,
+ prompt,
+ device,
+ num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ lora_scale=text_encoder_lora_scale,
+ clip_skip=self.clip_skip,
+ long_encode = long_encode,
+ )
+
+ #Get token_id
+ #text_input_ids,uncond_input_ids = get_id_text(self,prompt,max_length = prompt_embeds.shape[1],negative_prompt = negative_prompt,prompt_embeds = prompt_embeds_copy,negative_prompt_embeds = negative_prompt_embeds_copy)
+ '''if text_input_ids is not None:
+ text_input_ids = np.concatenate([uncond_input_ids, text_input_ids])'''
+ #text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt])
+ #text_embeddings = text_embeddings.to(self.unet.dtype)
+ # For classifier free guidance, we need to do two forward passes.
+ # Here we concatenate the unconditional and text embeddings into a single batch
+ # to avoid doing two forward passes
+ if self.do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+ image_embeds = self.prepare_ip_adapter_image_embeds(
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ device,
+ batch_size * num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ )
+
+ # 4. set timesteps
+ timesteps, num_inference_steps = retrieve_timesteps(
+ self.scheduler, num_inference_steps, device, timesteps, sigmas
+ )
+ timesteps, num_inference_steps = self.get_timesteps(
+ num_inference_steps=num_inference_steps, strength=strength, device=device
+ )
+ # check that number of inference steps is not < 1 - as this doesn't make sense
+ if num_inference_steps < 1:
+ raise ValueError(
+ f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
+ f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
+ )
+ # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
+ # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
+ is_strength_max = strength == 1.0
+
+ # 4.1 Preprocess region map
+ region_state = encode_region_map(
+ self,
+ region_map_state,
+ width = width,
+ height = height,
+ num_images_per_prompt = num_images_per_prompt,
+ text_ids=text_input_ids,
+ )
+ if self.cross_attention_kwargs is None:
+ self._cross_attention_kwargs ={}
+
+ # 5. Preprocess mask and image
+
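+ # With padding_mask_crop set, only a crop around the mask (plus padding) is generated and later
+ # pasted back onto the original image via apply_overlay in type_output.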
+ if padding_mask_crop is not None:
+ crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
+ resize_mode = "fill"
+ else:
+ crops_coords = None
+ resize_mode = "default"
+
+ original_image = image
+ init_image = self.image_processor.preprocess(
+ image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
+ )
+ init_image = init_image.to(dtype=torch.float32)
+
+ # 6. Prepare latent variables
+ num_channels_latents = self.vae.config.latent_channels
+ num_channels_unet = self.unet.config.in_channels
+ return_image_latents = num_channels_unet == 4
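+ # A 4-channel UNet has no mask/masked-image input channels, so the original image latents are
+ # needed later for per-step mask compositing.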
+
+ latents_outputs = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ image=init_image,
+ timestep=latent_timestep,
+ is_strength_max=is_strength_max,
+ return_noise=True,
+ return_image_latents=return_image_latents,
+ )
+
+ if return_image_latents:
+ latents, noise, image_latents = latents_outputs
+ else:
+ latents, noise = latents_outputs
+
+ # 7. Prepare mask latent variables
+ mask_condition = self.mask_processor.preprocess(
+ mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
+ )
+
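+ # Zero out the region to be inpainted (mask >= 0.5) before encoding the masked image into latents.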
+ if masked_image_latents is None:
+ masked_image = init_image * (mask_condition < 0.5)
+ else:
+ masked_image = masked_image_latents
+
+ mask, masked_image_latents = self.prepare_mask_latents(
+ mask_condition,
+ masked_image,
+ batch_size * num_images_per_prompt,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ self.do_classifier_free_guidance,
+ )
+
+ # 8. Check that sizes of mask, masked image and latents match
+ if num_channels_unet == 9:
+ # default case for runwayml/stable-diffusion-inpainting
+ num_channels_mask = mask.shape[1]
+ num_channels_masked_image = masked_image_latents.shape[1]
+ if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
+ raise ValueError(
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
+ f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
+ f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
+ " `pipeline.unet` or your `mask_image` or `image` input."
+ )
+ elif num_channels_unet != 4:
+ raise ValueError(
+ f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
+ )
+
+ # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+ # 9.1 Add image embeds for IP-Adapter
+ added_cond_kwargs = (
+ {"image_embeds": image_embeds}
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None
+ else None
+ )
+
+ # 9.2 Optionally get Guidance Scale Embedding
+ timestep_cond = None
+ if self.unet.config.time_cond_proj_dim is not None:
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
+ timestep_cond = self.get_guidance_scale_embedding(
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
+ ).to(device=device, dtype=latents.dtype)
+
+ lst_latent =[]
+ if latent_processing == 1:
+ lst_latent = [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator,init_image,padding_mask_crop,mask_image,original_image,crops_coords).images[0]]
+
+ # 10. Denoising loop
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+ self._num_timesteps = len(timesteps)
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ for i, t in enumerate(timesteps):
+ if self.interrupt:
+ continue
+
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+
+ # concat latents, mask, masked_image_latents in the channel dimension
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ if num_channels_unet == 9:
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
+
+ region_prompt = {
+ "region_state": region_state,
+ "sigma": self.scheduler.sigmas[i],
+ "weight_func": weight_func,
+ }
+ self._cross_attention_kwargs["region_prompt"] = region_prompt
+
+ down_intrablock_additional_residuals = None
+ if adapter_state is not None:
+ if i < int(num_inference_steps * adapter_conditioning_factor):
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
+ else:
+ down_intrablock_additional_residuals = None
+ # predict the noise residual
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds,
+ timestep_cond=timestep_cond,
+ cross_attention_kwargs=self.cross_attention_kwargs,
+ down_intrablock_additional_residuals = down_intrablock_additional_residuals,
+ added_cond_kwargs=added_cond_kwargs,
+ return_dict=False,
+ )[0]
+
+ # perform guidance
+ if self.do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ if self.do_classifier_free_guidance and guidance_rescale > 0.0:
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
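+ # For a standard 4-channel UNet, inpainting is emulated by re-noising the original image latents
+ # to the next timestep and compositing them with the denoised latents outside the mask.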
+ if num_channels_unet == 4:
+ init_latents_proper = image_latents
+ if self.do_classifier_free_guidance:
+ init_mask, _ = mask.chunk(2)
+ else:
+ init_mask = mask
+
+ if i < len(timesteps) - 1:
+ noise_timestep = timesteps[i + 1]
+ init_latents_proper = self.scheduler.add_noise(
+ init_latents_proper, noise, torch.tensor([noise_timestep])
+ )
+
+ latents = (1 - init_mask) * init_latents_proper + init_mask * latents
+
+ if latent_processing == 1:
+ lst_latent.append(self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator,init_image,padding_mask_crop,mask_image,original_image,crops_coords).images[0])
+ if callback_on_step_end is not None:
+ callback_kwargs = {}
+ for k in callback_on_step_end_tensor_inputs:
+ callback_kwargs[k] = locals()[k]
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+ latents = callback_outputs.pop("latents", latents)
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+ mask = callback_outputs.pop("mask", mask)
+ masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents)
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ step_idx = i // getattr(self.scheduler, "order", 1)
+ callback(step_idx, t, latents)
+
+ if latent_processing == 1:
+ if output_type == 'latent':
+ lst_latent.append(self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator,init_image,padding_mask_crop,mask_image,original_image,crops_coords).images[0])
+ return lst_latent
+ if output_type == 'latent':
+ return [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator,init_image,padding_mask_crop,mask_image,original_image,crops_coords).images[0],self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator,init_image,padding_mask_crop,mask_image,original_image,crops_coords).images[0]]
+ return [self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator,init_image,padding_mask_crop,mask_image,original_image,crops_coords).images[0]]
+
+class StableDiffusionControlNetInpaintPipeline_finetune(IPAdapterMixin,StableDiffusionControlNetInpaintPipeline):
+ def type_output(self,output_type,device,d_type,return_dict,latents,generator,padding_mask_crop,mask_image,original_image,crops_coords):
+ if not output_type == "latent":
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
+ 0
+ ]
+ image, has_nsfw_concept = self.run_safety_checker(image, device,d_type)
+ else:
+ image = latents
+ has_nsfw_concept = None
+
+ if has_nsfw_concept is None:
+ do_denormalize = [True] * image.shape[0]
+ else:
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
+
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
+
+ if padding_mask_crop is not None:
+ image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image]
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image, has_nsfw_concept)
+
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
+
+ @torch.no_grad()
+ def __call__(
+ self,
+ prompt: Union[str, List[str]] = None,
+ image: PipelineImageInput = None,
+ mask_image: PipelineImageInput = None,
+ control_image: PipelineImageInput = None,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ padding_mask_crop: Optional[int] = None,
+ strength: float = 1.0,
+ num_inference_steps: int = 50,
+ guidance_scale: float = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ num_images_per_prompt: Optional[int] = 1,
+ eta: float = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.Tensor] = None,
+ prompt_embeds: Optional[torch.Tensor] = None,
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
+ ip_adapter_image: Optional[PipelineImageInput] = None,
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ controlnet_conditioning_scale: Union[float, List[float]] = 0.5,
+ guess_mode: bool = False,
+ control_guidance_start: Union[float, List[float]] = 0.0,
+ control_guidance_end: Union[float, List[float]] = 1.0,
+ clip_skip: Optional[int] = None,
+ callback_on_step_end: Optional[
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+ ] = None,
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ region_map_state=None,
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
+ latent_processing = 0,
+ image_t2i_adapter : Optional[PipelineImageInput] = None,
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
+ adapter_conditioning_factor: float = 1.0,
+ long_encode: int = 0,
+ guidance_rescale: float = 0.0,
+ **kwargs,
+ ):
+
+ callback = kwargs.pop("callback", None)
+ callback_steps = kwargs.pop("callback_steps", None)
+
+ if callback is not None:
+ deprecate(
+ "callback",
+ "1.0.0",
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+ if callback_steps is not None:
+ deprecate(
+ "callback_steps",
+ "1.0.0",
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+ )
+
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+ if height is None:
+ _,height = get_image_size(image)
+ height = int((height // 8)*8)
+ if width is None:
+ width,_ = get_image_size(image)
+ width = int((width // 8)*8)
+
+ adapter_state = None
+
+ if image_t2i_adapter is not None:
+ height, width = default_height_width(self,height, width, image_t2i_adapter)
+ adapter_state = preprocessing_t2i_adapter(self,image_t2i_adapter,width,height,adapter_conditioning_scale,num_images_per_prompt)
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
+
+ # align format for control guidance
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
+ control_guidance_start, control_guidance_end = (
+ mult * [control_guidance_start],
+ mult * [control_guidance_end],
+ )
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ control_image,
+ mask_image,
+ height,
+ width,
+ callback_steps,
+ output_type,
+ negative_prompt,
+ prompt_embeds,
+ negative_prompt_embeds,
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ controlnet_conditioning_scale,
+ control_guidance_start,
+ control_guidance_end,
+ callback_on_step_end_tensor_inputs,
+ padding_mask_crop,
+ )
+
+ self._guidance_scale = guidance_scale
+ self._clip_skip = clip_skip
+ self._cross_attention_kwargs = cross_attention_kwargs
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ if padding_mask_crop is not None:
+ height, width = self.image_processor.get_default_height_width(image, height, width)
+ crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
+ resize_mode = "fill"
+ else:
+ crops_coords = None
+ resize_mode = "default"
+
+ device = self._execution_device
+
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
+
+ global_pool_conditions = (
+ controlnet.config.global_pool_conditions
+ if isinstance(controlnet, ControlNetModel)
+ else controlnet.nets[0].config.global_pool_conditions
+ )
+ guess_mode = guess_mode or global_pool_conditions
+
+ # 3. Encode input prompt
+ text_encoder_lora_scale = (
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
+ )
+ #Copy input prompt_embeds and negative_prompt_embeds
+ '''prompt_embeds_copy = None
+ negative_prompt_embeds_copy = None
+ if prompt_embeds is not None:
+ prompt_embeds_copy = prompt_embeds.clone().detach()
+ if negative_prompt_embeds is not None:
+ negative_prompt_embeds_copy = negative_prompt_embeds.clone().detach()'''
+
+
+ prompt_embeds, negative_prompt_embeds,text_input_ids = encode_prompt_function(
+ self,
+ prompt,
+ device,
+ num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ negative_prompt,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ lora_scale=text_encoder_lora_scale,
+ clip_skip=self.clip_skip,
+ long_encode = long_encode,
+ )
+
+ #Get token_id
+ #text_input_ids,uncond_input_ids = get_id_text(self,prompt,max_length = prompt_embeds.shape[1],negative_prompt = negative_prompt,prompt_embeds = prompt_embeds_copy,negative_prompt_embeds = negative_prompt_embeds_copy)
+ # For classifier free guidance, we need to do two forward passes.
+ # Here we concatenate the unconditional and text embeddings into a single batch
+ # to avoid doing two forward passes
+ '''if text_input_ids is not None:
+ text_input_ids = np.concatenate([uncond_input_ids, text_input_ids])'''
+ #text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt])
+ #text_embeddings = text_embeddings.to(self.unet.dtype)
+ # For classifier free guidance, we need to do two forward passes.
+ # Here we concatenate the unconditional and text embeddings into a single batch
+ # to avoid doing two forward passes
+ if self.do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+ image_embeds = self.prepare_ip_adapter_image_embeds(
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ device,
+ batch_size * num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ )
+
+ # 4. Prepare image
+ if isinstance(controlnet, ControlNetModel):
+ control_image = self.prepare_control_image(
+ image=control_image,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=controlnet.dtype,
+ crops_coords=crops_coords,
+ resize_mode=resize_mode,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=guess_mode,
+ )
+ elif isinstance(controlnet, MultiControlNetModel):
+ control_images = []
+
+ for control_image_ in control_image:
+ control_image_ = self.prepare_control_image(
+ image=control_image_,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=controlnet.dtype,
+ crops_coords=crops_coords,
+ resize_mode=resize_mode,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=guess_mode,
+ )
+
+ control_images.append(control_image_)
+
+ control_image = control_images
+ else:
+ assert False
+
+ # 4.1 Preprocess mask and image - resizes image and mask w.r.t height and width
+ original_image = image
+ init_image = self.image_processor.preprocess(
+ image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
+ )
+ init_image = init_image.to(dtype=torch.float32)
+
+ mask = self.mask_processor.preprocess(
+ mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
+ )
+
+ masked_image = init_image * (mask < 0.5)
+ _, _, height, width = init_image.shape
+
+        # 4.2 Preprocess region map
+ region_state = encode_region_map(
+ self,
+ region_map_state,
+ width = width,
+ height = height,
+ num_images_per_prompt = num_images_per_prompt,
+ text_ids=text_input_ids,
+ )
+ if self.cross_attention_kwargs is None:
+            self._cross_attention_kwargs = {}
+
+ # 5. Prepare timesteps
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps, num_inference_steps = self.get_timesteps(
+ num_inference_steps=num_inference_steps, strength=strength, device=device
+ )
+ # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
+ # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
+ is_strength_max = strength == 1.0
+ self._num_timesteps = len(timesteps)
+
+ # 6. Prepare latent variables
+ num_channels_latents = self.vae.config.latent_channels
+ num_channels_unet = self.unet.config.in_channels
+ return_image_latents = num_channels_unet == 4
+ latents_outputs = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ image=init_image,
+ timestep=latent_timestep,
+ is_strength_max=is_strength_max,
+ return_noise=True,
+ return_image_latents=return_image_latents,
+ )
+
+ if return_image_latents:
+ latents, noise, image_latents = latents_outputs
+ else:
+ latents, noise = latents_outputs
+
+ # 7. Prepare mask latent variables
+ mask, masked_image_latents = self.prepare_mask_latents(
+ mask,
+ masked_image,
+ batch_size * num_images_per_prompt,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ self.do_classifier_free_guidance,
+ )
+
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+ # 7.1 Add image embeds for IP-Adapter
+ added_cond_kwargs = (
+ {"image_embeds": image_embeds}
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None
+ else None
+ )
+
+ # 7.2 Create tensor stating which controlnets to keep
+ controlnet_keep = []
+ for i in range(len(timesteps)):
+ keeps = [
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
+ for s, e in zip(control_guidance_start, control_guidance_end)
+ ]
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
+
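+        # When latent_processing == 1, collect a decoded preview image for the initial latents
+        # and again after every denoising step.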
+        lst_latent = []
+ if latent_processing == 1:
+ lst_latent = [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator,padding_mask_crop,mask_image,original_image,crops_coords).images[0]]
+ # 8. Denoising loop
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ for i, t in enumerate(timesteps):
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ # controlnet(s) inference
+ if guess_mode and self.do_classifier_free_guidance:
+ # Infer ControlNet only for the conditional batch.
+ control_model_input = latents
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
+ else:
+ control_model_input = latent_model_input
+ controlnet_prompt_embeds = prompt_embeds
+
+ if isinstance(controlnet_keep[i], list):
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
+ else:
+ controlnet_cond_scale = controlnet_conditioning_scale
+ if isinstance(controlnet_cond_scale, list):
+ controlnet_cond_scale = controlnet_cond_scale[0]
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
+
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
+ control_model_input,
+ t,
+ encoder_hidden_states=controlnet_prompt_embeds,
+ controlnet_cond=control_image,
+ conditioning_scale=cond_scale,
+ guess_mode=guess_mode,
+ return_dict=False,
+ )
+
+ if guess_mode and self.do_classifier_free_guidance:
+                    # Inferred ControlNet only for the conditional batch.
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
+ # add 0 to the unconditional batch to keep it unchanged.
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
+
+ # predict the noise residual
+ if num_channels_unet == 9:
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
+
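+                # Regional prompting: hand the encoded region map, the current sigma and the
+                # weighting function to the custom attention processors via cross_attention_kwargs.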
+ region_prompt = {
+ "region_state": region_state,
+ "sigma": self.scheduler.sigmas[i],
+ "weight_func": weight_func,
+ }
+ self._cross_attention_kwargs["region_prompt"] = region_prompt
+
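+                # Only inject the T2I-Adapter residuals for the first `adapter_conditioning_factor`
+                # fraction of the denoising steps; later steps run without adapter conditioning.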
+ down_intrablock_additional_residuals = None
+ if adapter_state is not None:
+ if i < int(num_inference_steps * adapter_conditioning_factor):
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
+ else:
+ down_intrablock_additional_residuals = None
+
+ noise_pred = self.unet(
+ latent_model_input,
+ t,
+ encoder_hidden_states=prompt_embeds,
+ cross_attention_kwargs=self.cross_attention_kwargs,
+ down_block_additional_residuals=down_block_res_samples,
+ mid_block_additional_residual=mid_block_res_sample,
+ down_intrablock_additional_residuals = down_intrablock_additional_residuals,
+ added_cond_kwargs=added_cond_kwargs,
+ return_dict=False,
+ )[0]
+
+ # perform guidance
+ if self.do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ if self.do_classifier_free_guidance and guidance_rescale > 0.0:
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+
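+                # For a standard 4-channel (non-inpainting) UNet, re-noise the original image
+                # latents to the next timestep and blend them back outside the mask, so only the
+                # masked region is actually repainted.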
+ if num_channels_unet == 4:
+ init_latents_proper = image_latents
+ if self.do_classifier_free_guidance:
+ init_mask, _ = mask.chunk(2)
+ else:
+ init_mask = mask
+
+ if i < len(timesteps) - 1:
+ noise_timestep = timesteps[i + 1]
+ init_latents_proper = self.scheduler.add_noise(
+ init_latents_proper, noise, torch.tensor([noise_timestep])
+ )
+
+ latents = (1 - init_mask) * init_latents_proper + init_mask * latents
+
+ if latent_processing == 1:
+ lst_latent.append(self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator,padding_mask_crop,mask_image,original_image,crops_coords).images[0])
+
+ if callback_on_step_end is not None:
+ callback_kwargs = {}
+ for k in callback_on_step_end_tensor_inputs:
+ callback_kwargs[k] = locals()[k]
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+ latents = callback_outputs.pop("latents", latents)
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if callback is not None and i % callback_steps == 0:
+ step_idx = i // getattr(self.scheduler, "order", 1)
+ callback(step_idx, t, latents)
+
+ # If we do sequential model offloading, let's offload unet and controlnet
+ # manually for max memory savings
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+ self.unet.to("cpu")
+ self.controlnet.to("cpu")
+ torch.cuda.empty_cache()
+
+ if latent_processing == 1:
+ if output_type == 'latent':
+ lst_latent.append(self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator,padding_mask_crop,mask_image,original_image,crops_coords).images[0])
+ return lst_latent
+ if output_type == 'latent':
+            return [self.type_output("pil",device,prompt_embeds.dtype,return_dict,latents,generator,padding_mask_crop,mask_image,original_image,crops_coords).images[0],self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator,padding_mask_crop,mask_image,original_image,crops_coords).images[0]]
+ return [self.type_output(output_type,device,prompt_embeds.dtype,return_dict,latents,generator,padding_mask_crop,mask_image,original_image,crops_coords).images[0]]
\ No newline at end of file
diff --git a/modules/model_k_diffusion.py b/modules/model_k_diffusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..1946a63c1fe9e29139f15a5cc70ab800abf3aff0
--- /dev/null
+++ b/modules/model_k_diffusion.py
@@ -0,0 +1,1960 @@
+import importlib
+import inspect
+import math
+from pathlib import Path
+import re
+from collections import defaultdict
+from typing import List, Optional, Union
+import cv2
+import time
+import k_diffusion
+import numpy as np
+import PIL
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from einops import rearrange
+from .external_k_diffusion import CompVisDenoiser, CompVisVDenoiser
+#from .prompt_parser import FrozenCLIPEmbedderWithCustomWords
+from torch import einsum
+from torch.autograd.function import Function
+
+from diffusers.utils import PIL_INTERPOLATION, is_accelerate_available
+from diffusers.utils import logging
+from diffusers.utils.torch_utils import randn_tensor,is_compiled_module
+from diffusers.image_processor import VaeImageProcessor,PipelineImageInput
+from safetensors.torch import load_file
+from diffusers import ControlNetModel
+from PIL import Image
+import torchvision.transforms as transforms
+from diffusers.models import AutoencoderKL, ImageProjection
+from .ip_adapter import IPAdapterMixin
+from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
+import gc
+from .t2i_adapter import preprocessing_t2i_adapter,default_height_width
+from .encoder_prompt_modify import encode_prompt_function
+from .encode_region_map_function import encode_region_map
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
+from diffusers.loaders import LoraLoaderMixin
+from diffusers.loaders import TextualInversionLoaderMixin
+
+def get_image_size(image):
+ height, width = None, None
+ if isinstance(image, Image.Image):
+ return image.size
+ elif isinstance(image, np.ndarray):
+ height, width = image.shape[:2]
+ return (width, height)
+ elif torch.is_tensor(image):
+ #RGB image
+ if len(image.shape) == 3:
+ _, height, width = image.shape
+ else:
+ height, width = image.shape
+ return (width, height)
+ else:
+ raise TypeError("The image must be an instance of PIL.Image, numpy.ndarray, or torch.Tensor.")
+
+
+def retrieve_latents(
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+):
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
+ return encoder_output.latent_dist.sample(generator)
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+ return encoder_output.latent_dist.mode()
+ elif hasattr(encoder_output, "latents"):
+ return encoder_output.latents
+ else:
+ raise AttributeError("Could not access latents of provided encoder_output")
+
+# from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
+def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
+ """
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
+ """
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
+ # rescale the results from guidance (fixes overexposure)
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
+ return noise_cfg
+
+
+class ModelWrapper:
+ def __init__(self, model, alphas_cumprod):
+ self.model = model
+ self.alphas_cumprod = alphas_cumprod
+
+ def apply_model(self, *args, **kwargs):
+ if len(args) == 3:
+ encoder_hidden_states = args[-1]
+ args = args[:2]
+ if kwargs.get("cond", None) is not None:
+ encoder_hidden_states = kwargs.pop("cond")
+ return self.model(
+ *args, encoder_hidden_states=encoder_hidden_states, **kwargs
+ ).sample
+
+
+class StableDiffusionPipeline(IPAdapterMixin,DiffusionPipeline,StableDiffusionMixin,LoraLoaderMixin,TextualInversionLoaderMixin):
+
+ _optional_components = ["safety_checker", "feature_extractor"]
+
+ def __init__(
+ self,
+ vae,
+ text_encoder,
+ tokenizer,
+ unet,
+ scheduler,
+ feature_extractor,
+ image_encoder = None,
+ ):
+ super().__init__()
+
+ # get correct sigmas from LMS
+ self.register_modules(
+ vae=vae,
+ text_encoder=text_encoder,
+ tokenizer=tokenizer,
+ unet=unet,
+ scheduler=scheduler,
+ feature_extractor=feature_extractor,
+ image_encoder=image_encoder,
+ )
+ self.controlnet = None
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+ self.mask_processor = VaeImageProcessor(
+ vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
+ )
+ self.setup_unet(self.unet)
+ #self.setup_text_encoder()
+
+ '''def setup_text_encoder(self, n=1, new_encoder=None):
+ if new_encoder is not None:
+ self.text_encoder = new_encoder
+
+ self.prompt_parser = FrozenCLIPEmbedderWithCustomWords(self.tokenizer, self.text_encoder,n)'''
+ #self.prompt_parser.CLIP_stop_at_last_layers = n
+
+ def setup_unet(self, unet):
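+        # Wrap the UNet so k-diffusion samplers can drive it as a denoiser; v-prediction
+        # checkpoints use CompVisVDenoiser, epsilon-prediction ones use CompVisDenoiser.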
+ unet = unet.to(self.device)
+ model = ModelWrapper(unet, self.scheduler.alphas_cumprod)
+ if self.scheduler.config.prediction_type == "v_prediction":
+ self.k_diffusion_model = CompVisVDenoiser(model)
+ else:
+ self.k_diffusion_model = CompVisDenoiser(model)
+
+ def get_scheduler(self, scheduler_type: str):
+ library = importlib.import_module("k_diffusion")
+ sampling = getattr(library, "sampling")
+ return getattr(sampling, scheduler_type)
+
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+ dtype = next(self.image_encoder.parameters()).dtype
+
+ if not isinstance(image, torch.Tensor):
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+ image = image.to(device=device, dtype=dtype)
+ if output_hidden_states:
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+ uncond_image_enc_hidden_states = self.image_encoder(
+ torch.zeros_like(image), output_hidden_states=True
+ ).hidden_states[-2]
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+ num_images_per_prompt, dim=0
+ )
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
+ else:
+ image_embeds = self.image_encoder(image).image_embeds
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+ uncond_image_embeds = torch.zeros_like(image_embeds)
+
+ return image_embeds, uncond_image_embeds
+
+
+ def prepare_ip_adapter_image_embeds(
+ self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
+ ):
+ if ip_adapter_image_embeds is None:
+ if not isinstance(ip_adapter_image, list):
+ ip_adapter_image = [ip_adapter_image]
+
+ if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
+ raise ValueError(
+ f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
+ )
+
+ image_embeds = []
+ for single_ip_adapter_image, image_proj_layer in zip(
+ ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
+ ):
+ output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
+ single_image_embeds, single_negative_image_embeds = self.encode_image(
+ single_ip_adapter_image, device, 1, output_hidden_state
+ )
+ single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
+ single_negative_image_embeds = torch.stack(
+ [single_negative_image_embeds] * num_images_per_prompt, dim=0
+ )
+
+ if do_classifier_free_guidance:
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
+ single_image_embeds = single_image_embeds.to(device)
+
+ image_embeds.append(single_image_embeds)
+ else:
+ repeat_dims = [1]
+ image_embeds = []
+ for single_image_embeds in ip_adapter_image_embeds:
+ if do_classifier_free_guidance:
+ single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
+ single_image_embeds = single_image_embeds.repeat(
+ num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
+ )
+ single_negative_image_embeds = single_negative_image_embeds.repeat(
+ num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
+ )
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
+ else:
+ single_image_embeds = single_image_embeds.repeat(
+ num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
+ )
+ image_embeds.append(single_image_embeds)
+
+ return image_embeds
+
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
+ r"""
+ Enable sliced attention computation.
+
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
+
+ Args:
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
+ `attention_head_dim` must be a multiple of `slice_size`.
+ """
+ if slice_size == "auto":
+ # half the attention head size is usually a good trade-off between
+ # speed and memory
+ slice_size = self.unet.config.attention_head_dim // 2
+ self.unet.set_attention_slice(slice_size)
+
+ def disable_attention_slicing(self):
+ r"""
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
+ back to computing attention in one step.
+ """
+ # set slice_size = `None` to disable `attention slicing`
+ self.enable_attention_slicing(None)
+
+ def enable_sequential_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
+ text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
+        `torch.device('meta')` and loaded to the GPU only when their specific submodule has its `forward` method called.
+ """
+ if is_accelerate_available():
+ from accelerate import cpu_offload
+ else:
+ raise ImportError("Please install accelerate via `pip install accelerate`")
+
+ device = torch.device(f"cuda:{gpu_id}")
+
+ for cpu_offloaded_model in [
+ self.unet,
+ self.text_encoder,
+ self.vae,
+ self.safety_checker,
+ ]:
+ if cpu_offloaded_model is not None:
+ cpu_offload(cpu_offloaded_model, device)
+
+ @property
+ def _execution_device(self):
+ r"""
+ Returns the device on which the pipeline's models will be executed. After calling
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
+ hooks.
+ """
+ if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
+ return self.device
+ for module in self.unet.modules():
+ if (
+ hasattr(module, "_hf_hook")
+ and hasattr(module._hf_hook, "execution_device")
+ and module._hf_hook.execution_device is not None
+ ):
+ return torch.device(module._hf_hook.execution_device)
+ return self.device
+
+ def decode_latents(self, latents):
+ latents = latents.to(self.device, dtype=self.vae.dtype)
+ #latents = 1 / 0.18215 * latents
+ latents = 1 / self.vae.config.scaling_factor * latents
+ image = self.vae.decode(latents).sample
+ image = (image / 2 + 0.5).clamp(0, 1)
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+ return image
+
+
+ def _default_height_width(self, height, width, image):
+ if isinstance(image, list):
+ image = image[0]
+
+ if height is None:
+ if isinstance(image, PIL.Image.Image):
+ height = image.height
+ elif isinstance(image, torch.Tensor):
+ height = image.shape[3]
+
+ height = (height // 8) * 8 # round down to nearest multiple of 8
+
+ if width is None:
+ if isinstance(image, PIL.Image.Image):
+ width = image.width
+ elif isinstance(image, torch.Tensor):
+ width = image.shape[2]
+
+ width = (width // 8) * 8 # round down to nearest multiple of 8
+
+ return height, width
+
+ def check_inputs(self, prompt, height, width, callback_steps):
+ if not isinstance(prompt, str) and not isinstance(prompt, list):
+ raise ValueError(
+ f"`prompt` has to be of type `str` or `list` but is {type(prompt)}"
+ )
+
+ if height % 8 != 0 or width % 8 != 0:
+ raise ValueError(
+ f"`height` and `width` have to be divisible by 8 but are {height} and {width}."
+ )
+
+ if (callback_steps is None) or (
+ callback_steps is not None
+ and (not isinstance(callback_steps, int) or callback_steps <= 0)
+ ):
+ raise ValueError(
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
+ f" {type(callback_steps)}."
+ )
+
+ @property
+ def do_classifier_free_guidance(self):
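+        # CFG is skipped for guidance-distilled UNets that expose `time_cond_proj_dim` (e.g. LCM-style models).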
+ return self._do_classifier_free_guidance and self.unet.config.time_cond_proj_dim is None
+
+ def setup_controlnet(self,controlnet):
+ if isinstance(controlnet, (list, tuple)):
+ controlnet = MultiControlNetModel(controlnet)
+ self.register_modules(
+ controlnet=controlnet,
+ )
+
+ def preprocess_controlnet(self,controlnet_conditioning_scale,control_guidance_start,control_guidance_end,image,width,height,num_inference_steps,batch_size,num_images_per_prompt):
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
+
+ # align format for control guidance
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
+ control_guidance_start, control_guidance_end = (
+ mult * [control_guidance_start],
+ mult * [control_guidance_end],
+ )
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
+
+ global_pool_conditions = (
+ controlnet.config.global_pool_conditions
+ if isinstance(controlnet, ControlNetModel)
+ else controlnet.nets[0].config.global_pool_conditions
+ )
+        guess_mode = global_pool_conditions
+
+ # 4. Prepare image
+ if isinstance(controlnet, ControlNetModel):
+ image = self.prepare_image(
+ image=image,
+ width=width,
+ height=height,
+ batch_size=batch_size,
+ num_images_per_prompt=num_images_per_prompt,
+ device=self._execution_device,
+ dtype=controlnet.dtype,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=guess_mode,
+ )
+ height, width = image.shape[-2:]
+ elif isinstance(controlnet, MultiControlNetModel):
+ images = []
+
+ for image_ in image:
+ image_ = self.prepare_image(
+ image=image_,
+ width=width,
+ height=height,
+ batch_size=batch_size,
+ num_images_per_prompt=num_images_per_prompt,
+ device=self._execution_device,
+ dtype=controlnet.dtype,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=guess_mode,
+ )
+
+ images.append(image_)
+
+ image = images
+ height, width = image[0].shape[-2:]
+ else:
+ assert False
+
+ # 7.2 Create tensor stating which controlnets to keep
+ controlnet_keep = []
+ for i in range(num_inference_steps):
+ keeps = [
+ 1.0 - float(i / num_inference_steps < s or (i + 1) / num_inference_steps > e)
+ for s, e in zip(control_guidance_start, control_guidance_end)
+ ]
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
+ return image,controlnet_keep,guess_mode,controlnet_conditioning_scale
+
+
+
+ def prepare_latents(
+ self,
+ batch_size,
+ num_channels_latents,
+ height,
+ width,
+ dtype,
+ device,
+ generator,
+ latents=None,
+ ):
+ shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor)
+ if latents is None:
+ if device.type == "mps":
+ # randn does not work reproducibly on mps
+ latents = torch.randn(
+ shape, generator=generator, device="cpu", dtype=dtype
+ ).to(device)
+ else:
+ latents = torch.randn(
+ shape, generator=generator, device=device, dtype=dtype
+ )
+ else:
+ # if latents.shape != shape:
+ # raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
+ latents = latents.to(device)
+
+        # Note: scaling of the initial noise by sigma is applied by the caller, not inside this helper.
+        return latents
+
+ def preprocess(self, image):
+ if isinstance(image, torch.Tensor):
+ return image
+ elif isinstance(image, PIL.Image.Image):
+ image = [image]
+
+ if isinstance(image[0], PIL.Image.Image):
+ w, h = image[0].size
+ w, h = map(lambda x: x - x % 8, (w, h)) # resize to integer multiple of 8
+
+ image = [
+ np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[
+ None, :
+ ]
+ for i in image
+ ]
+ image = np.concatenate(image, axis=0)
+ image = np.array(image).astype(np.float32) / 255.0
+ image = image.transpose(0, 3, 1, 2)
+ image = 2.0 * image - 1.0
+ image = torch.from_numpy(image)
+ elif isinstance(image[0], torch.Tensor):
+ image = torch.cat(image, dim=0)
+ return image
+
+ def prepare_image(
+ self,
+ image,
+ width,
+ height,
+ batch_size,
+ num_images_per_prompt,
+ device,
+ dtype,
+ do_classifier_free_guidance=False,
+ guess_mode=False,
+ ):
+
+ self.control_image_processor = VaeImageProcessor(
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
+ )
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
+ image_batch_size = image.shape[0]
+
+ if image_batch_size == 1:
+ repeat_by = batch_size
+ else:
+ # image batch size is the same as prompt batch size
+ repeat_by = num_images_per_prompt
+
+ #image = image.repeat_interleave(repeat_by, dim=0)
+
+ image = image.to(device=device, dtype=dtype)
+
+ if do_classifier_free_guidance and not guess_mode:
+ image = torch.cat([image] * 2)
+
+ return image
+
+ def numpy_to_pil(self,images):
+ r"""
+ Convert a numpy image or a batch of images to a PIL image.
+ """
+ if images.ndim == 3:
+ images = images[None, ...]
+ #images = (images * 255).round().astype("uint8")
+ images = np.clip((images * 255).round(), 0, 255).astype("uint8")
+ if images.shape[-1] == 1:
+ # special case for grayscale (single channel) images
+ pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
+ else:
+ pil_images = [Image.fromarray(image) for image in images]
+
+ return pil_images
+
+ def latent_to_image(self,latent,output_type):
+ image = self.decode_latents(latent)
+ if output_type == "pil":
+ image = self.numpy_to_pil(image)
+ if len(image) > 1:
+ return image
+ return image[0]
+
+
+ @torch.no_grad()
+ def img2img(
+ self,
+ prompt: Union[str, List[str]],
+ num_inference_steps: int = 50,
+ guidance_scale: float = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ generator: Optional[torch.Generator] = None,
+ image: Optional[torch.Tensor] = None,
+ output_type: Optional[str] = "pil",
+ latents=None,
+ strength=1.0,
+ region_map_state=None,
+ sampler_name="",
+ sampler_opt={},
+ start_time=-1,
+ timeout=180,
+ scale_ratio=8.0,
+ latent_processing = 0,
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
+ upscale=False,
+ upscale_x: float = 2.0,
+ upscale_method: str = "bicubic",
+ upscale_antialias: bool = False,
+        upscale_denoising_strength: float = 0.7,
+ width = None,
+ height = None,
+ seed = 0,
+ sampler_name_hires="",
+ sampler_opt_hires= {},
+ latent_upscale_processing = False,
+ ip_adapter_image = None,
+ control_img = None,
+ controlnet_conditioning_scale = None,
+ control_guidance_start = None,
+ control_guidance_end = None,
+ image_t2i_adapter : Optional[PipelineImageInput] = None,
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
+ adapter_conditioning_factor: float = 1.0,
+ guidance_rescale: float = 0.0,
+ cross_attention_kwargs = None,
+ clip_skip = None,
+ long_encode = 0,
+ num_images_per_prompt = 1,
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+ ):
+ if isinstance(sampler_name, str):
+ sampler = self.get_scheduler(sampler_name)
+ else:
+ sampler = sampler_name
+        if height is None:
+            _, height = get_image_size(image)
+            height = int((height // 8) * 8)
+        if width is None:
+            width, _ = get_image_size(image)
+            width = int((width // 8) * 8)
+
+ if image_t2i_adapter is not None:
+ height, width = default_height_width(self,height, width, image_t2i_adapter)
+ if image is not None:
+ image = self.preprocess(image)
+ image = image.to(self.vae.device, dtype=self.vae.dtype)
+
+ init_latents = self.vae.encode(image).latent_dist.sample(generator)
+ latents = 0.18215 * init_latents
+
+ # 2. Define call parameters
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
+ device = self._execution_device
+ latents = latents.to(device, dtype=self.unet.dtype)
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+
+ lora_scale = (
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
+ )
+        self._do_classifier_free_guidance = guidance_scale > 1.0
+ '''if guidance_scale <= 1.0:
+ raise ValueError("has to use guidance_scale")'''
+ # 3. Encode input prompt
+
+ text_embeddings, negative_prompt_embeds, text_input_ids = encode_prompt_function(
+ self,
+ prompt,
+ device,
+ num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ negative_prompt,
+ lora_scale = lora_scale,
+ clip_skip = clip_skip,
+ long_encode = long_encode,
+ )
+
+ if self.do_classifier_free_guidance:
+ text_embeddings = torch.cat([negative_prompt_embeds, text_embeddings])
+
+ #text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt])
+ text_embeddings = text_embeddings.to(self.unet.dtype)
+
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
+ t_start = max(num_inference_steps - init_timestep, 0)
+
+ sigmas = self.get_sigmas(num_inference_steps, sampler_opt).to(
+ text_embeddings.device, dtype=text_embeddings.dtype
+ )
+
+ sigma_sched = sigmas[t_start:]
+
+ noise = randn_tensor(
+ latents.shape,
+ generator=generator,
+ device=device,
+ dtype=text_embeddings.dtype,
+ )
+ latents = latents.to(device)
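+        # Noise the encoded image up to the first sigma of the truncated schedule so img2img
+        # starts denoising from the requested strength.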
+ latents = latents + noise * (sigma_sched[0]**2 + 1) ** 0.5
+ #latents = latents + noise * sigma_sched[0] #Nearly
+ steps_denoising = len(sigma_sched)
+ # 5. Prepare latent variables
+ self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device)
+ self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(
+ latents.device
+ )
+
+ region_state = encode_region_map(
+ self,
+ region_map_state,
+ width = width,
+ height = height,
+ num_images_per_prompt = num_images_per_prompt,
+ text_ids=text_input_ids,
+ )
+ if cross_attention_kwargs is None:
+            cross_attention_kwargs = {}
+
+ controlnet_conditioning_scale_copy = controlnet_conditioning_scale.copy() if isinstance(controlnet_conditioning_scale, list) else controlnet_conditioning_scale
+ control_guidance_start_copy = control_guidance_start.copy() if isinstance(control_guidance_start, list) else control_guidance_start
+ control_guidance_end_copy = control_guidance_end.copy() if isinstance(control_guidance_end, list) else control_guidance_end
+ guess_mode = False
+
+ if self.controlnet is not None:
+ img_control,controlnet_keep,guess_mode,controlnet_conditioning_scale = self.preprocess_controlnet(controlnet_conditioning_scale,control_guidance_start,control_guidance_end,control_img,width,height,len(sigma_sched),batch_size,num_images_per_prompt)
+ #print(len(controlnet_keep))
+
+ #controlnet_conditioning_scale_copy = controlnet_conditioning_scale.copy()
+ #sp_control = 1
+
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+ image_embeds = self.prepare_ip_adapter_image_embeds(
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ device,
+ batch_size * num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ )
+ # 6.1 Add image embeds for IP-Adapter
+ added_cond_kwargs = (
+ {"image_embeds": image_embeds}
+ if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
+ else None
+ )
+ #if controlnet_img is not None:
+ #controlnet_img_processing = controlnet_img.convert("RGB")
+ #transform = transforms.Compose([transforms.PILToTensor()])
+ #controlnet_img_processing = transform(controlnet_img)
+ #controlnet_img_processing=controlnet_img_processing.to(device=device, dtype=self.cnet.dtype)
+ #controlnet_img = torch.from_numpy(controlnet_img).half()
+ #controlnet_img = controlnet_img.unsqueeze(0)
+ #controlnet_img = controlnet_img.repeat_interleave(3, dim=0)
+ #controlnet_img=controlnet_img.to(device)
+ #controlnet_img = controlnet_img.repeat_interleave(4 // len(controlnet_img), 0)
+ if latent_processing == 1:
+ latents_process = [self.latent_to_image(latents,output_type)]
+ lst_latent_sigma = []
+ step_control = -1
+ adapter_state = None
+ adapter_sp_count = []
+ if image_t2i_adapter is not None:
+ adapter_state = preprocessing_t2i_adapter(self,image_t2i_adapter,width,height,adapter_conditioning_scale,1)
+ def model_fn(x, sigma):
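+            # Denoiser closure called by the k-diffusion sampler at each sigma (possibly more than
+            # once per step): it applies regional prompting, optional ControlNet / T2I-Adapter
+            # conditioning and classifier-free guidance, and returns the model prediction.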
+ nonlocal step_control,lst_latent_sigma,adapter_sp_count
+
+ if start_time > 0 and timeout > 0:
+ assert (time.time() - start_time) < timeout, "inference process timed out"
+
+ latent_model_input = torch.cat([x] * 2) if self.do_classifier_free_guidance else x
+
+ region_prompt = {
+ "region_state": region_state,
+ "sigma": sigma[0],
+ "weight_func": weight_func,
+ }
+ cross_attention_kwargs["region_prompt"] = region_prompt
+
+ #print(self.k_diffusion_model.sigma_to_t(sigma[0]))
+
+ if latent_model_input.dtype != text_embeddings.dtype:
+ latent_model_input = latent_model_input.to(text_embeddings.dtype)
+ ukwargs = {}
+
+ down_intrablock_additional_residuals = None
+ if adapter_state is not None:
+                if len(adapter_sp_count) < int(steps_denoising * adapter_conditioning_factor):
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
+ else:
+ down_intrablock_additional_residuals = None
+ sigma_string_t2i = str(sigma.item())
+ if sigma_string_t2i not in adapter_sp_count:
+ adapter_sp_count.append(sigma_string_t2i)
+
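+            # ControlNet guidance start/end is indexed by denoising step, so distinct sigma values
+            # are tracked here: some samplers evaluate the model several times per step.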
+            if self.controlnet is not None:
+ sigma_string = str(sigma.item())
+ if sigma_string not in lst_latent_sigma:
+ #sigmas_sp = sigma.detach().clone()
+ step_control+=1
+ lst_latent_sigma.append(sigma_string)
+
+ if isinstance(controlnet_keep[step_control], list):
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[step_control])]
+ else:
+ controlnet_cond_scale = controlnet_conditioning_scale
+ if isinstance(controlnet_cond_scale, list):
+ controlnet_cond_scale = controlnet_cond_scale[0]
+ cond_scale = controlnet_cond_scale * controlnet_keep[step_control]
+
+ down_block_res_samples = None
+ mid_block_res_sample = None
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
+ latent_model_input / ((sigma**2 + 1) ** 0.5),
+ self.k_diffusion_model.sigma_to_t(sigma),
+ encoder_hidden_states=text_embeddings,
+ controlnet_cond=img_control,
+ conditioning_scale=cond_scale,
+ guess_mode=guess_mode,
+ return_dict=False,
+ )
+ if guess_mode and self.do_classifier_free_guidance:
+                        # Inferred ControlNet only for the conditional batch.
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
+ # add 0 to the unconditional batch to keep it unchanged.
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
+ ukwargs ={
+ "down_block_additional_residuals": down_block_res_samples,
+ "mid_block_additional_residual":mid_block_res_sample,
+ }
+
+ noise_pred = self.k_diffusion_model(
+ latent_model_input, sigma, cond=text_embeddings,cross_attention_kwargs = cross_attention_kwargs,down_intrablock_additional_residuals = down_intrablock_additional_residuals,added_cond_kwargs=added_cond_kwargs, **ukwargs
+ )
+
+
+ if self.do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + guidance_scale * (
+ noise_pred_text - noise_pred_uncond
+ )
+
+ if guidance_rescale > 0.0:
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
+ if latent_processing == 1:
+ latents_process.append(self.latent_to_image(noise_pred,output_type))
+ # noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=0.7)
+ return noise_pred
+
+ sampler_args = self.get_sampler_extra_args_i2i(sigma_sched,len(sigma_sched),sampler_opt,latents,seed, sampler)
+ latents = sampler(model_fn, latents, **sampler_args)
+ self.maybe_free_model_hooks()
+ torch.cuda.empty_cache()
+ gc.collect()
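+        # Optional hires pass: upscale the latents and run a second img2img at the target
+        # resolution with `upscale_denoising_strength`.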
+ if upscale:
+ vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+            target_height = int(height * upscale_x // vae_scale_factor) * 8
+            target_width = int(width * upscale_x // vae_scale_factor) * 8
+
+ latents = torch.nn.functional.interpolate(
+ latents,
+ size=(
+ int(target_height // vae_scale_factor),
+ int(target_width // vae_scale_factor),
+ ),
+ mode=upscale_method,
+ antialias=upscale_antialias,
+ )
+ #if controlnet_img is not None:
+ #controlnet_img = cv2.resize(controlnet_img,(latents.size(0), latents.size(1)))
+ #controlnet_img=controlnet_img.resize((latents.size(0), latents.size(1)), Image.LANCZOS)
+
+ #region_map_state = apply_size_sketch(int(target_width),int(target_height),region_map_state)
+ latent_reisze= self.img2img(
+ prompt=prompt,
+ num_inference_steps=num_inference_steps,
+ guidance_scale=guidance_scale,
+ negative_prompt=negative_prompt,
+ generator=generator,
+ latents=latents,
+ strength=upscale_denoising_strength,
+ sampler_name=sampler_name_hires,
+ sampler_opt=sampler_opt_hires,
+ region_map_state=region_map_state,
+ latent_processing = latent_upscale_processing,
+ width = int(target_width),
+ height = int(target_height),
+ seed = seed,
+ ip_adapter_image = ip_adapter_image,
+ control_img = control_img,
+ controlnet_conditioning_scale = controlnet_conditioning_scale_copy,
+ control_guidance_start = control_guidance_start_copy,
+ control_guidance_end = control_guidance_end_copy,
+ image_t2i_adapter= image_t2i_adapter,
+ adapter_conditioning_scale = adapter_conditioning_scale,
+ adapter_conditioning_factor = adapter_conditioning_factor,
+ guidance_rescale = guidance_rescale,
+ cross_attention_kwargs = cross_attention_kwargs,
+ clip_skip = clip_skip,
+ long_encode = long_encode,
+ num_images_per_prompt = num_images_per_prompt,
+ )
+ '''if latent_processing == 1:
+ latents = latents_process.copy()
+ images = []
+ for i in latents:
+ images.append(self.decode_latents(i))
+ image = []
+ if output_type == "pil":
+ for i in images:
+ image.append(self.numpy_to_pil(i))
+ image[-1] = latent_reisze
+ return image'''
+ if latent_processing == 1:
+ latents_process= latents_process+latent_reisze
+ return latents_process
+ torch.cuda.empty_cache()
+ gc.collect()
+ return latent_reisze
+
+ '''if latent_processing == 1:
+ latents = latents_process.copy()
+ images = []
+ for i in latents:
+ images.append(self.decode_latents(i))
+ image = []
+ # 10. Convert to PIL
+ if output_type == "pil":
+ for i in images:
+ image.append(self.numpy_to_pil(i))
+ else:
+ image = self.decode_latents(latents)
+ # 10. Convert to PIL
+ if output_type == "pil":
+ image = self.numpy_to_pil(image)'''
+ if latent_processing == 1:
+ return latents_process
+ self.maybe_free_model_hooks()
+ torch.cuda.empty_cache()
+ gc.collect()
+ return [self.latent_to_image(latents,output_type)]
+
+ def get_sigmas(self, steps, params):
+ discard_next_to_last_sigma = params.get("discard_next_to_last_sigma", False)
+ steps += 1 if discard_next_to_last_sigma else 0
+
+ if params.get("scheduler", None) == "karras":
+ sigma_min, sigma_max = (
+ self.k_diffusion_model.sigmas[0].item(),
+ self.k_diffusion_model.sigmas[-1].item(),
+ )
+ sigmas = k_diffusion.sampling.get_sigmas_karras(
+ n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=self.device
+ )
+ elif params.get("scheduler", None) == "exponential":
+ sigma_min, sigma_max = (
+ self.k_diffusion_model.sigmas[0].item(),
+ self.k_diffusion_model.sigmas[-1].item(),
+ )
+ sigmas = k_diffusion.sampling.get_sigmas_exponential(
+ n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=self.device
+ )
+ elif params.get("scheduler", None) == "polyexponential":
+ sigma_min, sigma_max = (
+ self.k_diffusion_model.sigmas[0].item(),
+ self.k_diffusion_model.sigmas[-1].item(),
+ )
+ sigmas = k_diffusion.sampling.get_sigmas_polyexponential(
+ n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=self.device
+ )
+ else:
+ sigmas = self.k_diffusion_model.get_sigmas(steps)
+
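+        # Some samplers work better when the next-to-last sigma is dropped while the final zero
+        # sigma is kept (mirrors the AUTOMATIC1111 webui option of the same name).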
+ if discard_next_to_last_sigma:
+ sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
+
+ return sigmas
+
+ def create_noise_sampler(self, x, sigmas, p,seed):
+ """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes"""
+
+ from k_diffusion.sampling import BrownianTreeNoiseSampler
+ sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
+ #current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size]
+ return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed)
+
+ # https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/48a15821de768fea76e66f26df83df3fddf18f4b/modules/sd_samplers.py#L454
+ def get_sampler_extra_args_t2i(self, sigmas, eta, steps,sampler_opt,latents,seed, func):
+ extra_params_kwargs = {}
+
+ if "eta" in inspect.signature(func).parameters:
+ extra_params_kwargs["eta"] = eta
+
+ if "sigma_min" in inspect.signature(func).parameters:
+ extra_params_kwargs["sigma_min"] = sigmas[0].item()
+ extra_params_kwargs["sigma_max"] = sigmas[-1].item()
+
+ if "n" in inspect.signature(func).parameters:
+ extra_params_kwargs["n"] = steps
+ else:
+ extra_params_kwargs["sigmas"] = sigmas
+ if sampler_opt.get('brownian_noise', False):
+ noise_sampler = self.create_noise_sampler(latents, sigmas, steps,seed)
+ extra_params_kwargs['noise_sampler'] = noise_sampler
+ if sampler_opt.get('solver_type', None) == 'heun':
+ extra_params_kwargs['solver_type'] = 'heun'
+
+ return extra_params_kwargs
+
+ # https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/48a15821de768fea76e66f26df83df3fddf18f4b/modules/sd_samplers.py#L454
+ def get_sampler_extra_args_i2i(self, sigmas,steps,sampler_opt,latents,seed, func):
+ extra_params_kwargs = {}
+
+ if "sigma_min" in inspect.signature(func).parameters:
+ ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last
+ extra_params_kwargs["sigma_min"] = sigmas[-2]
+
+ if "sigma_max" in inspect.signature(func).parameters:
+ extra_params_kwargs["sigma_max"] = sigmas[0]
+
+ if "n" in inspect.signature(func).parameters:
+ extra_params_kwargs["n"] = len(sigmas) - 1
+
+ if "sigma_sched" in inspect.signature(func).parameters:
+ extra_params_kwargs["sigma_sched"] = sigmas
+
+ if "sigmas" in inspect.signature(func).parameters:
+ extra_params_kwargs["sigmas"] = sigmas
+ if sampler_opt.get('brownian_noise', False):
+ noise_sampler = self.create_noise_sampler(latents, sigmas, steps,seed)
+ extra_params_kwargs['noise_sampler'] = noise_sampler
+ if sampler_opt.get('solver_type', None) == 'heun':
+ extra_params_kwargs['solver_type'] = 'heun'
+
+ return extra_params_kwargs
+
+ @torch.no_grad()
+ def txt2img(
+ self,
+ prompt: Union[str, List[str]],
+ height: int = 512,
+ width: int = 512,
+ num_inference_steps: int = 50,
+ guidance_scale: float = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ eta: float = 0.0,
+ generator: Optional[torch.Generator] = None,
+ latents: Optional[torch.Tensor] = None,
+ output_type: Optional[str] = "pil",
+ callback_steps: Optional[int] = 1,
+ upscale=False,
+ upscale_x: float = 2.0,
+ upscale_method: str = "bicubic",
+ upscale_antialias: bool = False,
+        upscale_denoising_strength: float = 0.7,
+ region_map_state=None,
+ sampler_name="",
+ sampler_opt={},
+ start_time=-1,
+ timeout=180,
+ latent_processing = 0,
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
+ seed = 0,
+ sampler_name_hires= "",
+ sampler_opt_hires= {},
+ latent_upscale_processing = False,
+ ip_adapter_image = None,
+ control_img = None,
+ controlnet_conditioning_scale = None,
+ control_guidance_start = None,
+ control_guidance_end = None,
+ image_t2i_adapter : Optional[PipelineImageInput] = None,
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
+ adapter_conditioning_factor: float = 1.0,
+ guidance_rescale: float = 0.0,
+ cross_attention_kwargs = None,
+ clip_skip = None,
+ long_encode = 0,
+ num_images_per_prompt = 1,
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+ ):
+ height, width = self._default_height_width(height, width, None)
+ if isinstance(sampler_name, str):
+ sampler = self.get_scheduler(sampler_name)
+ else:
+ sampler = sampler_name
+ # 1. Check inputs. Raise error if not correct
+ if image_t2i_adapter is not None:
+ height, width = default_height_width(self,height, width, image_t2i_adapter)
+ #print(default_height_width(self,height, width, image_t2i_adapter))
+ self.check_inputs(prompt, height, width, callback_steps)
+ # 2. Define call parameters
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
+ device = self._execution_device
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ '''do_classifier_free_guidance = True
+ if guidance_scale <= 1.0:
+ raise ValueError("has to use guidance_scale")'''
+
+ lora_scale = (
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
+ )
+        self._do_classifier_free_guidance = guidance_scale > 1.0
+ '''if guidance_scale <= 1.0:
+ raise ValueError("has to use guidance_scale")'''
+ # 3. Encode input prompt
+
+ text_embeddings, negative_prompt_embeds, text_input_ids = encode_prompt_function(
+ self,
+ prompt,
+ device,
+ num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ negative_prompt,
+ lora_scale = lora_scale,
+ clip_skip = clip_skip,
+ long_encode = long_encode,
+ )
+ if self.do_classifier_free_guidance:
+ text_embeddings = torch.cat([negative_prompt_embeds, text_embeddings])
+
+ # 3. Encode input prompt
+ #text_ids, text_embeddings = self.prompt_parser([negative_prompt, prompt])
+ text_embeddings = text_embeddings.to(self.unet.dtype)
+
+ # 4. Prepare timesteps
+ sigmas = self.get_sigmas(num_inference_steps, sampler_opt).to(
+ text_embeddings.device, dtype=text_embeddings.dtype
+ )
+
+ # 5. Prepare latent variables
+ num_channels_latents = self.unet.config.in_channels
+ latents = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ text_embeddings.dtype,
+ device,
+ generator,
+ latents,
+ )
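+        # Scale the initial Gaussian noise to the starting noise level expected by the
+        # k-diffusion denoiser.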
+ latents = latents * (sigmas[0]**2 + 1) ** 0.5
+ #latents = latents * sigmas[0]#Nearly
+ steps_denoising = len(sigmas)
+ self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device)
+ self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(
+ latents.device
+ )
+
+ region_state = encode_region_map(
+ self,
+ region_map_state,
+ width = width,
+ height = height,
+ num_images_per_prompt = num_images_per_prompt,
+ text_ids=text_input_ids,
+ )
+ if cross_attention_kwargs is None:
+            cross_attention_kwargs = {}
+ controlnet_conditioning_scale_copy = controlnet_conditioning_scale.copy() if isinstance(controlnet_conditioning_scale, list) else controlnet_conditioning_scale
+ control_guidance_start_copy = control_guidance_start.copy() if isinstance(control_guidance_start, list) else control_guidance_start
+ control_guidance_end_copy = control_guidance_end.copy() if isinstance(control_guidance_end, list) else control_guidance_end
+ guess_mode = False
+
+ if self.controlnet is not None:
+ img_control,controlnet_keep,guess_mode,controlnet_conditioning_scale = self.preprocess_controlnet(controlnet_conditioning_scale,control_guidance_start,control_guidance_end,control_img,width,height,num_inference_steps,batch_size,num_images_per_prompt)
+ #print(len(controlnet_keep))
+
+ #controlnet_conditioning_scale_copy = controlnet_conditioning_scale.copy()
+ #sp_control = 1
+
+
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+ image_embeds = self.prepare_ip_adapter_image_embeds(
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ device,
+ batch_size * num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ )
+ # 6.1 Add image embeds for IP-Adapter
+ added_cond_kwargs = (
+ {"image_embeds": image_embeds}
+ if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
+ else None
+ )
+ #if controlnet_img is not None:
+ #controlnet_img_processing = controlnet_img.convert("RGB")
+ #transform = transforms.Compose([transforms.PILToTensor()])
+ #controlnet_img_processing = transform(controlnet_img)
+ #controlnet_img_processing=controlnet_img_processing.to(device=device, dtype=self.cnet.dtype)
+ if latent_processing == 1:
+ latents_process = [self.latent_to_image(latents,output_type)]
+ #sp_find_new = None
+ lst_latent_sigma = []
+ step_control = -1
+ adapter_state = None
+ adapter_sp_count = []
+ if image_t2i_adapter is not None:
+ adapter_state = preprocessing_t2i_adapter(self,image_t2i_adapter,width,height,adapter_conditioning_scale,1)
+ def model_fn(x, sigma):
+ nonlocal step_control,lst_latent_sigma,adapter_sp_count
+
+ if start_time > 0 and timeout > 0:
+ assert (time.time() - start_time) < timeout, "inference process timed out"
+
+ latent_model_input = torch.cat([x] * 2) if self.do_classifier_free_guidance else x
+ region_prompt = {
+ "region_state": region_state,
+ "sigma": sigma[0],
+ "weight_func": weight_func,
+ }
+ cross_attention_kwargs["region_prompt"] = region_prompt
+
+ #print(self.k_diffusion_model.sigma_to_t(sigma[0]))
+
+ if latent_model_input.dtype != text_embeddings.dtype:
+ latent_model_input = latent_model_input.to(text_embeddings.dtype)
+ ukwargs = {}
+
+ down_intrablock_additional_residuals = None
+ if adapter_state is not None:
+                if len(adapter_sp_count) < int(steps_denoising * adapter_conditioning_factor):
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
+ else:
+ down_intrablock_additional_residuals = None
+ sigma_string_t2i = str(sigma.item())
+ if sigma_string_t2i not in adapter_sp_count:
+ adapter_sp_count.append(sigma_string_t2i)
+
+            if self.controlnet is not None:
+ sigma_string = str(sigma.item())
+ if sigma_string not in lst_latent_sigma:
+ #sigmas_sp = sigma.detach().clone()
+ step_control+=1
+ lst_latent_sigma.append(sigma_string)
+
+ if isinstance(controlnet_keep[step_control], list):
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[step_control])]
+ else:
+ controlnet_cond_scale = controlnet_conditioning_scale
+ if isinstance(controlnet_cond_scale, list):
+ controlnet_cond_scale = controlnet_cond_scale[0]
+ cond_scale = controlnet_cond_scale * controlnet_keep[step_control]
+
+ down_block_res_samples = None
+ mid_block_res_sample = None
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
+ latent_model_input / ((sigma**2 + 1) ** 0.5),
+ self.k_diffusion_model.sigma_to_t(sigma),
+ encoder_hidden_states=text_embeddings,
+ controlnet_cond=img_control,
+ conditioning_scale=cond_scale,
+ guess_mode=guess_mode,
+ return_dict=False,
+ )
+ if guess_mode and self.do_classifier_free_guidance:
+                            # Inferred ControlNet only for the conditional batch.
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
+ # add 0 to the unconditional batch to keep it unchanged.
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
+ ukwargs ={
+ "down_block_additional_residuals": down_block_res_samples,
+ "mid_block_additional_residual":mid_block_res_sample,
+ }
+
+
+            noise_pred = self.k_diffusion_model(
+                latent_model_input,
+                sigma,
+                cond=text_embeddings,
+                cross_attention_kwargs=cross_attention_kwargs,
+                down_intrablock_additional_residuals=down_intrablock_additional_residuals,
+                added_cond_kwargs=added_cond_kwargs,
+                **ukwargs,
+            )
+
+
+ if self.do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + guidance_scale * (
+ noise_pred_text - noise_pred_uncond
+ )
+ if guidance_rescale > 0.0:
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
+ if latent_processing == 1:
+ latents_process.append(self.latent_to_image(noise_pred,output_type))
+ # noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=0.7)
+ return noise_pred
+ extra_args = self.get_sampler_extra_args_t2i(
+ sigmas, eta, num_inference_steps,sampler_opt,latents,seed, sampler
+ )
+ latents = sampler(model_fn, latents, **extra_args)
+ #latents = latents_process[0]
+ #print(len(latents_process))
+ self.maybe_free_model_hooks()
+ torch.cuda.empty_cache()
+ gc.collect()
+ if upscale:
+ vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+ target_height = int(height * upscale_x // vae_scale_factor )* 8
+ target_width = int(width * upscale_x // vae_scale_factor)*8
+ latents = torch.nn.functional.interpolate(
+ latents,
+ size=(
+ int(target_height // vae_scale_factor),
+ int(target_width // vae_scale_factor),
+ ),
+ mode=upscale_method,
+ antialias=upscale_antialias,
+ )
+
+ #if controlnet_img is not None:
+ #controlnet_img = cv2.resize(controlnet_img,(latents.size(0), latents.size(1)))
+ #controlnet_img=controlnet_img.resize((latents.size(0), latents.size(1)), Image.LANCZOS)
+            latent_resize = self.img2img(
+ prompt=prompt,
+ num_inference_steps=num_inference_steps,
+ guidance_scale=guidance_scale,
+ negative_prompt=negative_prompt,
+ generator=generator,
+ latents=latents,
+ strength=upscale_denoising_strength,
+ sampler_name=sampler_name_hires,
+ sampler_opt=sampler_opt_hires,
+ region_map_state = region_map_state,
+ latent_processing = latent_upscale_processing,
+ width = int(target_width),
+ height = int(target_height),
+ seed = seed,
+ ip_adapter_image = ip_adapter_image,
+ control_img = control_img,
+ controlnet_conditioning_scale = controlnet_conditioning_scale_copy,
+ control_guidance_start = control_guidance_start_copy,
+ control_guidance_end = control_guidance_end_copy,
+ image_t2i_adapter= image_t2i_adapter,
+ adapter_conditioning_scale = adapter_conditioning_scale,
+ adapter_conditioning_factor = adapter_conditioning_factor,
+ guidance_rescale = guidance_rescale,
+ cross_attention_kwargs = cross_attention_kwargs,
+ clip_skip = clip_skip,
+ long_encode = long_encode,
+ num_images_per_prompt = num_images_per_prompt,
+ )
+            if latent_processing == 1:
+                latents_process = latents_process + latent_resize
+                return latents_process
+            torch.cuda.empty_cache()
+            gc.collect()
+            return latent_resize
+
+ # 8. Post-processing
+ if latent_processing == 1:
+ return latents_process
+ return [self.latent_to_image(latents,output_type)]
+
+
+ def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
+ if isinstance(generator, list):
+ image_latents = [
+ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
+ for i in range(image.shape[0])
+ ]
+ image_latents = torch.cat(image_latents, dim=0)
+ else:
+ image_latents = retrieve_latents(self.vae.encode(image), generator=generator)
+
+ image_latents = self.vae.config.scaling_factor * image_latents
+
+ return image_latents
+
+ def prepare_mask_latents(
+ self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
+ ):
+ # resize the mask to latents shape as we concatenate the mask to the latents
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
+ # and half precision
+ mask = torch.nn.functional.interpolate(
+ mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
+ )
+ mask = mask.to(device=device, dtype=dtype)
+
+ masked_image = masked_image.to(device=device, dtype=dtype)
+
+ if masked_image.shape[1] == 4:
+ masked_image_latents = masked_image
+ else:
+ masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
+
+ # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
+ if mask.shape[0] < batch_size:
+ if not batch_size % mask.shape[0] == 0:
+ raise ValueError(
+ "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
+ f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
+ " of masks that you pass is divisible by the total requested batch size."
+ )
+ mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
+ if masked_image_latents.shape[0] < batch_size:
+ if not batch_size % masked_image_latents.shape[0] == 0:
+ raise ValueError(
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
+ f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
+ )
+ masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
+
+ mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
+ masked_image_latents = (
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
+ )
+
+        # aligning device to prevent device errors when concatenating it with the latent model input
+ masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
+ return mask, masked_image_latents
+
+
+ def _sigma_to_alpha_sigma_t(self, sigma):
+ alpha_t = 1 / ((sigma**2 + 1) ** 0.5)
+ sigma_t = sigma * alpha_t
+
+ return alpha_t, sigma_t
+
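+    # Note on conventions: _sigma_to_alpha_sigma_t above maps a k-diffusion sigma to the
+    # VP-style pair alpha_t = 1 / sqrt(sigma**2 + 1), sigma_t = sigma * alpha_t (so that
+    # alpha_t**2 + sigma_t**2 == 1), while add_noise below uses the k-diffusion convention
+    # x_sigma = x_0 + sigma * noise.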
+ def add_noise(self,init_latents_proper,noise,sigma):
+ if isinstance(sigma, torch.Tensor) and sigma.numel() > 1:
+ sigma,_ = sigma.sort(descending=True)
+ sigma = sigma[0].item()
+ #alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
+ init_latents_proper = init_latents_proper + sigma * noise
+ return init_latents_proper
+
+ def prepare_latents_inpating(
+ self,
+ batch_size,
+ num_channels_latents,
+ height,
+ width,
+ dtype,
+ device,
+ generator,
+ latents=None,
+ image=None,
+ sigma=None,
+ is_strength_max=True,
+ return_noise=False,
+ return_image_latents=False,
+ ):
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
+ if isinstance(generator, list) and len(generator) != batch_size:
+ raise ValueError(
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ )
+
+ if (image is None or sigma is None) and not is_strength_max:
+ raise ValueError(
+                "Since strength < 1, initial latents are to be initialised as a combination of image and noise."
+                " However, either the image or the noise sigma has not been provided."
+ )
+
+ if return_image_latents or (latents is None and not is_strength_max):
+ image = image.to(device=device, dtype=dtype)
+
+ if image.shape[1] == 4:
+ image_latents = image
+ else:
+ image_latents = self._encode_vae_image(image=image, generator=generator)
+ image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
+
+ if latents is None:
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+            # if strength is 1. then initialise the latents to noise, else initialise them to image + noise
+ latents = noise if is_strength_max else self.add_noise(image_latents, noise, sigma)
+ # if pure noise then scale the initial latents by the Scheduler's init sigma
+ latents = latents * (sigma.item()**2 + 1) ** 0.5 if is_strength_max else latents
+ #latents = latents * sigma.item() if is_strength_max else latents #Nearly
+ else:
+ noise = latents.to(device)
+ latents = noise * (sigma.item()**2 + 1) ** 0.5
+ #latents = noise * sigma.item() #Nearly
+
+ outputs = (latents,)
+
+ if return_noise:
+ outputs += (noise,)
+
+ if return_image_latents:
+ outputs += (image_latents,)
+
+ return outputs
+
+ @torch.no_grad()
+ def inpaiting(
+ self,
+ prompt: Union[str, List[str]],
+ height: int = 512,
+ width: int = 512,
+ num_inference_steps: int = 50,
+ guidance_scale: float = 7.5,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ eta: float = 0.0,
+ generator: Optional[torch.Generator] = None,
+ latents: Optional[torch.Tensor] = None,
+ output_type: Optional[str] = "pil",
+ callback_steps: Optional[int] = 1,
+ upscale=False,
+ upscale_x: float = 2.0,
+ upscale_method: str = "bicubic",
+ upscale_antialias: bool = False,
+ upscale_denoising_strength: int = 0.7,
+ region_map_state=None,
+ sampler_name="",
+ sampler_opt={},
+ start_time=-1,
+ timeout=180,
+ latent_processing = 0,
+ weight_func = lambda w, sigma, qk: w * sigma * qk.std(),
+ seed = 0,
+ sampler_name_hires= "",
+ sampler_opt_hires= {},
+ latent_upscale_processing = False,
+ ip_adapter_image = None,
+ control_img = None,
+ controlnet_conditioning_scale = None,
+ control_guidance_start = None,
+ control_guidance_end = None,
+ image_t2i_adapter : Optional[PipelineImageInput] = None,
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
+ adapter_conditioning_factor: float = 1.0,
+ guidance_rescale: float = 0.0,
+ cross_attention_kwargs = None,
+ clip_skip = None,
+ long_encode = 0,
+ num_images_per_prompt = 1,
+ image: Union[torch.Tensor, PIL.Image.Image] = None,
+ mask_image: Union[torch.Tensor, PIL.Image.Image] = None,
+ masked_image_latents: torch.Tensor = None,
+ padding_mask_crop: Optional[int] = None,
+ strength: float = 1.0,
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+ ):
+ height, width = self._default_height_width(height, width, None)
+ if isinstance(sampler_name, str):
+ sampler = self.get_scheduler(sampler_name)
+ else:
+ sampler = sampler_name
+ # 1. Check inputs. Raise error if not correct
+ if image_t2i_adapter is not None:
+ height, width = default_height_width(self,height, width, image_t2i_adapter)
+ #print(default_height_width(self,height, width, image_t2i_adapter))
+ self.check_inputs(prompt, height, width, callback_steps)
+ # 2. Define call parameters
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
+ device = self._execution_device
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ '''do_classifier_free_guidance = True
+ if guidance_scale <= 1.0:
+ raise ValueError("has to use guidance_scale")'''
+
+ lora_scale = (
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
+ )
+        self._do_classifier_free_guidance = guidance_scale > 1.0
+ '''if guidance_scale <= 1.0:
+ raise ValueError("has to use guidance_scale")'''
+ # 3. Encode input prompt
+
+ text_embeddings, negative_prompt_embeds, text_input_ids = encode_prompt_function(
+ self,
+ prompt,
+ device,
+ num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ negative_prompt,
+ lora_scale = lora_scale,
+ clip_skip = clip_skip,
+ long_encode = long_encode,
+ )
+ if self.do_classifier_free_guidance:
+ text_embeddings = torch.cat([negative_prompt_embeds, text_embeddings])
+
+ text_embeddings = text_embeddings.to(self.unet.dtype)
+
+ # 4. Prepare timesteps
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
+ t_start = max(num_inference_steps - init_timestep, 0)
+ sigmas = self.get_sigmas(num_inference_steps, sampler_opt).to(
+ text_embeddings.device, dtype=text_embeddings.dtype
+ )
+ sigmas = sigmas[t_start:] if strength >= 0 and strength < 1.0 else sigmas
+ is_strength_max = strength == 1.0
+
+ '''if latents is None:
+ noise_inpaiting = randn_tensor((batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8), generator=generator, device=device, dtype=text_embeddings.dtype)
+ else:
+ noise_inpaiting = latents.to(device)'''
+
+
+ # 5. Prepare mask, image,
+ if padding_mask_crop is not None:
+ crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
+ resize_mode = "fill"
+ else:
+ crops_coords = None
+ resize_mode = "default"
+
+ original_image = image
+ init_image = self.image_processor.preprocess(
+ image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
+ )
+ init_image = init_image.to(dtype=torch.float32)
+
+ # 6. Prepare latent variables
+ num_channels_latents = self.vae.config.latent_channels
+ num_channels_unet = self.unet.config.in_channels
+ return_image_latents = num_channels_unet == 4
+
+ image_latents = None
+ noise_inpaiting = None
+
+ '''latents = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_unet,
+ height,
+ width,
+ text_embeddings.dtype,
+ device,
+ generator,
+ latents,
+ )'''
+ #latents = latents * sigmas[0]
+
+ latents_outputs = self.prepare_latents_inpating(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ text_embeddings.dtype,
+ device,
+ generator,
+ latents,
+ image=init_image,
+ sigma=sigmas[0],
+ is_strength_max=is_strength_max,
+ return_noise=True,
+ return_image_latents=return_image_latents,
+ )
+
+ if return_image_latents:
+ latents, noise_inpaiting, image_latents = latents_outputs
+ else:
+ latents, noise_inpaiting = latents_outputs
+
+ # 7. Prepare mask latent variables
+ mask_condition = self.mask_processor.preprocess(
+ mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
+ )
+
+ if masked_image_latents is None:
+ masked_image = init_image * (mask_condition < 0.5)
+ else:
+ masked_image = masked_image_latents
+
+ mask, masked_image_latents = self.prepare_mask_latents(
+ mask_condition,
+ masked_image,
+ batch_size * num_images_per_prompt,
+ height,
+ width,
+ text_embeddings.dtype,
+ device,
+ generator,
+ self.do_classifier_free_guidance,
+ )
+
+ # 8. Check that sizes of mask, masked image and latents match
+ if num_channels_unet == 9:
+ # default case for runwayml/stable-diffusion-inpainting
+ num_channels_mask = mask.shape[1]
+ num_channels_masked_image = masked_image_latents.shape[1]
+ if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
+ raise ValueError(
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
+ f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
+ f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
+ " `pipeline.unet` or your `mask_image` or `image` input."
+ )
+ elif num_channels_unet != 4:
+ raise ValueError(
+ f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
+ )
+
+ steps_denoising = len(sigmas)
+ self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device)
+ self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(
+ latents.device
+ )
+
+ region_state = encode_region_map(
+ self,
+ region_map_state,
+ width = width,
+ height = height,
+ num_images_per_prompt = num_images_per_prompt,
+ text_ids=text_input_ids,
+ )
+ if cross_attention_kwargs is None:
+ cross_attention_kwargs ={}
+ controlnet_conditioning_scale_copy = controlnet_conditioning_scale.copy() if isinstance(controlnet_conditioning_scale, list) else controlnet_conditioning_scale
+ control_guidance_start_copy = control_guidance_start.copy() if isinstance(control_guidance_start, list) else control_guidance_start
+ control_guidance_end_copy = control_guidance_end.copy() if isinstance(control_guidance_end, list) else control_guidance_end
+ guess_mode = False
+
+ if self.controlnet is not None:
+            img_control, controlnet_keep, guess_mode, controlnet_conditioning_scale = self.preprocess_controlnet(
+                controlnet_conditioning_scale, control_guidance_start, control_guidance_end, control_img,
+                width, height, num_inference_steps, batch_size, num_images_per_prompt,
+            )
+ #print(len(controlnet_keep))
+
+ #controlnet_conditioning_scale_copy = controlnet_conditioning_scale.copy()
+ #sp_control = 1
+
+
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+ image_embeds = self.prepare_ip_adapter_image_embeds(
+ ip_adapter_image,
+ ip_adapter_image_embeds,
+ device,
+ batch_size * num_images_per_prompt,
+ self.do_classifier_free_guidance,
+ )
+ # 6.1 Add image embeds for IP-Adapter
+ added_cond_kwargs = (
+ {"image_embeds": image_embeds}
+ if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
+ else None
+ )
+ #if controlnet_img is not None:
+ #controlnet_img_processing = controlnet_img.convert("RGB")
+ #transform = transforms.Compose([transforms.PILToTensor()])
+ #controlnet_img_processing = transform(controlnet_img)
+ #controlnet_img_processing=controlnet_img_processing.to(device=device, dtype=self.cnet.dtype)
+ if latent_processing == 1:
+ latents_process = [self.latent_to_image(latents,output_type)]
+ #sp_find_new = None
+ lst_latent_sigma = []
+ step_control = -1
+ adapter_state = None
+ adapter_sp_count = []
+ flag_add_noise_inpaiting = 0
+ if image_t2i_adapter is not None:
+ adapter_state = preprocessing_t2i_adapter(self,image_t2i_adapter,width,height,adapter_conditioning_scale,1)
+ def model_fn(x, sigma):
+ nonlocal step_control,lst_latent_sigma,adapter_sp_count,flag_add_noise_inpaiting
+
+ if start_time > 0 and timeout > 0:
+ assert (time.time() - start_time) < timeout, "inference process timed out"
+
+ if num_channels_unet == 4 and flag_add_noise_inpaiting:
+ init_latents_proper = image_latents
+ if self.do_classifier_free_guidance:
+ init_mask, _ = mask.chunk(2)
+ else:
+ init_mask = mask
+
+ if sigma.item() > sigmas[-1].item():
+ #indices = torch.where(sigmas == sigma.item())[0]
+ #sigma_next = sigmas[indices+1]
+ alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma.item())
+ init_latents_proper = alpha_t * init_latents_proper + sigma_t * noise_inpaiting
+
+ rate_latent_timestep_sigma = (sigma**2 + 1) ** 0.5
+
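+                # Outside the mask, reset the latent to the re-noised original-image latents so
+                # only the masked region is denoised freely. The blend happens in VP-scaled space
+                # (divide by sqrt(sigma**2 + 1)) and is scaled back to the k-diffusion latent space.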
+                x = ((1 - init_mask) * init_latents_proper + init_mask * x / rate_latent_timestep_sigma) * rate_latent_timestep_sigma
+
+ non_inpainting_latent_model_input = (
+ torch.cat([x] * 2) if self.do_classifier_free_guidance else x
+ )
+
+ inpainting_latent_model_input = torch.cat(
+ [non_inpainting_latent_model_input,mask, masked_image_latents], dim=1
+ ) if num_channels_unet == 9 else non_inpainting_latent_model_input
+ region_prompt = {
+ "region_state": region_state,
+ "sigma": sigma[0],
+ "weight_func": weight_func,
+ }
+ cross_attention_kwargs["region_prompt"] = region_prompt
+
+ #print(self.k_diffusion_model.sigma_to_t(sigma[0]))
+
+ if non_inpainting_latent_model_input.dtype != text_embeddings.dtype:
+ non_inpainting_latent_model_input = non_inpainting_latent_model_input.to(text_embeddings.dtype)
+
+ if inpainting_latent_model_input.dtype != text_embeddings.dtype:
+ inpainting_latent_model_input = inpainting_latent_model_input.to(text_embeddings.dtype)
+ ukwargs = {}
+
+ down_intrablock_additional_residuals = None
+ if adapter_state is not None:
+ if len(adapter_sp_count) < int( steps_denoising* adapter_conditioning_factor):
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
+ else:
+ down_intrablock_additional_residuals = None
+ sigma_string_t2i = str(sigma.item())
+ if sigma_string_t2i not in adapter_sp_count:
+ adapter_sp_count.append(sigma_string_t2i)
+
+ if self.controlnet is not None :
+ sigma_string = str(sigma.item())
+ if sigma_string not in lst_latent_sigma:
+ #sigmas_sp = sigma.detach().clone()
+ step_control+=1
+ lst_latent_sigma.append(sigma_string)
+
+ if isinstance(controlnet_keep[step_control], list):
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[step_control])]
+ else:
+ controlnet_cond_scale = controlnet_conditioning_scale
+ if isinstance(controlnet_cond_scale, list):
+ controlnet_cond_scale = controlnet_cond_scale[0]
+ cond_scale = controlnet_cond_scale * controlnet_keep[step_control]
+
+ down_block_res_samples = None
+ mid_block_res_sample = None
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
+ non_inpainting_latent_model_input / ((sigma**2 + 1) ** 0.5),
+ self.k_diffusion_model.sigma_to_t(sigma),
+ encoder_hidden_states=text_embeddings,
+ controlnet_cond=img_control,
+ conditioning_scale=cond_scale,
+ guess_mode=guess_mode,
+ return_dict=False,
+ )
+ if guess_mode and self.do_classifier_free_guidance:
+                            # Inferred ControlNet only for the conditional batch.
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
+ # add 0 to the unconditional batch to keep it unchanged.
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
+ ukwargs ={
+ "down_block_additional_residuals": down_block_res_samples,
+ "mid_block_additional_residual":mid_block_res_sample,
+ }
+
+
+            noise_pred = self.k_diffusion_model(
+                inpainting_latent_model_input,
+                sigma,
+                cond=text_embeddings,
+                cross_attention_kwargs=cross_attention_kwargs,
+                down_intrablock_additional_residuals=down_intrablock_additional_residuals,
+                added_cond_kwargs=added_cond_kwargs,
+                **ukwargs,
+            )
+
+
+ if self.do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + guidance_scale * (
+ noise_pred_text - noise_pred_uncond
+ )
+ if guidance_rescale > 0.0:
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
+
+
+ if latent_processing == 1:
+ latents_process.append(self.latent_to_image(noise_pred,output_type))
+ flag_add_noise_inpaiting = 1
+ return noise_pred
+ extra_args = self.get_sampler_extra_args_t2i(
+ sigmas, eta, num_inference_steps,sampler_opt,latents,seed, sampler
+ )
+ latents = sampler(model_fn, latents, **extra_args)
+ #latents = latents_process[0]
+ #print(len(latents_process))
+ self.maybe_free_model_hooks()
+ torch.cuda.empty_cache()
+ gc.collect()
+ if upscale:
+ vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+ target_height = int(height * upscale_x // vae_scale_factor )* 8
+ target_width = int(width * upscale_x // vae_scale_factor)*8
+ latents = torch.nn.functional.interpolate(
+ latents,
+ size=(
+ int(target_height // vae_scale_factor),
+ int(target_width // vae_scale_factor),
+ ),
+ mode=upscale_method,
+ antialias=upscale_antialias,
+ )
+
+ #if controlnet_img is not None:
+ #controlnet_img = cv2.resize(controlnet_img,(latents.size(0), latents.size(1)))
+ #controlnet_img=controlnet_img.resize((latents.size(0), latents.size(1)), Image.LANCZOS)
+            latent_resize = self.img2img(
+ prompt=prompt,
+ num_inference_steps=num_inference_steps,
+ guidance_scale=guidance_scale,
+ negative_prompt=negative_prompt,
+ generator=generator,
+ latents=latents,
+ strength=upscale_denoising_strength,
+ sampler_name=sampler_name_hires,
+ sampler_opt=sampler_opt_hires,
+ region_map_state = region_map_state,
+ latent_processing = latent_upscale_processing,
+ width = int(target_width),
+ height = int(target_height),
+ seed = seed,
+ ip_adapter_image = ip_adapter_image,
+ control_img = control_img,
+ controlnet_conditioning_scale = controlnet_conditioning_scale_copy,
+ control_guidance_start = control_guidance_start_copy,
+ control_guidance_end = control_guidance_end_copy,
+ image_t2i_adapter= image_t2i_adapter,
+ adapter_conditioning_scale = adapter_conditioning_scale,
+ adapter_conditioning_factor = adapter_conditioning_factor,
+ guidance_rescale = guidance_rescale,
+ cross_attention_kwargs = cross_attention_kwargs,
+ clip_skip = clip_skip,
+ long_encode = long_encode,
+ num_images_per_prompt = num_images_per_prompt,
+ )
+            if latent_processing == 1:
+                latents_process = latents_process + latent_resize
+                return latents_process
+            torch.cuda.empty_cache()
+            gc.collect()
+            return latent_resize
+
+ # 8. Post-processing
+ if latent_processing == 1:
+ return latents_process
+ return [self.latent_to_image(latents,output_type)]
+
+
+
diff --git a/modules/preprocessing_segmentation.py b/modules/preprocessing_segmentation.py
new file mode 100644
index 0000000000000000000000000000000000000000..d84085218c55983fa2e9d74b09e1349e000dc7f0
--- /dev/null
+++ b/modules/preprocessing_segmentation.py
@@ -0,0 +1,47 @@
+import torch
+import os
+from PIL import Image
+import numpy as np
+from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
+import random
+
+lst_model_segmentation = {
+ "Convnet tiny": "openmmlab/upernet-convnext-tiny",
+ "Convnet small": "openmmlab/upernet-convnext-small",
+ "Convnet base": "openmmlab/upernet-convnext-base",
+ "Convnet large": "openmmlab/upernet-convnext-large",
+ "Convnet xlarge": "openmmlab/upernet-convnext-xlarge",
+ "Swin tiny": "openmmlab/upernet-swin-tiny",
+ "Swin small": "openmmlab/upernet-swin-small",
+ "Swin base": "openmmlab/upernet-swin-base",
+ "Swin large": "openmmlab/upernet-swin-large",
+}
+
+def preprocessing_segmentation(method,image):
+ global lst_model_segmentation
+ method = lst_model_segmentation[method]
+ device = 'cpu'
+ if torch.cuda.is_available():
+ device = 'cuda'
+ image_processor = AutoImageProcessor.from_pretrained(method)
+ image_segmentor = UperNetForSemanticSegmentation.from_pretrained(method).to(device)
+
+ pixel_values = image_processor(image, return_tensors="pt").pixel_values.to(device)
+ with torch.no_grad():
+ outputs = image_segmentor(pixel_values)
+ seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
+ color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) # height, width, 3
+
+ seg = seg.to('cpu')
+ unique_values = torch.unique(seg)
+
+ lst_color = []
+ for i in unique_values:
+ color = [random.randrange(0,256), random.randrange(0,256), random.randrange(0,256)]
+ while color in lst_color:
+ color = [random.randrange(0,256), random.randrange(0,256), random.randrange(0,256)]
+ color_seg[seg == i, :] = color
+ lst_color.append(color)
+ color_seg = color_seg.astype(np.uint8)
+ control_image = Image.fromarray(color_seg)
+ return control_image
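+
+
+# Minimal usage sketch (not called by the app itself): assumes network access to download
+# the "Convnet tiny" UperNet checkpoint and a local RGB image at the placeholder path
+# "input.jpg".
+if __name__ == "__main__":
+    sample = Image.open("input.jpg").convert("RGB")
+    seg_map = preprocessing_segmentation("Convnet tiny", sample)
+    seg_map.save("segmentation_control.png")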
\ No newline at end of file
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..d73bc821357016e10be6e1989d5540a10a159c4d
--- /dev/null
+++ b/modules/prompt_parser.py
@@ -0,0 +1,392 @@
+
+import re
+import math
+import numpy as np
+import torch
+
+# Code from https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/8e2aeee4a127b295bfc880800e4a312e0f049b85, modified.
+
+class PromptChunk:
+    """
+    This object contains token ids, weights (multipliers such as 1.4) and textual inversion embedding info for a chunk of prompt.
+    If a prompt is short, it is represented by one PromptChunk; otherwise, multiple chunks are necessary.
+    Each PromptChunk holds exactly 77 tokens, which includes the start and end tokens,
+    so just 75 tokens come from the prompt itself.
+    """
+
+ def __init__(self):
+ self.tokens = []
+ self.multipliers = []
+ self.fixes = []
+
+
+class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
+    """A PyTorch module that wraps the FrozenCLIPEmbedder module. It enhances FrozenCLIPEmbedder, making it possible to
+    have unlimited prompt length and to assign weights to tokens in the prompt.
+    """
+
+ def __init__(self, text_encoder, enable_emphasis=True):
+ super().__init__()
+
+ self.device = lambda: text_encoder.device
+ self.enable_emphasis = enable_emphasis
+ """Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation,
+ depending on model."""
+
+ self.chunk_length = 75
+
+ def empty_chunk(self):
+ """creates an empty PromptChunk and returns it"""
+
+ chunk = PromptChunk()
+ chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1)
+ chunk.multipliers = [1.0] * (self.chunk_length + 2)
+ return chunk
+
+ def get_target_prompt_token_count(self, token_count):
+ """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented"""
+
+ return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length
+
+ def tokenize_line(self, line):
+ """
+ this transforms a single prompt into a list of PromptChunk objects - as many as needed to
+ represent the prompt.
+ Returns the list and the total number of tokens in the prompt.
+ """
+
+ if self.enable_emphasis:
+ parsed = parse_prompt_attention(line)
+ else:
+ parsed = [[line, 1.0]]
+
+ tokenized = self.tokenize([text for text, _ in parsed])
+
+ chunks = []
+ chunk = PromptChunk()
+ token_count = 0
+ last_comma = -1
+
+ def next_chunk(is_last=False):
+            """puts current chunk into the list of results and produces the next one - empty;
+            if is_last is true, the tokens at the end won't add to token_count"""
+ nonlocal token_count
+ nonlocal last_comma
+ nonlocal chunk
+
+ if is_last:
+ token_count += len(chunk.tokens)
+ else:
+ token_count += self.chunk_length
+
+ to_add = self.chunk_length - len(chunk.tokens)
+ if to_add > 0:
+ chunk.tokens += [self.id_end] * to_add
+ chunk.multipliers += [1.0] * to_add
+
+ chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end]
+ chunk.multipliers = [1.0] + chunk.multipliers + [1.0]
+
+ last_comma = -1
+ chunks.append(chunk)
+ chunk = PromptChunk()
+
+ comma_padding_backtrack = 20 # default value in https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/6cff4401824299a983c8e13424018efc347b4a2b/modules/shared.py#L410
+ for tokens, (text, weight) in zip(tokenized, parsed):
+ if text == "BREAK" and weight == -1:
+ next_chunk()
+ continue
+
+ position = 0
+ while position < len(tokens):
+ token = tokens[position]
+
+ if token == self.comma_token:
+ last_comma = len(chunk.tokens)
+
+                # this is when we are at the end of the allotted 75 tokens for the current chunk, and the current token is not a comma. comma_padding_backtrack
+                # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next.
+ elif (
+ comma_padding_backtrack != 0
+ and len(chunk.tokens) == self.chunk_length
+ and last_comma != -1
+ and len(chunk.tokens) - last_comma <= comma_padding_backtrack
+ ):
+ break_location = last_comma + 1
+
+ reloc_tokens = chunk.tokens[break_location:]
+ reloc_mults = chunk.multipliers[break_location:]
+
+ chunk.tokens = chunk.tokens[:break_location]
+ chunk.multipliers = chunk.multipliers[:break_location]
+
+ next_chunk()
+ chunk.tokens = reloc_tokens
+ chunk.multipliers = reloc_mults
+
+ if len(chunk.tokens) == self.chunk_length:
+ next_chunk()
+
+ chunk.tokens.append(token)
+ chunk.multipliers.append(weight)
+ position += 1
+
+ if len(chunk.tokens) > 0 or len(chunks) == 0:
+ next_chunk(is_last=True)
+
+ return chunks, token_count
+
+ def process_texts(self, texts):
+ """
+ Accepts a list of texts and calls tokenize_line() on each, with cache. Returns the list of results and maximum
+ length, in tokens, of all texts.
+ """
+
+ token_count = 0
+
+ cache = {}
+ batch_chunks = []
+ for line in texts:
+ if line in cache:
+ chunks = cache[line]
+ else:
+ chunks, current_token_count = self.tokenize_line(line)
+ token_count = max(current_token_count, token_count)
+
+ cache[line] = chunks
+
+ batch_chunks.append(chunks)
+
+ return batch_chunks, token_count
+
+ def forward(self, texts):
+ """
+ Accepts an array of texts; Passes texts through transformers network to create a tensor with numerical representation of those texts.
+ Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will
+ be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, and for SD2 it's 1024.
+ An example shape returned by this function can be: (2, 77, 768).
+        Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element
+ is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream"
+ """
+
+ batch_chunks, token_count = self.process_texts(texts)
+ chunk_count = max([len(x) for x in batch_chunks])
+
+ zs = []
+ ts = []
+ for i in range(chunk_count):
+ batch_chunk = [
+ chunks[i] if i < len(chunks) else self.empty_chunk()
+ for chunks in batch_chunks
+ ]
+
+ tokens = [x.tokens for x in batch_chunk]
+ multipliers = [x.multipliers for x in batch_chunk]
+ # self.embeddings.fixes = [x.fixes for x in batch_chunk]
+
+ # for fixes in self.embeddings.fixes:
+ # for position, embedding in fixes:
+ # used_embeddings[embedding.name] = embedding
+
+ z = self.process_tokens(tokens, multipliers)
+ zs.append(z)
+ ts.append(tokens)
+
+ return np.hstack(ts), torch.hstack(zs)
+
+ def process_tokens(self, remade_batch_tokens, batch_multipliers):
+ """
+ sends one single prompt chunk to be encoded by transformers neural network.
+ remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually
+ there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens.
+ Multipliers are used to give more or less weight to the outputs of transformers network. Each multiplier
+ corresponds to one token.
+ """
+ tokens = torch.asarray(remade_batch_tokens).to(self.device())
+
+ # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones.
+ if self.id_end != self.id_pad:
+ for batch_pos in range(len(remade_batch_tokens)):
+ index = remade_batch_tokens[batch_pos].index(self.id_end)
+ tokens[batch_pos, index + 1 : tokens.shape[1]] = self.id_pad
+
+ z = self.encode_with_transformers(tokens)
+
+ # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
+ batch_multipliers = torch.asarray(batch_multipliers).to(self.device())
+ original_mean = z.mean()
+ z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
+ new_mean = z.mean()
+ z = z * (original_mean / new_mean)
+
+ return z
+
+
+class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase):
+ def __init__(self, tokenizer, text_encoder,CLIP_stop_at_last_layers):
+ super().__init__(text_encoder)
+ self.tokenizer = tokenizer
+ self.text_encoder = text_encoder
+ self.CLIP_stop_at_last_layers = CLIP_stop_at_last_layers
+
+ vocab = self.tokenizer.get_vocab()
+
+ self.comma_token = vocab.get(",", None)
+
+ self.token_mults = {}
+ tokens_with_parens = [
+ (k, v)
+ for k, v in vocab.items()
+ if "(" in k or ")" in k or "[" in k or "]" in k
+ ]
+ for text, ident in tokens_with_parens:
+ mult = 1.0
+ for c in text:
+ if c == "[":
+ mult /= 1.1
+ if c == "]":
+ mult *= 1.1
+ if c == "(":
+ mult *= 1.1
+ if c == ")":
+ mult /= 1.1
+
+ if mult != 1.0:
+ self.token_mults[ident] = mult
+
+ self.id_start = self.tokenizer.bos_token_id
+ self.id_end = self.tokenizer.eos_token_id
+ self.id_pad = self.id_end
+
+ def tokenize(self, texts):
+ tokenized = self.tokenizer(
+ texts, truncation=False, add_special_tokens=False
+ )["input_ids"]
+
+ return tokenized
+
+ def encode_with_transformers(self, tokens):
+ CLIP_stop_at_last_layers = self.CLIP_stop_at_last_layers
+ tokens = tokens.to(self.text_encoder.device)
+ outputs = self.text_encoder(tokens, output_hidden_states=True)
+
+ if CLIP_stop_at_last_layers > 1:
+ z = outputs.hidden_states[-CLIP_stop_at_last_layers]
+ z = self.text_encoder.text_model.final_layer_norm(z)
+ else:
+ z = outputs.last_hidden_state
+
+ return z
+
+
+re_attention = re.compile(
+ r"""
+\\\(|
+\\\)|
+\\\[|
+\\]|
+\\\\|
+\\|
+\(|
+\[|
+:([+-]?[.\d]+)\)|
+\)|
+]|
+[^\\()\[\]:]+|
+:
+""",
+ re.X,
+)
+
+re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
+
+
+def parse_prompt_attention(text):
+ """
+ Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
+ Accepted tokens are:
+ (abc) - increases attention to abc by a multiplier of 1.1
+ (abc:3.12) - increases attention to abc by a multiplier of 3.12
+ [abc] - decreases attention to abc by a multiplier of 1.1
+ \( - literal character '('
+ \[ - literal character '['
+ \) - literal character ')'
+ \] - literal character ']'
+ \\ - literal character '\'
+ anything else - just text
+
+ >>> parse_prompt_attention('normal text')
+ [['normal text', 1.0]]
+ >>> parse_prompt_attention('an (important) word')
+ [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
+ >>> parse_prompt_attention('(unbalanced')
+ [['unbalanced', 1.1]]
+ >>> parse_prompt_attention('\(literal\]')
+ [['(literal]', 1.0]]
+ >>> parse_prompt_attention('(unnecessary)(parens)')
+ [['unnecessaryparens', 1.1]]
+ >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
+ [['a ', 1.0],
+ ['house', 1.5730000000000004],
+ [' ', 1.1],
+ ['on', 1.0],
+ [' a ', 1.1],
+ ['hill', 0.55],
+ [', sun, ', 1.1],
+ ['sky', 1.4641000000000006],
+ ['.', 1.1]]
+ """
+
+ res = []
+ round_brackets = []
+ square_brackets = []
+
+ round_bracket_multiplier = 1.1
+ square_bracket_multiplier = 1 / 1.1
+
+ def multiply_range(start_position, multiplier):
+ for p in range(start_position, len(res)):
+ res[p][1] *= multiplier
+
+ for m in re_attention.finditer(text):
+ text = m.group(0)
+ weight = m.group(1)
+
+ if text.startswith("\\"):
+ res.append([text[1:], 1.0])
+ elif text == "(":
+ round_brackets.append(len(res))
+ elif text == "[":
+ square_brackets.append(len(res))
+ elif weight is not None and len(round_brackets) > 0:
+ multiply_range(round_brackets.pop(), float(weight))
+ elif text == ")" and len(round_brackets) > 0:
+ multiply_range(round_brackets.pop(), round_bracket_multiplier)
+ elif text == "]" and len(square_brackets) > 0:
+ multiply_range(square_brackets.pop(), square_bracket_multiplier)
+ else:
+ parts = re.split(re_break, text)
+ for i, part in enumerate(parts):
+ if i > 0:
+ res.append(["BREAK", -1])
+ res.append([part, 1.0])
+
+ for pos in round_brackets:
+ multiply_range(pos, round_bracket_multiplier)
+
+ for pos in square_brackets:
+ multiply_range(pos, square_bracket_multiplier)
+
+ if len(res) == 0:
+ res = [["", 1.0]]
+
+ # merge runs of identical weights
+ i = 0
+ while i + 1 < len(res):
+ if res[i][1] == res[i + 1][1]:
+ res[i][0] += res[i + 1][0]
+ res.pop(i + 1)
+ else:
+ i += 1
+
+ return res
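+
+
+# Minimal usage sketch: parse_prompt_attention is pure Python, so it can be exercised
+# directly. The prompt below is only an illustrative example.
+if __name__ == "__main__":
+    example = "a (detailed:1.3) portrait, [blurry] background BREAK golden hour lighting"
+    for text, weight in parse_prompt_attention(example):
+        print(repr(text), weight)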
diff --git a/modules/safe.py b/modules/safe.py
new file mode 100644
index 0000000000000000000000000000000000000000..532c7dab3f60f5a68b068299d2adc0b776a423f9
--- /dev/null
+++ b/modules/safe.py
@@ -0,0 +1,188 @@
+# this code is adapted from the script contributed by anon from /h/
+# modified, from https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/6cff4401824299a983c8e13424018efc347b4a2b/modules/safe.py
+
+import io
+import pickle
+import collections
+import sys
+import traceback
+
+import torch
+import numpy
+import _codecs
+import zipfile
+import re
+
+
+# PyTorch 1.13 and later have _TypedStorage renamed to TypedStorage
+TypedStorage = torch.storage.TypedStorage if hasattr(torch.storage, 'TypedStorage') else torch.storage._TypedStorage
+
+
+def encode(*args):
+ out = _codecs.encode(*args)
+ return out
+
+
+class RestrictedUnpickler(pickle.Unpickler):
+ extra_handler = None
+
+ def persistent_load(self, saved_id):
+ assert saved_id[0] == 'storage'
+ return TypedStorage()
+
+ def find_class(self, module, name):
+ if self.extra_handler is not None:
+ res = self.extra_handler(module, name)
+ if res is not None:
+ return res
+
+ if module == 'collections' and name == 'OrderedDict':
+ return getattr(collections, name)
+ if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter', '_rebuild_device_tensor_from_numpy']:
+ return getattr(torch._utils, name)
+ if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage', 'ByteStorage', 'float32']:
+ return getattr(torch, name)
+ if module == 'torch.nn.modules.container' and name in ['ParameterDict']:
+ return getattr(torch.nn.modules.container, name)
+ if module == 'numpy.core.multiarray' and name in ['scalar', '_reconstruct']:
+ return getattr(numpy.core.multiarray, name)
+ if module == 'numpy' and name in ['dtype', 'ndarray']:
+ return getattr(numpy, name)
+ if module == '_codecs' and name == 'encode':
+ return encode
+ if module == "pytorch_lightning.callbacks" and name == 'model_checkpoint':
+ import pytorch_lightning.callbacks
+ return pytorch_lightning.callbacks.model_checkpoint
+ if module == "pytorch_lightning.callbacks.model_checkpoint" and name == 'ModelCheckpoint':
+ import pytorch_lightning.callbacks.model_checkpoint
+ return pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint
+ if module == "__builtin__" and name == 'set':
+ return set
+
+ # Forbid everything else.
+ raise Exception(f"global '{module}/{name}' is forbidden")
+
+
+# Regular expression that accepts 'dirname/version', 'dirname/data.pkl', and 'dirname/data/<number>'
+allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|version|(data\.pkl))$")
+data_pkl_re = re.compile(r"^([^/]+)/data\.pkl$")
+
+def check_zip_filenames(filename, names):
+ for name in names:
+ if allowed_zip_names_re.match(name):
+ continue
+
+ raise Exception(f"bad file inside {filename}: {name}")
+
+
+def check_pt(filename, extra_handler):
+ try:
+
+ # new pytorch format is a zip file
+ with zipfile.ZipFile(filename) as z:
+ check_zip_filenames(filename, z.namelist())
+
+            # find the filename of data.pkl in the zip file: 'dirname/data.pkl'
+ data_pkl_filenames = [f for f in z.namelist() if data_pkl_re.match(f)]
+ if len(data_pkl_filenames) == 0:
+ raise Exception(f"data.pkl not found in {filename}")
+ if len(data_pkl_filenames) > 1:
+ raise Exception(f"Multiple data.pkl found in {filename}")
+ with z.open(data_pkl_filenames[0]) as file:
+ unpickler = RestrictedUnpickler(file)
+ unpickler.extra_handler = extra_handler
+ unpickler.load()
+
+ except zipfile.BadZipfile:
+
+        # if it's not a zip file, it's the old PyTorch format, with five objects written to the pickle
+ with open(filename, "rb") as file:
+ unpickler = RestrictedUnpickler(file)
+ unpickler.extra_handler = extra_handler
+ for i in range(5):
+ unpickler.load()
+
+
+def load(filename, *args, **kwargs):
+ return load_with_extra(filename, extra_handler=global_extra_handler, *args, **kwargs)
+
+
+def load_with_extra(filename, extra_handler=None, *args, **kwargs):
+ """
+ this function is intended to be used by extensions that want to load models with
+ some extra classes in them that the usual unpickler would find suspicious.
+
+ Use the extra_handler argument to specify a function that takes module and field name as text,
+ and returns that field's value:
+
+ ```python
+ def extra(module, name):
+ if module == 'collections' and name == 'OrderedDict':
+ return collections.OrderedDict
+
+ return None
+
+ safe.load_with_extra('model.pt', extra_handler=extra)
+ ```
+
+ The alternative to this is just to use safe.unsafe_torch_load('model.pt'), which as the name implies is
+ definitely unsafe.
+ """
+
+ try:
+ check_pt(filename, extra_handler)
+
+ except pickle.UnpicklingError:
+ print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+ print("The file is most likely corrupted.", file=sys.stderr)
+ return None
+
+ except Exception:
+ print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+ print("\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr)
+ print("You can skip this check with --disable-safe-unpickle commandline argument.\n\n", file=sys.stderr)
+ return None
+
+ return unsafe_torch_load(filename, *args, **kwargs)
+
+
+class Extra:
+ """
+ A class for temporarily setting the global handler for when you can't explicitly call load_with_extra
+ (because it's not your code making the torch.load call). The intended use is like this:
+
+```
+import torch
+from modules import safe
+
+def handler(module, name):
+ if module == 'torch' and name in ['float64', 'float16']:
+ return getattr(torch, name)
+
+ return None
+
+with safe.Extra(handler):
+ x = torch.load('model.pt')
+```
+ """
+
+ def __init__(self, handler):
+ self.handler = handler
+
+ def __enter__(self):
+ global global_extra_handler
+
+ assert global_extra_handler is None, 'already inside an Extra() block'
+ global_extra_handler = self.handler
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ global global_extra_handler
+
+ global_extra_handler = None
+
+
+unsafe_torch_load = torch.load
+torch.load = load
+global_extra_handler = None
diff --git a/modules/samplers_extra_k_diffusion.py b/modules/samplers_extra_k_diffusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..bec49596b191e257116e385518ec666d0bee19e3
--- /dev/null
+++ b/modules/samplers_extra_k_diffusion.py
@@ -0,0 +1,176 @@
+import torch
+import tqdm
+import k_diffusion.sampling
+from k_diffusion.sampling import default_noise_sampler,to_d, get_sigmas_karras
+from tqdm.auto import trange
+@torch.no_grad()
+def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list=None):
+ """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)
+ Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}
+ If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list
+ """
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones([x.shape[0]])
+ step_id = 0
+
+ def heun_step(x, old_sigma, new_sigma, second_order=True):
+ nonlocal step_id
+ denoised = model(x, old_sigma * s_in, **extra_args)
+ d = to_d(x, old_sigma, denoised)
+ if callback is not None:
+ callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised})
+ dt = new_sigma - old_sigma
+ if new_sigma == 0 or not second_order:
+ # Euler method
+ x = x + d * dt
+ else:
+ # Heun's method
+ x_2 = x + d * dt
+ denoised_2 = model(x_2, new_sigma * s_in, **extra_args)
+ d_2 = to_d(x_2, new_sigma, denoised_2)
+ d_prime = (d + d_2) / 2
+ x = x + d_prime * dt
+ step_id += 1
+ return x
+
+ steps = sigmas.shape[0] - 1
+ if restart_list is None:
+ if steps >= 20:
+ restart_steps = 9
+ restart_times = 1
+ if steps >= 36:
+ restart_steps = steps // 4
+ restart_times = 2
+ sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device)
+ restart_list = {0.1: [restart_steps + 1, restart_times, 2]}
+ else:
+ restart_list = {}
+
+ restart_list = {int(torch.argmin(abs(sigmas - key), dim=0)): value for key, value in restart_list.items()}
+
+ step_list = []
+ for i in range(len(sigmas) - 1):
+ step_list.append((sigmas[i], sigmas[i + 1]))
+ if i + 1 in restart_list:
+ restart_steps, restart_times, restart_max = restart_list[i + 1]
+ min_idx = i + 1
+ max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
+ if max_idx < min_idx:
+ sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1]
+ while restart_times > 0:
+ restart_times -= 1
+ step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])])
+
+ last_sigma = None
+ for old_sigma, new_sigma in tqdm.tqdm(step_list, disable=disable):
+ if last_sigma is None:
+ last_sigma = old_sigma
+ elif last_sigma < old_sigma:
+ x = x + k_diffusion.sampling.torch.randn_like(x) * s_noise * (old_sigma ** 2 - last_sigma ** 2) ** 0.5
+ x = heun_step(x, old_sigma, new_sigma)
+ last_sigma = new_sigma
+
+ return x
+
+
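+# DDPMSampler_step maps the k-diffusion sigma back to DDPM's cumulative alpha via
+# alpha_cumprod = 1 / (sigma**2 + 1), then takes a standard ancestral DDPM step:
+# the posterior mean plus, for every step except the last, posterior-variance noise.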
+def DDPMSampler_step(x, sigma, sigma_prev, noise, noise_sampler):
+ alpha_cumprod = 1 / ((sigma * sigma) + 1)
+ alpha_cumprod_prev = 1 / ((sigma_prev * sigma_prev) + 1)
+ alpha = (alpha_cumprod / alpha_cumprod_prev)
+
+ mu = (1.0 / alpha).sqrt() * (x - (1 - alpha) * noise / (1 - alpha_cumprod).sqrt())
+ if sigma_prev > 0:
+ mu += ((1 - alpha) * (1. - alpha_cumprod_prev) / (1. - alpha_cumprod)).sqrt() * noise_sampler(sigma, sigma_prev)
+ return mu
+
+
+def generic_step_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, step_function=None):
+ extra_args = {} if extra_args is None else extra_args
+ noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
+ s_in = x.new_ones([x.shape[0]])
+
+ for i in trange(len(sigmas) - 1, disable=disable):
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
+ if callback is not None:
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+ x = step_function(x / torch.sqrt(1.0 + sigmas[i] ** 2.0), sigmas[i], sigmas[i + 1], (x - denoised) / sigmas[i], noise_sampler)
+ if sigmas[i + 1] != 0:
+ x *= torch.sqrt(1.0 + sigmas[i + 1] ** 2.0)
+ return x
+
+
+@torch.no_grad()
+def sample_ddpm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
+ return generic_step_sampler(model, x, sigmas, extra_args, callback, disable, noise_sampler, DDPMSampler_step)
+
+
+@torch.no_grad()
+def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
+ extra_args = {} if extra_args is None else extra_args
+ noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
+ s_in = x.new_ones([x.shape[0]])
+ for i in trange(len(sigmas) - 1, disable=disable):
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
+ if callback is not None:
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+ x = denoised
+ if sigmas[i + 1] > 0:
+ x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1])
+ return x
+
+@torch.no_grad()
+def sample_heunpp2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+ # From MIT licensed: https://github.com/Carzit/sd-webui-samplers-scheduler/
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones([x.shape[0]])
+ s_end = sigmas[-1]
+ for i in trange(len(sigmas) - 1, disable=disable):
+ gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
+ eps = torch.randn_like(x) * s_noise
+ sigma_hat = sigmas[i] * (gamma + 1)
+ if gamma > 0:
+ x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
+ denoised = model(x, sigma_hat * s_in, **extra_args)
+ d = to_d(x, sigma_hat, denoised)
+ if callback is not None:
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+ dt = sigmas[i + 1] - sigma_hat
+ if sigmas[i + 1] == s_end:
+ # Euler method
+ x = x + d * dt
+ elif sigmas[i + 2] == s_end:
+
+ # Heun's method
+ x_2 = x + d * dt
+ denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
+ d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
+
+ w = 2 * sigmas[0]
+ w2 = sigmas[i+1]/w
+ w1 = 1 - w2
+
+ d_prime = d * w1 + d_2 * w2
+
+
+ x = x + d_prime * dt
+
+ else:
+ # Heun++
+ x_2 = x + d * dt
+ denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
+ d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
+ dt_2 = sigmas[i + 2] - sigmas[i + 1]
+
+ x_3 = x_2 + d_2 * dt_2
+ denoised_3 = model(x_3, sigmas[i + 2] * s_in, **extra_args)
+ d_3 = to_d(x_3, sigmas[i + 2], denoised_3)
+
+ w = 3 * sigmas[0]
+ w2 = sigmas[i + 1] / w
+ w3 = sigmas[i + 2] / w
+ w1 = 1 - w2 - w3
+
+ d_prime = w1 * d + w2 * d_2 + w3 * d_3
+ x = x + d_prime * dt
+ return x
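+
+
+# Minimal smoke-test sketch: drives restart_sampler with a toy denoiser that always predicts
+# zeros, purely to illustrate the expected call signature. The sigma range, step count and
+# latent shape below are arbitrary illustrative choices.
+if __name__ == "__main__":
+    def toy_denoiser(x, sigma, **kwargs):
+        # A real model returns its denoised prediction; zeros keep the demo trivial.
+        return torch.zeros_like(x)
+
+    sigmas = get_sigmas_karras(10, 0.03, 14.6, device="cpu")
+    x = torch.randn(1, 4, 64, 64) * sigmas[0]
+    out = restart_sampler(toy_denoiser, x, sigmas, disable=True)
+    print(out.shape)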
\ No newline at end of file
diff --git a/modules/t2i_adapter.py b/modules/t2i_adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..27cd3f269d273a917c17db0d2e1c5c78bad61368
--- /dev/null
+++ b/modules/t2i_adapter.py
@@ -0,0 +1,144 @@
+import importlib
+import inspect
+import math
+from pathlib import Path
+import re
+from collections import defaultdict
+from typing import List, Optional, Union
+import cv2
+import time
+import k_diffusion
+import numpy as np
+import PIL
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from einops import rearrange
+from .external_k_diffusion import CompVisDenoiser, CompVisVDenoiser
+from .prompt_parser import FrozenCLIPEmbedderWithCustomWords
+from torch import einsum
+from torch.autograd.function import Function
+
+from diffusers import DiffusionPipeline
+from diffusers.utils import PIL_INTERPOLATION, is_accelerate_available
+from diffusers.utils import logging
+from diffusers.utils.torch_utils import randn_tensor,is_compiled_module,is_torch_version
+from diffusers.image_processor import VaeImageProcessor,PipelineImageInput
+from safetensors.torch import load_file
+from diffusers import ControlNetModel
+from PIL import Image
+import torchvision.transforms as transforms
+from typing import Any, Callable, Dict, List, Optional, Union
+from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+from diffusers import AutoencoderKL, LMSDiscreteScheduler
+from .u_net_condition_modify import UNet2DConditionModel
+from diffusers.models.lora import adjust_lora_scale_text_encoder
+from diffusers.models import AutoencoderKL, ImageProjection, MultiAdapter, T2IAdapter
+from diffusers.schedulers import KarrasDiffusionSchedulers
+from diffusers.utils import (
+ PIL_INTERPOLATION,
+ USE_PEFT_BACKEND,
+ BaseOutput,
+ deprecate,
+ logging,
+ replace_example_docstring,
+ scale_lora_layers,
+ unscale_lora_layers,
+)
+from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+from packaging import version
+from diffusers.configuration_utils import FrozenDict
+
+def _preprocess_adapter_image(image, height, width):
+ if isinstance(image, torch.Tensor):
+ return image
+ elif isinstance(image, PIL.Image.Image):
+ image = [image]
+
+ if isinstance(image[0], PIL.Image.Image):
+ image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image]
+ image = [
+ i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image
+ ] # expand [h, w] or [h, w, c] to [b, h, w, c]
+ image = np.concatenate(image, axis=0)
+ image = np.array(image).astype(np.float32) / 255.0
+ image = image.transpose(0, 3, 1, 2)
+ image = torch.from_numpy(image)
+ elif isinstance(image[0], torch.Tensor):
+ if image[0].ndim == 3:
+ image = torch.stack(image, dim=0)
+ elif image[0].ndim == 4:
+ image = torch.cat(image, dim=0)
+ else:
+ raise ValueError(
+ f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}"
+ )
+ return image
+
+# T2I-Adapter setup
+def setup_model_t2i_adapter(class_name, adapter=None):
+ if isinstance(adapter, (list, tuple)):
+ adapter = MultiAdapter(adapter)
+ class_name.adapter = adapter
+
+
+
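+# Run the conditioning image(s) through the T2I-Adapter (`class_name` is the
+# pipeline instance) to get one residual feature map per UNet resolution,
+# scale them by adapter_conditioning_scale, repeat them for each image per
+# prompt, and duplicate them when classifier-free guidance is enabled.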
+def preprocessing_t2i_adapter(class_name, image, width, height, adapter_conditioning_scale, num_images_per_prompt=1):
+ if isinstance(class_name.adapter, MultiAdapter):
+ adapter_input = []
+ for one_image in image:
+ one_image = _preprocess_adapter_image(one_image, height, width)
+ one_image = one_image.to(device=class_name.device, dtype=class_name.adapter.dtype)
+ adapter_input.append(one_image)
+ else:
+ adapter_input = _preprocess_adapter_image(image, height, width)
+ adapter_input = adapter_input.to(device=class_name.device, dtype=class_name.adapter.dtype)
+
+ if isinstance(class_name.adapter, MultiAdapter):
+ adapter_state = class_name.adapter(adapter_input, adapter_conditioning_scale)
+ for k, v in enumerate(adapter_state):
+ adapter_state[k] = v
+ else:
+ adapter_state = class_name.adapter(adapter_input)
+ for k, v in enumerate(adapter_state):
+ adapter_state[k] = v * adapter_conditioning_scale
+
+
+ if num_images_per_prompt > 1:
+ for k, v in enumerate(adapter_state):
+ adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1)
+ if class_name.do_classifier_free_guidance:
+ for k, v in enumerate(adapter_state):
+ adapter_state[k] = torch.cat([v] * 2, dim=0)
+ return adapter_state
+
+
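+# Infer any missing height/width from the first conditioning image and round
+# both down to a multiple of the adapter's total downscale factor.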
+def default_height_width(class_name, height, width, image):
+ # NOTE: It is possible that a list of images have different
+ # dimensions for each image, so just checking the first image
+ # is not _exactly_ correct, but it is simple.
+ while isinstance(image, list):
+ image = image[0]
+
+ if height is None:
+ if isinstance(image, PIL.Image.Image):
+ height = image.height
+ elif isinstance(image, torch.Tensor):
+ height = image.shape[-2]
+
+ # round down to nearest multiple of `self.adapter.downscale_factor`
+ height = (height // class_name.adapter.downscale_factor) * class_name.adapter.downscale_factor
+
+ if width is None:
+ if isinstance(image, PIL.Image.Image):
+ width = image.width
+ elif isinstance(image, torch.Tensor):
+ width = image.shape[-1]
+
+ # round down to nearest multiple of `self.adapter.downscale_factor`
+ width = (width // class_name.adapter.downscale_factor) * class_name.adapter.downscale_factor
+
+ return height, width
\ No newline at end of file
diff --git a/modules/u_net_condition_modify.py b/modules/u_net_condition_modify.py
new file mode 100644
index 0000000000000000000000000000000000000000..a61192af4ab2fa2ece8ed28435b727e0a4d9a654
--- /dev/null
+++ b/modules/u_net_condition_modify.py
@@ -0,0 +1,1318 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+import torch.utils.checkpoint
+
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers.loaders import PeftAdapterMixin
+from .u_net_modify import UNet2DConditionLoadersMixin_modify
+from diffusers.loaders.single_file_model import FromOriginalModelMixin
+from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
+from diffusers.models.activations import get_activation
+from diffusers.models.attention_processor import (
+ ADDED_KV_ATTENTION_PROCESSORS,
+ CROSS_ATTENTION_PROCESSORS,
+ Attention,
+ AttentionProcessor,
+ AttnAddedKVProcessor,
+ AttnProcessor,
+)
+
+from diffusers.models.embeddings import (
+ GaussianFourierProjection,
+ GLIGENTextBoundingboxProjection,
+ ImageHintTimeEmbedding,
+ ImageProjection,
+ ImageTimeEmbedding,
+ TextImageProjection,
+ TextImageTimeEmbedding,
+ TextTimeEmbedding,
+ TimestepEmbedding,
+ Timesteps,
+)
+from diffusers.models.modeling_utils import ModelMixin
+from diffusers.models.unets.unet_2d_blocks import (
+ get_down_block,
+ get_mid_block,
+ get_up_block,
+)
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+@dataclass
+class UNet2DConditionOutput(BaseOutput):
+ """
+ The output of [`UNet2DConditionModel`].
+
+ Args:
+ sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
+ The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
+ """
+
+ sample: torch.Tensor = None
+
+
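+# Usage sketch (illustrative only; the actual wiring lives in app.py and the
+# pipeline classes, and the model id below is just an example):
+#
+#   unet = UNet2DConditionModel.from_pretrained(
+#       "runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16
+#   )
+#   noise_pred = unet(latents, t, encoder_hidden_states=text_embeddings).sample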
+class UNet2DConditionModel(
+ ModelMixin, ConfigMixin, FromOriginalModelMixin, UNet2DConditionLoadersMixin_modify, PeftAdapterMixin
+):
+ r"""
+ A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
+ shaped output.
+
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
+ for all models (such as downloading or saving).
+
+ Parameters:
+ sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
+ Height and width of input/output sample.
+ in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
+ out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
+ center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
+ flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
+ Whether to flip the sin to cos in the time embedding.
+ freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
+ down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
+ The tuple of downsample blocks to use.
+ mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
+ Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or
+ `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
+ up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
+ The tuple of upsample blocks to use.
+ only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):
+ Whether to include self-attention in the basic transformer blocks, see
+ [`~models.attention.BasicTransformerBlock`].
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
+ The tuple of output channels for each block.
+ layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
+ downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
+ mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
+ act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
+ norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
+ If `None`, normalization and activation layers are skipped in post-processing.
+ norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
+ cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
+ The dimension of the cross attention features.
+ transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1):
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
+ [`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
+ [`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
+ reverse_transformer_layers_per_block (`Tuple[Tuple]`, *optional*, defaults to `None`):
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling
+ blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for
+ [`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
+ [`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
+ encoder_hid_dim (`int`, *optional*, defaults to None):
+ If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
+ dimension to `cross_attention_dim`.
+ encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
+ If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
+ embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
+ attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
+ num_attention_heads (`int`, *optional*):
+ The number of attention heads. If not defined, defaults to `attention_head_dim`
+ resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
+ for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
+ class_embed_type (`str`, *optional*, defaults to `None`):
+ The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
+ `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
+ addition_embed_type (`str`, *optional*, defaults to `None`):
+ Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
+ "text". "text" will use the `TextTimeEmbedding` layer.
+ addition_time_embed_dim: (`int`, *optional*, defaults to `None`):
+ Dimension for the timestep embeddings.
+ num_class_embeds (`int`, *optional*, defaults to `None`):
+ Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
+ class conditioning with `class_embed_type` equal to `None`.
+ time_embedding_type (`str`, *optional*, defaults to `positional`):
+ The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
+ time_embedding_dim (`int`, *optional*, defaults to `None`):
+ An optional override for the dimension of the projected time embedding.
+ time_embedding_act_fn (`str`, *optional*, defaults to `None`):
+ Optional activation function to use only once on the time embeddings before they are passed to the rest of
+ the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
+ timestep_post_act (`str`, *optional*, defaults to `None`):
+ The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
+ time_cond_proj_dim (`int`, *optional*, defaults to `None`):
+ The dimension of `cond_proj` layer in the timestep embedding.
+ conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer.
+ conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer.
+ projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
+ `class_embed_type="projection"`. Required when `class_embed_type="projection"`.
+ class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
+ embeddings with the class embeddings.
+ mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
+ Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
+ `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
+ `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False`
+ otherwise.
+ """
+
+ _supports_gradient_checkpointing = True
+ _no_split_modules = ["BasicTransformerBlock", "ResnetBlock2D", "CrossAttnUpBlock2D"]
+
+ @register_to_config
+ def __init__(
+ self,
+ sample_size: Optional[int] = None,
+ in_channels: int = 4,
+ out_channels: int = 4,
+ center_input_sample: bool = False,
+ flip_sin_to_cos: bool = True,
+ freq_shift: int = 0,
+ down_block_types: Tuple[str] = (
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "DownBlock2D",
+ ),
+ mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
+ up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
+ only_cross_attention: Union[bool, Tuple[bool]] = False,
+ block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
+ layers_per_block: Union[int, Tuple[int]] = 2,
+ downsample_padding: int = 1,
+ mid_block_scale_factor: float = 1,
+ dropout: float = 0.0,
+ act_fn: str = "silu",
+ norm_num_groups: Optional[int] = 32,
+ norm_eps: float = 1e-5,
+ cross_attention_dim: Union[int, Tuple[int]] = 1280,
+ transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
+ reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None,
+ encoder_hid_dim: Optional[int] = None,
+ encoder_hid_dim_type: Optional[str] = None,
+ attention_head_dim: Union[int, Tuple[int]] = 8,
+ num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
+ dual_cross_attention: bool = False,
+ use_linear_projection: bool = False,
+ class_embed_type: Optional[str] = None,
+ addition_embed_type: Optional[str] = None,
+ addition_time_embed_dim: Optional[int] = None,
+ num_class_embeds: Optional[int] = None,
+ upcast_attention: bool = False,
+ resnet_time_scale_shift: str = "default",
+ resnet_skip_time_act: bool = False,
+ resnet_out_scale_factor: float = 1.0,
+ time_embedding_type: str = "positional",
+ time_embedding_dim: Optional[int] = None,
+ time_embedding_act_fn: Optional[str] = None,
+ timestep_post_act: Optional[str] = None,
+ time_cond_proj_dim: Optional[int] = None,
+ conv_in_kernel: int = 3,
+ conv_out_kernel: int = 3,
+ projection_class_embeddings_input_dim: Optional[int] = None,
+ attention_type: str = "default",
+ class_embeddings_concat: bool = False,
+ mid_block_only_cross_attention: Optional[bool] = None,
+ cross_attention_norm: Optional[str] = None,
+ addition_embed_type_num_heads: int = 64,
+ ):
+ super().__init__()
+
+ self.sample_size = sample_size
+
+ if num_attention_heads is not None:
+ raise ValueError(
+ "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
+ )
+
+ # If `num_attention_heads` is not defined (which is the case for most models)
+ # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
+ # The reason for this behavior is to correct for incorrectly named variables that were introduced
+ # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
+ # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
+ # which is why we correct for the naming here.
+ num_attention_heads = num_attention_heads or attention_head_dim
+
+ # Check inputs
+ self._check_config(
+ down_block_types=down_block_types,
+ up_block_types=up_block_types,
+ only_cross_attention=only_cross_attention,
+ block_out_channels=block_out_channels,
+ layers_per_block=layers_per_block,
+ cross_attention_dim=cross_attention_dim,
+ transformer_layers_per_block=transformer_layers_per_block,
+ reverse_transformer_layers_per_block=reverse_transformer_layers_per_block,
+ attention_head_dim=attention_head_dim,
+ num_attention_heads=num_attention_heads,
+ )
+
+ # input
+ conv_in_padding = (conv_in_kernel - 1) // 2
+ self.conv_in = nn.Conv2d(
+ in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
+ )
+
+ # time
+ time_embed_dim, timestep_input_dim = self._set_time_proj(
+ time_embedding_type,
+ block_out_channels=block_out_channels,
+ flip_sin_to_cos=flip_sin_to_cos,
+ freq_shift=freq_shift,
+ time_embedding_dim=time_embedding_dim,
+ )
+
+ self.time_embedding = TimestepEmbedding(
+ timestep_input_dim,
+ time_embed_dim,
+ act_fn=act_fn,
+ post_act_fn=timestep_post_act,
+ cond_proj_dim=time_cond_proj_dim,
+ )
+
+ self._set_encoder_hid_proj(
+ encoder_hid_dim_type,
+ cross_attention_dim=cross_attention_dim,
+ encoder_hid_dim=encoder_hid_dim,
+ )
+
+ # class embedding
+ self._set_class_embedding(
+ class_embed_type,
+ act_fn=act_fn,
+ num_class_embeds=num_class_embeds,
+ projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
+ time_embed_dim=time_embed_dim,
+ timestep_input_dim=timestep_input_dim,
+ )
+
+ self._set_add_embedding(
+ addition_embed_type,
+ addition_embed_type_num_heads=addition_embed_type_num_heads,
+ addition_time_embed_dim=addition_time_embed_dim,
+ cross_attention_dim=cross_attention_dim,
+ encoder_hid_dim=encoder_hid_dim,
+ flip_sin_to_cos=flip_sin_to_cos,
+ freq_shift=freq_shift,
+ projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
+ time_embed_dim=time_embed_dim,
+ )
+
+ if time_embedding_act_fn is None:
+ self.time_embed_act = None
+ else:
+ self.time_embed_act = get_activation(time_embedding_act_fn)
+
+ self.down_blocks = nn.ModuleList([])
+ self.up_blocks = nn.ModuleList([])
+
+ if isinstance(only_cross_attention, bool):
+ if mid_block_only_cross_attention is None:
+ mid_block_only_cross_attention = only_cross_attention
+
+ only_cross_attention = [only_cross_attention] * len(down_block_types)
+
+ if mid_block_only_cross_attention is None:
+ mid_block_only_cross_attention = False
+
+ if isinstance(num_attention_heads, int):
+ num_attention_heads = (num_attention_heads,) * len(down_block_types)
+
+ if isinstance(attention_head_dim, int):
+ attention_head_dim = (attention_head_dim,) * len(down_block_types)
+
+ if isinstance(cross_attention_dim, int):
+ cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
+
+ if isinstance(layers_per_block, int):
+ layers_per_block = [layers_per_block] * len(down_block_types)
+
+ if isinstance(transformer_layers_per_block, int):
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
+
+ if class_embeddings_concat:
+ # The time embeddings are concatenated with the class embeddings. The dimension of the
+ # time embeddings passed to the down, middle, and up blocks is twice the dimension of the
+ # regular time embeddings
+ blocks_time_embed_dim = time_embed_dim * 2
+ else:
+ blocks_time_embed_dim = time_embed_dim
+
+ # down
+ output_channel = block_out_channels[0]
+ for i, down_block_type in enumerate(down_block_types):
+ input_channel = output_channel
+ output_channel = block_out_channels[i]
+ is_final_block = i == len(block_out_channels) - 1
+
+ down_block = get_down_block(
+ down_block_type,
+ num_layers=layers_per_block[i],
+ transformer_layers_per_block=transformer_layers_per_block[i],
+ in_channels=input_channel,
+ out_channels=output_channel,
+ temb_channels=blocks_time_embed_dim,
+ add_downsample=not is_final_block,
+ resnet_eps=norm_eps,
+ resnet_act_fn=act_fn,
+ resnet_groups=norm_num_groups,
+ cross_attention_dim=cross_attention_dim[i],
+ num_attention_heads=num_attention_heads[i],
+ downsample_padding=downsample_padding,
+ dual_cross_attention=dual_cross_attention,
+ use_linear_projection=use_linear_projection,
+ only_cross_attention=only_cross_attention[i],
+ upcast_attention=upcast_attention,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ attention_type=attention_type,
+ resnet_skip_time_act=resnet_skip_time_act,
+ resnet_out_scale_factor=resnet_out_scale_factor,
+ cross_attention_norm=cross_attention_norm,
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
+ dropout=dropout,
+ )
+ self.down_blocks.append(down_block)
+
+ # mid
+ self.mid_block = get_mid_block(
+ mid_block_type,
+ temb_channels=blocks_time_embed_dim,
+ in_channels=block_out_channels[-1],
+ resnet_eps=norm_eps,
+ resnet_act_fn=act_fn,
+ resnet_groups=norm_num_groups,
+ output_scale_factor=mid_block_scale_factor,
+ transformer_layers_per_block=transformer_layers_per_block[-1],
+ num_attention_heads=num_attention_heads[-1],
+ cross_attention_dim=cross_attention_dim[-1],
+ dual_cross_attention=dual_cross_attention,
+ use_linear_projection=use_linear_projection,
+ mid_block_only_cross_attention=mid_block_only_cross_attention,
+ upcast_attention=upcast_attention,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ attention_type=attention_type,
+ resnet_skip_time_act=resnet_skip_time_act,
+ cross_attention_norm=cross_attention_norm,
+ attention_head_dim=attention_head_dim[-1],
+ dropout=dropout,
+ )
+
+ # count how many layers upsample the images
+ self.num_upsamplers = 0
+
+ # up
+ reversed_block_out_channels = list(reversed(block_out_channels))
+ reversed_num_attention_heads = list(reversed(num_attention_heads))
+ reversed_layers_per_block = list(reversed(layers_per_block))
+ reversed_cross_attention_dim = list(reversed(cross_attention_dim))
+ reversed_transformer_layers_per_block = (
+ list(reversed(transformer_layers_per_block))
+ if reverse_transformer_layers_per_block is None
+ else reverse_transformer_layers_per_block
+ )
+ only_cross_attention = list(reversed(only_cross_attention))
+
+ output_channel = reversed_block_out_channels[0]
+ for i, up_block_type in enumerate(up_block_types):
+ is_final_block = i == len(block_out_channels) - 1
+
+ prev_output_channel = output_channel
+ output_channel = reversed_block_out_channels[i]
+ input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
+
+ # add upsample block for all BUT final layer
+ if not is_final_block:
+ add_upsample = True
+ self.num_upsamplers += 1
+ else:
+ add_upsample = False
+
+ up_block = get_up_block(
+ up_block_type,
+ num_layers=reversed_layers_per_block[i] + 1,
+ transformer_layers_per_block=reversed_transformer_layers_per_block[i],
+ in_channels=input_channel,
+ out_channels=output_channel,
+ prev_output_channel=prev_output_channel,
+ temb_channels=blocks_time_embed_dim,
+ add_upsample=add_upsample,
+ resnet_eps=norm_eps,
+ resnet_act_fn=act_fn,
+ resolution_idx=i,
+ resnet_groups=norm_num_groups,
+ cross_attention_dim=reversed_cross_attention_dim[i],
+ num_attention_heads=reversed_num_attention_heads[i],
+ dual_cross_attention=dual_cross_attention,
+ use_linear_projection=use_linear_projection,
+ only_cross_attention=only_cross_attention[i],
+ upcast_attention=upcast_attention,
+ resnet_time_scale_shift=resnet_time_scale_shift,
+ attention_type=attention_type,
+ resnet_skip_time_act=resnet_skip_time_act,
+ resnet_out_scale_factor=resnet_out_scale_factor,
+ cross_attention_norm=cross_attention_norm,
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
+ dropout=dropout,
+ )
+ self.up_blocks.append(up_block)
+ prev_output_channel = output_channel
+
+ # out
+ if norm_num_groups is not None:
+ self.conv_norm_out = nn.GroupNorm(
+ num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
+ )
+
+ self.conv_act = get_activation(act_fn)
+
+ else:
+ self.conv_norm_out = None
+ self.conv_act = None
+
+ conv_out_padding = (conv_out_kernel - 1) // 2
+ self.conv_out = nn.Conv2d(
+ block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
+ )
+
+ self._set_pos_net_if_use_gligen(attention_type=attention_type, cross_attention_dim=cross_attention_dim)
+
+ def _check_config(
+ self,
+ down_block_types: Tuple[str],
+ up_block_types: Tuple[str],
+ only_cross_attention: Union[bool, Tuple[bool]],
+ block_out_channels: Tuple[int],
+ layers_per_block: Union[int, Tuple[int]],
+ cross_attention_dim: Union[int, Tuple[int]],
+ transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple[int]]],
+ reverse_transformer_layers_per_block: bool,
+ attention_head_dim: int,
+ num_attention_heads: Optional[Union[int, Tuple[int]]],
+ ):
+ if len(down_block_types) != len(up_block_types):
+ raise ValueError(
+ f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
+ )
+
+ if len(block_out_channels) != len(down_block_types):
+ raise ValueError(
+ f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
+ )
+
+ if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
+ raise ValueError(
+ f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
+ )
+
+ if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
+ raise ValueError(
+ f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
+ )
+
+ if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
+ raise ValueError(
+ f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
+ )
+
+ if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
+ raise ValueError(
+ f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
+ )
+
+ if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
+ raise ValueError(
+ f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
+ )
+ if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None:
+ for layer_number_per_block in transformer_layers_per_block:
+ if isinstance(layer_number_per_block, list):
+ raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.")
+
+ def _set_time_proj(
+ self,
+ time_embedding_type: str,
+ block_out_channels: int,
+ flip_sin_to_cos: bool,
+ freq_shift: float,
+ time_embedding_dim: int,
+ ) -> Tuple[int, int]:
+ if time_embedding_type == "fourier":
+ time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
+ if time_embed_dim % 2 != 0:
+ raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
+ self.time_proj = GaussianFourierProjection(
+ time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
+ )
+ timestep_input_dim = time_embed_dim
+ elif time_embedding_type == "positional":
+ time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
+
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
+ timestep_input_dim = block_out_channels[0]
+ else:
+ raise ValueError(
+ f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
+ )
+
+ return time_embed_dim, timestep_input_dim
+
+ def _set_encoder_hid_proj(
+ self,
+ encoder_hid_dim_type: Optional[str],
+ cross_attention_dim: Union[int, Tuple[int]],
+ encoder_hid_dim: Optional[int],
+ ):
+ if encoder_hid_dim_type is None and encoder_hid_dim is not None:
+ encoder_hid_dim_type = "text_proj"
+ self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
+ logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
+
+ if encoder_hid_dim is None and encoder_hid_dim_type is not None:
+ raise ValueError(
+ f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
+ )
+
+ if encoder_hid_dim_type == "text_proj":
+ self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
+ elif encoder_hid_dim_type == "text_image_proj":
+ # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
+ # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)`
+ self.encoder_hid_proj = TextImageProjection(
+ text_embed_dim=encoder_hid_dim,
+ image_embed_dim=cross_attention_dim,
+ cross_attention_dim=cross_attention_dim,
+ )
+ elif encoder_hid_dim_type == "image_proj":
+ # Kandinsky 2.2
+ self.encoder_hid_proj = ImageProjection(
+ image_embed_dim=encoder_hid_dim,
+ cross_attention_dim=cross_attention_dim,
+ )
+ elif encoder_hid_dim_type is not None:
+ raise ValueError(
+ f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
+ )
+ else:
+ self.encoder_hid_proj = None
+
+ def _set_class_embedding(
+ self,
+ class_embed_type: Optional[str],
+ act_fn: str,
+ num_class_embeds: Optional[int],
+ projection_class_embeddings_input_dim: Optional[int],
+ time_embed_dim: int,
+ timestep_input_dim: int,
+ ):
+ if class_embed_type is None and num_class_embeds is not None:
+ self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
+ elif class_embed_type == "timestep":
+ self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
+ elif class_embed_type == "identity":
+ self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
+ elif class_embed_type == "projection":
+ if projection_class_embeddings_input_dim is None:
+ raise ValueError(
+ "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
+ )
+ # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
+ # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
+ # 2. it projects from an arbitrary input dimension.
+ #
+ # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
+ # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
+ # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
+ elif class_embed_type == "simple_projection":
+ if projection_class_embeddings_input_dim is None:
+ raise ValueError(
+ "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
+ )
+ self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
+ else:
+ self.class_embedding = None
+
+ def _set_add_embedding(
+ self,
+ addition_embed_type: str,
+ addition_embed_type_num_heads: int,
+ addition_time_embed_dim: Optional[int],
+ flip_sin_to_cos: bool,
+ freq_shift: float,
+ cross_attention_dim: Optional[int],
+ encoder_hid_dim: Optional[int],
+ projection_class_embeddings_input_dim: Optional[int],
+ time_embed_dim: int,
+ ):
+ if addition_embed_type == "text":
+ if encoder_hid_dim is not None:
+ text_time_embedding_from_dim = encoder_hid_dim
+ else:
+ text_time_embedding_from_dim = cross_attention_dim
+
+ self.add_embedding = TextTimeEmbedding(
+ text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
+ )
+ elif addition_embed_type == "text_image":
+ # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
+ # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)`
+ self.add_embedding = TextImageTimeEmbedding(
+ text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
+ )
+ elif addition_embed_type == "text_time":
+ self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
+ self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
+ elif addition_embed_type == "image":
+ # Kandinsky 2.2
+ self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
+ elif addition_embed_type == "image_hint":
+ # Kandinsky 2.2 ControlNet
+ self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
+ elif addition_embed_type is not None:
+ raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
+
+ def _set_pos_net_if_use_gligen(self, attention_type: str, cross_attention_dim: int):
+ if attention_type in ["gated", "gated-text-image"]:
+ positive_len = 768
+ if isinstance(cross_attention_dim, int):
+ positive_len = cross_attention_dim
+ elif isinstance(cross_attention_dim, (list, tuple)):
+ positive_len = cross_attention_dim[0]
+
+ feature_type = "text-only" if attention_type == "gated" else "text-image"
+ self.position_net = GLIGENTextBoundingboxProjection(
+ positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type
+ )
+
+ @property
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
+ r"""
+ Returns:
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
+ indexed by their weight names.
+ """
+ # set recursively
+ processors = {}
+
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
+ if hasattr(module, "get_processor"):
+ processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
+
+ for sub_name, child in module.named_children():
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
+
+ return processors
+
+ for name, module in self.named_children():
+ fn_recursive_add_processors(name, module, processors)
+
+ return processors
+
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+ r"""
+ Sets the attention processor to use to compute attention.
+
+ Parameters:
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
+ for **all** `Attention` layers.
+
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
+ processor. This is strongly recommended when setting trainable attention processors.
+
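+ Example (illustrative): `unet.set_attn_processor(AttnProcessor())` resets
+ every attention layer to the basic processor defined in diffusers.
+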
+ """
+ count = len(self.attn_processors.keys())
+
+ if isinstance(processor, dict) and len(processor) != count:
+ raise ValueError(
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
+ )
+
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
+ if hasattr(module, "set_processor"):
+ if not isinstance(processor, dict):
+ module.set_processor(processor)
+ else:
+ module.set_processor(processor.pop(f"{name}.processor"))
+
+ for sub_name, child in module.named_children():
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
+
+ for name, module in self.named_children():
+ fn_recursive_attn_processor(name, module, processor)
+
+ def set_default_attn_processor(self):
+ """
+ Disables custom attention processors and sets the default attention implementation.
+ """
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
+ processor = AttnAddedKVProcessor()
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
+ processor = AttnProcessor()
+ else:
+ raise ValueError(
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
+ )
+
+ self.set_attn_processor(processor)
+
+ def set_attention_slice(self, slice_size: Union[str, int, List[int]] = "auto"):
+ r"""
+ Enable sliced attention computation.
+
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
+
+ Args:
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
+ must be a multiple of `slice_size`.
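+
+ Example (illustrative): `unet.set_attention_slice("auto")` computes each
+ attention layer in two slices, trading a little speed for lower peak memory.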
+ """
+ sliceable_head_dims = []
+
+ def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
+ if hasattr(module, "set_attention_slice"):
+ sliceable_head_dims.append(module.sliceable_head_dim)
+
+ for child in module.children():
+ fn_recursive_retrieve_sliceable_dims(child)
+
+ # retrieve number of attention layers
+ for module in self.children():
+ fn_recursive_retrieve_sliceable_dims(module)
+
+ num_sliceable_layers = len(sliceable_head_dims)
+
+ if slice_size == "auto":
+ # half the attention head size is usually a good trade-off between
+ # speed and memory
+ slice_size = [dim // 2 for dim in sliceable_head_dims]
+ elif slice_size == "max":
+ # make smallest slice possible
+ slice_size = num_sliceable_layers * [1]
+
+ slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
+
+ if len(slice_size) != len(sliceable_head_dims):
+ raise ValueError(
+ f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
+ f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
+ )
+
+ for i in range(len(slice_size)):
+ size = slice_size[i]
+ dim = sliceable_head_dims[i]
+ if size is not None and size > dim:
+ raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
+
+ # Recursively walk through all the children.
+ # Any children which exposes the set_attention_slice method
+ # gets the message
+ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
+ if hasattr(module, "set_attention_slice"):
+ module.set_attention_slice(slice_size.pop())
+
+ for child in module.children():
+ fn_recursive_set_attention_slice(child, slice_size)
+
+ reversed_slice_size = list(reversed(slice_size))
+ for module in self.children():
+ fn_recursive_set_attention_slice(module, reversed_slice_size)
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ if hasattr(module, "gradient_checkpointing"):
+ module.gradient_checkpointing = value
+
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
+ r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
+
+ The suffixes after the scaling factors represent the stage blocks where they are being applied.
+
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
+ are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
+
+ Args:
+ s1 (`float`):
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
+ mitigate the "oversmoothing effect" in the enhanced denoising process.
+ s2 (`float`):
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
+ mitigate the "oversmoothing effect" in the enhanced denoising process.
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
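+
+ Example (illustrative values only; see the FreeU repository for per-model
+ recommendations): `unet.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)`.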
+ """
+ for i, upsample_block in enumerate(self.up_blocks):
+ setattr(upsample_block, "s1", s1)
+ setattr(upsample_block, "s2", s2)
+ setattr(upsample_block, "b1", b1)
+ setattr(upsample_block, "b2", b2)
+
+ def disable_freeu(self):
+ """Disables the FreeU mechanism."""
+ freeu_keys = {"s1", "s2", "b1", "b2"}
+ for i, upsample_block in enumerate(self.up_blocks):
+ for k in freeu_keys:
+ if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
+ setattr(upsample_block, k, None)
+
+ def fuse_qkv_projections(self):
+ """
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
+ are fused. For cross-attention modules, key and value projection matrices are fused.
+
+
+
+ This API is 🧪 experimental.
+
+
+ """
+ self.original_attn_processors = None
+
+ for _, attn_processor in self.attn_processors.items():
+ if "Added" in str(attn_processor.__class__.__name__):
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
+
+ self.original_attn_processors = self.attn_processors
+
+ for module in self.modules():
+ if isinstance(module, Attention):
+ module.fuse_projections(fuse=True)
+
+ def unfuse_qkv_projections(self):
+ """Disables the fused QKV projection if enabled.
+
+
+
+ This API is 🧪 experimental.
+
+
+
+ """
+ if self.original_attn_processors is not None:
+ self.set_attn_processor(self.original_attn_processors)
+
+ def unload_lora(self):
+ """Unloads LoRA weights."""
+ deprecate(
+ "unload_lora",
+ "0.28.0",
+ "Calling `unload_lora()` is deprecated and will be removed in a future version. Please install `peft` and then call `disable_adapters().",
+ )
+ for module in self.modules():
+ if hasattr(module, "set_lora_layer"):
+ module.set_lora_layer(None)
+
+ def get_time_embed(
+ self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int]
+ ) -> Optional[torch.Tensor]:
+ timesteps = timestep
+ if not torch.is_tensor(timesteps):
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
+ # This would be a good case for the `match` statement (Python 3.10+)
+ is_mps = sample.device.type == "mps"
+ if isinstance(timestep, float):
+ dtype = torch.float32 if is_mps else torch.float64
+ else:
+ dtype = torch.int32 if is_mps else torch.int64
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
+ elif len(timesteps.shape) == 0:
+ timesteps = timesteps[None].to(sample.device)
+
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+ timesteps = timesteps.expand(sample.shape[0])
+
+ t_emb = self.time_proj(timesteps)
+ # `Timesteps` does not contain any weights and will always return f32 tensors
+ # but time_embedding might actually be running in fp16. so we need to cast here.
+ # there might be better ways to encapsulate this.
+ t_emb = t_emb.to(dtype=sample.dtype)
+ return t_emb
+
+ def get_class_embed(self, sample: torch.Tensor, class_labels: Optional[torch.Tensor]) -> Optional[torch.Tensor]:
+ class_emb = None
+ if self.class_embedding is not None:
+ if class_labels is None:
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
+
+ if self.config.class_embed_type == "timestep":
+ class_labels = self.time_proj(class_labels)
+
+ # `Timesteps` does not contain any weights and will always return f32 tensors
+ # there might be better ways to encapsulate this.
+ class_labels = class_labels.to(dtype=sample.dtype)
+
+ class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
+ return class_emb
+
+ def get_aug_embed(
+ self, emb: torch.Tensor, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
+ ) -> Optional[torch.Tensor]:
+ aug_emb = None
+ if self.config.addition_embed_type == "text":
+ aug_emb = self.add_embedding(encoder_hidden_states)
+ elif self.config.addition_embed_type == "text_image":
+ # Kandinsky 2.1 - style
+ if "image_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
+ )
+
+ image_embs = added_cond_kwargs.get("image_embeds")
+ text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
+ aug_emb = self.add_embedding(text_embs, image_embs)
+ elif self.config.addition_embed_type == "text_time":
+ # SDXL - style
+ if "text_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
+ )
+ text_embeds = added_cond_kwargs.get("text_embeds")
+ if "time_ids" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
+ )
+ time_ids = added_cond_kwargs.get("time_ids")
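+ # SDXL micro-conditioning: the time_ids (original size, crop coords and
+ # target size) are embedded like timesteps, flattened per sample, and
+ # concatenated with the pooled text embeddings before the projection below.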
+ time_embeds = self.add_time_proj(time_ids.flatten())
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
+ add_embeds = add_embeds.to(emb.dtype)
+ aug_emb = self.add_embedding(add_embeds)
+ elif self.config.addition_embed_type == "image":
+ # Kandinsky 2.2 - style
+ if "image_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
+ )
+ image_embs = added_cond_kwargs.get("image_embeds")
+ aug_emb = self.add_embedding(image_embs)
+ elif self.config.addition_embed_type == "image_hint":
+ # Kandinsky 2.2 - style
+ if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
+ )
+ image_embs = added_cond_kwargs.get("image_embeds")
+ hint = added_cond_kwargs.get("hint")
+ aug_emb = self.add_embedding(image_embs, hint)
+ return aug_emb
+
+ def process_encoder_hidden_states(
+ self, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
+ ) -> torch.Tensor:
+ if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
+ encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
+ # Kandinsky 2.1 - style
+ if "image_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
+ )
+
+ image_embeds = added_cond_kwargs.get("image_embeds")
+ encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj":
+ # Kandinsky 2.2 - style
+ if "image_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
+ )
+ image_embeds = added_cond_kwargs.get("image_embeds")
+ encoder_hidden_states = self.encoder_hid_proj(image_embeds)
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj":
+ if "image_embeds" not in added_cond_kwargs:
+ raise ValueError(
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
+ )
+ image_embeds = added_cond_kwargs.get("image_embeds")
+ image_embeds = self.encoder_hid_proj(image_embeds)
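+ # Pack text and projected image embeddings into a tuple; downstream
+ # IP-Adapter attention processors expect this pair and route the image
+ # embeddings through their extra cross-attention layers.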
+ encoder_hidden_states = (encoder_hidden_states, image_embeds)
+ return encoder_hidden_states
+
+ def forward(
+ self,
+ sample: torch.Tensor,
+ timestep: Union[torch.Tensor, float, int],
+ encoder_hidden_states: torch.Tensor,
+ class_labels: Optional[torch.Tensor] = None,
+ timestep_cond: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
+ down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
+ mid_block_additional_residual: Optional[torch.Tensor] = None,
+ down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ return_dict: bool = True,
+ ) -> Union[UNet2DConditionOutput, Tuple]:
+ r"""
+ The [`UNet2DConditionModel`] forward method.
+
+ Args:
+ sample (`torch.Tensor`):
+ The noisy input tensor with the following shape `(batch, channel, height, width)`.
+ timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input.
+ encoder_hidden_states (`torch.Tensor`):
+ The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
+ timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
+ Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
+ through the `self.time_embedding` layer to obtain the timestep embeddings.
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
+ negative values to the attention scores corresponding to "discard" tokens.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+ added_cond_kwargs: (`dict`, *optional*):
+ A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
+ are passed along to the UNet blocks.
+ down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
+ A tuple of tensors that if specified are added to the residuals of down unet blocks.
+ mid_block_additional_residual: (`torch.Tensor`, *optional*):
+ A tensor that if specified is added to the residual of the middle unet block.
+ down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
+ additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
+ encoder_attention_mask (`torch.Tensor`):
+ A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
+ `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
+ which adds large negative values to the attention scores corresponding to "discard" tokens.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
+ tuple.
+
+ Returns:
+ [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
+ If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned,
+ otherwise a `tuple` is returned where the first element is the sample tensor.
+ """
+ # By default samples have to be at least a multiple of the overall upsampling factor.
+ # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
+ # However, the upsampling interpolation output size can be forced to fit any upsampling size
+ # on the fly if necessary.
+ default_overall_up_factor = 2**self.num_upsamplers
+
+ # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
+ forward_upsample_size = False
+ upsample_size = None
+
+ for dim in sample.shape[-2:]:
+ if dim % default_overall_up_factor != 0:
+ # Forward upsample size to force interpolation output size.
+ forward_upsample_size = True
+ break
+
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension
+ # expects mask of shape:
+ # [batch, key_tokens]
+ # adds singleton query_tokens dimension:
+ # [batch, 1, key_tokens]
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
+ if attention_mask is not None:
+ # assume that mask is expressed as:
+ # (1 = keep, 0 = discard)
+ # convert mask into a bias that can be added to attention scores:
+ # (keep = +0, discard = -10000.0)
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
+ attention_mask = attention_mask.unsqueeze(1)
+
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
+ if encoder_attention_mask is not None:
+ encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
+
+ # 0. center input if necessary
+ if self.config.center_input_sample:
+ sample = 2 * sample - 1.0
+
+ # 1. time
+ t_emb = self.get_time_embed(sample=sample, timestep=timestep)
+ emb = self.time_embedding(t_emb, timestep_cond)
+ aug_emb = None
+
+ class_emb = self.get_class_embed(sample=sample, class_labels=class_labels)
+ if class_emb is not None:
+ if self.config.class_embeddings_concat:
+ emb = torch.cat([emb, class_emb], dim=-1)
+ else:
+ emb = emb + class_emb
+
+ aug_emb = self.get_aug_embed(
+ emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
+ )
+ if self.config.addition_embed_type == "image_hint":
+ aug_emb, hint = aug_emb
+ sample = torch.cat([sample, hint], dim=1)
+
+ emb = emb + aug_emb if aug_emb is not None else emb
+
+ if self.time_embed_act is not None:
+ emb = self.time_embed_act(emb)
+
+ encoder_hidden_states = self.process_encoder_hidden_states(
+ encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
+ )
+
+ # 2. pre-process
+ sample = self.conv_in(sample)
+
+ # 2.5 GLIGEN position net
+ if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
+ cross_attention_kwargs = cross_attention_kwargs.copy()
+ gligen_args = cross_attention_kwargs.pop("gligen")
+ cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
+
+ # 3. down
+ # we're popping the `scale` instead of getting it because otherwise `scale` will be propagated
+ # to the internal blocks and will raise deprecation warnings. this will be confusing for our users.
+ if cross_attention_kwargs is not None:
+ cross_attention_kwargs = cross_attention_kwargs.copy()
+ lora_scale = cross_attention_kwargs.pop("scale", 1.0)
+ else:
+ lora_scale = 1.0
+
+ if USE_PEFT_BACKEND:
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
+ scale_lora_layers(self, lora_scale)
+
+ is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
+ # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
+ is_adapter = down_intrablock_additional_residuals is not None
+ # maintain backward compatibility for legacy usage, where
+ # T2I-Adapter and ControlNet both use down_block_additional_residuals arg
+ # but can only use one or the other
+ if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
+ deprecate(
+ "T2I should not use down_block_additional_residuals",
+ "1.3.0",
+                "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
+                       and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
+                       for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead.",
+ standard_warn=False,
+ )
+ down_intrablock_additional_residuals = down_block_additional_residuals
+ is_adapter = True
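+        # Recap of the supported conditioning paths (descriptive comment, not original code):
+        #   ControlNet:  mid_block_additional_residual + down_block_additional_residuals
+        #   T2I-Adapter: down_intrablock_additional_residuals (the preferred new argument)
+        #   Legacy T2I:  down_block_additional_residuals alone, remapped just above with a
+        #                deprecation notice so older pipelines keep working.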
+
+ down_block_res_samples = (sample,)
+ for downsample_block in self.down_blocks:
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
+ # For t2i-adapter CrossAttnDownBlock2D
+ additional_residuals = {}
+ if is_adapter and len(down_intrablock_additional_residuals) > 0:
+ additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
+
+ sample, res_samples = downsample_block(
+ hidden_states=sample,
+ temb=emb,
+ encoder_hidden_states=encoder_hidden_states,
+ attention_mask=attention_mask,
+ cross_attention_kwargs=cross_attention_kwargs,
+ encoder_attention_mask=encoder_attention_mask,
+ **additional_residuals,
+ )
+ else:
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
+ if is_adapter and len(down_intrablock_additional_residuals) > 0:
+ sample += down_intrablock_additional_residuals.pop(0)
+
+ down_block_res_samples += res_samples
+
+ if is_controlnet:
+ new_down_block_res_samples = ()
+
+ for down_block_res_sample, down_block_additional_residual in zip(
+ down_block_res_samples, down_block_additional_residuals
+ ):
+ down_block_res_sample = down_block_res_sample + down_block_additional_residual
+ new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
+
+ down_block_res_samples = new_down_block_res_samples
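+            # Note (added comment): each ControlNet residual is added element-wise to the
+            # matching down-block output, so both sequences must have equal length and
+            # pairwise-matching tensor shapes; the rebuilt tuple then feeds the up blocks.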
+
+ # 4. mid
+ if self.mid_block is not None:
+ if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
+ sample = self.mid_block(
+ sample,
+ emb,
+ encoder_hidden_states=encoder_hidden_states,
+ attention_mask=attention_mask,
+ cross_attention_kwargs=cross_attention_kwargs,
+ encoder_attention_mask=encoder_attention_mask,
+ )
+ else:
+ sample = self.mid_block(sample, emb)
+
+ # To support T2I-Adapter-XL
+ if (
+ is_adapter
+ and len(down_intrablock_additional_residuals) > 0
+ and sample.shape == down_intrablock_additional_residuals[0].shape
+ ):
+ sample += down_intrablock_additional_residuals.pop(0)
+
+ if is_controlnet:
+ sample = sample + mid_block_additional_residual
+
+ # 5. up
+ for i, upsample_block in enumerate(self.up_blocks):
+ is_final_block = i == len(self.up_blocks) - 1
+
+ res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
+ down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
+
+ # if we have not reached the final block and need to forward the
+ # upsample size, we do it here
+ if not is_final_block and forward_upsample_size:
+ upsample_size = down_block_res_samples[-1].shape[2:]
+
+ if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
+ sample = upsample_block(
+ hidden_states=sample,
+ temb=emb,
+ res_hidden_states_tuple=res_samples,
+ encoder_hidden_states=encoder_hidden_states,
+ cross_attention_kwargs=cross_attention_kwargs,
+ upsample_size=upsample_size,
+ attention_mask=attention_mask,
+ encoder_attention_mask=encoder_attention_mask,
+ )
+ else:
+ sample = upsample_block(
+ hidden_states=sample,
+ temb=emb,
+ res_hidden_states_tuple=res_samples,
+ upsample_size=upsample_size,
+ )
+
+ # 6. post-process
+ if self.conv_norm_out:
+ sample = self.conv_norm_out(sample)
+ sample = self.conv_act(sample)
+ sample = self.conv_out(sample)
+
+ if USE_PEFT_BACKEND:
+ # remove `lora_scale` from each PEFT layer
+ unscale_lora_layers(self, lora_scale)
+
+ if not return_dict:
+ return (sample,)
+
+ return UNet2DConditionOutput(sample=sample)
\ No newline at end of file
diff --git a/modules/u_net_modify.py b/modules/u_net_modify.py
new file mode 100644
index 0000000000000000000000000000000000000000..f17bc35106111d516205708fc4acd5d8a6e5bd41
--- /dev/null
+++ b/modules/u_net_modify.py
@@ -0,0 +1,315 @@
+
+import inspect
+import os
+from collections import defaultdict
+from contextlib import nullcontext
+from functools import partial
+from pathlib import Path
+from typing import Callable, Dict, List, Optional, Union
+
+
+import safetensors
+import torch
+import torch.nn.functional as F
+from huggingface_hub.utils import validate_hf_hub_args
+from torch import nn
+
+from diffusers.models.embeddings import (
+ ImageProjection,
+ IPAdapterFaceIDImageProjection,
+ IPAdapterFaceIDPlusImageProjection,
+ IPAdapterFullImageProjection,
+ IPAdapterPlusImageProjection,
+ MultiIPAdapterImageProjection,
+)
+
+from diffusers.models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta, load_state_dict
+
+from diffusers.loaders.unet import UNet2DConditionLoadersMixin
+from diffusers.utils import (
+ USE_PEFT_BACKEND,
+ _get_model_file,
+ delete_adapter_layers,
+ is_accelerate_available,
+ is_torch_version,
+ logging,
+ set_adapter_layers,
+ set_weights_and_activate_adapters,
+)
+
+from diffusers.loaders.utils import AttnProcsLayers
+
+from .attention_modify import AttnProcessor, IPAdapterAttnProcessor, AttnProcessor2_0, IPAdapterAttnProcessor2_0
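+
+# Note: `FromOriginalUNetMixin.from_single_file` below calls
+# `load_single_file_model_checkpoint`, `infer_stable_cascade_single_file_config` and
+# `convert_stable_cascade_unet_single_file_to_diffusers`, which are not imported in this
+# module. In the diffusers release this mixin was adapted from, they live in
+# `diffusers.loaders.single_file_utils`; whether that import path matches the pinned
+# diffusers version is an assumption, so it is left here as a comment rather than an import:
+# from diffusers.loaders.single_file_utils import (
+#     convert_stable_cascade_unet_single_file_to_diffusers,
+#     infer_stable_cascade_single_file_config,
+#     load_single_file_model_checkpoint,
+# )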
+
+if is_accelerate_available():
+ from accelerate import init_empty_weights
+ from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
+
+logger = logging.get_logger(__name__)
+
+
+
+class UNet2DConditionLoadersMixin_modify(UNet2DConditionLoadersMixin):
+ def _convert_ip_adapter_attn_to_diffusers(self, state_dicts, low_cpu_mem_usage=False):
+
+ if low_cpu_mem_usage:
+ if is_accelerate_available():
+ from accelerate import init_empty_weights
+
+ else:
+ low_cpu_mem_usage = False
+ logger.warning(
+ "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
+ " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
+ " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
+ " install accelerate\n```\n."
+ )
+
+ if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
+ raise NotImplementedError(
+ "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
+ " `low_cpu_mem_usage=False`."
+ )
+
+ # set ip-adapter cross-attention processors & load state_dict
+ attn_procs = {}
+ key_id = 1
+ init_context = init_empty_weights if low_cpu_mem_usage else nullcontext
+ for name in self.attn_processors.keys():
+ cross_attention_dim = None if name.endswith("attn1.processor") else self.config.cross_attention_dim
+ if name.startswith("mid_block"):
+ hidden_size = self.config.block_out_channels[-1]
+ elif name.startswith("up_blocks"):
+ block_id = int(name[len("up_blocks.")])
+ hidden_size = list(reversed(self.config.block_out_channels))[block_id]
+ elif name.startswith("down_blocks"):
+ block_id = int(name[len("down_blocks.")])
+ hidden_size = self.config.block_out_channels[block_id]
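+            # Worked example (comment only, the processor name is illustrative): for
+            # "up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor" the character
+            # after "up_blocks." is "1", so block_id = 1 and hidden_size is the second
+            # entry of the reversed block_out_channels list.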
+
+ if cross_attention_dim is None or "motion_modules" in name:
+ attn_processor_class = (
+ AttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else AttnProcessor
+ )
+ attn_procs[name] = attn_processor_class()
+
+ else:
+ attn_processor_class = (
+ IPAdapterAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else IPAdapterAttnProcessor
+ )
+ num_image_text_embeds = []
+ for state_dict in state_dicts:
+ if "proj.weight" in state_dict["image_proj"]:
+ # IP-Adapter
+ num_image_text_embeds += [4]
+ elif "proj.3.weight" in state_dict["image_proj"]:
+ # IP-Adapter Full Face
+ num_image_text_embeds += [257] # 256 CLIP tokens + 1 CLS token
+ elif "perceiver_resampler.proj_in.weight" in state_dict["image_proj"]:
+ # IP-Adapter Face ID Plus
+ num_image_text_embeds += [4]
+ elif "norm.weight" in state_dict["image_proj"]:
+ # IP-Adapter Face ID
+ num_image_text_embeds += [4]
+ else:
+ # IP-Adapter Plus
+ num_image_text_embeds += [state_dict["image_proj"]["latents"].shape[1]]
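+                # Token-count recap (descriptive comment): plain IP-Adapter, FaceID and
+                # FaceID Plus each contribute 4 image tokens, Full Face contributes 257,
+                # and IP-Adapter Plus contributes as many tokens as its resampler defines,
+                # read from state_dict["image_proj"]["latents"].shape[1].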
+
+ with init_context():
+ attn_procs[name] = attn_processor_class(
+ hidden_size=hidden_size,
+ cross_attention_dim=cross_attention_dim,
+ scale=1.0,
+ num_tokens=num_image_text_embeds,
+ )
+
+ value_dict = {}
+ for i, state_dict in enumerate(state_dicts):
+ value_dict.update({f"to_k_ip.{i}.weight": state_dict["ip_adapter"][f"{key_id}.to_k_ip.weight"]})
+ value_dict.update({f"to_v_ip.{i}.weight": state_dict["ip_adapter"][f"{key_id}.to_v_ip.weight"]})
+
+ if not low_cpu_mem_usage:
+ attn_procs[name].load_state_dict(value_dict)
+ else:
+ device = next(iter(value_dict.values())).device
+ dtype = next(iter(value_dict.values())).dtype
+ load_model_dict_into_meta(attn_procs[name], value_dict, device=device, dtype=dtype)
+
+ key_id += 2
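+                # Note (added comment): diffusers-format IP-Adapter checkpoints index their
+                # cross-attention layers with odd keys (1, 3, 5, ...), so the counter
+                # advances by two to stay aligned with the `{key_id}.to_k_ip.weight`
+                # entries consumed above.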
+
+ return attn_procs
+
+ def _load_ip_adapter_weights(self, state_dicts, low_cpu_mem_usage=False):
+ if not isinstance(state_dicts, list):
+ state_dicts = [state_dicts]
+ # Set encoder_hid_proj after loading ip_adapter weights,
+ # because `IPAdapterPlusImageProjection` also has `attn_processors`.
+ self.encoder_hid_proj = None
+
+ attn_procs = self._convert_ip_adapter_attn_to_diffusers(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage)
+ self.set_attn_processor(attn_procs)
+
+ # convert IP-Adapter Image Projection layers to diffusers
+ image_projection_layers = []
+ for state_dict in state_dicts:
+ image_projection_layer = self._convert_ip_adapter_image_proj_to_diffusers(
+ state_dict["image_proj"], low_cpu_mem_usage=low_cpu_mem_usage
+ )
+ image_projection_layers.append(image_projection_layer)
+
+ self.encoder_hid_proj = MultiIPAdapterImageProjection(image_projection_layers)
+ self.config.encoder_hid_dim_type = "ip_image_proj"
+
+ self.to(dtype=self.dtype, device=self.device)
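+    # Minimal usage sketch (assumption: `unet` is a UNet patched with this mixin and `sd`
+    # is an IP-Adapter checkpoint already split into "image_proj" / "ip_adapter" dicts):
+    #     unet._load_ip_adapter_weights([sd], low_cpu_mem_usage=False)
+    # after which `unet.encoder_hid_proj` projects the image embeddings and every
+    # cross-attention layer runs one of the IPAdapter attention processors imported above.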
+
+ def _load_ip_adapter_loras(self, state_dicts):
+ lora_dicts = {}
+ for key_id, name in enumerate(self.attn_processors.keys()):
+ for i, state_dict in enumerate(state_dicts):
+ if f"{key_id}.to_k_lora.down.weight" in state_dict["ip_adapter"]:
+ if i not in lora_dicts:
+ lora_dicts[i] = {}
+ lora_dicts[i].update(
+ {
+ f"unet.{name}.to_k_lora.down.weight": state_dict["ip_adapter"][
+ f"{key_id}.to_k_lora.down.weight"
+ ]
+ }
+ )
+ lora_dicts[i].update(
+ {
+ f"unet.{name}.to_q_lora.down.weight": state_dict["ip_adapter"][
+ f"{key_id}.to_q_lora.down.weight"
+ ]
+ }
+ )
+ lora_dicts[i].update(
+ {
+ f"unet.{name}.to_v_lora.down.weight": state_dict["ip_adapter"][
+ f"{key_id}.to_v_lora.down.weight"
+ ]
+ }
+ )
+ lora_dicts[i].update(
+ {
+ f"unet.{name}.to_out_lora.down.weight": state_dict["ip_adapter"][
+ f"{key_id}.to_out_lora.down.weight"
+ ]
+ }
+ )
+ lora_dicts[i].update(
+ {f"unet.{name}.to_k_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_k_lora.up.weight"]}
+ )
+ lora_dicts[i].update(
+ {f"unet.{name}.to_q_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_q_lora.up.weight"]}
+ )
+ lora_dicts[i].update(
+ {f"unet.{name}.to_v_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_v_lora.up.weight"]}
+ )
+ lora_dicts[i].update(
+ {
+ f"unet.{name}.to_out_lora.up.weight": state_dict["ip_adapter"][
+ f"{key_id}.to_out_lora.up.weight"
+ ]
+ }
+ )
+ return lora_dicts
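+    # For reference (comment only): the returned `lora_dicts` maps each adapter index to a
+    # flat state dict keyed like "unet.<attn_name>.to_q_lora.down.weight"; the calling
+    # pipeline is assumed to load these as the FaceID LoRA weights afterwards.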
+
+
+class FromOriginalUNetMixin:
+ """
+ Load pretrained UNet model weights saved in the `.ckpt` or `.safetensors` format into a [`StableCascadeUNet`].
+ """
+
+ @classmethod
+ @validate_hf_hub_args
+ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
+ r"""
+ Instantiate a [`StableCascadeUNet`] from pretrained StableCascadeUNet weights saved in the original `.ckpt` or
+ `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default.
+
+ Parameters:
+ pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
+ Can be either:
+ - A link to the `.ckpt` file (for example
+                  `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
+ - A path to a *file* containing all pipeline weights.
+ config: (`dict`, *optional*):
+                Dictionary containing the configuration of the model.
+ torch_dtype (`str` or `torch.dtype`, *optional*):
+ Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
+ dtype is automatically derived from the model's weights.
+ force_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
+ cached versions if they exist.
+ cache_dir (`Union[str, os.PathLike]`, *optional*):
+ Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
+ is not used.
+ resume_download (`bool`, *optional*, defaults to `False`):
+ Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
+ incompletely downloaded files are deleted.
+ proxies (`Dict[str, str]`, *optional*):
+ A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
+ local_files_only (`bool`, *optional*, defaults to `False`):
+ Whether to only load local model weights and configuration files or not. If set to True, the model
+ won't be downloaded from the Hub.
+ token (`str` or *bool*, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
+ `diffusers-cli login` (stored in `~/.huggingface`) is used.
+ revision (`str`, *optional*, defaults to `"main"`):
+ The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
+ allowed by Git.
+ kwargs (remaining dictionary of keyword arguments, *optional*):
+ Can be used to overwrite load and saveable variables of the model.
+
+ """
+ class_name = cls.__name__
+ if class_name != "StableCascadeUNet":
+ raise ValueError("FromOriginalUNetMixin is currently only compatible with StableCascadeUNet")
+
+ config = kwargs.pop("config", None)
+ resume_download = kwargs.pop("resume_download", False)
+ force_download = kwargs.pop("force_download", False)
+ proxies = kwargs.pop("proxies", None)
+ token = kwargs.pop("token", None)
+ cache_dir = kwargs.pop("cache_dir", None)
+ local_files_only = kwargs.pop("local_files_only", None)
+ revision = kwargs.pop("revision", None)
+ torch_dtype = kwargs.pop("torch_dtype", None)
+
+ checkpoint = load_single_file_model_checkpoint(
+ pretrained_model_link_or_path,
+ resume_download=resume_download,
+ force_download=force_download,
+ proxies=proxies,
+ token=token,
+ cache_dir=cache_dir,
+ local_files_only=local_files_only,
+ revision=revision,
+ )
+
+ if config is None:
+ config = infer_stable_cascade_single_file_config(checkpoint)
+ model_config = cls.load_config(**config, **kwargs)
+ else:
+ model_config = config
+
+ ctx = init_empty_weights if is_accelerate_available() else nullcontext
+ with ctx():
+ model = cls.from_config(model_config, **kwargs)
+
+ diffusers_format_checkpoint = convert_stable_cascade_unet_single_file_to_diffusers(checkpoint)
+ if is_accelerate_available():
+ unexpected_keys = load_model_dict_into_meta(model, diffusers_format_checkpoint, dtype=torch_dtype)
+ if len(unexpected_keys) > 0:
+                logger.warning(
+                    f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {', '.join(unexpected_keys)}"
+ )
+
+ else:
+ model.load_state_dict(diffusers_format_checkpoint)
+
+ if torch_dtype is not None:
+ model.to(torch_dtype)
+
+ return model
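+
+# Hedged usage sketch (assumes a `StableCascadeUNet` class that also inherits this mixin;
+# the checkpoint path is a placeholder, not a real asset):
+#     unet = StableCascadeUNet.from_single_file(
+#         "path/to/stable_cascade_unet.safetensors", torch_dtype=torch.float16
+#     )
+# The class-name guard above makes `from_single_file` raise a ValueError for any other class.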
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..17e578a279b1d29980382dfec0d76230a4b4876d
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,16 @@
+torch
+einops==0.8.0
+diffusers==0.29.0
+transformers==4.41.2
+k_diffusion==0.1.1.post1
+safetensors==0.4.3
+gradio==3.44.4
+timm==0.6.7
+basicsr==1.4.2
+controlnet-aux==0.0.9
+mediapipe==0.10.14
+kaleido==0.2.1
+insightface==0.7.3
+onnxruntime-gpu
+peft
+pytorch_lightning==2.2.5
\ No newline at end of file