# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Any, Callable, List, Optional, Union

import numpy as np
import PIL.Image
import torch
from transformers import CLIPImageProcessor, CLIPTokenizer

from ...configuration_utils import FrozenDict
from ...schedulers import DDPMScheduler, KarrasDiffusionSchedulers
from ...utils import deprecate, logging
from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel
from ..pipeline_utils import DiffusionPipeline
from . import StableDiffusionPipelineOutput

logger = logging.get_logger(__name__)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 64 for x in (w, h))  # resize to integer multiple of 64

        image = [np.array(i.resize((w, h)))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)

    return image
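

# A quick sanity check of `preprocess` (a sketch; assumes Pillow is available):
#
#     >>> img = PIL.Image.new("RGB", (200, 129))
#     >>> preprocess(img).shape  # width and height are rounded down to multiples of 64
#     torch.Size([1, 3, 128, 192])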


class OnnxStableDiffusionUpscalePipeline(DiffusionPipeline):
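    r"""
    Pipeline for text-guided image super-resolution using Stable Diffusion upscaling with ONNX Runtime inference.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (such as downloading, saving, and running on a particular device).
    """
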
    vae: OnnxRuntimeModel
    text_encoder: OnnxRuntimeModel
    tokenizer: CLIPTokenizer
    unet: OnnxRuntimeModel
    low_res_scheduler: DDPMScheduler
    scheduler: KarrasDiffusionSchedulers
    safety_checker: OnnxRuntimeModel
    feature_extractor: CLIPImageProcessor

    _optional_components = ["safety_checker", "feature_extractor"]
    _is_onnx = True

    def __init__(
        self,
        vae: OnnxRuntimeModel,
        text_encoder: OnnxRuntimeModel,
        tokenizer: Any,
        unet: OnnxRuntimeModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: Optional[OnnxRuntimeModel] = None,
        feature_extractor: Optional[CLIPImageProcessor] = None,
        max_noise_level: int = 350,
        num_latent_channels=4,
        num_unet_input_channels=7,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make sure"
                " to update the config accordingly, as not setting `clip_sample` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging"
                " Face Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `safety_checker=None` instead."
            )

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            low_res_scheduler=low_res_scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.register_to_config(
            max_noise_level=max_noise_level,
            num_latent_channels=num_latent_channels,
            num_unet_input_channels=num_unet_input_channels,
        )

    def check_inputs(
        self,
        prompt: Union[str, List[str]],
        image,
        noise_level,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        if (
            not isinstance(image, torch.Tensor)
            and not isinstance(image, PIL.Image.Image)
            and not isinstance(image, np.ndarray)
            and not isinstance(image, list)
        ):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}"
            )

        # verify that the batch sizes of `prompt` and `image` match if `image` is a list or a tensor/array
        if isinstance(image, list) or isinstance(image, np.ndarray):
            if prompt is not None and isinstance(prompt, str):
                batch_size = 1
            elif prompt is not None and isinstance(prompt, list):
                batch_size = len(prompt)
            else:
                batch_size = prompt_embeds.shape[0]

            if isinstance(image, list):
                image_batch_size = len(image)
            else:
                image_batch_size = image.shape[0]
            if batch_size != image_batch_size:
                raise ValueError(
                    f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}."
                    " Please make sure that the passed `prompt` matches the batch size of `image`."
                )

        # check noise level
        if noise_level > self.config.max_noise_level:
            raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}")

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None):
        shape = (batch_size, num_channels_latents, height, width)
        if latents is None:
            latents = generator.randn(*shape).astype(dtype)
        elif latents.shape != shape:
            raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
        return latents
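
    # NOTE: unlike the base text-to-image pipeline, the upscaler's latents share the spatial size of the
    # *low-resolution* input image; the 4x upscaling happens in the VAE decoder. For example, a 128x128 input
    # yields (batch, 4, 128, 128) latents and, after decoding, a 512x512 output image.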

    def decode_latents(self, latents):
        # undo the VAE scaling factor (0.08333 for the x4 upscaler's VAE) before decoding
        latents = 1 / 0.08333 * latents
        image = self.vae(latent_sample=latents)[0]
        image = np.clip(image / 2 + 0.5, 0, 1)
        image = image.transpose((0, 2, 3, 1))
        return image

    def _encode_prompt(
        self,
        prompt: Union[str, List[str]],
        num_images_per_prompt: Optional[int],
        do_classifier_free_guidance: bool,
        negative_prompt: Optional[str],
        prompt_embeds: Optional[np.ndarray] = None,
        negative_prompt_embeds: Optional[np.ndarray] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier-free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from the `prompt` input argument.
            negative_prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt`
                input argument.
        """
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # get prompt text embeddings
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids

            if not np.array_equal(text_input_ids, untruncated_ids):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]

        prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)

        # get unconditional embeddings for classifier-free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt] * batch_size
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that the passed `negative_prompt`"
                    " matches the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="np",
            )
            negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]

        if do_classifier_free_guidance:
            negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)

            # For classifier-free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes.
            prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[np.ndarray, PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 20,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[np.random.RandomState, List[np.random.RandomState]]] = None,
        latents: Optional[np.ndarray] = None,
        prompt_embeds: Optional[np.ndarray] = None,
        negative_prompt_embeds: Optional[np.ndarray] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
        callback_steps: Optional[int] = 1,
    ):
| r""" | |
| Function invoked when calling the pipeline for generation. | |
| Args: | |
| prompt (`str` or `List[str]`): | |
| The prompt or prompts to guide the image generation. | |
| image (`np.ndarray` or `PIL.Image.Image`): | |
| `Image`, or tensor representing an image batch, that will be used as the starting point for the | |
| process. | |
| num_inference_steps (`int`, *optional*, defaults to 50): | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference. This parameter will be modulated by `strength`. | |
| guidance_scale (`float`, *optional*, defaults to 7.5): | |
| Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). | |
| `guidance_scale` is defined as `w` of equation 2. of [Imagen | |
| Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > | |
| 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, | |
| usually at the expense of lower image quality. | |
| noise_level (`float`, defaults to 0.2): | |
| Deteremines the amount of noise to add to the initial image before performing upscaling. | |
| negative_prompt (`str` or `List[str]`, *optional*): | |
| The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored | |
| if `guidance_scale` is less than `1`). | |
| num_images_per_prompt (`int`, *optional*, defaults to 1): | |
| The number of images to generate per prompt. | |
| eta (`float`, *optional*, defaults to 0.0): | |
| Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to | |
| [`schedulers.DDIMScheduler`], will be ignored for others. | |
| generator (`np.random.RandomState`, *optional*): | |
| A np.random.RandomState to make generation deterministic. | |
| latents (`torch.FloatTensor`, *optional*): | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will ge generated by sampling using the supplied random `generator`. | |
| prompt_embeds (`np.ndarray`, *optional*): | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not | |
| provided, text embeddings will be generated from `prompt` input argument. | |
| negative_prompt_embeds (`np.ndarray`, *optional*): | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input | |
| argument. | |
| output_type (`str`, *optional*, defaults to `"pil"`): | |
| The output format of the generate image. Choose between | |
| [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. | |
| return_dict (`bool`, *optional*, defaults to `True`): | |
| Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a | |
| plain tuple. | |
| callback (`Callable`, *optional*): | |
| A function that will be called every `callback_steps` steps during inference. The function will be | |
| called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. | |
| callback_steps (`int`, *optional*, defaults to 1): | |
| The frequency at which the `callback` function will be called. If not specified, the callback will be | |
| called at every step. | |
| Returns: | |
| [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: | |
| [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. | |
| When returning a tuple, the first element is a list with the generated images, and the second element is a | |
| list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" | |
| (nsfw) content, according to the `safety_checker`. | |
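
        Example (a minimal sketch; the checkpoint path below is a hypothetical local ONNX export of
        `stabilityai/stable-diffusion-x4-upscaler`):

        ```py
        >>> import numpy as np
        >>> import PIL.Image
        >>> from diffusers import OnnxStableDiffusionUpscalePipeline

        >>> pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained("./sd-x4-upscaler-onnx")
        >>> low_res = PIL.Image.open("low_res_cat.png").convert("RGB").resize((128, 128))
        >>> rng = np.random.RandomState(0)  # seeded for reproducibility
        >>> output = pipe(prompt="a white cat", image=low_res, generator=rng)
        >>> output.images[0].save("upscaled_cat.png")  # the x4 upscaler yields a 512x512 image here
        ```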
| """ | |
        # 1. Check inputs
        self.check_inputs(
            prompt,
            image,
            noise_level,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if generator is None:
            generator = np.random

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )

        # 4. Preprocess image and prepare latents
        latents_dtype = prompt_embeds.dtype
        image = preprocess(image).cpu().numpy()
        height, width = image.shape[2:]

        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            self.num_latent_channels,
            height,
            width,
            latents_dtype,
            generator,
        )
        image = image.astype(latents_dtype)

        # 5. Set timesteps and scale the initial noise by the standard deviation required by the scheduler
        self.scheduler.set_timesteps(num_inference_steps)
        timesteps = self.scheduler.timesteps
        latents = latents * np.float64(self.scheduler.init_noise_sigma)

        # 6. Add noise to image
        noise_level = np.array([noise_level]).astype(np.int64)
        noise = generator.randn(*image.shape).astype(latents_dtype)
        image = self.low_res_scheduler.add_noise(
            torch.from_numpy(image), torch.from_numpy(noise), torch.from_numpy(noise_level)
        )
        image = image.numpy()

        # duplicate the noised image for each generation per prompt and, under classifier-free guidance,
        # once more for the unconditional branch
        batch_multiplier = 2 if do_classifier_free_guidance else 1
        image = np.concatenate([image] * batch_multiplier * num_images_per_prompt)
        noise_level = np.concatenate([noise_level] * image.shape[0])

        # 7. Check that sizes of image and latents match
        num_channels_image = image.shape[1]
        if self.num_latent_channels + num_channels_image != self.num_unet_input_channels:
            raise ValueError(
                "Incorrect configuration settings! The config of `pipeline.unet` expects"
                f" {self.num_unet_input_channels} but received `num_channels_latents`: {self.num_latent_channels} +"
                f" `num_channels_image`: {num_channels_image} "
                f" = {self.num_latent_channels + num_channels_image}. Please verify the config of"
                " `pipeline.unet` or your `image` input."
            )

        # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        # eta (η) is only used with the DDIMScheduler; it is silently ignored for other schedulers
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # look up the numpy dtype the ONNX UNet expects for its `timestep` input
        timestep_dtype = next(
            (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
        )
        timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]

        # 9. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier-free guidance
                latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents

                # concat latents and the noised low-resolution image in the channel dimension
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                latent_model_input = np.concatenate([latent_model_input, image], axis=1)

                # timestep to tensor
                timestep = np.array([t], dtype=timestep_dtype)

                # predict the noise residual
                noise_pred = self.unet(
                    sample=latent_model_input,
                    timestep=timestep,
                    encoder_hidden_states=prompt_embeds,
                    class_labels=noise_level,
                )[0]

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(
                    torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs
                ).prev_sample
                latents = latents.numpy()

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        # 10. Post-processing
        image = self.decode_latents(latents)

        # 11. Run the safety checker (if loaded)
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(
                self.numpy_to_pil(image), return_tensors="np"
            ).pixel_values.astype(image.dtype)

            images, has_nsfw_concept = [], []
            for i in range(image.shape[0]):
                image_i, has_nsfw_concept_i = self.safety_checker(
                    clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1]
                )
                images.append(image_i)
                has_nsfw_concept.append(has_nsfw_concept_i[0])
            image = np.concatenate(images)
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)