# This file contains a mix of Apache and GPL code and should be treated as a GPL resource
#
# Original attribution:
#
# Copyright 2023 The HuggingFace Team.
#
# Converted for use with ONNX as part of https://github.com/Amblyopius/Stable-Diffusion-ONNX-FP16
#
# Special thanks to https://github.com/uchuusen for the initial conversion effort

import inspect
from typing import Callable, List, Optional, Union

import numpy as np
import PIL.Image  # import the submodule explicitly so the PIL.Image.Image checks below always work
import torch
from diffusers.configuration_utils import FrozenDict
from diffusers.pipelines.onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import PIL_INTERPOLATION, deprecate, logging
from transformers import CLIPImageProcessor, CLIPTokenizer

logger = logging.get_logger(__name__)


class OnnxStableDiffusionControlNetPipeline(DiffusionPipeline):
    vae_encoder: OnnxRuntimeModel
    vae_decoder: OnnxRuntimeModel
    text_encoder: OnnxRuntimeModel
    tokenizer: CLIPTokenizer
    unet: OnnxRuntimeModel
    controlnet: OnnxRuntimeModel
    scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler]

    def __init__(
        self,
        vae_encoder: OnnxRuntimeModel,
        vae_decoder: OnnxRuntimeModel,
        text_encoder: OnnxRuntimeModel,
        tokenizer: CLIPTokenizer,
        unet: OnnxRuntimeModel,
        controlnet: OnnxRuntimeModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: OnnxRuntimeModel,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = False,
    ):
        super().__init__()

        if (
            hasattr(scheduler.config, "steps_offset")
            and scheduler.config.steps_offset != 1
        ):
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` at its current value might lead"
                " to incorrect results in future versions. If you have downloaded this checkpoint from the"
                " Hugging Face Hub, it would be very nice if you could open a pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate(
                "steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False
            )
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if (
            hasattr(scheduler.config, "clip_sample")
            and scheduler.config.clip_sample is True
        ):
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has `clip_sample` set to `True`."
                " `clip_sample` should be set to `False` in the configuration file. Please make sure to update the"
                " config accordingly, as leaving `clip_sample` set to `True` might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate(
                "clip_sample not set", "1.0.0", deprecation_message, standard_warn=False
            )
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        # Note: `safety_checker` and `feature_extractor` are accepted to keep the
        # constructor signature compatible with other Stable Diffusion pipelines,
        # but they are not registered; this conversion does not run a safety checker.
        self.register_modules(
            vae_encoder=vae_encoder,
            vae_decoder=vae_decoder,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            controlnet=controlnet,
            scheduler=scheduler,
        )

    def _default_height_width(self, height, width, image):
        if isinstance(image, list):
            image = image[0]

        if height is None:
            if isinstance(image, PIL.Image.Image):
                height = image.height
            elif isinstance(image, np.ndarray):
                # arrays are expected in NCHW layout, so index 2 is height
                height = image.shape[2]

            height = (height // 8) * 8  # round down to nearest multiple of 8

        if width is None:
            if isinstance(image, PIL.Image.Image):
                width = image.width
            elif isinstance(image, np.ndarray):
                # NCHW layout: index 3 is width
                width = image.shape[3]

            width = (width // 8) * 8  # round down to nearest multiple of 8

        return height, width

    def prepare_image(
        self, image, width, height, batch_size, num_images_per_prompt, dtype
    ):
        if isinstance(image, np.ndarray):
            # a bare array is assumed to already be NCHW and pre-scaled
            image = torch.from_numpy(image)
        else:
            if isinstance(image, PIL.Image.Image):
                image = [image]

            if isinstance(image[0], PIL.Image.Image):
                image = [
                    np.array(
                        i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
                    )[None, :]
                    for i in image
                ]
                image = np.concatenate(image, axis=0)
                image = image.astype(np.float32) / 255.0
                image = image.transpose(0, 3, 1, 2)
                image = torch.from_numpy(image)
            elif isinstance(image[0], np.ndarray):
                image = np.concatenate(image, axis=0)
                image = torch.from_numpy(image)

        image_batch_size = image.shape[0]

        if image_batch_size == 1:
            repeat_by = batch_size
        else:
            # image batch size is the same as prompt batch size
            repeat_by = num_images_per_prompt

        image = image.repeat_interleave(repeat_by, dim=0)

        return image
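
    # Note (illustration, not exercised by the code above): a single 512x512 PIL
    # input with an effective batch size of 2 (batch_size * num_images_per_prompt)
    # comes back as a float32 tensor of shape (2, 3, 512, 512) with values in
    # [0, 1]; ndarray inputs are passed through and must already match that layout.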

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(
        self,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        generator,
        latents=None,
    ):
        shape = (batch_size, num_channels_latents, height // 8, width // 8)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = generator.randn(*shape).astype(dtype)
        elif latents.shape != shape:
            raise ValueError(
                f"Unexpected latents shape, got {latents.shape}, expected {shape}"
            )

        # scale the initial noise by the standard deviation required by the scheduler
        sigma = self.scheduler.init_noise_sigma
        if torch.is_tensor(sigma):
            sigma = sigma.numpy()

        latents = latents * sigma
        return latents
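
    # Note: a 512x512 request with batch_size=1 draws initial noise of shape
    # (1, 4, 64, 64): Stable Diffusion latents have 4 channels and are spatially
    # downsampled by a factor of 8. For plain DDIM/PNDM, init_noise_sigma is 1.0,
    # so the scaling above is a no-op; sigma-based schedulers such as LMSDiscrete
    # report a larger value.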

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta, torch_gen):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper (https://arxiv.org/abs/2010.02502)
        # and should be in [0, 1]

        accepts_eta = "eta" in set(
            inspect.signature(self.scheduler.step).parameters.keys()
        )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(
            inspect.signature(self.scheduler.step).parameters.keys()
        )
        if accepts_generator:
            extra_step_kwargs["generator"] = torch_gen
        return extra_step_kwargs

    def _encode_prompt(
        self,
        prompt,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier-free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
        """
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(
            prompt, padding="max_length", return_tensors="np"
        ).input_ids

        if not np.array_equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(
                untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
            )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
        prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)

        # get unconditional embeddings for classifier-free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt] * batch_size
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that the passed `negative_prompt`"
                    " matches the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="np",
            )
            negative_prompt_embeds = self.text_encoder(
                input_ids=uncond_input.input_ids.astype(np.int32)
            )[0]
            negative_prompt_embeds = np.repeat(
                negative_prompt_embeds, num_images_per_prompt, axis=0
            )

            # For classifier-free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes.
            prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
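
    # Note: with classifier-free guidance enabled, _encode_prompt returns the negative
    # and positive embeddings stacked along the batch axis, i.e. shape
    # (2 * batch_size * num_images_per_prompt, sequence_length, hidden_size).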

    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Optional[Union[np.ndarray, PIL.Image.Image]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[np.random.RandomState] = None,
        latents: Optional[np.ndarray] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
        callback_steps: int = 1,
        controlnet_conditioning_scale: float = 1.0,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(
                f"`prompt` has to be of type `str` or `list` but is {type(prompt)}"
            )

        if generator:
            # derive a torch generator from the NumPy one for schedulers that need it
            torch_seed = generator.randint(2147483647)
            torch_gen = torch.Generator().manual_seed(torch_seed)
        else:
            generator = np.random
            torch_gen = None

        height, width = self._default_height_width(height, width, image)

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(
                f"`height` and `width` have to be divisible by 8 but are {height} and {width}."
            )

        if not isinstance(callback_steps, int) or callback_steps <= 0:
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(
            prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        # prepare the conditioning image
        image = self.prepare_image(
            image,
            width,
            height,
            batch_size * num_images_per_prompt,
            num_images_per_prompt,
            np.float32,
        ).numpy()

        if do_classifier_free_guidance:
            # duplicate the conditioning image for the unconditional and text branches
            image = np.concatenate([image] * 2)

        # get the initial random noise unless the user supplied it
        latents_dtype = prompt_embeds.dtype
        num_channels_latents = 4
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            latents_dtype,
            generator,
            latents,
        )

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        timesteps = self.scheduler.timesteps

        # prepare extra kwargs for the scheduler step (see prepare_extra_step_kwargs above)
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta, torch_gen)

        # look up the ONNX dtype the exported UNet expects for `timestep`
        timestep_dtype = next(
            (
                node_input.type
                for node_input in self.unet.model.get_inputs()
                if node_input.name == "timestep"
            ),
            "tensor(float)",
        )
        timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]
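
        # For an FP16 export (the focus of this repo), the UNet typically reports
        # "tensor(float16)", which ORT_TO_NP_TYPE maps to np.float16; an FP32 export
        # reports "tensor(float)" and maps to np.float32.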

        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier-free guidance
                latent_model_input = (
                    np.concatenate([latents] * 2)
                    if do_classifier_free_guidance
                    else latents
                )
                latent_model_input = self.scheduler.scale_model_input(
                    torch.from_numpy(latent_model_input), t
                )
                latent_model_input = latent_model_input.cpu().numpy()

                timestep = np.array([t], dtype=timestep_dtype)

                # the ONNX ControlNet returns a flat list of 13 residuals:
                # 12 down-block samples followed by the mid-block sample
                blocksamples = self.controlnet(
                    sample=latent_model_input,
                    timestep=timestep,
                    encoder_hidden_states=prompt_embeds,
                    controlnet_cond=image,
                )

                mid_block_res_sample = blocksamples[12]
                down_block_res_samples = blocksamples[0:12]
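
                # controlnet_conditioning_scale uniformly weights how strongly the
                # control residuals steer the UNet: 0.0 zeroes them out (no ControlNet
                # influence), 1.0 applies them at full strength.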

                down_block_res_samples = [
                    down_block_res_sample * controlnet_conditioning_scale
                    for down_block_res_sample in down_block_res_samples
                ]
                mid_block_res_sample *= controlnet_conditioning_scale

                # predict the noise residual, feeding the ControlNet residuals into
                # the UNet's named skip-connection and bottleneck inputs
                noise_pred = self.unet(
                    sample=latent_model_input,
                    timestep=timestep,
                    encoder_hidden_states=prompt_embeds,
                    down_block_0=down_block_res_samples[0],
                    down_block_1=down_block_res_samples[1],
                    down_block_2=down_block_res_samples[2],
                    down_block_3=down_block_res_samples[3],
                    down_block_4=down_block_res_samples[4],
                    down_block_5=down_block_res_samples[5],
                    down_block_6=down_block_res_samples[6],
                    down_block_7=down_block_res_samples[7],
                    down_block_8=down_block_res_samples[8],
                    down_block_9=down_block_res_samples[9],
                    down_block_10=down_block_res_samples[10],
                    down_block_11=down_block_res_samples[11],
                    mid_block_additional_residual=mid_block_res_sample,
                )
                noise_pred = noise_pred[0]

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
                    noise_pred = noise_pred_uncond + guidance_scale * (
                        noise_pred_text - noise_pred_uncond
                    )

                # compute the previous noisy sample x_t -> x_t-1
                scheduler_output = self.scheduler.step(
                    torch.from_numpy(noise_pred),
                    t,
                    torch.from_numpy(latents),
                    **extra_step_kwargs,
                )
                latents = scheduler_output.prev_sample.numpy()

                # call the callback, if provided
                if i == len(timesteps) - 1 or (
                    (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
                ):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)
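
        # 0.18215 is the Stable Diffusion latent scaling factor applied during
        # training; dividing by it restores the latents to the range the VAE
        # decoder was trained on.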

        latents = 1 / 0.18215 * latents
        # image = self.vae_decoder(latent_sample=latents)[0]
        # decoding the whole batch at once gives wrong results with a half-precision
        # VAE decoder when the batch size is larger than 1, so decode one sample at a time
        image = np.concatenate(
            [
                self.vae_decoder(latent_sample=latents[i : i + 1])[0]
                for i in range(latents.shape[0])
            ]
        )

        image = np.clip(image / 2 + 0.5, 0, 1)
        image = image.transpose((0, 2, 3, 1))

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
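

# A minimal usage sketch, not part of the pipeline itself. It assumes a model
# directory produced by this repo's conversion scripts in the usual diffusers
# layout (one subfolder per component plus model_index.json); the directory
# name, the conditioning image path, and the provider choice below are
# placeholders, not values shipped with the repo.
if __name__ == "__main__":
    pipe = OnnxStableDiffusionControlNetPipeline.from_pretrained(
        "./model/sd15_canny_onnx",  # hypothetical conversion output
        provider="CPUExecutionProvider",  # e.g. "DmlExecutionProvider" on DirectML
    )
    control_image = PIL.Image.open("canny_map.png")  # placeholder control image
    result = pipe(
        prompt="a photo of an astronaut riding a horse",
        image=control_image,
        num_inference_steps=30,
        guidance_scale=7.5,
        generator=np.random.RandomState(42),
    )
    result.images[0].save("output.png")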