from logging import getLogger
from math import ceil
from re import Pattern, compile
from typing import List, Optional, Tuple

import numpy as np
from diffusers import OnnxStableDiffusionPipeline

logger = getLogger(__name__)
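
# note: the `logger.trace` calls below assume a custom TRACE level has been
# registered with the logging module elsewhere in the project; the standard
# `logging.Logger` only defines `debug` and above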

INVERSION_TOKEN = compile(r"<inversion:(\w+):([\d.]+)>")
LORA_TOKEN = compile(r"<lora:(\w+):([\d.]+)>")
MAX_TOKENS_PER_GROUP = 77
PATTERN_RANGE = compile(r"(\w+)-\{(\d+),(\d+)(?:,(\d+))?\}")
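# for example, "<lora:outfit:1.5>" matches LORA_TOKEN with groups ("outfit", "1.5"),
# and "sky-{1,4}" matches PATTERN_RANGE; the token names here are illustrative only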


def expand_prompt_ranges(prompt: str) -> str:
    def expand_range(match):
        (base_token, start, end, step) = match.groups(default=1)
        num_tokens = [
            f"{base_token}-{i}" for i in range(int(start), int(end), int(step))
        ]
        return " ".join(num_tokens)

    return PATTERN_RANGE.sub(expand_range, prompt)
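
# a minimal usage sketch: each range token expands to one numbered token per step,
# with the end value excluded (standard range() semantics), e.g.:
#   expand_prompt_ranges("a photo of sky-{1,4}")  # -> "a photo of sky-1 sky-2 sky-3"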


def expand_prompt(
    self: OnnxStableDiffusionPipeline,
    prompt: str,
    num_images_per_prompt: int,
    do_classifier_free_guidance: bool,
    negative_prompt: Optional[str] = None,
) -> np.ndarray:
    # self provides:
    # tokenizer: CLIPTokenizer
    # text_encoder: OnnxRuntimeModel

    batch_size = len(prompt) if isinstance(prompt, list) else 1
|
2023-03-08 02:48:26 +00:00
|
|
|
prompt = expand_prompt_ranges(prompt)
|
2023-03-08 01:00:25 +00:00
|
|
|
|
|
|
|
# split prompt into 75 token chunks
|
|
|
|
tokens = self.tokenizer(
|
|
|
|
prompt,
|
|
|
|
padding="max_length",
|
|
|
|
return_tensors="np",
|
|
|
|
max_length=self.tokenizer.model_max_length,
|
|
|
|
truncation=False,
|
|
|
|
)

    groups_count = ceil(tokens.input_ids.shape[1] / MAX_TOKENS_PER_GROUP)
    logger.trace("splitting %s into %s groups", tokens.input_ids.shape, groups_count)

    groups = []
    # np.array_split(tokens.input_ids, groups_count, axis=1)
    for i in range(groups_count):
        group_start = i * MAX_TOKENS_PER_GROUP
        group_end = min(
            group_start + MAX_TOKENS_PER_GROUP, tokens.input_ids.shape[1]
        )  # shape[1] is the sequence axis, so this clamps to the token count
        logger.trace("building group for token slice [%s : %s]", group_start, group_end)
        groups.append(tokens.input_ids[:, group_start:group_end])

    # encode each chunk
    logger.trace("group token shapes: %s", [t.shape for t in groups])
    group_embeds = []
    for group in groups:
        logger.trace("encoding group: %s", group.shape)
        embeds = self.text_encoder(input_ids=group.astype(np.int32))[0]
        group_embeds.append(embeds)

    # concat those embeds along the sequence axis
    logger.trace("group embeds shape: %s", [t.shape for t in group_embeds])
    prompt_embeds = np.concatenate(group_embeds, axis=1)
    prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)

    # get unconditional embeddings for classifier free guidance
    if do_classifier_free_guidance:
        uncond_tokens: List[str]
        if negative_prompt is None:
            uncond_tokens = [""] * batch_size
        elif type(prompt) is not type(negative_prompt):
            raise TypeError(
                f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                f" {type(prompt)}."
            )
        elif isinstance(negative_prompt, str):
            uncond_tokens = [negative_prompt] * batch_size
        elif batch_size != len(negative_prompt):
            raise ValueError(
                f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                " the batch size of `prompt`."
            )
        else:
            uncond_tokens = negative_prompt

        uncond_input = self.tokenizer(
            uncond_tokens,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        negative_prompt_embeds = self.text_encoder(
            input_ids=uncond_input.input_ids.astype(np.int32)
        )[0]

        # the negative prompt is truncated to a single group, so zero-pad its
        # embeddings along the sequence axis to match the expanded prompt
        negative_padding = tokens.input_ids.shape[1] - negative_prompt_embeds.shape[1]
        logger.trace(
            "padding negative prompt to match input: %s, %s, %s extra tokens",
            tokens.input_ids.shape,
            negative_prompt_embeds.shape,
            negative_padding,
        )
        negative_prompt_embeds = np.pad(
            negative_prompt_embeds,
            [(0, 0), (0, negative_padding), (0, 0)],
            mode="constant",
            constant_values=0,
        )
        negative_prompt_embeds = np.repeat(
            negative_prompt_embeds, num_images_per_prompt, axis=0
        )

        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])

    logger.trace("expanded prompt shape: %s", prompt_embeds.shape)
    return prompt_embeds
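
# a minimal sketch of how this could stand in for the pipeline's stock prompt
# encoder, assuming the pipeline exposes `_encode_prompt` with this signature;
# binding via __get__ supplies `self`, and the model path is illustrative only:
#   pipe = OnnxStableDiffusionPipeline.from_pretrained("./models/stable-diffusion-onnx")
#   pipe._encode_prompt = expand_prompt.__get__(pipe, type(pipe))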
|
2023-03-15 13:30:31 +00:00
|
|
|
|
|
|
|
|
2023-03-16 00:27:29 +00:00
|
|
|
def get_tokens_from_prompt(
|
2023-03-18 04:07:10 +00:00
|
|
|
prompt: str, pattern: Pattern
|
2023-03-16 00:27:29 +00:00
|
|
|
) -> Tuple[str, List[Tuple[str, float]]]:
|
2023-03-15 13:51:12 +00:00
|
|
|
"""
|
|
|
|
TODO: replace with Arpeggio
|
|
|
|
"""
|
2023-03-15 13:30:31 +00:00
|
|
|
remaining_prompt = prompt

    tokens = []
    next_match = pattern.search(remaining_prompt)
    while next_match is not None:
        logger.debug("found token in prompt: %s", next_match)
        name, weight = next_match.groups()
        tokens.append((name, float(weight)))
        # remove this match and look for another
        remaining_prompt = (
            remaining_prompt[: next_match.start()]
            + remaining_prompt[next_match.end() :]
        )
        next_match = pattern.search(remaining_prompt)

    return (remaining_prompt, tokens)
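
# for example, with LORA_TOKEN (the prompt and token name are illustrative):
#   get_tokens_from_prompt("a portrait <lora:detail:0.8>", LORA_TOKEN)
#   # -> ("a portrait ", [("detail", 0.8)])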


def get_loras_from_prompt(prompt: str) -> Tuple[str, List[Tuple[str, float]]]:
    return get_tokens_from_prompt(prompt, LORA_TOKEN)


def get_inversions_from_prompt(prompt: str) -> Tuple[str, List[Tuple[str, float]]]:
    return get_tokens_from_prompt(prompt, INVERSION_TOKEN)