2023-01-16 00:46:00 +00:00
|
|
|
from diffusers import (
|
2023-01-16 00:54:20 +00:00
|
|
|
DiffusionPipeline,
|
|
|
|
# onnx
|
|
|
|
OnnxStableDiffusionPipeline,
|
|
|
|
OnnxStableDiffusionImg2ImgPipeline,
|
|
|
|
OnnxStableDiffusionInpaintPipeline,
|
2023-01-16 00:46:00 +00:00
|
|
|
)
|
2023-01-16 00:54:20 +00:00
|
|
|
from os import environ
|
|
|
|
from PIL import Image
|
2023-01-16 01:14:58 +00:00
|
|
|
from typing import Any
|
2023-01-16 00:46:00 +00:00
|
|
|
|
|
|
|
import numpy as np
|
|
|
|
|
2023-01-16 00:54:20 +00:00
|
|
|
from .image import (
|
|
|
|
expand_image,
|
|
|
|
)
|
|
|
|
from .upscale import (
|
|
|
|
upscale_resrgan,
|
|
|
|
)
|
|
|
|
from .utils import (
|
2023-01-16 01:14:58 +00:00
|
|
|
safer_join,
|
|
|
|
BaseParams,
|
|
|
|
Border,
|
2023-01-16 01:33:40 +00:00
|
|
|
OutputPath,
|
2023-01-16 13:31:42 +00:00
|
|
|
ServerContext,
|
2023-01-16 01:14:58 +00:00
|
|
|
Size,
|
2023-01-16 00:54:20 +00:00
|
|
|
)
|
|
|
|
|
2023-01-16 00:46:00 +00:00
|
|
|
# Module-level cache of the most recently loaded pipeline, so repeated
# requests with identical options can reuse it instead of reloading the
# model from disk (see load_pipeline below).
last_pipeline_instance = None
# (pipeline class, model, provider) tuple that produced the cached instance
last_pipeline_options = (None, None, None)
# scheduler class currently attached to the cached pipeline
last_pipeline_scheduler = None


# from https://www.travelneil.com/stable-diffusion-updates.html
|
|
|
|
|
|
|
|
|
2023-01-16 01:14:58 +00:00
|
|
|
def get_latents_from_seed(seed: int, size: "Size", batch: int = 1) -> np.ndarray:
    """Build the initial latent noise tensor for a diffusion run.

    Deterministic for a given seed, so the same seed reproduces the same
    image. Based on https://www.travelneil.com/stable-diffusion-updates.html

    Args:
        seed: RNG seed for the latent noise.
        size: output image size; latents are 1/8 scale in each dimension.
        batch: number of images in the batch (defaults to 1, the previous
            hard-coded value, so existing callers are unchanged).

    Returns:
        float32 array of shape (batch, 4, height // 8, width // 8).
    """
    latents_shape = (batch, 4, size.height // 8, size.width // 8)
    # Gotta use numpy instead of torch, because torch's randn() doesn't support DML
    rng = np.random.default_rng(seed)
    image_latents = rng.standard_normal(latents_shape).astype(np.float32)
    return image_latents
|
|
|
|
|
|
|
|
|
2023-01-16 13:31:42 +00:00
|
|
|
def load_pipeline(pipeline: DiffusionPipeline, model: str, provider: str, scheduler: Any):
    """Load an ONNX diffusion pipeline, reusing the cached one when possible.

    The last loaded pipeline is cached in module globals; a call with the
    same (pipeline class, model, provider) reuses it instead of repeating the
    expensive from_pretrained load. When only the scheduler differs, it is
    swapped on the cached pipeline rather than reloading the whole model.

    Args:
        pipeline: diffusers pipeline class to instantiate.
        model: model path or hub name to load.
        provider: ONNX runtime execution provider name.
        scheduler: scheduler class to attach to the pipeline.

    Returns:
        The loaded (possibly cached) pipeline instance.
    """
    global last_pipeline_instance
    global last_pipeline_scheduler
    global last_pipeline_options

    options = (pipeline, model, provider)
    # `is not None` instead of `!= None`: identity is the correct (and safe)
    # None check; __eq__ on pipeline objects should not be involved here
    if last_pipeline_instance is not None and last_pipeline_options == options:
        print('reusing existing pipeline')
        pipe = last_pipeline_instance
    else:
        print('loading different pipeline')
        pipe = pipeline.from_pretrained(
            model,
            provider=provider,
            safety_checker=None,
            scheduler=scheduler.from_pretrained(model, subfolder='scheduler')
        )
        last_pipeline_instance = pipe
        last_pipeline_options = options
        last_pipeline_scheduler = scheduler

    # a cache hit above may still need a different scheduler attached
    if last_pipeline_scheduler != scheduler:
        print('changing pipeline scheduler')
        pipe.scheduler = scheduler.from_pretrained(
            model, subfolder='scheduler')
        last_pipeline_scheduler = scheduler

    return pipe
|
2023-01-16 00:54:20 +00:00
|
|
|
|
|
|
|
|
2023-01-16 13:31:42 +00:00
|
|
|
def run_txt2img_pipeline(
    ctx: ServerContext,
    params: BaseParams,
    size: Size,
    output: OutputPath
):
    """Generate an image from a text prompt, upscale it, and save it."""
    txt2img = load_pipeline(
        OnnxStableDiffusionPipeline, params.model, params.provider, params.scheduler)

    # seeded latents and RNG keep the output reproducible for a given seed
    seed_latents = get_latents_from_seed(params.seed, size)
    generator = np.random.RandomState(params.seed)

    result = txt2img(
        params.prompt,
        size.width,
        size.height,
        generator=generator,
        guidance_scale=params.cfg,
        latents=seed_latents,
        negative_prompt=params.negative_prompt,
        num_inference_steps=params.steps,
    )
    image = result.images[0]

    # upscale before saving the final output
    image = upscale_resrgan(image, ctx.model_path)
    image.save(output.path)

    print('saved txt2img output: %s' % (output.file))
|
2023-01-16 00:54:20 +00:00
|
|
|
|
|
|
|
|
2023-01-16 13:31:42 +00:00
|
|
|
def run_img2img_pipeline(
    ctx: ServerContext,
    params: BaseParams,
    output: OutputPath,
    source_image: Image,
    strength: float
):
    """Transform a source image using a prompt, upscale, and save the result."""
    img2img = load_pipeline(
        OnnxStableDiffusionImg2ImgPipeline, params.model, params.provider, params.scheduler)

    # seeded RNG keeps the output reproducible for a given seed
    generator = np.random.RandomState(params.seed)

    result = img2img(
        params.prompt,
        generator=generator,
        guidance_scale=params.cfg,
        image=source_image,
        negative_prompt=params.negative_prompt,
        num_inference_steps=params.steps,
        strength=strength,
    )
    image = result.images[0]

    # upscale before saving the final output
    image = upscale_resrgan(image, ctx.model_path)
    image.save(output.path)

    print('saved img2img output: %s' % (output.file))
|
2023-01-16 00:54:20 +00:00
|
|
|
|
|
|
|
|
|
|
|
def run_inpaint_pipeline(
    ctx: ServerContext,
    params: BaseParams,
    size: Size,
    output: OutputPath,
    source_image: Image,
    mask_image: Image,
    expand: Border,
    noise_source: Any,
    mask_filter: Any
):
    """Inpaint the masked region of a source image and save the result.

    The source and mask are first expanded by the `expand` border, with the
    new area filled from `noise_source` and the mask adjusted by
    `mask_filter`, then fed through the ONNX inpainting pipeline.
    """
    pipe = load_pipeline(OnnxStableDiffusionInpaintPipeline,
                         params.model, params.provider, params.scheduler)

    # seeded latents and RNG keep the output reproducible for a given seed
    latents = get_latents_from_seed(params.seed, size)
    rng = np.random.RandomState(params.seed)

    print('applying mask filter and generating noise source')
    # NOTE(review): _full_dims (the expanded canvas size) is discarded while
    # the pipeline below runs at `size` — confirm the expanded images match
    # size.width/size.height, or the pipeline may resize/crop them
    source_image, mask_image, noise_image, _full_dims = expand_image(
        source_image,
        mask_image,
        expand,
        noise_source=noise_source,
        mask_filter=mask_filter)

    # debug-only dumps of the intermediate images
    if environ.get('DEBUG') is not None:
        # NOTE(review): output.path is used below as the output *file* path
        # for image.save; joining a filename onto it here looks like it may
        # produce a path under the file rather than its directory — verify
        source_image.save(safer_join(output.path, 'last-source.png'))
        mask_image.save(safer_join(output.path, 'last-mask.png'))
        noise_image.save(safer_join(output.path, 'last-noise.png'))

    image = pipe(
        params.prompt,
        generator=rng,
        guidance_scale=params.cfg,
        height=size.height,
        image=source_image,
        latents=latents,
        mask_image=mask_image,
        negative_prompt=params.negative_prompt,
        num_inference_steps=params.steps,
        width=size.width,
    ).images[0]

    image.save(output.path)

    print('saved inpaint output: %s' % (output.file))
|