# onnx-web/api/onnx_web/chain/blend_inpaint.py
from logging import getLogger
from typing import Callable, Optional, Tuple

import numpy as np
import torch
from diffusers import OnnxStableDiffusionInpaintPipeline
from PIL import Image

from ..diffusion.load import get_latents_from_seed, load_pipeline
from ..image import expand_image, mask_filter_none, noise_source_histogram
from ..output import save_image
from ..params import Border, ImageParams, Size, SizeChart, StageParams
from ..server import JobContext, ProgressCallback, ServerContext
from ..utils import is_debug
from .utils import process_tile_order

logger = getLogger(__name__)
def blend_inpaint(
    job: JobContext,
    server: ServerContext,
    stage: StageParams,
    params: ImageParams,
    source: Image.Image,
    *,
    expand: Border,
    mask: Optional[Image.Image] = None,
    fill_color: str = "white",
    mask_filter: Callable = mask_filter_none,
    noise_source: Callable = noise_source_histogram,
    callback: Optional[ProgressCallback] = None,
    **kwargs,
) -> Image.Image:
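    """
    Blend the source image with inpainting results, running the ONNX inpaint
    pipeline over each tile of the expanded source. White areas of the mask
    are repainted; if no mask is given, an all-black mask keeps the full
    source image.
    """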
    params = params.with_args(**kwargs)
    expand = expand.with_args(**kwargs)

    logger.info(
        "blending image using inpaint, %s steps: %s", params.steps, params.prompt
    )
    if mask is None:
        # if no mask was provided, keep the full source image
        mask = Image.new("RGB", source.size, "black")

    source, mask, noise, _full_dims = expand_image(
        source,
        mask,
        expand,
        fill=fill_color,
        noise_source=noise_source,
        mask_filter=mask_filter,
    )

    if is_debug():
        save_image(server, "last-source.png", source)
        save_image(server, "last-mask.png", mask)
        save_image(server, "last-noise.png", noise)
    def outpaint(tile_source: Image.Image, dims: Tuple[int, int, int]):
        left, top, tile = dims
        size = Size(*tile_source.size)
        tile_mask = mask.crop((left, top, left + tile, top + tile))

        if is_debug():
            save_image(server, "tile-source.png", tile_source)
            save_image(server, "tile-mask.png", tile_mask)

        latents = get_latents_from_seed(params.seed, size)
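        # A minimal sketch of what get_latents_from_seed likely returns: seeded
        # Gaussian noise shaped for the UNet at 1/8 of the pixel size. The shape
        # and dtype here are assumptions; see ..diffusion.load for the real code.
        #
        #     def get_latents_from_seed(seed: int, size: Size) -> np.ndarray:
        #         rng = np.random.default_rng(seed)
        #         shape = (1, 4, size.height // 8, size.width // 8)
        #         return rng.standard_normal(shape).astype(np.float32)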
        pipe = load_pipeline(
            server,
            OnnxStableDiffusionInpaintPipeline,
            params.model,
            params.scheduler,
            job.get_device(),
            params.lpw,
        )
        if params.lpw:
            logger.debug("using LPW pipeline for inpaint")
            rng = torch.manual_seed(params.seed)
            result = pipe.inpaint(
                params.prompt,
                generator=rng,
                guidance_scale=params.cfg,
                height=size.height,
                image=tile_source,
                latents=latents,
                mask_image=tile_mask,
                negative_prompt=params.negative_prompt,
                num_inference_steps=params.steps,
                width=size.width,
                callback=callback,
            )
        else:
            rng = np.random.RandomState(params.seed)
            result = pipe(
                params.prompt,
                generator=rng,
                guidance_scale=params.cfg,
                height=size.height,
                image=tile_source,
                latents=latents,
                # crop the mask to this tile so it matches tile_source,
                # as in the LPW branch above
                mask_image=tile_mask,
                negative_prompt=params.negative_prompt,
                num_inference_steps=params.steps,
                width=size.width,
                callback=callback,
            )

        return result.images[0]
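    # A rough sketch of the tiling contract: process_tile_order (from .utils)
    # walks the source image in stage.tile_order, calls each filter with the
    # cropped tile and its (left, top, tile_size) dims, and pastes the result
    # back. This grid-order version is an assumption, not the exact code:
    #
    #     def process_tile_grid(source, tile, scale, filters):
    #         width, height = source.size
    #         for top in range(0, height, tile):
    #             for left in range(0, width, tile):
    #                 section = source.crop((left, top, left + tile, top + tile))
    #                 for image_filter in filters:
    #                     section = image_filter(section, (left, top, tile))
    #                 source.paste(section, (left, top))
    #         return source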
    output = process_tile_order(
        stage.tile_order, source, SizeChart.auto, 1, [outpaint]
    )

    logger.info("final output image size: %s", output.size)
    return output
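
# A minimal usage sketch, not part of the original module. The job, server,
# stage, and params objects and the expand_border value are assumptions for
# illustration; consult onnx_web.params and onnx_web.server for the real
# constructors.
#
#     from PIL import Image
#
#     source = Image.open("source.png").convert("RGB")
#     mask = Image.open("mask.png").convert("RGB")  # white = repaint
#
#     output = blend_inpaint(
#         job,     # JobContext: selects the execution device
#         server,  # ServerContext: model and output paths
#         stage,   # StageParams: controls tile order
#         params,  # ImageParams: model, scheduler, prompt, seed, steps, cfg
#         source,
#         expand=expand_border,  # Border to grow the canvas by, if any
#         mask=mask,
#     )
#     output.save("blended.png")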