onnx-web/api/onnx_web/chain/upscale_outpaint.py

from logging import getLogger
from typing import Callable, List, Optional, Tuple

import numpy as np
import torch
from PIL import Image, ImageDraw, ImageOps

from ..diffusers.load import load_pipeline
from ..diffusers.utils import (
    encode_prompt,
    get_latents_from_seed,
    get_tile_latents,
    parse_prompt,
)
from ..image import expand_image, mask_filter_none, noise_source_histogram
from ..output import save_image
from ..params import Border, ImageParams, Size, SizeChart, StageParams
from ..server import ServerContext
from ..utils import is_debug
from ..worker import ProgressCallback, WorkerContext
from .stage import BaseStage
from .tile import complete_tile, process_tile_grid, process_tile_order

logger = getLogger(__name__)


class UpscaleOutpaintStage(BaseStage):
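    # outpainting can handle sources of any size, so the chain pipeline does
    # not need to split them into smaller tiles before this stage runs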
    max_tile = SizeChart.unlimited

    def run(
        self,
        job: WorkerContext,
        server: ServerContext,
        stage: StageParams,
        params: ImageParams,
        sources: List[Image.Image],
        tile_mask: Image.Image,
        *,
        border: Border,
        stage_source: Optional[Image.Image] = None,
        stage_mask: Optional[Image.Image] = None,
        fill_color: str = "white",
        mask_filter: Callable = mask_filter_none,
        noise_source: Callable = noise_source_histogram,
        callback: Optional[ProgressCallback] = None,
        **kwargs,
    ) -> List[Image.Image]:
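        # split out any LoRA and Textual Inversion tokens along with the
        # positive and negative prompts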
        prompt_pairs, loras, inversions, (prompt, negative_prompt) = parse_prompt(
            params
        )
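
        # pick a pipeline type that supports inpainting and load it onto the
        # worker's assigned device, with the prompt's extra networks attached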
        pipe_type = params.get_valid_pipeline("inpaint", params.pipeline)
        pipe = load_pipeline(
            server,
            params,
            pipe_type,
            job.get_device(),
            inversions=inversions,
            loras=loras,
        )

        outputs = []
        for source in sources:
            # save the tile inputs for debugging
            if is_debug():
                save_image(server, "tile-source.png", source)
                save_image(server, "tile-mask.png", tile_mask)

            # if the tile mask is all black, skip processing this tile
            if not tile_mask.getbbox():
                outputs.append(source)
                continue

            source_width, source_height = source.size
            source_size = Size(source_width, source_height)
            tile_size = params.tiles
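
            # generate seeded latent noise sized to one full tile when the
            # source is larger than a single tile, otherwise sized to the source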
            if max(source_size.width, source_size.height) > tile_size:
                latent_size = Size(tile_size, tile_size)
                latents = get_latents_from_seed(params.seed, latent_size)
                pipe_width = pipe_height = tile_size
            else:
                latent_size = Size(source_size.width, source_size.height)
                latents = get_latents_from_seed(params.seed, latent_size)
                pipe_width = source_size.width
                pipe_height = source_size.height
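
            # the long-prompt-weighting pipeline has a dedicated inpaint method;
            # other pipelines are called directly in the else branch below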
            if params.lpw():
                logger.debug("using LPW pipeline for inpaint")
                rng = torch.manual_seed(params.seed)
                result = pipe.inpaint(
                    source,
                    tile_mask,
                    prompt,
                    negative_prompt=negative_prompt,
                    height=pipe_height,
                    width=pipe_width,
                    num_inference_steps=params.steps,
                    guidance_scale=params.cfg,
                    generator=rng,
                    latents=latents,
                    callback=callback,
                )
            else:
                # encode and record alternative prompts outside of LPW
                prompt_embeds = encode_prompt(
                    pipe, prompt_pairs, params.batch, params.do_cfg()
                )
                pipe.unet.set_prompts(prompt_embeds)
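
                # ONNX pipelines draw noise from a numpy RandomState rather
                # than a torch generator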
                rng = np.random.RandomState(params.seed)
                result = pipe(
                    prompt,
                    source,
                    tile_mask,
                    negative_prompt=negative_prompt,
                    height=pipe_height,
                    width=pipe_width,
                    num_inference_steps=params.steps,
                    guidance_scale=params.cfg,
                    generator=rng,
                    latents=latents,
                    callback=callback,
                )

            outputs.extend(result.images)

        return outputs
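

# A minimal sketch of how this stage might be invoked from a chain pipeline,
# assuming the contexts and parameters have already been constructed; the
# names below are illustrative, not part of this module:
#
#   stage = UpscaleOutpaintStage()
#   results = stage.run(
#       job, server, stage_params, image_params, [source_image], mask_image,
#       border=border,  # a Border giving the pixels to expand on each edge
#   )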