from logging import getLogger
from typing import Callable, List, Optional, Tuple

import numpy as np
import torch
from PIL import Image

from ..diffusers.load import load_pipeline
|
2023-07-10 03:19:02 +00:00
|
|
|
from ..diffusers.utils import encode_prompt, get_latents_from_seed, get_tile_latents, parse_prompt
|
2023-07-09 05:02:27 +00:00
|
|
|
from ..image import mask_filter_none, noise_source_histogram
|
2023-02-05 13:53:26 +00:00
|
|
|
from ..output import save_image
|
2023-02-12 00:10:36 +00:00
|
|
|
from ..params import Border, ImageParams, Size, SizeChart, StageParams
|
2023-02-26 05:49:39 +00:00
|
|
|
from ..server import ServerContext
|
2023-02-19 02:28:21 +00:00
|
|
|
from ..utils import is_debug
|
2023-02-26 20:15:30 +00:00
|
|
|
from ..worker import ProgressCallback, WorkerContext
|
2023-07-02 23:21:21 +00:00
|
|
|
from .stage import BaseStage
|
2023-01-28 14:19:40 +00:00
|
|
|
|
2023-01-28 23:09:19 +00:00
|
|
|
logger = getLogger(__name__)
|
|
|
|
|
2023-01-28 14:19:40 +00:00
|
|
|
|
2023-07-02 23:21:21 +00:00
|
|
|
class UpscaleOutpaintStage(BaseStage):
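    """
    Outpaint the source tiles using an inpainting pipeline, generating or
    slicing latents per tile and skipping tiles whose mask is empty.
    """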
    max_tile = SizeChart.unlimited

    def run(
        self,
        job: WorkerContext,
        server: ServerContext,
        stage: StageParams,
        params: ImageParams,
        sources: List[Image.Image],
        *,
        border: Border,
        dims: Tuple[int, int, int],
        tile_mask: Image.Image,
        fill_color: str = "white",
        mask_filter: Callable = mask_filter_none,
        noise_source: Callable = noise_source_histogram,
        latents: Optional[np.ndarray] = None,
        callback: Optional[ProgressCallback] = None,
        stage_source: Optional[Image.Image] = None,
        stage_mask: Optional[Image.Image] = None,
        **kwargs,
    ) -> List[Image.Image]:
|
2023-07-07 01:39:08 +00:00
|
|
|
prompt_pairs, loras, inversions, (prompt, negative_prompt) = parse_prompt(
|
|
|
|
params
|
|
|
|
)
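
        # select a pipeline type that supports inpainting, then load it with
        # the LoRAs and Textual Inversions parsed from the prompt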
        pipe_type = params.get_valid_pipeline("inpaint", params.pipeline)
        pipe = load_pipeline(
            server,
            params,
            pipe_type,
            job.get_device(),
            inversions=inversions,
            loras=loras,
        )
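
        # run the pipeline once for each source tile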
        outputs = []
        for source in sources:
            if is_debug():
                save_image(server, "tile-source.png", source)
                save_image(server, "tile-mask.png", tile_mask)

            # if the tile mask is all black, skip processing this tile
            if not tile_mask.getbbox():
                outputs.append(source)
                continue

            size = Size(*source.size)
            tile_size = params.tiles

            # generate new latents or slice existing
            if latents is None:
                if max(size) > tile_size:
                    latent_size = Size(tile_size, tile_size)
                    pipe_width = pipe_height = tile_size
                else:
                    latent_size = Size(size.width, size.height)
                    pipe_width = size.width
                    pipe_height = size.height

                # generate new latents
                latents = get_latents_from_seed(params.seed, latent_size, params.batch)
            else:
                # slice existing latents
                latents = get_tile_latents(latents, dims, size)
                pipe_width, pipe_height, _tile_size = dims
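
            # the LPW pipeline parses weighted prompts itself; otherwise the
            # prompt must be encoded into embeddings before calling the pipeline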
            if params.lpw():
                logger.debug("using LPW pipeline for inpaint")
                rng = torch.manual_seed(params.seed)
                result = pipe.inpaint(
                    source,
                    tile_mask,
                    prompt,
                    negative_prompt=negative_prompt,
                    height=pipe_height,
                    width=pipe_width,
                    num_inference_steps=params.steps,
                    guidance_scale=params.cfg,
                    generator=rng,
                    latents=latents,
                    callback=callback,
                )
            else:
                # encode and record alternative prompts outside of LPW
                prompt_embeds = encode_prompt(
                    pipe, prompt_pairs, params.batch, params.do_cfg()
                )
                pipe.unet.set_prompts(prompt_embeds)
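
                # the ONNX pipeline takes a numpy RandomState rather than a torch generator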
                rng = np.random.RandomState(params.seed)
                result = pipe(
                    prompt,
                    source,
                    tile_mask,
                    negative_prompt=negative_prompt,
                    height=pipe_height,
                    width=pipe_width,
                    num_inference_steps=params.steps,
                    guidance_scale=params.cfg,
                    generator=rng,
                    latents=latents,
                    callback=callback,
                )

            outputs.extend(result.images)

        return outputs