2023-01-28 23:09:19 +00:00
|
|
|
from logging import getLogger
|
2023-04-14 13:54:21 +00:00
|
|
|
from typing import Any, List, Optional, Tuple
|
2023-01-16 00:46:00 +00:00
|
|
|
|
2023-02-05 23:24:08 +00:00
|
|
|
import numpy as np
|
2023-02-05 23:55:04 +00:00
|
|
|
import torch
|
2023-02-16 03:01:25 +00:00
|
|
|
from PIL import Image
|
2023-02-05 13:53:26 +00:00
|
|
|
|
2023-02-27 02:09:42 +00:00
|
|
|
from ..chain import blend_mask, upscale_outpaint
|
2023-04-12 04:06:32 +00:00
|
|
|
from ..chain.utils import process_tile_order
|
2023-02-05 13:53:26 +00:00
|
|
|
from ..output import save_image, save_params
|
2023-04-01 17:06:31 +00:00
|
|
|
from ..params import (
|
|
|
|
Border,
|
|
|
|
HighresParams,
|
|
|
|
ImageParams,
|
|
|
|
Size,
|
|
|
|
StageParams,
|
|
|
|
TileOrder,
|
|
|
|
UpscaleParams,
|
|
|
|
)
|
2023-02-26 05:49:39 +00:00
|
|
|
from ..server import ServerContext
|
2023-04-14 01:06:33 +00:00
|
|
|
from ..server.load import get_source_filters
|
2023-04-24 22:40:50 +00:00
|
|
|
from ..utils import run_gc, show_system_toast
|
2023-02-26 20:15:30 +00:00
|
|
|
from ..worker import WorkerContext
|
2023-04-14 13:54:21 +00:00
|
|
|
from ..worker.context import ProgressCallback
|
2023-04-23 20:03:11 +00:00
|
|
|
from .load import load_pipeline
|
2023-04-01 16:26:10 +00:00
|
|
|
from .upscale import run_upscale_correction
|
2023-04-23 20:18:51 +00:00
|
|
|
from .utils import encode_prompt, get_latents_from_seed, parse_prompt
|
2023-01-28 05:28:14 +00:00
|
|
|
|
2023-01-28 23:09:19 +00:00
|
|
|
logger = getLogger(__name__)
|
|
|
|
|
2023-02-02 03:20:48 +00:00
|
|
|
|
2023-04-22 15:39:23 +00:00
|
|
|
def run_loopback(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    strength: float,
    image: Image.Image,
    progress: ProgressCallback,
    inversions: List[Tuple[str, float]],
    loras: List[Tuple[str, float]],
    pipeline: Optional[Any] = None,
) -> Image.Image:
    """
    Feed the image back through an img2img pipeline `params.loopback` times.

    Each pass runs at the given `strength` and the result of one pass becomes
    the source of the next. When `pipeline` is provided it is reused as-is;
    otherwise an img2img pipeline is loaded once for the job's device.
    Returns the image unchanged when no loopback iterations were requested.
    """
    if params.loopback == 0:
        return image

    # resolve and load the img2img pipeline once, reused by every iteration
    pipe_type = params.get_valid_pipeline("img2img")
    if pipe_type == "controlnet":
        logger.debug(
            "controlnet pipeline cannot be used for loopback, switching to img2img"
        )
        pipe_type = "img2img"

    logger.debug("using %s pipeline for loopback", pipe_type)

    pipe = pipeline or load_pipeline(
        server,
        params,
        pipe_type,
        job.get_device(),
        inversions=inversions,
        loras=loras,
    )

    def run_once(current: Image.Image) -> Image.Image:
        # one img2img pass; both pipeline flavors share these kwargs
        shared = dict(
            guidance_scale=params.cfg,
            negative_prompt=params.negative_prompt,
            num_images_per_prompt=1,
            num_inference_steps=params.steps,
            strength=strength,
            eta=params.eta,
            callback=progress,
        )
        if pipe_type == "lpw":
            # LPW uses a torch generator and a dedicated img2img entry point
            output = pipe.img2img(
                current,
                params.prompt,
                generator=torch.manual_seed(params.seed),
                **shared,
            )
        else:
            output = pipe(
                params.prompt,
                current,
                generator=np.random.RandomState(params.seed),
                **shared,
            )

        return output.images[0]

    for _ in range(params.loopback):
        image = run_once(image)

    return image
|
|
|
|
|
|
|
|
|
2023-04-14 13:54:21 +00:00
|
|
|
def run_highres(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    upscale: UpscaleParams,
    highres: HighresParams,
    image: Image.Image,
    progress: ProgressCallback,
    inversions: List[Tuple[str, float]],
    loras: List[Tuple[str, float]],
    pipeline: Optional[Any] = None,
) -> Image.Image:
    """
    Apply the highres fix: tile the image, upscale each tile, then refine it
    with an img2img pass at `highres.strength` for `highres.steps` steps.

    Runs `highres.iterations` passes over a grid of tiles. Returns the image
    unchanged when no highres scaling was requested. When `pipeline` is
    provided it is reused; otherwise an img2img pipeline is loaded once.
    """
    if highres.scale <= 1:
        return image

    # run face correction up front when the order asks for correction first
    if upscale.faces and upscale.upscale_order in (
        "correction-both",
        "correction-first",
    ):
        image = run_upscale_correction(
            job,
            server,
            StageParams(),
            params,
            image,
            upscale=upscale.with_args(
                scale=1,
                outscale=1,
            ),
            callback=progress,
        )

    # load the img2img pipeline once, shared by all tiles and iterations
    pipe_type = params.get_valid_pipeline("img2img")
    logger.debug("using %s pipeline for highres", pipe_type)

    highres_pipe = pipeline or load_pipeline(
        server,
        params,
        pipe_type,
        job.get_device(),
        inversions=inversions,
        loras=loras,
    )

    def highres_tile(tile: Image.Image, dims):
        # first upscale the tile, with a plain filter or the upscaling stage
        if highres.method == "bilinear":
            logger.debug("using bilinear interpolation for highres")
            # NOTE(review): PIL's resize takes (width, height); this passes
            # (size.height, size.width) — harmless for square sizes, but
            # verify for non-square targets
            tile = tile.resize(
                (size.height, size.width), resample=Image.Resampling.BILINEAR
            )
        elif highres.method == "lanczos":
            logger.debug("using Lanczos interpolation for highres")
            tile = tile.resize(
                (size.height, size.width), resample=Image.Resampling.LANCZOS
            )
        else:
            logger.debug("using upscaling pipeline for highres")
            tile = run_upscale_correction(
                job,
                server,
                StageParams(),
                params,
                tile,
                upscale=upscale.with_args(
                    faces=False,
                    scale=highres.scale,
                    outscale=highres.scale,
                ),
                callback=progress,
            )

        # then refine the upscaled tile with img2img; both flavors share
        # these kwargs
        shared = dict(
            guidance_scale=params.cfg,
            negative_prompt=params.negative_prompt,
            num_images_per_prompt=1,
            num_inference_steps=highres.steps,
            strength=highres.strength,
            eta=params.eta,
            callback=progress,
        )
        if pipe_type == "lpw":
            result = highres_pipe.img2img(
                tile,
                params.prompt,
                generator=torch.manual_seed(params.seed),
                **shared,
            )
        else:
            result = highres_pipe(
                params.prompt,
                tile,
                generator=np.random.RandomState(params.seed),
                **shared,
            )

        return result.images[0]

    logger.info(
        "running highres fix for %s iterations at %s scale",
        highres.iterations,
        highres.scale,
    )

    for _ in range(highres.iterations):
        image = process_tile_order(
            TileOrder.grid,
            image,
            size.height // highres.scale,
            highres.scale,
            [highres_tile],
            overlap=params.overlap,
        )

    return image
|
|
|
|
|
|
|
|
|
2023-01-16 13:31:42 +00:00
|
|
|
def run_txt2img_pipeline(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    outputs: List[str],
    upscale: UpscaleParams,
    highres: HighresParams,
) -> None:
    """
    Generate a batch of images from a text prompt, then post-process each one
    (highres fix, upscaling/correction) and save the image and its params.
    """
    latents = get_latents_from_seed(params.seed, size, batch=params.batch)
    prompt_pairs, loras, inversions = parse_prompt(params)

    pipe_type = params.get_valid_pipeline("txt2img")
    logger.debug("using %s pipeline for txt2img", pipe_type)

    pipe = load_pipeline(
        server,
        params,
        pipe_type,
        job.get_device(),
        inversions=inversions,
        loras=loras,
    )
    progress = job.get_progress_callback()

    # both pipeline flavors share these kwargs
    shared = dict(
        height=size.height,
        width=size.width,
        guidance_scale=params.cfg,
        latents=latents,
        negative_prompt=params.negative_prompt,
        num_images_per_prompt=params.batch,
        num_inference_steps=params.steps,
        eta=params.eta,
        callback=progress,
    )

    if pipe_type == "lpw":
        result = pipe.text2img(
            params.prompt,
            generator=torch.manual_seed(params.seed),
            **shared,
        )
    else:
        # encode and record alternative prompts outside of LPW
        prompt_embeds = encode_prompt(
            pipe,
            prompt_pairs,
            num_images_per_prompt=params.batch,
            do_classifier_free_guidance=params.do_cfg(),
        )
        pipe.unet.set_prompts(prompt_embeds)

        result = pipe(
            params.prompt,
            generator=np.random.RandomState(params.seed),
            **shared,
        )

    # pair every result with its output path, then drop the results and the
    # pipeline so the post-processing stages have the memory to themselves
    image_outputs = list(zip(result.images, outputs))
    del result
    del pipe

    for image, output in image_outputs:
        image = run_highres(
            job,
            server,
            params,
            size,
            upscale,
            highres,
            image,
            progress,
            inversions,
            loras,
        )

        image = run_upscale_correction(
            job,
            server,
            StageParams(),
            params,
            image,
            upscale=upscale,
            callback=progress,
        )

        dest = save_image(server, output, image)
        save_params(server, output, params, size, upscale=upscale, highres=highres)

    run_gc([job.get_device()])
    show_system_toast(f"finished txt2img job: {dest}")
    logger.info("finished txt2img job: %s", dest)
|
2023-01-16 00:54:20 +00:00
|
|
|
|
|
|
|
|
2023-01-16 13:31:42 +00:00
|
|
|
def run_img2img_pipeline(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    outputs: List[str],
    upscale: UpscaleParams,
    highres: HighresParams,
    source: Image.Image,
    strength: float,
    source_filter: Optional[str] = None,
) -> None:
    """
    Run an img2img-family pipeline (img2img, controlnet, panorama, pix2pix)
    over a source image, then apply loopback, the highres fix, and
    upscaling/correction to each result before saving it.

    The `strength` value is mapped onto whichever parameter the selected
    pipeline type uses. When `source_filter` names a registered filter, it is
    applied to the source first, and the filtered source is appended to the
    saved outputs.
    """
    prompt_pairs, loras, inversions = parse_prompt(params)

    # filter the source image
    if source_filter is not None:
        f = get_source_filters().get(source_filter, None)
        if f is not None:
            logger.debug("running source filter: %s", f.__name__)
            source = f(server, source)

    pipe_type = params.get_valid_pipeline("img2img")
    pipe = load_pipeline(
        server,
        params,
        pipe_type,
        job.get_device(),
        inversions=inversions,
        loras=loras,
    )

    # each pipeline type interprets the strength parameter differently
    pipe_params = {}
    if pipe_type == "controlnet":
        pipe_params["controlnet_conditioning_scale"] = strength
    elif pipe_type == "img2img":
        pipe_params["strength"] = strength
    elif pipe_type == "panorama":
        pipe_params["strength"] = strength
    elif pipe_type == "pix2pix":
        pipe_params["image_guidance_scale"] = strength

    progress = job.get_progress_callback()
    if pipe_type == "lpw":
        logger.debug("using LPW pipeline for img2img")
        rng = torch.manual_seed(params.seed)
        result = pipe.img2img(
            source,
            params.prompt,
            generator=rng,
            guidance_scale=params.cfg,
            negative_prompt=params.negative_prompt,
            num_images_per_prompt=params.batch,
            num_inference_steps=params.steps,
            eta=params.eta,
            callback=progress,
            **pipe_params,
        )
    else:
        # encode and record alternative prompts outside of LPW
        # (keyword args here for consistency with run_txt2img_pipeline)
        prompt_embeds = encode_prompt(
            pipe,
            prompt_pairs,
            num_images_per_prompt=params.batch,
            do_classifier_free_guidance=params.do_cfg(),
        )
        pipe.unet.set_prompts(prompt_embeds)

        rng = np.random.RandomState(params.seed)
        result = pipe(
            params.prompt,
            source,
            generator=rng,
            guidance_scale=params.cfg,
            negative_prompt=params.negative_prompt,
            num_images_per_prompt=params.batch,
            num_inference_steps=params.steps,
            eta=params.eta,
            callback=progress,
            **pipe_params,
        )

    images = result.images
    if source_filter is not None and source_filter != "none":
        # save the filtered source alongside the generated images
        images.append(source)

    for image, output in zip(images, outputs):
        image = run_loopback(
            job,
            server,
            params,
            strength,
            image,
            progress,
            inversions,
            loras,
        )

        image = run_highres(
            job,
            server,
            params,
            Size(source.width, source.height),
            upscale,
            highres,
            image,
            progress,
            inversions,
            loras,
        )

        image = run_upscale_correction(
            job,
            server,
            StageParams(),
            params,
            image,
            upscale=upscale,
            callback=progress,
        )

        dest = save_image(server, output, image)
        size = Size(*source.size)
        # record the highres params too, matching run_txt2img_pipeline
        save_params(server, output, params, size, upscale=upscale, highres=highres)

    run_gc([job.get_device()])
    show_system_toast(f"finished img2img job: {dest}")
    logger.info("finished img2img job: %s", dest)
|
2023-01-16 00:54:20 +00:00
|
|
|
|
|
|
|
|
|
|
|
def run_inpaint_pipeline(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    outputs: List[str],
    upscale: UpscaleParams,
    highres: HighresParams,
    source: Image.Image,
    mask: Image.Image,
    border: Border,
    noise_source: Any,
    mask_filter: Any,
    fill_color: str,
    tile_order: str,
) -> None:
    """
    Inpaint/outpaint the masked region of a source image, then apply the
    highres fix and upscaling/correction before saving the single result.

    The mask is filtered with `mask_filter` and the area to fill is seeded
    from `noise_source`/`fill_color`; `border` extends the canvas for
    outpainting and `tile_order` controls how tiles are traversed.
    """
    progress = job.get_progress_callback()
    stage = StageParams(tile_order=tile_order)

    _prompt_pairs, loras, inversions = parse_prompt(params)

    logger.debug("applying mask filter and generating noise source")
    image = upscale_outpaint(
        job,
        server,
        stage,
        params,
        source,
        border=border,
        stage_mask=mask,
        fill_color=fill_color,
        mask_filter=mask_filter,
        noise_source=noise_source,
        callback=progress,
    )

    image = run_highres(
        job,
        server,
        params,
        size,
        upscale,
        highres,
        image,
        progress,
        inversions,
        loras,
    )

    image = run_upscale_correction(
        job,
        server,
        stage,
        params,
        image,
        upscale=upscale,
        callback=progress,
    )

    dest = save_image(server, outputs[0], image)
    # record the highres params too, matching run_txt2img_pipeline
    save_params(
        server,
        outputs[0],
        params,
        size,
        upscale=upscale,
        border=border,
        highres=highres,
    )

    # release the final image before collecting, to keep peak memory down
    del image

    run_gc([job.get_device()])
    show_system_toast(f"finished inpaint job: {dest}")
    logger.info("finished inpaint job: %s", dest)
|
2023-01-17 05:45:54 +00:00
|
|
|
|
2023-01-17 23:50:36 +00:00
|
|
|
|
2023-01-17 05:45:54 +00:00
|
|
|
def run_upscale_pipeline(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    outputs: List[str],
    upscale: UpscaleParams,
    highres: HighresParams,
    source: Image.Image,
) -> None:
    """
    Upscale and correct a source image, apply the highres fix, and save the
    single result along with its parameters.
    """
    progress = job.get_progress_callback()
    stage = StageParams()

    _prompt_pairs, loras, inversions = parse_prompt(params)

    image = run_upscale_correction(
        job, server, stage, params, source, upscale=upscale, callback=progress
    )

    # TODO: should this come first?
    image = run_highres(
        job,
        server,
        params,
        size,
        upscale,
        highres,
        image,
        progress,
        inversions,
        loras,
    )

    dest = save_image(server, outputs[0], image)
    # record the highres params too, matching run_txt2img_pipeline
    save_params(server, outputs[0], params, size, upscale=upscale, highres=highres)

    # release the final image before collecting, to keep peak memory down
    del image

    run_gc([job.get_device()])
    show_system_toast(f"finished upscale job: {dest}")
    logger.info("finished upscale job: %s", dest)
|
2023-02-13 23:34:42 +00:00
|
|
|
|
|
|
|
|
|
|
|
def run_blend_pipeline(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    outputs: List[str],
    upscale: UpscaleParams,
    # highres: HighresParams,
    sources: List[Image.Image],
    mask: Image.Image,
) -> None:
    """
    Blend the source images together under the given mask, then apply
    upscaling/correction and save the single result with its parameters.
    """
    progress = job.get_progress_callback()
    stage = StageParams()

    blended = blend_mask(
        job,
        server,
        stage,
        params,
        sources=sources,
        stage_mask=mask,
        callback=progress,
    )
    # drop any alpha channel before the upscale stages
    blended = blended.convert("RGB")

    blended = run_upscale_correction(
        job, server, stage, params, blended, upscale=upscale, callback=progress
    )

    dest = save_image(server, outputs[0], blended)
    save_params(server, outputs[0], params, size, upscale=upscale)

    # release the final image before collecting, to keep peak memory down
    del blended

    run_gc([job.get_device()])
    show_system_toast(f"finished blend job: {dest}")
    logger.info("finished blend job: %s", dest)
|