# onnx-web/api/onnx_web/diffusers/run.py

from logging import getLogger
from typing import Any, List, Optional, Tuple

import numpy as np
import torch
from PIL import Image

from ..chain import blend_mask, upscale_outpaint
from ..chain.base import ChainProgress
from ..chain.utils import process_tile_order
from ..output import save_image, save_params
from ..params import (
    Border,
    HighresParams,
    ImageParams,
    Size,
    StageParams,
    TileOrder,
    UpscaleParams,
)
from ..server import ServerContext
from ..server.load import get_source_filters
from ..utils import run_gc
from ..worker import WorkerContext
from ..worker.context import ProgressCallback
from .load import get_latents_from_seed, load_pipeline
from .upscale import run_upscale_correction
from .utils import get_inversions_from_prompt, get_loras_from_prompt

logger = getLogger(__name__)


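# Split LoRA and Textual Inversion tokens out of the positive and negative
# prompts, storing the cleaned prompts back onto the params object.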
def parse_prompt(
    params: ImageParams,
) -> Tuple[List[Tuple[str, float]], List[Tuple[str, float]]]:
    prompt, loras = get_loras_from_prompt(params.input_prompt)
    prompt, inversions = get_inversions_from_prompt(prompt)
    params.prompt = prompt

    if params.input_negative_prompt is not None:
        neg_prompt, neg_loras = get_loras_from_prompt(params.input_negative_prompt)
        neg_prompt, neg_inversions = get_inversions_from_prompt(neg_prompt)
        params.negative_prompt = neg_prompt

        # TODO: check whether these need to be * -1
        loras.extend(neg_loras)
        inversions.extend(neg_inversions)

    return loras, inversions


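# Feed the image back through img2img `params.loopback` times, reusing the
# provided pipeline when one is passed in.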
def run_loopback(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    strength: float,
    image: Image.Image,
    progress: ProgressCallback,
    inversions: List[Tuple[str, float]],
    loras: List[Tuple[str, float]],
    pipeline: Optional[Any] = None,
) -> Image.Image:
    if params.loopback == 0:
        return image

    loopback_progress = ChainProgress.from_progress(progress)

    # load img2img pipeline once
    pipe_type = "lpw" if params.lpw() else "img2img"
    pipe = pipeline or load_pipeline(
        server,
        pipe_type,
        params.model,
        params.scheduler,
        job.get_device(),
        inversions=inversions,
        loras=loras,
    )

    def loopback_iteration(source: Image.Image):
        if params.lpw():
            logger.debug("using LPW pipeline for loopback")
            rng = torch.manual_seed(params.seed)
            result = pipe.img2img(
                source,
                params.prompt,
                generator=rng,
                guidance_scale=params.cfg,
                negative_prompt=params.negative_prompt,
                num_images_per_prompt=1,
                num_inference_steps=params.steps,
                strength=strength,
                eta=params.eta,
                callback=loopback_progress,
            )
            return result.images[0]
        else:
            logger.debug("using img2img pipeline for loopback")
            rng = np.random.RandomState(params.seed)
            result = pipe(
                params.prompt,
                source,
                generator=rng,
                guidance_scale=params.cfg,
                negative_prompt=params.negative_prompt,
                num_images_per_prompt=1,
                num_inference_steps=params.steps,
                strength=strength,
                eta=params.eta,
                callback=loopback_progress,
            )
            return result.images[0]

    for _i in range(params.loopback):
        image = loopback_iteration(image)

    return image


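# Upscale the image and refine it tile-by-tile with img2img, repeating for
# the configured number of highres iterations.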
def run_highres(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    upscale: UpscaleParams,
    highres: HighresParams,
    image: Image.Image,
    progress: ProgressCallback,
    inversions: List[Tuple[str, float]],
    loras: List[Tuple[str, float]],
    pipeline: Optional[Any] = None,
) -> Image.Image:
    if highres.scale <= 1:
        return image

    highres_progress = ChainProgress.from_progress(progress)

    if upscale.faces and (
        upscale.upscale_order == "correction-both"
        or upscale.upscale_order == "correction-first"
    ):
        image = run_upscale_correction(
            job,
            server,
            StageParams(),
            params,
            image,
            upscale=upscale.with_args(
                scale=1,
                outscale=1,
            ),
            callback=highres_progress,
        )

    # load img2img pipeline once
    pipe_type = "lpw" if params.lpw() else "img2img"
    highres_pipe = pipeline or load_pipeline(
        server,
        pipe_type,
        params.model,
        params.scheduler,
        job.get_device(),
        inversions=inversions,
        loras=loras,
    )

    def highres_tile(tile: Image.Image, dims):
        if highres.method == "bilinear":
            logger.debug("using bilinear interpolation for highres")
            tile = tile.resize(
                (size.height, size.width), resample=Image.Resampling.BILINEAR
            )
        elif highres.method == "lanczos":
            logger.debug("using Lanczos interpolation for highres")
            tile = tile.resize(
                (size.height, size.width), resample=Image.Resampling.LANCZOS
            )
        else:
            logger.debug("using upscaling pipeline for highres")
            tile = run_upscale_correction(
                job,
                server,
                StageParams(),
                params,
                tile,
                upscale=upscale.with_args(
                    faces=False,
                    scale=highres.scale,
                    outscale=highres.scale,
                ),
                callback=highres_progress,
            )

        if params.lpw():
            logger.debug("using LPW pipeline for highres")
            rng = torch.manual_seed(params.seed)
            result = highres_pipe.img2img(
                tile,
                params.prompt,
                generator=rng,
                guidance_scale=params.cfg,
                negative_prompt=params.negative_prompt,
                num_images_per_prompt=1,
                num_inference_steps=highres.steps,
                strength=highres.strength,
                eta=params.eta,
                callback=highres_progress,
            )
            return result.images[0]
        else:
            logger.debug("using img2img pipeline for highres")
            rng = np.random.RandomState(params.seed)
            result = highres_pipe(
                params.prompt,
                tile,
                generator=rng,
                guidance_scale=params.cfg,
                negative_prompt=params.negative_prompt,
                num_images_per_prompt=1,
                num_inference_steps=highres.steps,
                strength=highres.strength,
                eta=params.eta,
                callback=highres_progress,
            )
            return result.images[0]

    logger.info(
        "running highres fix for %s iterations at %s scale",
        highres.iterations,
        highres.scale,
    )

    for _i in range(highres.iterations):
        image = process_tile_order(
            TileOrder.grid,
            image,
            size.height // highres.scale,
            highres.scale,
            [highres_tile],
            overlap=0,
        )

    return image


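# Generate a batch of images from a text prompt, then run highres and
# upscaling/correction on each one before saving.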
def run_txt2img_pipeline(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    outputs: List[str],
    upscale: UpscaleParams,
    highres: HighresParams,
) -> None:
    latents = get_latents_from_seed(params.seed, size, batch=params.batch)

    loras, inversions = parse_prompt(params)

    pipe_type = "lpw" if params.lpw() else "txt2img"
    pipe = load_pipeline(
        server,
        pipe_type,
        params.model,
        params.scheduler,
        job.get_device(),
        inversions=inversions,
        loras=loras,
    )
    progress = job.get_progress_callback()

    if params.lpw():
        logger.debug("using LPW pipeline for txt2img")
        rng = torch.manual_seed(params.seed)
        result = pipe.text2img(
            params.prompt,
            height=size.height,
            width=size.width,
            generator=rng,
            guidance_scale=params.cfg,
            latents=latents,
            negative_prompt=params.negative_prompt,
            num_images_per_prompt=params.batch,
            num_inference_steps=params.steps,
            eta=params.eta,
            callback=progress,
        )
    else:
        rng = np.random.RandomState(params.seed)
        result = pipe(
            params.prompt,
            height=size.height,
            width=size.width,
            generator=rng,
            guidance_scale=params.cfg,
            latents=latents,
            negative_prompt=params.negative_prompt,
            num_images_per_prompt=params.batch,
            num_inference_steps=params.steps,
            eta=params.eta,
            callback=progress,
        )

    image_outputs = list(zip(result.images, outputs))
    del result
    del pipe

    for image, output in image_outputs:
        image = run_highres(
            job,
            server,
            params,
            size,
            upscale,
            highres,
            image,
            progress,
            inversions,
            loras,
        )

        image = run_upscale_correction(
            job,
            server,
            StageParams(),
            params,
            image,
            upscale=upscale,
            callback=progress,
        )

        dest = save_image(server, output, image)
        save_params(server, output, params, size, upscale=upscale, highres=highres)

    run_gc([job.get_device()])

    logger.info("finished txt2img job: %s", dest)


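# Transform a source image with the selected pipeline (img2img, controlnet,
# or pix2pix), then run loopback, highres, and upscaling on each result.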
def run_img2img_pipeline(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    outputs: List[str],
    upscale: UpscaleParams,
    highres: HighresParams,
    source: Image.Image,
    strength: float,
    source_filter: Optional[str] = None,
) -> None:
    loras, inversions = parse_prompt(params)

    # filter the source image
    if source_filter is not None:
        f = get_source_filters().get(source_filter, None)
        if f is not None:
            logger.debug("running source filter: %s", f.__name__)
            source = f(server, source)

    pipe = load_pipeline(
        server,
        params.pipeline,  # this is one of the only places this can actually vary between different pipelines
        params.model,
        params.scheduler,
        job.get_device(),
        control=params.control,
        inversions=inversions,
        loras=loras,
    )

    pipe_params = {}
    if params.pipeline == "controlnet":
        pipe_params["controlnet_conditioning_scale"] = strength
    elif params.pipeline == "img2img":
        pipe_params["strength"] = strength
    elif params.pipeline == "pix2pix":
        pipe_params["image_guidance_scale"] = strength

    progress = job.get_progress_callback()
    if params.lpw():
        logger.debug("using LPW pipeline for img2img")
        rng = torch.manual_seed(params.seed)
        result = pipe.img2img(
            source,
            params.prompt,
            generator=rng,
            guidance_scale=params.cfg,
            negative_prompt=params.negative_prompt,
            num_images_per_prompt=params.batch,
            num_inference_steps=params.steps,
            eta=params.eta,
            callback=progress,
            **pipe_params,
        )
    else:
        rng = np.random.RandomState(params.seed)
        result = pipe(
            params.prompt,
            source,
            generator=rng,
            guidance_scale=params.cfg,
            negative_prompt=params.negative_prompt,
            num_images_per_prompt=params.batch,
            num_inference_steps=params.steps,
            eta=params.eta,
            callback=progress,
            **pipe_params,
        )

    images = result.images
    if source_filter is not None and source_filter != "none":
        images.append(source)

    for image, output in zip(images, outputs):
        image = run_loopback(
            job,
            server,
            params,
            strength,
            image,
            progress,
            inversions,
            loras,
            pipeline=pipe,
        )

        image = run_highres(
            job,
            server,
            params,
            Size(source.width, source.height),
            upscale,
            highres,
            image,
            progress,
            inversions,
            loras,
            pipeline=pipe,
        )

        image = run_upscale_correction(
            job,
            server,
            StageParams(),
            params,
            image,
            upscale=upscale,
            callback=progress,
        )

        dest = save_image(server, output, image)
        size = Size(*source.size)
        save_params(server, output, params, size, upscale=upscale)

    run_gc([job.get_device()])

    logger.info("finished img2img job: %s", dest)


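# Outpaint or inpaint the masked region of the source image, then run
# highres and upscaling/correction on the result.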
def run_inpaint_pipeline(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    outputs: List[str],
    upscale: UpscaleParams,
    highres: HighresParams,
    source: Image.Image,
    mask: Image.Image,
    border: Border,
    noise_source: Any,
    mask_filter: Any,
    fill_color: str,
    tile_order: str,
) -> None:
    progress = job.get_progress_callback()
    stage = StageParams(tile_order=tile_order)

    loras, inversions = parse_prompt(params)

    # calling the upscale_outpaint stage directly needs accumulating progress
    progress = ChainProgress.from_progress(progress)

    logger.debug("applying mask filter and generating noise source")
    image = upscale_outpaint(
        job,
        server,
        stage,
        params,
        source,
        border=border,
        stage_mask=mask,
        fill_color=fill_color,
        mask_filter=mask_filter,
        noise_source=noise_source,
        callback=progress,
    )

    image = run_highres(
        job,
        server,
        params,
        size,
        upscale,
        highres,
        image,
        progress,
        inversions,
        loras,
    )

    image = run_upscale_correction(
        job,
        server,
        stage,
        params,
        image,
        upscale=upscale,
        callback=progress,
    )

    dest = save_image(server, outputs[0], image)
    save_params(server, outputs[0], params, size, upscale=upscale, border=border)

    del image
    run_gc([job.get_device()])

    logger.info("finished inpaint job: %s", dest)


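# Upscale and correct an existing image, then apply the highres pass to the
# upscaled result.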
def run_upscale_pipeline(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    outputs: List[str],
    upscale: UpscaleParams,
    highres: HighresParams,
    source: Image.Image,
) -> None:
    progress = job.get_progress_callback()
    stage = StageParams()

    loras, inversions = parse_prompt(params)

    image = run_upscale_correction(
        job, server, stage, params, source, upscale=upscale, callback=progress
    )

    # TODO: should this come first?
    image = run_highres(
        job,
        server,
        params,
        size,
        upscale,
        highres,
        image,
        progress,
        inversions,
        loras,
    )

    dest = save_image(server, outputs[0], image)
    save_params(server, outputs[0], params, size, upscale=upscale)

    del image
    run_gc([job.get_device()])

    logger.info("finished upscale job: %s", dest)


def run_blend_pipeline(
    job: WorkerContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    outputs: List[str],
    upscale: UpscaleParams,
    highres: HighresParams,
    sources: List[Image.Image],
    mask: Image.Image,
) -> None:
    progress = job.get_progress_callback()
    stage = StageParams()

    image = blend_mask(
        job,
        server,
        stage,
        params,
        sources=sources,
        stage_mask=mask,
        callback=progress,
    )
    image = image.convert("RGB")

    # TODO: blend tab doesn't have a prompt
    image = run_highres(
        job,
        server,
        params,
        size,
        upscale,
        highres,
        image,
        progress,
        [],
        [],
    )

    image = run_upscale_correction(
        job, server, stage, params, image, upscale=upscale, callback=progress
    )

    dest = save_image(server, outputs[0], image)
    save_params(server, outputs[0], params, size, upscale=upscale)

    del image
    run_gc([job.get_device()])

    logger.info("finished blend job: %s", dest)