1
0
Fork 0
onnx-web/api/onnx_web/diffusion/run.py

281 lines
7.2 KiB
Python
Raw Normal View History

2023-01-28 23:09:19 +00:00
from logging import getLogger
from typing import Any, List
2023-01-16 00:46:00 +00:00
import numpy as np
2023-02-05 23:55:04 +00:00
import torch
2023-02-05 13:53:26 +00:00
from diffusers import OnnxStableDiffusionImg2ImgPipeline, OnnxStableDiffusionPipeline
2023-02-16 03:01:25 +00:00
from PIL import Image
2023-02-05 13:53:26 +00:00
from onnx_web.chain import blend_mask
from onnx_web.chain.base import ChainProgress
2023-02-05 13:53:26 +00:00
from ..chain import upscale_outpaint
from ..output import save_image, save_params
2023-02-19 02:28:21 +00:00
from ..params import Border, ImageParams, Size, StageParams, UpscaleParams
from ..server import JobContext, ServerContext
from ..upscale import run_upscale_correction
from ..utils import run_gc
2023-02-05 13:53:26 +00:00
from .load import get_latents_from_seed, load_pipeline
2023-01-28 23:09:19 +00:00
logger = getLogger(__name__)
def run_txt2img_pipeline(
    job: JobContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    outputs: List[str],
    upscale: UpscaleParams,
) -> None:
    """
    Generate one or more images from a text prompt, run upscaling/correction
    on each, and save both the images and their generation parameters.

    Args:
        job: worker context providing the device and progress callback.
        server: server context used for loading models and saving output.
        params: prompt, model, scheduler, and sampling parameters.
        size: output image dimensions.
        outputs: destination names, one per image in the batch.
        upscale: upscaling/correction parameters.
    """
    latents = get_latents_from_seed(params.seed, size, batch=params.batch)
    pipe = load_pipeline(
        server,
        OnnxStableDiffusionPipeline,
        params.model,
        params.scheduler,
        job.get_device(),
        params.lpw,
        params.inversion,
    )
    progress = job.get_progress_callback()

    # both branches take identical kwargs; only the entry point and the RNG
    # type differ, so build the shared arguments once to avoid drift
    pipe_kwargs = dict(
        height=size.height,
        width=size.width,
        guidance_scale=params.cfg,
        latents=latents,
        negative_prompt=params.negative_prompt,
        num_images_per_prompt=params.batch,
        num_inference_steps=params.steps,
        eta=params.eta,
        callback=progress,
    )

    if params.lpw:
        logger.debug("using LPW pipeline for txt2img")
        # the LPW pipeline expects a torch generator rather than a numpy RNG
        rng = torch.manual_seed(params.seed)
        result = pipe.text2img(params.prompt, generator=rng, **pipe_kwargs)
    else:
        rng = np.random.RandomState(params.seed)
        result = pipe(params.prompt, generator=rng, **pipe_kwargs)

    dests = []
    for image, output in zip(result.images, outputs):
        image = run_upscale_correction(
            job,
            server,
            StageParams(),
            params,
            image,
            upscale=upscale,
            callback=progress,
        )
        dests.append(save_image(server, output, image))
        save_params(server, output, params, size, upscale=upscale)

    del pipe
    del result
    run_gc([job.get_device()])

    # log every saved destination; the previous version logged only the last
    # one and raised UnboundLocalError when no images were produced
    logger.info("finished txt2img job: %s", dests)
2023-01-16 00:54:20 +00:00
def run_img2img_pipeline(
    job: JobContext,
    server: ServerContext,
    params: ImageParams,
    outputs: List[str],
    upscale: UpscaleParams,
    source: Image.Image,
    strength: float,
) -> None:
    """
    Transform a source image using a text prompt, run upscaling/correction on
    each result, and save both the images and their generation parameters.

    Args:
        job: worker context providing the device and progress callback.
        server: server context used for loading models and saving output.
        params: prompt, model, scheduler, and sampling parameters.
        outputs: destination names, one per image in the batch.
        upscale: upscaling/correction parameters.
        source: input image; its size is recorded in the saved parameters.
        strength: img2img denoising strength (0..1).
    """
    pipe = load_pipeline(
        server,
        OnnxStableDiffusionImg2ImgPipeline,
        params.model,
        params.scheduler,
        job.get_device(),
        params.lpw,
        params.inversion,
    )
    progress = job.get_progress_callback()

    # both branches take identical kwargs; only the entry point, argument
    # order, and RNG type differ, so build the shared arguments once
    pipe_kwargs = dict(
        guidance_scale=params.cfg,
        negative_prompt=params.negative_prompt,
        num_images_per_prompt=params.batch,
        num_inference_steps=params.steps,
        strength=strength,
        eta=params.eta,
        callback=progress,
    )

    if params.lpw:
        logger.debug("using LPW pipeline for img2img")
        # the LPW pipeline expects a torch generator rather than a numpy RNG
        rng = torch.manual_seed(params.seed)
        result = pipe.img2img(source, params.prompt, generator=rng, **pipe_kwargs)
    else:
        rng = np.random.RandomState(params.seed)
        result = pipe(params.prompt, source, generator=rng, **pipe_kwargs)

    size = Size(*source.size)
    dests = []
    for image, output in zip(result.images, outputs):
        image = run_upscale_correction(
            job,
            server,
            StageParams(),
            params,
            image,
            upscale=upscale,
            callback=progress,
        )
        dests.append(save_image(server, output, image))
        save_params(server, output, params, size, upscale=upscale)

    del pipe
    del result
    run_gc([job.get_device()])

    # log every saved destination; the previous version logged only the last
    # one and raised UnboundLocalError when no images were produced
    logger.info("finished img2img job: %s", dests)
2023-01-16 00:54:20 +00:00
def run_inpaint_pipeline(
    job: JobContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    outputs: List[str],
    upscale: UpscaleParams,
    source: Image.Image,
    mask: Image.Image,
    border: Border,
    noise_source: Any,
    mask_filter: Any,
    fill_color: str,
    tile_order: str,
) -> None:
    """
    Outpaint/inpaint a source image against a mask, run upscaling/correction
    on the result, and save the image with its generation parameters.

    Args:
        job: worker context providing the device and progress callback.
        server: server context used for loading models and saving output.
        params: prompt, model, scheduler, and sampling parameters.
        size: output size recorded alongside the saved parameters.
        outputs: destination names; the first entry is used.
        upscale: upscaling/correction parameters.
        source: image to inpaint.
        mask: mask selecting the region to regenerate.
        border: expansion border for outpainting.
        noise_source: callable producing fill noise for the masked region.
        mask_filter: callable applied to the mask before use.
        fill_color: base color for the filled region.
        tile_order: order in which tiles are processed by the stage.
    """
    # calling the upscale_outpaint stage directly needs accumulating progress
    progress = ChainProgress.from_progress(job.get_progress_callback())
    stage = StageParams(tile_order=tile_order)

    logger.debug("applying mask filter and generating noise source")
    result = upscale_outpaint(
        job,
        server,
        stage,
        params,
        source,
        border=border,
        stage_mask=mask,
        fill_color=fill_color,
        mask_filter=mask_filter,
        noise_source=noise_source,
        callback=progress,
    )
    result = run_upscale_correction(
        job, server, stage, params, result, upscale=upscale, callback=progress
    )

    dest = save_image(server, outputs[0], result)
    save_params(server, outputs[0], params, size, upscale=upscale, border=border)

    del result
    run_gc([job.get_device()])

    logger.info("finished inpaint job: %s", dest)
2023-01-17 05:45:54 +00:00
2023-01-17 05:45:54 +00:00
def run_upscale_pipeline(
    job: JobContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    outputs: List[str],
    upscale: UpscaleParams,
    source: Image.Image,
) -> None:
    """
    Run upscaling/correction on a source image and save the result with its
    generation parameters.

    Args:
        job: worker context providing the device and progress callback.
        server: server context used for loading models and saving output.
        params: generation parameters recorded with the output.
        size: output size recorded alongside the saved parameters.
        outputs: destination names; the first entry is used.
        upscale: upscaling/correction parameters.
        source: image to upscale.
    """
    progress = job.get_progress_callback()
    result = run_upscale_correction(
        job,
        server,
        StageParams(),
        params,
        source,
        upscale=upscale,
        callback=progress,
    )

    dest = save_image(server, outputs[0], result)
    save_params(server, outputs[0], params, size, upscale=upscale)

    del result
    run_gc([job.get_device()])

    logger.info("finished upscale job: %s", dest)
def run_blend_pipeline(
    job: JobContext,
    server: ServerContext,
    params: ImageParams,
    size: Size,
    outputs: List[str],
    upscale: UpscaleParams,
    sources: List[Image.Image],
    mask: Image.Image,
) -> None:
    """
    Blend multiple source images together using a mask, run
    upscaling/correction on the result, and save it with its parameters.

    Args:
        job: worker context providing the device and progress callback.
        server: server context used for loading models and saving output.
        params: generation parameters recorded with the output.
        size: output size recorded alongside the saved parameters.
        outputs: destination names; the first entry is used.
        upscale: upscaling/correction parameters.
        sources: images to blend.
        mask: mask controlling the blend.
    """
    progress = job.get_progress_callback()
    stage = StageParams()

    blended = blend_mask(
        job,
        server,
        stage,
        params,
        sources=sources,
        stage_mask=mask,
        callback=progress,
    )
    # blend_mask may return an image with alpha; downstream stages expect RGB
    blended = blended.convert("RGB")
    blended = run_upscale_correction(
        job, server, stage, params, blended, upscale=upscale, callback=progress
    )

    dest = save_image(server, outputs[0], blended)
    save_params(server, outputs[0], params, size, upscale=upscale)

    del blended
    run_gc([job.get_device()])

    logger.info("finished blend job: %s", dest)