onnx-web/api/onnx_web/chain/upscale_stable_diffusion.py

from logging import getLogger
from os import path
from typing import Optional

import torch
from PIL import Image

from ..diffusers.load import load_pipeline
from ..diffusers.utils import encode_prompt, parse_prompt
from ..params import ImageParams, StageParams, UpscaleParams
from ..server import ServerContext
from ..worker import ProgressCallback, WorkerContext

logger = getLogger(__name__)


def upscale_stable_diffusion(
    job: WorkerContext,
    server: ServerContext,
    _stage: StageParams,
    params: ImageParams,
    source: Image.Image,
    *,
    upscale: UpscaleParams,
    stage_source: Optional[Image.Image] = None,
    callback: Optional[ProgressCallback] = None,
    **kwargs,
) -> Image.Image:
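    """
    Upscale the source image with the Stable Diffusion upscale pipeline.

    If a stage_source image is provided, it takes precedence over source.
    Extra keyword arguments are merged into the image and upscale params.
    """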
    params = params.with_args(**kwargs)
    upscale = upscale.with_args(**kwargs)
    source = stage_source or source

    logger.info(
        "upscaling with Stable Diffusion, %s steps: %s", params.steps, params.prompt
    )

    prompt_pairs, _loras, _inversions = parse_prompt(params)

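    # load the upscaling pipeline for the selected model on this job's device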
    pipeline = load_pipeline(
        server,
        params,
        "upscale",
        job.get_device(),
        model=path.join(server.model_path, upscale.upscale_model),
    )
    generator = torch.manual_seed(params.seed)

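    # encode the parsed prompt pairs and attach the embeddings to the UNet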
    prompt_embeds = encode_prompt(
        pipeline,
        prompt_pairs,
        num_images_per_prompt=params.batch,
        do_classifier_free_guidance=params.do_cfg(),
    )
    pipeline.unet.set_prompts(prompt_embeds)

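    # run the pipeline and return the first upscaled image from the batch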
    return pipeline(
        params.prompt,
        source,
        generator=generator,
        guidance_scale=params.cfg,
        negative_prompt=params.negative_prompt,
        num_inference_steps=params.steps,
        eta=params.eta,
        noise_level=upscale.denoise,
        callback=callback,
    ).images[0]
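

# minimal usage sketch, assuming the chain worker supplies job, server, stage,
# params, upscale, and a PIL source image before calling this stage:
#
# upscaled = upscale_stable_diffusion(
#     job,
#     server,
#     stage,
#     params,
#     source,
#     upscale=upscale,
# )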