2023-01-28 23:09:19 +00:00
|
|
|
from logging import getLogger
|
2023-04-24 23:23:56 +00:00
|
|
|
from os import path
|
2023-03-01 04:04:55 +00:00
|
|
|
from typing import Optional
|
2023-02-05 13:53:26 +00:00
|
|
|
|
|
|
|
import torch
|
2023-01-28 05:28:14 +00:00
|
|
|
from PIL import Image
|
|
|
|
|
2023-04-23 22:33:13 +00:00
|
|
|
from ..diffusers.load import load_pipeline
|
2023-04-23 22:16:46 +00:00
|
|
|
from ..diffusers.utils import encode_prompt, parse_prompt
|
2023-04-23 22:33:13 +00:00
|
|
|
from ..params import ImageParams, StageParams, UpscaleParams
|
2023-02-26 05:49:39 +00:00
|
|
|
from ..server import ServerContext
|
2023-02-26 20:15:30 +00:00
|
|
|
from ..worker import ProgressCallback, WorkerContext
|
2023-01-28 05:28:14 +00:00
|
|
|
|
2023-01-28 23:09:19 +00:00
|
|
|
# Module-level logger, named after this module per stdlib logging convention.
logger = getLogger(__name__)
|
|
|
|
|
2023-01-28 05:28:14 +00:00
|
|
|
|
|
|
|
def upscale_stable_diffusion(
    job: WorkerContext,
    server: ServerContext,
    _stage: StageParams,
    params: ImageParams,
    source: Image.Image,
    *,
    upscale: UpscaleParams,
    stage_source: Optional[Image.Image] = None,
    callback: Optional[ProgressCallback] = None,
    **kwargs,
) -> Image.Image:
    """Upscale an image using the Stable Diffusion upscaling pipeline.

    Loads the upscale pipeline for the worker's device, encodes the prompt
    from ``params``, and runs the pipeline over ``source`` (or
    ``stage_source`` when provided), returning the first output image.

    Args:
        job: worker context providing the execution device.
        server: server context providing the model path.
        _stage: stage parameters (unused by this stage).
        params: image generation parameters (prompt, steps, cfg, seed, ...).
        source: input image to upscale.
        upscale: upscale parameters (model name, denoise/noise level).
        stage_source: optional override for ``source``.
        callback: optional progress callback forwarded to the pipeline.
        **kwargs: extra overrides folded into ``params`` and ``upscale``.

    Returns:
        The upscaled PIL image.
    """
    # Fold any per-stage keyword overrides into the parameter objects.
    params = params.with_args(**kwargs)
    upscale = upscale.with_args(**kwargs)

    # A stage-specific source, when given, takes precedence.
    if stage_source:
        source = stage_source

    logger.info(
        "upscaling with Stable Diffusion, %s steps: %s", params.steps, params.prompt
    )

    # Split the prompt into token pairs; LoRAs/inversions are handled elsewhere.
    prompt_pairs, _loras, _inversions = parse_prompt(params)

    upscale_model = path.join(server.model_path, upscale.upscale_model)
    pipeline = load_pipeline(
        server,
        params,
        "upscale",
        job.get_device(),
        model=upscale_model,
    )

    # Seed the RNG so results are reproducible for a given params.seed.
    rng = torch.manual_seed(params.seed)

    # Pre-compute prompt embeddings and hand them to the wrapped UNet.
    embeds = encode_prompt(
        pipeline,
        prompt_pairs,
        num_images_per_prompt=params.batch,
        do_classifier_free_guidance=params.do_cfg(),
    )
    pipeline.unet.set_prompts(embeds)

    result = pipeline(
        params.prompt,
        source,
        generator=rng,
        guidance_scale=params.cfg,
        negative_prompt=params.negative_prompt,
        num_inference_steps=params.steps,
        eta=params.eta,
        noise_level=upscale.denoise,
        callback=callback,
    )
    return result.images[0]
|