2023-01-28 05:28:14 +00:00
|
|
|
from diffusers import (
|
|
|
|
AutoencoderKL,
|
|
|
|
DDPMScheduler,
|
|
|
|
StableDiffusionUpscalePipeline,
|
|
|
|
)
|
|
|
|
from os import path
|
|
|
|
from PIL import Image
|
|
|
|
|
|
|
|
from ..onnx import (
|
|
|
|
OnnxStableDiffusionUpscalePipeline,
|
|
|
|
)
|
|
|
|
from ..params import (
|
|
|
|
ImageParams,
|
|
|
|
StageParams,
|
|
|
|
UpscaleParams,
|
|
|
|
)
|
|
|
|
from ..utils import (
|
|
|
|
ServerContext,
|
|
|
|
)
|
|
|
|
|
|
|
|
import torch
|
|
|
|
|
|
|
|
|
|
|
|
# Module-level cache holding the most recently constructed upscale pipeline,
# so repeated requests with the same model do not reload it from disk.
last_pipeline_instance = None
# Cache key for the instance above: (model_path, model_format) — compared
# against the incoming request's parameters in load_stable_diffusion.
last_pipeline_params = (None, None)
|
|
|
|
|
|
|
|
|
|
|
|
def load_stable_diffusion(ctx: ServerContext, upscale: UpscaleParams):
    """Load a Stable Diffusion x4 upscale pipeline, reusing a cached one.

    A single pipeline instance is cached at module level, keyed on the
    resolved model path and the model format, so back-to-back requests
    with the same parameters skip the expensive model load.

    Args:
        ctx: server context providing the base model directory.
        upscale: upscale parameters naming the model and its format.

    Returns:
        A ready-to-call upscale pipeline (ONNX or torch, per ``upscale.format``).
    """
    global last_pipeline_instance
    global last_pipeline_params

    model_path = path.join(ctx.model_path, upscale.upscale_model)
    cache_params = (model_path, upscale.format)

    # reuse the cached pipeline only when both the path and format match
    if last_pipeline_instance is not None and cache_params == last_pipeline_params:
        print('reusing existing Stable Diffusion upscale pipeline')
        return last_pipeline_instance

    if upscale.format == 'onnx':
        # The ONNX export is missing two of the components the pipeline
        # requires, so they are loaded separately and passed in explicitly.
        # Without them, from_pretrained raises:
        # ValueError: Pipeline <class 'onnx_web.onnx.pipeline_onnx_stable_diffusion_upscale.OnnxStableDiffusionUpscalePipeline'>
        # expected {'vae', 'unet', 'text_encoder', 'tokenizer', 'scheduler', 'low_res_scheduler'},
        # but only {'scheduler', 'tokenizer', 'text_encoder', 'unet'} were passed.
        pipeline = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            model_path,
            vae=AutoencoderKL.from_pretrained(
                model_path, subfolder='vae_encoder'),
            low_res_scheduler=DDPMScheduler.from_pretrained(
                model_path, subfolder='scheduler'),
        )
    else:
        # NOTE(review): the non-ONNX branch ignores model_path and always
        # fetches the hub checkpoint — confirm this is intentional.
        pipeline = StableDiffusionUpscalePipeline.from_pretrained(
            'stabilityai/stable-diffusion-x4-upscaler')

    last_pipeline_instance = pipeline
    last_pipeline_params = cache_params

    return pipeline
|
|
|
|
|
|
|
|
|
|
|
def upscale_stable_diffusion(
    ctx: ServerContext,
    _stage: StageParams,
    params: ImageParams,
    source: Image.Image,
    *,
    upscale: UpscaleParams,
) -> Image.Image:
    """Upscale a source image using a Stable Diffusion upscale pipeline.

    Args:
        ctx: server context used to locate and load the model.
        _stage: stage parameters (unused by this stage).
        params: image parameters supplying the prompt, seed, and step count.
        source: the image to upscale.
        upscale: keyword-only upscale parameters selecting the model.

    Returns:
        The first image produced by the pipeline.
    """
    print('upscaling with Stable Diffusion')

    pipeline = load_stable_diffusion(ctx, upscale)
    # Seed the default torch generator once and pass it through. The
    # original seeded it, read initial_seed(), then re-seeded with the
    # same value — a redundant round-trip with identical behavior.
    generator = torch.manual_seed(params.seed)

    return pipeline(
        params.prompt,
        source,
        generator=generator,
        num_inference_steps=params.steps,
    ).images[0]
|