
attempt to run SD upscaling in smaller tiles

Sean Sube 2023-01-26 22:44:20 -06:00
parent fb5c46d90c
commit 936ebba510
2 changed files with 41 additions and 8 deletions

@@ -1,12 +1,13 @@
from numpy import random
from PIL import Image, ImageChops, ImageFilter
from typing import Tuple
from typing import Callable, List
import numpy as np
from .utils import (
    Border,
    Point,
    Size,
)
@@ -185,3 +186,27 @@ def expand_image(
        full_noise, full_source, full_mask.convert('L'))
    return (full_source, full_mask, full_noise, (full_width, full_height))
def process_tiles(
    source: Image,
    tile: int,
    scale: int,
    filters: List[Callable],
) -> Image:
    width, height = source.size
    image = Image.new('RGB', (width * scale, height * scale))

    for x in range(width // tile):
        for y in range(height // tile):
            left = x * tile
            top = y * tile
            print('processing tile', x, y, left, top)

            # crop into a separate name so the tile size is not overwritten on later iterations
            tile_image = source.crop((left, top, left + tile, top + tile))
            for image_filter in filters:
                tile_image = image_filter(tile_image)

            image.paste(tile_image, (left * scale, top * scale))

    return image
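The tiling logic can be sanity-checked without loading any model by passing a trivial filter. The sketch below is illustrative only: a plain PIL resize stands in for the real upscaling filter, and the source image and sizes are made up for the example.

from PIL import Image

def fake_upscale(tile: Image.Image) -> Image.Image:
    # stand-in filter: plain 4x resize instead of a diffusion upscaler
    return tile.resize((tile.width * 4, tile.height * 4))

source = Image.new('RGB', (512, 512), 'gray')
result = process_tiles(source, 64, 4, [fake_upscale])
print(result.size)  # (2048, 2048): an 8x8 grid of 64px tiles, each upscaled to 256px

Note that width // tile and height // tile drop any remainder, so edges smaller than a full tile are not processed.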

@@ -2,6 +2,7 @@ from basicsr.archs.rrdbnet_arch import RRDBNet
from diffusers import (
    AutoencoderKL,
    DDPMScheduler,
    StableDiffusionUpscalePipeline,
)
from gfpgan import GFPGANer
from os import path
@@ -11,6 +12,9 @@ from typing import Literal, Union
import numpy as np
from .image import (
    process_tiles
)
from .onnx import (
    ONNXNet,
    OnnxStableDiffusionUpscalePipeline,
@@ -135,13 +139,17 @@ def upscale_stable_diffusion(ctx: ServerContext, params: UpscaleParams, image: Image) -> Image:
    # ValueError: Pipeline <class 'onnx_web.onnx.pipeline_onnx_stable_diffusion_upscale.OnnxStableDiffusionUpscalePipeline'>
    # expected {'vae', 'unet', 'text_encoder', 'tokenizer', 'scheduler', 'low_res_scheduler'},
    # but only {'scheduler', 'tokenizer', 'text_encoder', 'unet'} were passed.
    pipeline = OnnxStableDiffusionUpscalePipeline.from_pretrained(
        model_path,
        vae=AutoencoderKL.from_pretrained(model_path, subfolder='vae_encoder'),
        low_res_scheduler=DDPMScheduler.from_pretrained(model_path, subfolder='scheduler'),
    )
    result = pipeline('', image=image)
    return result.images[0]
    # pipeline = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    #     model_path,
    #     vae=AutoencoderKL.from_pretrained(model_path, subfolder='vae_encoder'),
    #     low_res_scheduler=DDPMScheduler.from_pretrained(model_path, subfolder='scheduler'),
    # )
    # result = pipeline('', image=image)
    pipeline = StableDiffusionUpscalePipeline.from_pretrained('stabilityai/stable-diffusion-x4-upscaling')
    # run the upscaler over 64px tiles instead of passing the whole image through the pipeline at once
    upscale = lambda i: pipeline('an astronaut eating a hamburger', image=i).images[0]
    result = process_tiles(image, 64, 4, [upscale])
    return result
def run_upscale_correction(ctx: ServerContext, params: UpscaleParams, image: Image) -> Image:
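For context, the new path in upscale_stable_diffusion boils down to the following standalone sketch. The checkpoint name, tile size, scale, and per-tile call are taken from the diff above; the named wrapper is just a readable stand-in for the lambda, and the empty default prompt is an assumption.

from PIL import Image
from diffusers import StableDiffusionUpscalePipeline

pipeline = StableDiffusionUpscalePipeline.from_pretrained(
    'stabilityai/stable-diffusion-x4-upscaling')

def upscale_tile(tile: Image.Image, prompt: str = '') -> Image.Image:
    # the x4 checkpoint returns an image four times the input size,
    # so each 64px tile comes back as 256px
    return pipeline(prompt, image=tile).images[0]

# result = process_tiles(source, 64, 4, [upscale_tile])

Each tile costs a full diffusion pass, so smaller tiles trade peak memory for run time.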