
apply lint fixes again

Sean Sube 2023-02-05 17:55:04 -06:00
parent 20467aafac
commit 7462c96616
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
8 changed files with 40 additions and 23 deletions
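The hunks below are consistent with an import-sorting and autoformatting pass (for example isort plus black, though the commit does not name the tools): imports are reordered so numpy precedes torch, single-quoted strings become double-quoted, and long call signatures are wrapped one argument per line with trailing commas. A minimal sketch of re-running that kind of fix, assuming isort and black are installed and invoked from the repository root; the project's actual lint configuration may differ:

# Hypothetical sketch: re-apply the same style of lint fixes locally.
# Assumes the isort and black packages are installed; the project's real
# lint tooling and configuration may differ.
import subprocess

# Sort imports, e.g. "import numpy as np" before "import torch".
subprocess.run(["isort", "."], check=True)

# Normalize quotes to double quotes, wrap long argument lists one per
# line, and add trailing commas.
subprocess.run(["black", "."], check=True)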

View File

@@ -1,7 +1,7 @@
 from logging import getLogger
-import torch
 import numpy as np
+import torch
 from diffusers import OnnxStableDiffusionImg2ImgPipeline
 from PIL import Image
@@ -35,7 +35,7 @@ def blend_img2img(
         params.lpw,
     )
     if params.lpw:
-        logger.debug('using LPW pipeline for img2img')
+        logger.debug("using LPW pipeline for img2img")
         rng = torch.manual_seed(params.seed)
         result = pipe.img2img(
             prompt,

View File

@@ -1,8 +1,8 @@
 from logging import getLogger
 from typing import Callable, Tuple
-import torch
 import numpy as np
+import torch
 from diffusers import OnnxStableDiffusionInpaintPipeline
 from PIL import Image
@@ -70,7 +70,7 @@ def blend_inpaint(
         )
         if params.lpw:
-            logger.debug('using LPW pipeline for inpaint')
+            logger.debug("using LPW pipeline for inpaint")
             rng = torch.manual_seed(params.seed)
             result = pipe.inpaint(
                 params.prompt,

View File

@@ -1,7 +1,7 @@
 from logging import getLogger
-import torch
 import numpy as np
+import torch
 from diffusers import OnnxStableDiffusionPipeline
 from PIL import Image
@@ -34,11 +34,15 @@ def source_txt2img(
     latents = get_latents_from_seed(params.seed, size)
     pipe = load_pipeline(
-        OnnxStableDiffusionPipeline, params.model, params.scheduler, job.get_device(), params.lpw
+        OnnxStableDiffusionPipeline,
+        params.model,
+        params.scheduler,
+        job.get_device(),
+        params.lpw,
     )
     if params.lpw:
-        logger.debug('using LPW pipeline for txt2img')
+        logger.debug("using LPW pipeline for txt2img")
         rng = torch.manual_seed(params.seed)
         result = pipe.text2img(
             prompt,

View File

@@ -1,8 +1,8 @@
 from logging import getLogger
 from typing import Callable, Tuple
-import torch
 import numpy as np
+import torch
 from diffusers import OnnxStableDiffusionInpaintPipeline
 from PIL import Image, ImageDraw
@@ -75,7 +75,7 @@ def upscale_outpaint(
             params.lpw,
         )
         if params.lpw:
-            logger.debug('using LPW pipeline for inpaint')
+            logger.debug("using LPW pipeline for inpaint")
             rng = torch.manual_seed(params.seed)
             result = pipe.inpaint(
                 image,
@@ -102,10 +102,8 @@ def upscale_outpaint(
                 negative_prompt=params.negative_prompt,
                 num_inference_steps=params.steps,
                 width=size.width,
             )
         # once part of the image has been drawn, keep it
         draw_mask.rectangle((left, top, left + tile, top + tile), fill="black")
         return result.images[0]
@@ -116,7 +114,9 @@ def upscale_outpaint(
     if border.left == border.right and border.top == border.bottom:
         logger.debug("outpainting with an even border, using spiral tiling")
-        output = process_tile_spiral(source_image, SizeChart.auto, 1, [outpaint], overlap=overlap)
+        output = process_tile_spiral(
+            source_image, SizeChart.auto, 1, [outpaint], overlap=overlap
+        )
     else:
         logger.debug("outpainting with an uneven border, using grid tiling")
         output = process_tile_grid(source_image, SizeChart.auto, 1, [outpaint])

View File

@@ -60,7 +60,7 @@ base_models: Models = {
             "correction-codeformer",
             "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth",
             1,
-        )
+        ),
     ],
     "upscaling": [
         (

View File

@@ -47,7 +47,11 @@ def get_tile_latents(
 def load_pipeline(
-    pipeline: DiffusionPipeline, model: str, scheduler: Any, device: DeviceParams, lpw: bool
+    pipeline: DiffusionPipeline,
+    model: str,
+    scheduler: Any,
+    device: DeviceParams,
+    lpw: bool,
 ):
     global last_pipeline_instance
     global last_pipeline_scheduler

View File

@@ -1,8 +1,8 @@
 from logging import getLogger
 from typing import Any
-import torch
 import numpy as np
+import torch
 from diffusers import OnnxStableDiffusionImg2ImgPipeline, OnnxStableDiffusionPipeline
 from PIL import Image, ImageChops
@@ -27,12 +27,16 @@ def run_txt2img_pipeline(
 ) -> None:
     latents = get_latents_from_seed(params.seed, size)
     pipe = load_pipeline(
-        OnnxStableDiffusionPipeline, params.model, params.scheduler, job.get_device(), params.lpw
+        OnnxStableDiffusionPipeline,
+        params.model,
+        params.scheduler,
+        job.get_device(),
+        params.lpw,
     )
     progress = job.get_progress_callback()
     if params.lpw:
-        logger.debug('using LPW pipeline for txt2img')
+        logger.debug("using LPW pipeline for txt2img")
         rng = torch.manual_seed(params.seed)
         result = pipe.text2img(
             params.prompt,
@@ -59,7 +63,6 @@ def run_txt2img_pipeline(
         callback=progress,
     )
     image = result.images[0]
     image = run_upscale_correction(
         job, server, StageParams(), params, image, upscale=upscale
@@ -89,11 +92,11 @@ def run_img2img_pipeline(
         params.model,
         params.scheduler,
         job.get_device(),
-        params.lpw
+        params.lpw,
     )
     progress = job.get_progress_callback()
     if params.lpw:
-        logger.debug('using LPW pipeline for img2img')
+        logger.debug("using LPW pipeline for img2img")
         rng = torch.manual_seed(params.seed)
         result = pipe.img2img(
             source_image,
@@ -118,7 +121,6 @@ def run_img2img_pipeline(
         callback=progress,
     )
     image = result.images[0]
     image = run_upscale_correction(
         job, server, StageParams(), params, image, upscale=upscale

View File

@@ -234,7 +234,14 @@ def pipeline_from_request() -> Tuple[DeviceParams, ImageParams, Size]:
     )
     params = ImageParams(
-        model_path, scheduler, prompt, cfg, steps, seed, lpw=lpw, negative_prompt=negative_prompt
+        model_path,
+        scheduler,
+        prompt,
+        cfg,
+        steps,
+        seed,
+        lpw=lpw,
+        negative_prompt=negative_prompt,
     )
     size = Size(width, height)
     return (device, params, size)