1
0
Fork 0

apply lint, fix shadowed names

This commit is contained in:
Sean Sube 2023-02-18 17:59:13 -06:00
parent 20c7fb8b33
commit 4d93c13431
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
5 changed files with 27 additions and 24 deletions

View File

@@ -1,5 +1,4 @@
from logging import getLogger
from typing import Optional
import numpy as np
import torch
@@ -25,7 +24,9 @@ def blend_img2img(
**kwargs,
) -> Image.Image:
params = params.with_args(**kwargs)
logger.info("blending image using img2img, %s steps: %s", params.steps, params.prompt)
logger.info(
"blending image using img2img, %s steps: %s", params.steps, params.prompt
)
pipe = load_pipeline(
server,

View File

@@ -56,14 +56,14 @@ def blend_inpaint(
save_image(server, "last-mask.png", mask)
save_image(server, "last-noise.png", noise)
def outpaint(image: Image.Image, dims: Tuple[int, int, int]):
def outpaint(tile_source: Image.Image, dims: Tuple[int, int, int]):
left, top, tile = dims
size = Size(*image.size)
mask = mask.crop((left, top, left + tile, top + tile))
size = Size(*tile_source.size)
tile_mask = mask.crop((left, top, left + tile, top + tile))
if is_debug():
save_image(server, "tile-source.png", image)
save_image(server, "tile-mask.png", mask)
save_image(server, "tile-source.png", tile_source)
save_image(server, "tile-mask.png", tile_mask)
latents = get_latents_from_seed(params.seed, size)
pipe = load_pipeline(
@@ -83,9 +83,9 @@ def blend_inpaint(
generator=rng,
guidance_scale=params.cfg,
height=size.height,
image=image,
image=tile_source,
latents=latents,
mask=mask,
mask=tile_mask,
negative_prompt=params.negative_prompt,
num_inference_steps=params.steps,
width=size.width,
@@ -98,7 +98,7 @@ def blend_inpaint(
generator=rng,
guidance_scale=params.cfg,
height=size.height,
image=image,
image=tile_source,
latents=latents,
mask=mask,
negative_prompt=params.negative_prompt,
@@ -109,9 +109,7 @@ def blend_inpaint(
return result.images[0]
output = process_tile_order(
stage.tile_order, source, SizeChart.auto, 1, [outpaint]
)
output = process_tile_order(stage.tile_order, source, SizeChart.auto, 1, [outpaint])
logger.info("final output image size", output.size)
return output

View File

@@ -26,7 +26,9 @@ def source_txt2img(
) -> Image.Image:
params = params.with_args(**kwargs)
size = size.with_args(**kwargs)
logger.info("generating image using txt2img, %s steps: %s", params.steps, params.prompt)
logger.info(
"generating image using txt2img, %s steps: %s", params.steps, params.prompt
)
if source is not None:
logger.warn(

View File

@@ -62,14 +62,14 @@ def upscale_outpaint(
save_image(server, "last-mask.png", mask)
save_image(server, "last-noise.png", noise)
def outpaint(image: Image.Image, dims: Tuple[int, int, int]):
def outpaint(tile_source: Image.Image, dims: Tuple[int, int, int]):
left, top, tile = dims
size = Size(*image.size)
mask = mask.crop((left, top, left + tile, top + tile))
size = Size(*tile_source.size)
tile_mask = mask.crop((left, top, left + tile, top + tile))
if is_debug():
save_image(server, "tile-source.png", image)
save_image(server, "tile-mask.png", mask)
save_image(server, "tile-source.png", tile_source)
save_image(server, "tile-mask.png", tile_mask)
latents = get_tile_latents(full_latents, dims)
pipe = load_pipeline(
@@ -84,8 +84,8 @@ def upscale_outpaint(
logger.debug("using LPW pipeline for inpaint")
rng = torch.manual_seed(params.seed)
result = pipe.inpaint(
image,
mask,
tile_source,
tile_mask,
prompt,
generator=rng,
guidance_scale=params.cfg,
@@ -100,12 +100,12 @@ def upscale_outpaint(
rng = np.random.RandomState(params.seed)
result = pipe(
prompt,
image,
tile_source,
generator=rng,
guidance_scale=params.cfg,
height=size.height,
latents=latents,
mask=mask,
mask=tile_mask,
negative_prompt=params.negative_prompt,
num_inference_steps=params.steps,
width=size.width,

View File

@@ -74,7 +74,9 @@ def upscale_stable_diffusion(
) -> Image.Image:
params = params.with_args(**kwargs)
upscale = upscale.with_args(**kwargs)
logger.info("upscaling with Stable Diffusion, %s steps: %s", params.steps, params.prompt)
logger.info(
"upscaling with Stable Diffusion, %s steps: %s", params.steps, params.prompt
)
pipeline = load_stable_diffusion(server, upscale, job.get_device())
generator = torch.manual_seed(params.seed)