
feat(api): prefer chain stage parameters over request parameters (#138)

Sean Sube 2023-02-18 16:27:48 -06:00
parent bfdb071c2d
commit 7b8ced0f68
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
9 changed files with 82 additions and 35 deletions
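
The change applies one pattern across the chain stages: instead of accepting their own keyword arguments such as prompt or strength, stages now fold the keyword arguments from the chain definition into the shared parameter objects through new with_args helpers, so values set on a stage override the values from the original request. A minimal sketch of that pattern, assuming a simplified parameter class; the names ExampleParams and example_stage are illustrative, not part of the project:

# hedged sketch of the override pattern; only with_args mirrors the real helpers
class ExampleParams:
    def __init__(self, prompt: str, steps: int):
        self.prompt = prompt
        self.steps = steps

    def with_args(self, **kwargs):
        # copy the params, preferring any values supplied by the chain stage
        return ExampleParams(
            kwargs.get("prompt", self.prompt),
            kwargs.get("steps", self.steps),
        )


def example_stage(params: ExampleParams, **kwargs) -> ExampleParams:
    # stage kwargs take precedence over the request-level params
    return params.with_args(**kwargs)


request = ExampleParams("a painting of a cat", 25)
staged = example_stage(request, prompt="a painting of a dog")
print(staged.prompt, staged.steps)  # a painting of a dog 25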

View File

@@ -19,15 +19,13 @@ def blend_img2img(
     server: ServerContext,
     _stage: StageParams,
     params: ImageParams,
-    source_image: Image.Image,
+    source: Image.Image,
     *,
-    strength: float,
-    prompt: Optional[str] = None,
     callback: ProgressCallback = None,
     **kwargs,
 ) -> Image.Image:
-    prompt = prompt or params.prompt
-    logger.info("blending image using img2img, %s steps: %s", params.steps, prompt)
+    params = params.with_args(**kwargs)
+    logger.info("blending image using img2img, %s steps: %s", params.steps, params.prompt)
 
     pipe = load_pipeline(
         server,
@@ -41,25 +39,25 @@ def blend_img2img(
         logger.debug("using LPW pipeline for img2img")
         rng = torch.manual_seed(params.seed)
         result = pipe.img2img(
-            prompt,
+            params.prompt,
             generator=rng,
             guidance_scale=params.cfg,
-            image=source_image,
+            image=source,
             negative_prompt=params.negative_prompt,
             num_inference_steps=params.steps,
-            strength=strength,
+            strength=params.strength,
             callback=callback,
         )
     else:
         rng = np.random.RandomState(params.seed)
         result = pipe(
-            prompt,
+            params.prompt,
             generator=rng,
             guidance_scale=params.cfg,
-            image=source_image,
+            image=source,
             negative_prompt=params.negative_prompt,
             num_inference_steps=params.steps,
-            strength=strength,
+            strength=params.strength,
             callback=callback,
         )

View File

@@ -22,27 +22,29 @@ def blend_inpaint(
     server: ServerContext,
     stage: StageParams,
     params: ImageParams,
-    source_image: Image.Image,
+    source: Image.Image,
     *,
     expand: Border,
-    mask_image: Optional[Image.Image] = None,
+    mask: Optional[Image.Image] = None,
     fill_color: str = "white",
     mask_filter: Callable = mask_filter_none,
     noise_source: Callable = noise_source_histogram,
     callback: ProgressCallback = None,
     **kwargs,
 ) -> Image.Image:
+    params = params.with_args(**kwargs)
+    expand = expand.with_args(**kwargs)
     logger.info(
         "blending image using inpaint, %s steps: %s", params.steps, params.prompt
     )
 
-    if mask_image is None:
+    if mask is None:
         # if no mask was provided, keep the full source image
-        mask_image = Image.new("RGB", source_image.size, "black")
+        mask = Image.new("RGB", source.size, "black")
 
-    source_image, mask_image, noise_image, _full_dims = expand_image(
-        source_image,
-        mask_image,
+    source, mask, noise, _full_dims = expand_image(
+        source,
+        mask,
         expand,
         fill=fill_color,
         noise_source=noise_source,
@@ -50,14 +52,14 @@ def blend_inpaint(
     )
 
     if is_debug():
-        save_image(server, "last-source.png", source_image)
-        save_image(server, "last-mask.png", mask_image)
-        save_image(server, "last-noise.png", noise_image)
+        save_image(server, "last-source.png", source)
+        save_image(server, "last-mask.png", mask)
+        save_image(server, "last-noise.png", noise)
 
     def outpaint(image: Image.Image, dims: Tuple[int, int, int]):
         left, top, tile = dims
         size = Size(*image.size)
-        mask = mask_image.crop((left, top, left + tile, top + tile))
+        mask = mask.crop((left, top, left + tile, top + tile))
 
         if is_debug():
             save_image(server, "tile-source.png", image)
@@ -108,7 +110,7 @@ def blend_inpaint(
         return result.images[0]
 
     output = process_tile_order(
-        stage.tile_order, source_image, SizeChart.auto, 1, [outpaint]
+        stage.tile_order, source, SizeChart.auto, 1, [outpaint]
     )
 
     logger.info("final output image size", output.size)

View File

@@ -19,7 +19,7 @@ def blend_mask(
     _stage: StageParams,
     _params: ImageParams,
     *,
-    resized: Optional[List[Image.Image]] = None,
+    sources: Optional[List[Image.Image]] = None,
     mask: Optional[Image.Image] = None,
     _callback: ProgressCallback = None,
     **kwargs,
@@ -36,7 +36,7 @@ def blend_mask(
 
     resized = [
         valid_image(s, min_dims=mult_mask.size, max_dims=mult_mask.size)
-        for s in resized
+        for s in sources
     ]
 
     return Image.composite(resized[0], resized[1], mult_mask)

View File

@@ -25,6 +25,8 @@ def correct_codeformer(
     # must be within the load function for patch to take effect
     from codeformer import CodeFormer
 
+    upscale = upscale.with_args(**kwargs)
+
     device = job.get_device()
     pipe = CodeFormer(upscale=upscale.face_outscale).to(device.torch_str())
     return pipe(stage_source or source)

View File

@@ -55,6 +55,8 @@ def correct_gfpgan(
     upscale: UpscaleParams,
     **kwargs,
 ) -> Image.Image:
+    upscale = upscale.with_args(**kwargs)
+
     if upscale.correction_model is None:
         logger.warn("no face model given, skipping")
         return source_image

View File

@@ -21,12 +21,12 @@ def source_txt2img(
     source_image: Image.Image,
     *,
     size: Size,
-    prompt: str = None,
     callback: ProgressCallback = None,
     **kwargs,
 ) -> Image.Image:
-    prompt = prompt or params.prompt
-    logger.info("generating image using txt2img, %s steps: %s", params.steps, prompt)
+    params = params.with_args(**kwargs)
+    size = size.with_args(**kwargs)
+    logger.info("generating image using txt2img, %s steps: %s", params.steps, params.prompt)
 
     if source_image is not None:
         logger.warn(
@@ -47,7 +47,7 @@ def source_txt2img(
         logger.debug("using LPW pipeline for txt2img")
         rng = torch.manual_seed(params.seed)
         result = pipe.text2img(
-            prompt,
+            params.prompt,
             height=size.height,
             width=size.width,
             generator=rng,
@@ -60,7 +60,7 @@ def source_txt2img(
     else:
         rng = np.random.RandomState(params.seed)
         result = pipe(
-            prompt,
+            params.prompt,
             height=size.height,
             width=size.width,
             generator=rng,

View File

@@ -69,12 +69,12 @@ def upscale_stable_diffusion(
     source: Image.Image,
     *,
     upscale: UpscaleParams,
-    prompt: str = None,
     callback: ProgressCallback = None,
     **kwargs,
 ) -> Image.Image:
-    prompt = prompt or params.prompt
-    logger.info("upscaling with Stable Diffusion, %s steps: %s", params.steps, prompt)
+    params = params.with_args(**kwargs)
+    upscale = upscale.with_args(**kwargs)
+    logger.info("upscaling with Stable Diffusion, %s steps: %s", params.steps, params.prompt)
 
     pipeline = load_stable_diffusion(server, upscale, job.get_device())
     generator = torch.manual_seed(params.seed)

View File

@@ -255,7 +255,7 @@ def run_blend_pipeline(
         server,
         stage,
         params,
-        resized=sources,
+        sources=sources,
         mask=mask,
         callback=progress,
     )

View File

@@ -47,6 +47,14 @@ class Border:
             "bottom": self.bottom,
         }
 
+    def with_args(self, **kwargs):
+        return Border(
+            kwargs.get("left", self.left),
+            kwargs.get("right", self.right),
+            kwargs.get("top", self.top),
+            kwargs.get("bottom", self.bottom),
+        )
+
     @classmethod
     def even(cls, all: int):
         return Border(all, all, all, all)
@@ -75,6 +83,12 @@ class Size:
             "width": self.width,
         }
 
+    def with_args(self, **kwargs):
+        return Size(
+            kwargs.get("height", self.height),
+            kwargs.get("width", self.width),
+        )
+
 
 class DeviceParams:
     def __init__(
@@ -156,13 +170,25 @@ class ImageParams:
             "model": self.model,
             "scheduler": self.scheduler.__name__,
             "prompt": self.prompt,
-            "negativePrompt": self.negative_prompt,
+            "negative_prompt": self.negative_prompt,
             "cfg": self.cfg,
             "seed": self.seed,
             "steps": self.steps,
             "lpw": self.lpw,
         }
 
+    def with_args(self, **kwargs):
+        return ImageParams(
+            kwargs.get("model", self.model),
+            kwargs.get("scheduler", self.scheduler),
+            kwargs.get("prompt", self.prompt),
+            kwargs.get("cfg", self.cfg),
+            kwargs.get("steps", self.steps),
+            kwargs.get("seed", self.seed),
+            kwargs.get("negative_prompt", self.negative_prompt),
+            kwargs.get("lpw", self.lpw),
+        )
+
 
 class StageParams:
     """
@@ -259,3 +285,20 @@ class UpscaleParams:
             "tile_pad": self.tile_pad,
             "upscale_order": self.upscale_order,
         }
+
+    def with_args(self, **kwargs):
+        return ImageParams(
+            kwargs.get("upscale_model", self.upscale_model),
+            kwargs.get("correction_model", self.correction_model),
+            kwargs.get("denoise", self.denoise),
+            kwargs.get("faces", self.faces),
+            kwargs.get("face_outscale", self.face_outscale),
+            kwargs.get("face_strength", self.face_strength),
+            kwargs.get("format", self.format),
+            kwargs.get("half", self.half),
+            kwargs.get("outscale", self.outscale),
+            kwargs.get("pre_pad", self.pre_pad),
+            kwargs.get("scale", self.scale),
+            kwargs.get("tile_pad", self.tile_pad),
+            kwargs.get("upscale_order", self.upscale_order),
+        )
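
The new with_args helpers are plain copy-with-overrides constructors, so a chain stage can replace individual fields without mutating the request's objects. A rough usage sketch for the Border variant, restating the class just enough to run on its own; the real class lives in the project's params module and carries more behavior:

class Border:
    # trimmed-down restatement for illustration only
    def __init__(self, left: int, right: int, top: int, bottom: int):
        self.left = left
        self.right = right
        self.top = top
        self.bottom = bottom

    def with_args(self, **kwargs):
        # return a copy, preferring any overrides passed by the caller
        return Border(
            kwargs.get("left", self.left),
            kwargs.get("right", self.right),
            kwargs.get("top", self.top),
            kwargs.get("bottom", self.bottom),
        )

    @classmethod
    def even(cls, all: int):
        return Border(all, all, all, all)


base = Border.even(32)
expanded = base.with_args(top=0, bottom=64)  # only the named sides change
print(expanded.left, expanded.right, expanded.top, expanded.bottom)  # 32 32 0 64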