1
0
Fork 0

add more misc logging

This commit is contained in:
Sean Sube 2023-11-06 08:48:35 -06:00
parent 408e3d725b
commit 7c67d595fb
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
7 changed files with 38 additions and 11 deletions

View File

@@ -252,7 +252,8 @@ class ChainPipeline:
except Exception:
worker.retries = worker.retries - 1
logger.exception(
"error while running stage pipeline, %s retries left", worker.retries
"error while running stage pipeline, %s retries left",
worker.retries,
)
server.cache.clear()
run_gc([worker.get_device()])

View File

@@ -28,6 +28,10 @@ class PersistDiskStage(BaseStage):
stage_source: Optional[Image.Image] = None,
**kwargs,
) -> List[Image.Image]:
logger.info(
"persisting images to disk: %s, %s", [s.size for s in sources], output
)
for source, name in zip(sources, output):
dest = save_image(server, name, source, params=params, size=size)
logger.info("saved image to %s", dest)

View File

@@ -47,7 +47,10 @@ class SourceTxt2ImgStage(BaseStage):
params = params.with_args(prompt=slice_prompt(params.prompt, prompt_index))
logger.info(
"generating image using txt2img, %s steps: %s", params.steps, params.prompt
"generating image using txt2img, %s steps of %s: %s",
params.steps,
params.model,
params.prompt,
)
if len(sources):
@@ -125,6 +128,7 @@ class SourceTxt2ImgStage(BaseStage):
output = list(sources)
output.extend(result.images)
logger.debug("produced %s outputs", len(output))
return output
def steps(

View File

@@ -69,6 +69,10 @@ def convert_diffusion_diffusers_xl(
else:
pipeline.vae = AutoencoderKL.from_pretrained(vae_path)
if path.exists(temp_path):
logger.debug("torch model already exists for %s: %s", source, temp_path)
else:
logger.debug("exporting torch model for %s: %s", source, temp_path)
pipeline.save_pretrained(temp_path)
# directory -> onnx using optimum exporters

View File

@@ -264,9 +264,7 @@ def load_pipeline(
if hasattr(pipe, vae):
vae_model = getattr(pipe, vae)
vae_model.set_tiled(tiled=params.tiled_vae)
vae_model.set_window_size(
params.vae_tile // 8, params.vae_overlap
)
vae_model.set_window_size(params.vae_tile // 8, params.vae_overlap)
# update panorama params
if params.is_panorama():

View File

@@ -330,7 +330,11 @@ class StableDiffusionXLPanoramaPipelineMixin(StableDiffusionXLImg2ImgPipelineMix
)
add_region_embeds.append(
np.concatenate(
(region_negative_pooled_prompt_embeds, region_pooled_prompt_embeds), axis=0
(
region_negative_pooled_prompt_embeds,
region_pooled_prompt_embeds,
),
axis=0,
)
)
@@ -440,7 +444,15 @@ class StableDiffusionXLPanoramaPipelineMixin(StableDiffusionXLImg2ImgPipelineMix
for r in range(len(regions)):
top, left, bottom, right, mult, prompt = regions[r]
logger.debug("running region prompt: %s, %s, %s, %s, %s, %s", top, left, bottom, right, mult, prompt)
logger.debug(
"running region prompt: %s, %s, %s, %s, %s, %s",
top,
left,
bottom,
right,
mult,
prompt,
)
# convert coordinates to latent space
h_start = top // 8
@@ -476,7 +488,9 @@ class StableDiffusionXLPanoramaPipelineMixin(StableDiffusionXLImg2ImgPipelineMix
# perform guidance
if do_classifier_free_guidance:
region_noise_pred_uncond, region_noise_pred_text = np.split(region_noise_pred, 2)
region_noise_pred_uncond, region_noise_pred_text = np.split(
region_noise_pred, 2
)
region_noise_pred = region_noise_pred_uncond + guidance_scale * (
region_noise_pred_text - region_noise_pred_uncond
)
@@ -501,7 +515,9 @@ class StableDiffusionXLPanoramaPipelineMixin(StableDiffusionXLImg2ImgPipelineMix
value[:, :, h_start:h_end, w_start:w_end] = latents_region_denoised
count[:, :, h_start:h_end, w_start:w_end] = 1
else:
value[:, :, h_start:h_end, w_start:w_end] += latents_region_denoised * mult
value[:, :, h_start:h_end, w_start:w_end] += (
latents_region_denoised * mult
)
count[:, :, h_start:h_end, w_start:w_end] += mult
# take the MultiDiffusion step. Eq. 5 in MultiDiffusion paper: https://arxiv.org/abs/2302.08113

View File

@@ -228,7 +228,7 @@ def parse_float_group(group: Tuple[str, str]) -> Tuple[str, float]:
def get_tokens_from_prompt(
prompt: str,
pattern: Pattern,
parser = parse_float_group,
parser=parse_float_group,
) -> Tuple[str, List[Tuple[str, float]]]:
"""
TODO: replace with Arpeggio