
remove timestep pinning

This commit is contained in:
Sean Sube 2024-01-15 08:01:03 -06:00
parent 500561b1f5
commit ff11d75784
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
2 changed files with 0 additions and 135 deletions
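For context on what "timestep pinning" was doing: newer diffusers schedulers keep an internal _step_index that advances on every step() call, but the panorama pipelines call step() once per view (and once per region) inside a single timestep. The deleted code therefore snapshotted the index before each per-view step, restored it afterward, and advanced it exactly once per outer loop iteration. Below is a minimal, self-contained sketch of that pattern; the DummyScheduler class is a toy stand-in chosen for illustration, not the diffusers API.

# Sketch of the "timestep pinning" pattern removed by this commit.
# DummyScheduler is a toy stand-in for a scheduler that tracks an internal
# _step_index and advances it on every step() call.

class DummyScheduler:
    def __init__(self, timesteps):
        self.timesteps = list(timesteps)
        self._step_index = 0

    def step(self, sample, t):
        # pretend to denoise one step, then advance the internal counter
        self._step_index += 1
        return sample


scheduler = DummyScheduler(timesteps=range(10, 0, -1))
views = ["view-0", "view-1", "view-2"]  # tiles/regions of the panorama

for i, t in enumerate(scheduler.timesteps):
    last = i == (len(scheduler.timesteps) - 1)
    next_step_index = None

    for view in views:
        # freeze the scheduler's internal timestep before this view's step
        prev_step_index = None
        if hasattr(scheduler, "_step_index"):
            prev_step_index = scheduler._step_index

        scheduler.step(view, t)

        # reset the scheduler's internal timestep so every view in this
        # iteration is denoised at the same t
        if prev_step_index is not None:
            next_step_index = scheduler._step_index
            scheduler._step_index = prev_step_index

    # advance the internal index exactly once per outer timestep
    if not last and next_step_index is not None:
        scheduler._step_index = next_step_index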

View File

@@ -578,8 +578,6 @@ class OnnxStableDiffusionPanoramaPipeline(DiffusionPipeline):
 for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
     last = i == (len(self.scheduler.timesteps) - 1)
-    next_step_index = None
     count.fill(0)
     value.fill(0)
@@ -614,11 +612,6 @@ class OnnxStableDiffusionPanoramaPipeline(DiffusionPipeline):
     noise_pred_text - noise_pred_uncond
 )
-# freeze the scheduler's internal timestep
-prev_step_index = None
-if hasattr(self.scheduler, "_step_index"):
-    prev_step_index = self.scheduler._step_index
 # compute the previous noisy sample x_t -> x_t-1
 scheduler_output = self.scheduler.step(
     torch.from_numpy(noise_pred),
@@ -628,16 +621,6 @@ class OnnxStableDiffusionPanoramaPipeline(DiffusionPipeline):
 )
 latents_view_denoised = scheduler_output.prev_sample.numpy()
-# reset the scheduler's internal timestep
-if prev_step_index is not None:
-    logger.debug(
-        "resetting scheduler internal step index from %s to %s",
-        self.scheduler._step_index,
-        prev_step_index,
-    )
-    next_step_index = self.scheduler._step_index
-    self.scheduler._step_index = prev_step_index
 value[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
 count[:, :, h_start:h_end, w_start:w_end] += 1
@@ -703,11 +686,6 @@ class OnnxStableDiffusionPanoramaPipeline(DiffusionPipeline):
     * (region_noise_pred_text - region_noise_pred_uncond)
 )
-# freeze the scheduler's internal timestep
-prev_step_index = None
-if hasattr(self.scheduler, "_step_index"):
-    prev_step_index = self.scheduler._step_index
 # compute the previous noisy sample x_t -> x_t-1
 scheduler_output = self.scheduler.step(
     torch.from_numpy(region_noise_pred),
@@ -717,16 +695,6 @@ class OnnxStableDiffusionPanoramaPipeline(DiffusionPipeline):
 )
 latents_region_denoised = scheduler_output.prev_sample.numpy()
-# reset the scheduler's internal timestep
-if prev_step_index is not None:
-    logger.debug(
-        "resetting scheduler internal step index from %s to %s",
-        self.scheduler._step_index,
-        prev_step_index,
-    )
-    next_step_index = self.scheduler._step_index
-    self.scheduler._step_index = prev_step_index
 if feather[0] > 0.0:
     mask = make_tile_mask(
         (h_end - h_start, w_end - w_start),
@@ -755,16 +723,6 @@ class OnnxStableDiffusionPanoramaPipeline(DiffusionPipeline):
 latents = np.where(count > 0, value / count, value)
 latents = repair_nan(latents)
-# update the scheduler's internal timestep
-if not last and next_step_index is not None:
-    logger.debug(
-        "updating scheduler internal step index from %s to %s",
-        self.scheduler._step_index,
-        next_step_index,
-    )
-    self.scheduler._step_index = next_step_index
-    next_step_index = None
 # call the callback, if provided
 if callback is not None and i % callback_steps == 0:
     callback(i, t, latents)
@@ -1036,8 +994,6 @@ class OnnxStableDiffusionPanoramaPipeline(DiffusionPipeline):
 for i, t in enumerate(self.progress_bar(timesteps)):
     last = i == (len(timesteps) - 1)
-    next_step_index = None
     count.fill(0)
     value.fill(0)
@@ -1072,11 +1028,6 @@ class OnnxStableDiffusionPanoramaPipeline(DiffusionPipeline):
     noise_pred_text - noise_pred_uncond
 )
-# freeze the scheduler's internal timestep
-prev_step_index = None
-if hasattr(self.scheduler, "_step_index"):
-    prev_step_index = self.scheduler._step_index
 # compute the previous noisy sample x_t -> x_t-1
 scheduler_output = self.scheduler.step(
     torch.from_numpy(noise_pred),
@@ -1086,31 +1037,12 @@ class OnnxStableDiffusionPanoramaPipeline(DiffusionPipeline):
 )
 latents_view_denoised = scheduler_output.prev_sample.numpy()
-# reset the scheduler's internal timestep
-if prev_step_index is not None:
-    logger.debug(
-        "resetting scheduler internal step index from %s to %s",
-        self.scheduler._step_index,
-        prev_step_index,
-    )
-    next_step_index = self.scheduler._step_index
-    self.scheduler._step_index = prev_step_index
 value[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
 count[:, :, h_start:h_end, w_start:w_end] += 1
 # take the MultiDiffusion step. Eq. 5 in MultiDiffusion paper: https://arxiv.org/abs/2302.08113
 latents = np.where(count > 0, value / count, value)
-# update the scheduler's internal timestep
-if not last and next_step_index is not None:
-    logger.debug(
-        "updating scheduler internal step index from %s to %s",
-        self.scheduler._step_index,
-        next_step_index,
-    )
-    self.scheduler._step_index = next_step_index
 # call the callback, if provided
 if callback is not None and i % callback_steps == 0:
     callback(i, t, latents)
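The latents = np.where(count > 0, value / count, value) line kept in the context above is the fusion step the comment cites (Eq. 5 of the MultiDiffusion paper). With the uniform coverage counts used here it reduces to a per-pixel average; writing V_i for the set of latent pixels covered by view i and \hat{z}_{t-1}^{(i)} for that view's denoised output (notation chosen here for illustration):

\[
  z_{t-1}(p) \;=\; \frac{\sum_i \mathbf{1}\!\left[p \in V_i\right] \, \hat{z}_{t-1}^{(i)}(p)}
                        {\sum_i \mathbf{1}\!\left[p \in V_i\right]}
  \qquad \text{wherever } \sum_i \mathbf{1}\!\left[p \in V_i\right] > 0 .
\]

Pixels covered by no view keep the accumulator value unchanged, which is exactly what the count > 0 guard in np.where does.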

View File

@@ -411,8 +411,6 @@ class StableDiffusionXLPanoramaPipelineMixin(StableDiffusionXLImg2ImgPipelineMix
 num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
 for i, t in enumerate(self.progress_bar(timesteps)):
     last = i == (len(timesteps) - 1)
-    next_step_index = None
     count.fill(0)
     value.fill(0)
@@ -456,11 +454,6 @@ class StableDiffusionXLPanoramaPipelineMixin(StableDiffusionXLImg2ImgPipelineMix
     guidance_rescale=guidance_rescale,
 )
-# freeze the scheduler's internal timestep
-prev_step_index = None
-if hasattr(self.scheduler, "_step_index"):
-    prev_step_index = self.scheduler._step_index
 # compute the previous noisy sample x_t -> x_t-1
 scheduler_output = self.scheduler.step(
     torch.from_numpy(noise_pred),
@@ -470,16 +463,6 @@ class StableDiffusionXLPanoramaPipelineMixin(StableDiffusionXLImg2ImgPipelineMix
 )
 latents_view_denoised = scheduler_output.prev_sample.numpy()
-# reset the scheduler's internal timestep
-if prev_step_index is not None:
-    logger.debug(
-        "rewinding scheduler internal step index from %s to %s",
-        self.scheduler._step_index,
-        prev_step_index,
-    )
-    next_step_index = self.scheduler._step_index
-    self.scheduler._step_index = prev_step_index
 value[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
 count[:, :, h_start:h_end, w_start:w_end] += 1
@@ -554,11 +537,6 @@ class StableDiffusionXLPanoramaPipelineMixin(StableDiffusionXLImg2ImgPipelineMix
     guidance_rescale=guidance_rescale,
 )
-# freeze the scheduler's internal timestep
-prev_step_index = None
-if hasattr(self.scheduler, "_step_index"):
-    prev_step_index = self.scheduler._step_index
 # compute the previous noisy sample x_t -> x_t-1
 scheduler_output = self.scheduler.step(
     torch.from_numpy(region_noise_pred),
@@ -568,16 +546,6 @@ class StableDiffusionXLPanoramaPipelineMixin(StableDiffusionXLImg2ImgPipelineMix
 )
 latents_region_denoised = scheduler_output.prev_sample.numpy()
-# reset the scheduler's internal timestep
-if prev_step_index is not None:
-    logger.debug(
-        "resetting scheduler internal step index from %s to %s",
-        self.scheduler._step_index,
-        prev_step_index,
-    )
-    next_step_index = self.scheduler._step_index
-    self.scheduler._step_index = prev_step_index
 if feather[0] > 0.0:
     mask = make_tile_mask(
         (h_end - h_start, w_end - w_start),
@@ -606,15 +574,6 @@ class StableDiffusionXLPanoramaPipelineMixin(StableDiffusionXLImg2ImgPipelineMix
 latents = np.where(count > 0, value / count, value)
 latents = repair_nan(latents)
-# update the scheduler's internal timestep, if set
-if not last and next_step_index is not None:
-    logger.debug(
-        "updating scheduler internal step index from %s to %s",
-        self.scheduler._step_index,
-        next_step_index,
-    )
-    self.scheduler._step_index = next_step_index
 # call the callback, if provided
 if i == len(timesteps) - 1 or (
     (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
@@ -877,8 +836,6 @@ class StableDiffusionXLPanoramaPipelineMixin(StableDiffusionXLImg2ImgPipelineMix
 num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
 for i, t in enumerate(self.progress_bar(timesteps)):
     last = i == (len(timesteps) - 1)
-    next_step_index = None
     count.fill(0)
     value.fill(0)
@@ -922,11 +879,6 @@ class StableDiffusionXLPanoramaPipelineMixin(StableDiffusionXLImg2ImgPipelineMix
     guidance_rescale=guidance_rescale,
 )
-# freeze the scheduler's internal timestep
-prev_step_index = None
-if hasattr(self.scheduler, "_step_index"):
-    prev_step_index = self.scheduler._step_index
 # compute the previous noisy sample x_t -> x_t-1
 scheduler_output = self.scheduler.step(
     torch.from_numpy(noise_pred),
@@ -936,31 +888,12 @@ class StableDiffusionXLPanoramaPipelineMixin(StableDiffusionXLImg2ImgPipelineMix
 )
 latents_view_denoised = scheduler_output.prev_sample.numpy()
-# reset the scheduler's internal timestep
-if prev_step_index is not None:
-    logger.debug(
-        "resetting scheduler internal step index from %s to %s",
-        self.scheduler._step_index,
-        prev_step_index,
-    )
-    next_step_index = self.scheduler._step_index
-    self.scheduler._step_index = prev_step_index
 value[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
 count[:, :, h_start:h_end, w_start:w_end] += 1
 # take the MultiDiffusion step. Eq. 5 in MultiDiffusion paper: https://arxiv.org/abs/2302.08113
 latents = np.where(count > 0, value / count, value)
-# update the scheduler's internal timestep, if set
-if not last and next_step_index is not None:
-    logger.debug(
-        "updating scheduler internal step index from %s to %s",
-        self.scheduler._step_index,
-        next_step_index,
-    )
-    self.scheduler._step_index = next_step_index
 # call the callback, if provided
 if i == len(timesteps) - 1 or (
     (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
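The accumulation that the deleted bookkeeping sat inside works the same way in both files: each view's denoised latents are summed into value, count records coverage, and the panorama latents are the per-pixel average, with make_tile_mask feathering region edges. The rough numpy sketch below shows only that accumulation; the linear ramp mask, the view coordinates, and the fake "denoising" are illustrative assumptions, not the pipeline's make_tile_mask or UNet output.

# Rough sketch of the view accumulation surrounding the deleted code.
import numpy as np

def linear_feather_mask(height, width, feather_px):
    # Weight ramping from 0 at the tile border to 1 in the interior;
    # a stand-in for the pipeline's make_tile_mask().
    ramp_y = np.minimum(np.arange(height), np.arange(height)[::-1]) / max(feather_px, 1)
    ramp_x = np.minimum(np.arange(width), np.arange(width)[::-1]) / max(feather_px, 1)
    return np.clip(ramp_y, 0.0, 1.0)[:, None] * np.clip(ramp_x, 0.0, 1.0)[None, :]

latents = np.random.randn(1, 4, 64, 96).astype(np.float32)
views = [(0, 64, 0, 64), (0, 64, 32, 96)]  # (h_start, h_end, w_start, w_end)

value = np.zeros_like(latents)
count = np.zeros_like(latents)

for h_start, h_end, w_start, w_end in views:
    # a real pipeline would run the UNet and scheduler.step() on this crop;
    # simple scaling stands in for the denoised view here
    latents_view_denoised = latents[:, :, h_start:h_end, w_start:w_end] * 0.9

    mask = linear_feather_mask(h_end - h_start, w_end - w_start, feather_px=8)
    value[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised * mask
    count[:, :, h_start:h_end, w_start:w_end] += mask

# fuse overlapping views; uncovered pixels keep the accumulator value,
# mirroring the count > 0 guard in both pipelines
with np.errstate(divide="ignore", invalid="ignore"):
    latents = np.where(count > 0, value / count, value)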