
clean up some exports, lint

commit 1a45aa68fa (parent 2b83f942af)
Author: Sean Sube, 2023-02-18 18:55:42 -06:00
Signed by: ssube (GPG Key ID: 3EED7B957D362AF1)
2 changed files with 25 additions and 15 deletions

File 1 of 2:

@@ -1,11 +1,14 @@
 from . import logging
 from .chain import correct_gfpgan, upscale_resrgan, upscale_stable_diffusion
-from .diffusion.load import get_latents_from_seed, load_pipeline
+from .diffusion.load import get_latents_from_seed, load_pipeline, optimize_pipeline
 from .diffusion.run import (
+    run_blend_pipeline,
     run_img2img_pipeline,
     run_inpaint_pipeline,
     run_txt2img_pipeline,
+    run_upscale_pipeline,
 )
+from .diffusion.stub_scheduler import StubScheduler
 from .image import (
     expand_image,
     mask_filter_gaussian_multiply,
@@ -17,9 +20,28 @@ from .image import (
     noise_source_histogram,
     noise_source_normal,
     noise_source_uniform,
+    valid_image,
+)
+from .onnx import OnnxNet, OnnxTensor
+from .params import (
+    Border,
+    ImageParams,
+    Param,
+    Point,
+    Size,
+    StageParams,
+    UpscaleParams,
+)
+from .server import (
+    DeviceParams,
+    DevicePoolExecutor,
+    ModelCache,
+    apply_patch_basicsr,
+    apply_patch_codeformer,
+    apply_patch_facexlib,
+    apply_patches,
+    run_upscale_correction,
 )
-from .params import Border, ImageParams, Param, Point, Size, StageParams, UpscaleParams
-from .server.upscale import run_upscale_correction
 from .utils import (
     ServerContext,
     base_join,
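
These `__init__` changes flatten the package's public surface: the deep import `from .server.upscale import ...` gives way to a name re-exported by the subpackage itself, and the one-line `from .params import ...` is expanded into a sorted multi-line block that lint tools can keep stable. For `from .server import run_upscale_correction` to work, `server/__init__.py` must lift that name from its submodule. A minimal sketch of the pattern; only the re-exported names and the `upscale` module (visible in the old import) are confirmed by this diff, the other submodule names are assumptions:

    # server/__init__.py -- hypothetical layout
    from .upscale import run_upscale_correction               # path shown in the old import
    from .device_pool import DeviceParams, DevicePoolExecutor  # assumed submodule
    from .model_cache import ModelCache                        # assumed submodule
    from .patches import (                                     # assumed submodule
        apply_patch_basicsr,
        apply_patch_codeformer,
        apply_patch_facexlib,
        apply_patches,
    )

    # __all__ keeps wildcard imports and linters in agreement
    __all__ = [
        "DeviceParams",
        "DevicePoolExecutor",
        "ModelCache",
        "apply_patch_basicsr",
        "apply_patch_codeformer",
        "apply_patch_facexlib",
        "apply_patches",
        "run_upscale_correction",
    ]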

File 2 of 2:

@@ -240,15 +240,9 @@ class OnnxStableDiffusionUpscalePipeline(StableDiffusionUpscalePipeline):
                 f" {self.tokenizer.model_max_length} tokens: {removed_text}"
             )
 
-        # if hasattr(text_inputs, "attention_mask"):
-        #     attention_mask = text_inputs.attention_mask.to(device)
-        # else:
-        #     attention_mask = None
-
         # no positional arguments to text_encoder
         text_embeddings = self.text_encoder(
             input_ids=text_input_ids.int().to(device),
-            # attention_mask=attention_mask,
         )
         text_embeddings = text_embeddings[0]
 
@@ -287,14 +281,8 @@ class OnnxStableDiffusionUpscalePipeline(StableDiffusionUpscalePipeline):
                 return_tensors="pt",
             )
 
-            # if hasattr(uncond_input, "attention_mask"):
-            #     attention_mask = uncond_input.attention_mask.to(device)
-            # else:
-            #     attention_mask = None
-
             uncond_embeddings = self.text_encoder(
                 input_ids=uncond_input.input_ids.int().to(device),
-                # attention_mask=attention_mask,
             )
             uncond_embeddings = uncond_embeddings[0]
 
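
Both hunks in this file delete the commented-out attention_mask fallback rather than reviving it: the text encoder is an ONNX model, which is driven by named inputs only (the "no positional arguments" comment), and the exported CLIP encoder evidently takes integer token ids without an attention_mask input, hence the `.int()` cast and the keyword-only call. A minimal sketch of the equivalent call through raw onnxruntime, where the model path, input name, and sequence length are assumptions rather than values from this diff:

    import numpy as np
    import onnxruntime as ort

    # hypothetical path to an exported CLIP text encoder
    session = ort.InferenceSession("text_encoder/model.onnx")

    # tokenizers usually emit int64 ids; many ONNX exports expect int32,
    # which is what the pipeline's .int() cast handles
    token_ids = np.zeros((1, 77), dtype=np.int64)

    # ONNX sessions take a dict of named inputs -- no positional tensors
    outputs = session.run(None, {"input_ids": token_ids.astype(np.int32)})
    text_embeddings = outputs[0]  # first output holds the hidden states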