lint(api): start breaking down model loading

Sean Sube 2023-09-23 20:11:05 -05:00
parent 38d3999088
commit 6b6f63564e
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
3 changed files with 280 additions and 237 deletions
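
The diff extracts four component loaders (load_controlnet, load_text_encoders, load_unet, load_vae) from load_pipeline. Below is a minimal sketch of the resulting shape, using only names visible in the hunks; the full load_pipeline signature is cut off by the hunk boundaries, so the surrounding function is elided:

    # sketch: load_pipeline now delegates to per-component helpers and merges
    # their partial kwargs into a single components dict
    components = {}
    unet_type = "unet"
    if params.is_control() and params.control is not None:
        components.update(load_controlnet(server, device, params))
        unet_type = "cnet"
    components.update(
        load_text_encoders(server, device, model, inversions, loras, torch_dtype, params)
    )
    components.update(load_unet(server, device, model, loras, unet_type, params))
    components.update(load_vae(server, device, model, params))
    pipe = pipeline_class.from_pretrained(model, **components)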

View File

@@ -106,6 +106,9 @@ def get_scheduler_name(scheduler: Any) -> Optional[str]:
return None
VAE_COMPONENTS = ["vae", "vae_decoder", "vae_encoder"]
def load_pipeline(
server: ServerContext,
params: ImageParams,
@@ -177,15 +180,109 @@ def load_pipeline(
}
# shared components
text_encoder = None
unet_type = "unet"
# ControlNet component
if params.is_control() and params.control is not None:
cnet_path = path.join(
server.model_path, "control", f"{params.control.name}.onnx"
logger.debug("loading ControlNet components")
control_components = load_controlnet(server, device, params)
components.update(control_components)
unet_type = "cnet"
# Textual Inversion blending
encoder_components = load_text_encoders(
server, device, model, inversions, loras, torch_dtype, params
)
components.update(encoder_components)
unet_components = load_unet(
server, device, model, loras, unet_type, params
)
components.update(unet_components)
vae_components = load_vae(server, device, model, params)
components.update(vae_components)
# additional options for panorama pipeline
if params.is_panorama():
components["window"] = params.tiles // 8
components["stride"] = params.stride // 8
pipeline_class = available_pipelines.get(pipeline, OnnxStableDiffusionPipeline)
logger.debug("loading pretrained SD pipeline for %s", pipeline_class.__name__)
pipe = pipeline_class.from_pretrained(
model,
provider=device.ort_provider(),
sess_options=device.sess_options(),
safety_checker=None,
torch_dtype=torch_dtype,
**components,
)
# make sure XL models are actually being used
if "text_encoder_session" in components:
pipe.text_encoder = ORTModelTextEncoder(
components["text_encoder_session"], pipe
)
if "text_encoder_2_session" in components:
pipe.text_encoder_2 = ORTModelTextEncoder(
components["text_encoder_2_session"], pipe
)
if "unet_session" in components:
# unload old UNet
pipe.unet = None
run_gc([device])
# attach correct one
pipe.unet = ORTModelUnet(components["unet_session"], pipe)
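# Clearing pipe.unet and running GC before binding the replacement releases
# the previous UNet's memory first, so two full UNets never coexist on the
# device while swapping in the "cnet" variant.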
if "vae_decoder_session" in components:
pipe.vae_decoder = ORTModelVaeDecoder(
components["vae_decoder_session"],
pipe,
)
if "vae_encoder_session" in components:
pipe.vae_encoder = ORTModelVaeEncoder(
components["vae_encoder_session"],
pipe,
)
if not server.show_progress:
pipe.set_progress_bar_config(disable=True)
optimize_pipeline(server, pipe)
patch_pipeline(server, pipe, pipeline_class, params)
server.cache.set(ModelTypes.diffusion, pipe_key, pipe)
server.cache.set(ModelTypes.scheduler, scheduler_key, components["scheduler"])
for vae in VAE_COMPONENTS:
if hasattr(pipe, vae):
getattr(pipe, vae).set_tiled(tiled=params.tiled_vae)
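# set_tiled() toggles tiled VAE processing (see the VAEWrapper patch), which
# runs the encoder/decoder over fixed-size tiles rather than the whole latent
# so large images stay within memory limits.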
# update panorama params
if params.is_panorama():
latent_window = params.tiles // 8
latent_stride = params.stride // 8
pipe.set_window_size(latent_window, latent_stride)
for vae in VAE_COMPONENTS:
if hasattr(pipe, vae):
getattr(pipe, vae).set_window_size(latent_window, params.overlap)
run_gc([device])
return pipe
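# Usage sketch; the call site and full signature are not visible in these
# hunks, so the argument order here is a hypothetical illustration:
#
#     pipe = load_pipeline(server, params, pipeline, device, inversions, loras)
#     image = pipe(prompt).images[0]
#
# The cache.set calls above suggest later requests with the same pipe_key can
# reuse the pipeline and scheduler instead of rebuilding the ONNX sessions.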
def load_controlnet(server, device, params):
cnet_path = path.join(server.model_path, "control", f"{params.control.name}.onnx")
logger.debug("loading ControlNet weights from %s", cnet_path)
components = {}
components["controlnet"] = OnnxRuntimeModel(
OnnxRuntimeModel.load_model(
cnet_path,
@@ -193,24 +290,30 @@ def load_pipeline(
sess_options=device.sess_options(),
)
)
return components
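# Each loader returns a partial dict of pipeline kwargs; load_pipeline merges
# them with components.update(...) and finally expands the result into
# pipeline_class.from_pretrained(model, **components).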
unet_type = "cnet"
# Textual Inversion blending
if inversions is not None and len(inversions) > 0:
logger.debug("blending Textual Inversions from %s", inversions)
inversion_names, inversion_weights = zip(*inversions)
inversion_models = [
path.join(server.model_path, "inversion", name)
for name in inversion_names
]
def load_text_encoders(
server, device, model: str, inversions, loras, torch_dtype, params
):
text_encoder = load_model(path.join(model, "text_encoder", ONNX_MODEL))
tokenizer = CLIPTokenizer.from_pretrained(
model,
subfolder="tokenizer",
torch_dtype=torch_dtype,
)
components = {}
components["tokenizer"] = tokenizer
if inversions is not None and len(inversions) > 0:
logger.debug("blending Textual Inversions from %s", inversions)
inversion_names, inversion_weights = zip(*inversions)
inversion_models = [
path.join(server.model_path, "inversion", name) for name in inversion_names
]
text_encoder, tokenizer = blend_textual_inversions(
server,
text_encoder,
@@ -225,8 +328,6 @@ def load_pipeline(
),
)
components["tokenizer"] = tokenizer
# should be pretty small and should not need external data
if loras is None or len(loras) == 0:
# TODO: handle XL encoders
@@ -237,19 +338,14 @@ def load_pipeline(
sess_options=device.sess_options(),
)
)
# LoRA blending
if loras is not None and len(loras) > 0:
else:
# blend and load text encoder
lora_names, lora_weights = zip(*loras)
lora_models = [
path.join(server.model_path, "lora", name) for name in lora_names
]
logger.info(
"blending base model %s with LoRA models: %s", model, lora_models
)
logger.info("blending base model %s with LoRA models: %s", model, lora_models)
# blend and load text encoder
text_encoder = text_encoder or path.join(model, "text_encoder", ONNX_MODEL)
text_encoder = blend_loras(
server,
text_encoder,
@@ -258,9 +354,7 @@ def load_pipeline(
1 if params.is_xl() else None,
params.is_xl(),
)
(text_encoder, text_encoder_data) = buffer_external_data_tensors(
text_encoder
)
(text_encoder, text_encoder_data) = buffer_external_data_tensors(text_encoder)
text_encoder_names, text_encoder_values = zip(*text_encoder_data)
text_encoder_opts = device.sess_options(cache=False)
text_encoder_opts.add_external_initializers(
@@ -311,8 +405,22 @@ def load_pipeline(
text_encoder_2_session._model_path = path.join(model, "text_encoder_2")
components["text_encoder_2_session"] = text_encoder_2_session
# blend and load unet
return components
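# The external-initializer pattern above keeps blended weights in memory:
# buffer_external_data_tensors() strips the large tensors from the model proto
# and SessionOptions.add_external_initializers() re-attaches them when the
# session is created, avoiding a temporary ONNX file with embedded weights.
# A minimal sketch of that pattern, assuming numpy weights wrapped as
# OrtValues; the initializer name and shape are hypothetical:

import numpy as np
import onnxruntime as ort

names = ["token_embedding.weight"]  # hypothetical initializer name
values = [ort.OrtValue.ortvalue_from_numpy(np.zeros((49408, 768), np.float32))]

opts = ort.SessionOptions()
opts.add_external_initializers(names, values)  # re-attach the stripped tensors
# session = ort.InferenceSession(model_bytes, sess_options=opts)
# (model_bytes would be the stripped model proto, serialized to bytes)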
def load_unet(server, device, model, loras, unet_type, params):
components = {}
unet = path.join(model, unet_type, ONNX_MODEL)
# LoRA blending
if loras is not None and len(loras) > 0:
lora_names, lora_weights = zip(*loras)
lora_models = [
path.join(server.model_path, "lora", name) for name in lora_names
]
logger.info("blending base model %s with LoRA models: %s", model, lora_models)
# blend and load unet
blended_unet = blend_loras(
server,
unet,
@@ -354,11 +462,16 @@ def load_pipeline(
)
)
return components
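# LoRA weights are merged into the base ONNX graph ahead of time rather than
# applied per-step at inference: blend_loras() rewrites the UNet initializers,
# and the blended model is then loaded through a fresh session, presumably via
# the same external-initializer pattern as the text encoder (the middle of
# this hunk is elided).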
def load_vae(server, device, model, params):
# one or more VAE models need to be loaded
vae = path.join(model, "vae", ONNX_MODEL)
vae_decoder = path.join(model, "vae_decoder", ONNX_MODEL)
vae_encoder = path.join(model, "vae_encoder", ONNX_MODEL)
components = {}
if not params.is_xl() and path.exists(vae):
logger.debug("loading VAE from %s", vae)
components["vae"] = OnnxRuntimeModel(
@@ -378,7 +491,7 @@ def load_pipeline(
)
components[
"vae_decoder_session"
]._model_path = vae_decoder # "#\\not a real path on any system"
]._model_path = vae_decoder
logger.debug("loading VAE encoder from %s", vae_encoder)
components["vae_encoder_session"] = OnnxRuntimeModel.load_model(
@@ -388,7 +501,7 @@ def load_pipeline(
)
components[
"vae_encoder_session"
]._model_path = vae_encoder # "#\\not a real path on any system"
]._model_path = vae_encoder
else:
logger.debug("loading VAE decoder from %s", vae_decoder)
@@ -409,79 +522,7 @@ def load_pipeline(
)
)
# additional options for panorama pipeline
if params.is_panorama():
components["window"] = params.tiles // 8
components["stride"] = params.stride // 8
pipeline_class = available_pipelines.get(pipeline, OnnxStableDiffusionPipeline)
logger.debug("loading pretrained SD pipeline for %s", pipeline_class.__name__)
pipe = pipeline_class.from_pretrained(
model,
provider=device.ort_provider(),
sess_options=device.sess_options(),
safety_checker=None,
torch_dtype=torch_dtype,
**components,
)
# make sure XL models are actually being used
if "text_encoder_session" in components:
pipe.text_encoder = ORTModelTextEncoder(text_encoder_session, text_encoder)
if "text_encoder_2_session" in components:
pipe.text_encoder_2 = ORTModelTextEncoder(
text_encoder_2_session, text_encoder_2
)
if "unet_session" in components:
# unload old UNet first
pipe.unet = None
run_gc([device])
# load correct one
pipe.unet = ORTModelUnet(unet_session, unet_model)
if "vae_decoder_session" in components:
pipe.vae_decoder = ORTModelVaeDecoder(
components["vae_decoder_session"],
pipe,
)
if "vae_encoder_session" in components:
pipe.vae_encoder = ORTModelVaeEncoder(
components["vae_encoder_session"],
pipe,
)
if not server.show_progress:
pipe.set_progress_bar_config(disable=True)
optimize_pipeline(server, pipe)
patch_pipeline(server, pipe, pipeline_class, params)
server.cache.set(ModelTypes.diffusion, pipe_key, pipe)
server.cache.set(ModelTypes.scheduler, scheduler_key, components["scheduler"])
if hasattr(pipe, "vae_decoder"):
pipe.vae_decoder.set_tiled(tiled=params.tiled_vae)
if hasattr(pipe, "vae_encoder"):
pipe.vae_encoder.set_tiled(tiled=params.tiled_vae)
# update panorama params
if params.is_panorama():
latent_window = params.tiles // 8
latent_stride = params.stride // 8
pipe.set_window_size(latent_window, latent_stride)
if hasattr(pipe, "vae_decoder"):
pipe.vae_decoder.set_window_size(latent_window, params.overlap)
if hasattr(pipe, "vae_encoder"):
pipe.vae_encoder.set_window_size(latent_window, params.overlap)
run_gc([device])
return pipe
return components
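# load_vae covers two layouts: non-XL models may ship a single combined "vae"
# model, while split models provide separate vae_decoder / vae_encoder
# sessions. The explicit _model_path assignments appear to exist to satisfy
# the ORTModelVae* wrappers, which look for a model path on the session object
# (the removed "not a real path on any system" comments point the same way).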
def optimize_pipeline(

View File

@@ -11,7 +11,6 @@ from onnx_web.diffusers.load import (
)
from onnx_web.diffusers.patches.unet import UNetWrapper
from onnx_web.diffusers.patches.vae import VAEWrapper
from onnx_web.diffusers.utils import expand_prompt
from onnx_web.params import ImageParams
from onnx_web.server.context import ServerContext
from tests.mocks import MockPipeline

View File

@@ -26,6 +26,7 @@
"bokeh",
"Civitai",
"ckpt",
"cnet",
"codebook",
"codeformer",
"controlnet",
@@ -53,6 +54,8 @@
"KDPM",
"Knollingcase",
"Lanczos",
"loha",
"loras",
"Multistep",
"ndarray",
"numpy",