From 829cedc934bb526b2663024ca951fbb42bf7e380 Mon Sep 17 00:00:00 2001
From: Sean Sube
Date: Wed, 15 Mar 2023 08:35:44 -0500
Subject: [PATCH] fix error when prompt has no LoRA tokens

---
 api/onnx_web/diffusers/load.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/api/onnx_web/diffusers/load.py b/api/onnx_web/diffusers/load.py
index 26316c5f..f9cd218b 100644
--- a/api/onnx_web/diffusers/load.py
+++ b/api/onnx_web/diffusers/load.py
@@ -227,11 +227,11 @@ def load_pipeline(
     )
 
     # test LoRA blending
-    lora_names, lora_weights = zip(*loras)
-    lora_models = [path.join(server.model_path, "lora", f"{name}.safetensors") for name in lora_names]
-    logger.info("blending base model %s with LoRA models: %s", model, lora_models)
+    if len(loras) > 0:
+        lora_names, lora_weights = zip(*loras)
+        lora_models = [path.join(server.model_path, "lora", f"{name}.safetensors") for name in lora_names]
+        logger.info("blending base model %s with LoRA models: %s", model, lora_models)
 
-    if len(lora_models) > 0:
         # blend and load text encoder
         blended_text_encoder = merge_lora(path.join(model, "text_encoder", "model.onnx"), lora_models, "text_encoder", lora_weights=lora_weights)
         (text_encoder_model, text_encoder_data) = buffer_external_data_tensors(blended_text_encoder)
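
Note: the guard matters because zip(*loras) on an empty list is just zip(), which
yields nothing, so the two-target unpack raised ValueError whenever the prompt
contained no LoRA tokens. Below is a minimal standalone sketch of the failure
mode and the fix; `loras` here is a hypothetical stand-in for the parsed
(name, weight) pairs, not the actual value from load.py.

    # Repro sketch: prompt contained no <lora:name:weight> tokens,
    # so the parsed list of (name, weight) pairs is empty.
    loras = []

    try:
        # Before the fix: zip(*[]) produces an empty iterator, so the
        # unpack fails with "not enough values to unpack (expected 2, got 0)".
        lora_names, lora_weights = zip(*loras)
    except ValueError as err:
        print(f"unguarded unpack raises: {err}")

    # After the fix: the whole blending branch is skipped for an empty list.
    if len(loras) > 0:
        lora_names, lora_weights = zip(*loras)
        print("blending with", list(lora_names))
    else:
        print("no LoRA tokens in prompt, skipping blend")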