
fix error when prompt has no LoRA tokens

This commit is contained in:
Sean Sube 2023-03-15 08:35:44 -05:00
parent a7f77a033d
commit 829cedc934
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
1 changed file with 4 additions and 4 deletions


@@ -227,11 +227,11 @@ def load_pipeline(
         )
         # test LoRA blending
-        lora_names, lora_weights = zip(*loras)
-        lora_models = [path.join(server.model_path, "lora", f"{name}.safetensors") for name in lora_names]
-        logger.info("blending base model %s with LoRA models: %s", model, lora_models)
-        if len(lora_models) > 0:
+        if len(loras) > 0:
+            lora_names, lora_weights = zip(*loras)
+            lora_models = [path.join(server.model_path, "lora", f"{name}.safetensors") for name in lora_names]
+            logger.info("blending base model %s with LoRA models: %s", model, lora_models)
             # blend and load text encoder
             blended_text_encoder = merge_lora(path.join(model, "text_encoder", "model.onnx"), lora_models, "text_encoder", lora_weights=lora_weights)
             (text_encoder_model, text_encoder_data) = buffer_external_data_tensors(blended_text_encoder)
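
For context, the error being fixed: when the prompt contains no LoRA tokens, `loras` is empty, so `zip(*loras)` yields nothing and the two-target unpack raises `ValueError: not enough values to unpack (expected 2, got 0)` before the later length check is ever reached. Moving the guard onto the raw `loras` list avoids that. A minimal standalone sketch of the before/after behavior (not code from this repository):

    loras = []  # prompt contained no LoRA tokens

    # old behavior: zip(*[]) produces no values, so the unpack fails
    try:
        lora_names, lora_weights = zip(*loras)
    except ValueError as err:
        print(err)  # not enough values to unpack (expected 2, got 0)

    # new behavior: check the list before unpacking
    if len(loras) > 0:
        lora_names, lora_weights = zip(*loras)
    else:
        print("no LoRA models requested, skipping blend")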