
use conversion dest path when applying additional nets

Sean Sube 2023-03-18 11:34:05 -05:00
parent 1f6105a8fe
commit f465120cad
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
2 changed files with 7 additions and 14 deletions

View File

@@ -248,13 +248,13 @@ def convert_models(ctx: ConversionContext, args, models: Models):
         converted = False
         if model_format in model_formats_original:
-            converted, _dest = convert_diffusion_original(
+            converted, dest = convert_diffusion_original(
                 ctx,
                 model,
                 source,
             )
         else:
-            converted, _dest = convert_diffusion_diffusers(
+            converted, dest = convert_diffusion_diffusers(
                 ctx,
                 model,
                 source,
@@ -272,8 +272,7 @@ def convert_models(ctx: ConversionContext, args, models: Models):
             if "text_encoder" not in blend_models:
                 blend_models["text_encoder"] = load_model(
                     path.join(
-                        ctx.model_path,
-                        model,
+                        dest,
                         "text_encoder",
                         "model.onnx",
                     )
@@ -283,7 +282,7 @@ def convert_models(ctx: ConversionContext, args, models: Models):
                 blend_models[
                     "tokenizer"
                 ] = CLIPTokenizer.from_pretrained(
-                    path.join(ctx.model_path, model),
+                    dest,
                     subfolder="tokenizer",
                 )
@@ -292,7 +291,7 @@ def convert_models(ctx: ConversionContext, args, models: Models):
             inversion_format = inversion.get("format", None)
             inversion_source = fetch_model(
                 ctx,
-                f"{name}-inversion-{inversion_name}",
+                inversion_name,
                 inversion_source,
                 dest=inversion_dest,
             )
@@ -317,8 +316,7 @@ def convert_models(ctx: ConversionContext, args, models: Models):
             if "text_encoder" not in blend_models:
                 blend_models["text_encoder"] = load_model(
                     path.join(
-                        ctx.model_path,
-                        model,
+                        dest,
                         "text_encoder",
                         "model.onnx",
                     )
@@ -326,9 +324,7 @@ def convert_models(ctx: ConversionContext, args, models: Models):
             if "unet" not in blend_models:
                 blend_models["text_encoder"] = load_model(
-                    path.join(
-                        ctx.model_path, model, "unet", "model.onnx"
-                    )
+                    path.join(dest, "unet", "model.onnx")
                 )
             # load models if not loaded yet

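The hunks above replace paths assembled from ctx.model_path and the model name with the dest value returned by the conversion helpers, so the additional networks are blended into whatever directory the conversion actually wrote. A minimal sketch of that pattern follows; convert_and_blend and the stub bodies are hypothetical stand-ins for the project's own functions, and only the (converted, dest) return shape, the path.join(dest, ...) calls, and CLIPTokenizer.from_pretrained(dest, subfolder="tokenizer") are taken from the diff.

from os import path

import onnx
from transformers import CLIPTokenizer


def convert_diffusion_diffusers(ctx, model, source):
    # Stand-in for the converter in the diff above: write the ONNX pipeline
    # to an output directory and report that directory back as dest
    # (assumed return shape: (converted, dest)).
    dest = path.join(ctx.model_path, model)
    # ... conversion work elided ...
    return True, dest


def load_model(model_path):
    # Stand-in for the project's ONNX loader.
    return onnx.load(model_path)


def convert_and_blend(ctx, model, source, blend_models):
    # Hypothetical driver showing the pattern after this commit: blending
    # reuses the dest returned by the converter instead of re-deriving
    # path.join(ctx.model_path, model, ...).
    converted, dest = convert_diffusion_diffusers(ctx, model, source)

    if "text_encoder" not in blend_models:
        blend_models["text_encoder"] = load_model(
            path.join(dest, "text_encoder", "model.onnx")
        )

    if "tokenizer" not in blend_models:
        blend_models["tokenizer"] = CLIPTokenizer.from_pretrained(
            dest, subfolder="tokenizer"
        )

    return converted

Keeping the blend path tied to the converter's reported output directory avoids the two drifting apart when the converter changes where it writes, for example between original-format checkpoints and diffusers directories.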
View File

@@ -62,10 +62,7 @@ def blend_loras(
     model_type: Literal["text_encoder", "unet"],
 ):
     base_model = base_name if isinstance(base_name, ModelProto) else load(base_name)
-    lora_count = len(loras)
     lora_models = [load_file(name) for name, _weight in loras]
-    lora_weights = lora_weights or (np.ones((lora_count)) / lora_count)
     if model_type == "text_encoder":
         lora_prefix = "lora_te_"