from argparse import ArgumentParser
from logging import getLogger
from os import path
from typing import Dict, List, Literal, Tuple, Union

import numpy as np
import torch
from onnx import ModelProto, load, numpy_helper
from onnx.checker import check_model
from onnx.external_data_helper import (
    convert_model_to_external_data,
    set_external_data,
    write_external_data_tensors,
)
from onnxruntime import InferenceSession, OrtValue, SessionOptions

from ...server.context import ServerContext
from ..utils import ConversionContext, load_tensor

logger = getLogger(__name__)


def sum_weights(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    logger.trace("summing weights with shapes: %s + %s", a.shape, b.shape)

    # if the ranks match, simply add them
    if len(a.shape) == len(b.shape):
        return a + b

    # get the kernel size from the tensor with the higher rank
    if len(a.shape) > len(b.shape):
        kernel = a.shape[-2:]
        hr = a
        lr = b
    else:
        kernel = b.shape[-2:]
        hr = b
        lr = a

    # promote the lower-rank tensor to 4D before adding it to a 1x1 kernel
    if kernel == (1, 1):
        lr = np.expand_dims(lr, axis=(2, 3))

    return hr + lr

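# A minimal usage sketch (hypothetical shapes): sum_weights(np.zeros((320, 320, 1, 1)),
# np.zeros((320, 320))) expands the 2D tensor to (320, 320, 1, 1) before adding, so the
# result keeps the 4D conv shape.
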

def buffer_external_data_tensors(
    model: ModelProto,
) -> Tuple[ModelProto, List[Tuple[str, OrtValue]]]:
    external_data = []
    for tensor in model.graph.initializer:
        name = tensor.name

        logger.trace("externalizing tensor: %s", name)
        if tensor.HasField("raw_data"):
            npt = numpy_helper.to_array(tensor)
            orv = OrtValue.ortvalue_from_numpy(npt)
            external_data.append((name, orv))
            # mimic set_external_data
            set_external_data(tensor, location="foo.bin")
            tensor.name = name
            tensor.ClearField("raw_data")

    return (model, external_data)

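# The (name, OrtValue) pairs returned here are meant to be registered through
# SessionOptions.add_external_initializers before creating an InferenceSession, as the
# __main__ block below does; "foo.bin" is only a placeholder location string.
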

def fix_initializer_name(key: str):
    # lora_unet_up_blocks_3_attentions_2_transformer_blocks_0_attn2_to_out_0.lora_down.weight
    # lora, unet, up_block.3.attentions.2.transformer_blocks.0.attn2.to_out.0
    return key.replace(".", "_")


def fix_node_name(key: str):
    fixed_name = fix_initializer_name(key.replace("/", "_"))
    if fixed_name[0] == "_":
        return fixed_name[1:]
    else:
        return fixed_name

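# Illustrative example (made-up node name): fix_node_name("/up_blocks.3/attentions.2/proj_out/MatMul")
# returns "up_blocks_3_attentions_2_proj_out_MatMul": slashes and dots become underscores
# and any leading underscore is trimmed.
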

def blend_loras(
    _conversion: ServerContext,
    base_name: Union[str, ModelProto],
    loras: List[Tuple[str, float]],
    model_type: Literal["text_encoder", "unet"],
):
    # always load to CPU for blending
    device = torch.device("cpu")
    dtype = torch.float32

    base_model = base_name if isinstance(base_name, ModelProto) else load(base_name)
    lora_models = [load_tensor(name, map_location=device) for name, _weight in loras]

    if model_type == "text_encoder":
        lora_prefix = "lora_te_"
    else:
        lora_prefix = f"lora_{model_type}_"

    blended: Dict[str, np.ndarray] = {}
    for (lora_name, lora_weight), lora_model in zip(loras, lora_models):
        logger.debug("blending LoRA from %s with weight of %s", lora_name, lora_weight)
        if lora_model is None:
            logger.warning("unable to load tensor for LoRA %s", lora_name)
            continue

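        # The inner loop below collects per-module weight deltas keyed by the stripped
        # module name: LoHA entries carry hada_* tensors, while plain LoRA/LoCon entries
        # carry lora_down/lora_up (and optionally lora_mid for conv layers).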
        for key in lora_model.keys():
            if ".hada_w1_a" in key and lora_prefix in key:
                # LoHA
                base_key = key[: key.index(".hada_w1_a")].replace(lora_prefix, "")

                t1_key = key.replace("hada_w1_a", "hada_t1")
                t2_key = key.replace("hada_w1_a", "hada_t2")
                w1b_key = key.replace("hada_w1_a", "hada_w1_b")
                w2a_key = key.replace("hada_w1_a", "hada_w2_a")
                w2b_key = key.replace("hada_w1_a", "hada_w2_b")
                alpha_key = key[: key.index("hada_w1_a")] + "alpha"
                logger.trace(
                    "blending weights for LoHA keys: %s, %s, %s, %s, %s",
                    key,
                    w1b_key,
                    w2a_key,
                    w2b_key,
                    alpha_key,
                )

                w1a_weight = lora_model[key].to(dtype=dtype)
                w1b_weight = lora_model[w1b_key].to(dtype=dtype)
                w2a_weight = lora_model[w2a_key].to(dtype=dtype)
                w2b_weight = lora_model[w2b_key].to(dtype=dtype)

                t1_weight = lora_model.get(t1_key, None)
                t2_weight = lora_model.get(t2_key, None)

                dim = w1b_weight.size()[0]
                alpha = lora_model.get(alpha_key, dim)
                if not isinstance(alpha, int):
                    alpha = alpha.to(dtype).numpy()

                if t1_weight is not None and t2_weight is not None:
                    t1_weight = t1_weight.to(dtype=dtype)
                    t2_weight = t2_weight.to(dtype=dtype)

                    logger.trace(
                        "composing weights for LoHA node: (%s, %s, %s) * (%s, %s, %s)",
                        t1_weight.shape,
                        w1a_weight.shape,
                        w1b_weight.shape,
                        t2_weight.shape,
                        w2a_weight.shape,
                        w2b_weight.shape,
                    )
                    # compose each 4D delta from the core tensor and its two factor matrices
                    weights_1 = torch.einsum(
                        "i j k l, j r, i p -> p r k l",
                        t1_weight,
                        w1b_weight,
                        w1a_weight,
                    )
                    weights_2 = torch.einsum(
                        "i j k l, j r, i p -> p r k l",
                        t2_weight,
                        w2b_weight,
                        w2a_weight,
                    )
                    weights = weights_1 * weights_2
                    np_weights = weights.numpy() * (alpha / dim)
                else:
                    logger.trace(
                        "blending weights for LoHA node: (%s @ %s) * (%s @ %s)",
                        w1a_weight.shape,
                        w1b_weight.shape,
                        w2a_weight.shape,
                        w2b_weight.shape,
                    )
                    weights = (w1a_weight @ w1b_weight) * (w2a_weight @ w2b_weight)
                    np_weights = weights.numpy() * (alpha / dim)

                np_weights *= lora_weight
                if base_key in blended:
                    logger.trace(
                        "summing LoHA weights: %s + %s",
                        blended[base_key].shape,
                        np_weights.shape,
                    )
                    blended[base_key] = sum_weights(blended[base_key], np_weights)
                else:
                    blended[base_key] = np_weights
elif ".lora_down" in key and lora_prefix in key:
|
|
|
|
# LoRA or LoCON
|
2023-03-15 04:32:47 +00:00
|
|
|
base_key = key[: key.index(".lora_down")].replace(lora_prefix, "")
|
2023-03-14 23:00:26 +00:00
|
|
|
|
2023-04-10 04:10:14 +00:00
|
|
|
mid_key = key.replace("lora_down", "lora_mid")
|
2023-03-14 23:00:26 +00:00
|
|
|
up_key = key.replace("lora_down", "lora_up")
|
|
|
|
alpha_key = key[: key.index("lora_down")] + "alpha"
|
2023-04-01 20:39:22 +00:00
|
|
|
logger.trace(
|
2023-04-07 23:50:12 +00:00
|
|
|
"blending weights for LoRA keys: %s, %s, %s", key, up_key, alpha_key
|
2023-03-15 04:32:47 +00:00
|
|
|
)
|
2023-03-14 23:00:26 +00:00
|
|
|
|
2023-03-22 02:45:27 +00:00
|
|
|
down_weight = lora_model[key].to(dtype=dtype)
|
|
|
|
up_weight = lora_model[up_key].to(dtype=dtype)
|
2023-03-14 23:00:26 +00:00
|
|
|
|
2023-04-10 04:10:14 +00:00
|
|
|
mid_weight = None
|
|
|
|
if mid_key in lora_model:
|
|
|
|
mid_weight = lora_model[mid_key].to(dtype=dtype)
|
2023-04-07 23:50:12 +00:00
|
|
|
|
2023-03-14 23:00:26 +00:00
|
|
|
dim = down_weight.size()[0]
|
2023-04-10 04:10:14 +00:00
|
|
|
alpha = lora_model.get(alpha_key, dim)
|
2023-04-10 07:58:47 +00:00
|
|
|
|
2023-04-10 04:10:14 +00:00
|
|
|
if not isinstance(alpha, int):
|
|
|
|
alpha = alpha.to(dtype).numpy()
|
2023-03-14 23:00:26 +00:00
|
|
|
|
2023-04-10 02:38:04 +00:00
|
|
|
kernel = down_weight.shape[-2:]
|
|
|
|
if mid_weight is not None:
|
|
|
|
kernel = mid_weight.shape[-2:]
|
|
|
|
|
2023-04-10 03:12:17 +00:00
|
|
|
if len(down_weight.size()) == 2:
|
|
|
|
# blend for nn.Linear
|
|
|
|
logger.trace(
|
|
|
|
"blending weights for Linear node: (%s @ %s) * %s",
|
|
|
|
down_weight.shape,
|
|
|
|
up_weight.shape,
|
|
|
|
alpha,
|
|
|
|
)
|
|
|
|
weights = up_weight @ down_weight
|
|
|
|
np_weights = weights.numpy() * (alpha / dim)
|
|
|
|
elif len(down_weight.size()) == 4 and kernel == (
|
|
|
|
1,
|
|
|
|
1,
|
|
|
|
):
|
|
|
|
# blend for nn.Conv2d 1x1
|
|
|
|
logger.trace(
|
|
|
|
"blending weights for Conv 1x1 node: %s, %s, %s",
|
|
|
|
down_weight.shape,
|
|
|
|
up_weight.shape,
|
|
|
|
alpha,
|
|
|
|
)
|
|
|
|
weights = (
|
|
|
|
(
|
|
|
|
up_weight.squeeze(3).squeeze(2)
|
|
|
|
@ down_weight.squeeze(3).squeeze(2)
|
|
|
|
)
|
|
|
|
.unsqueeze(2)
|
|
|
|
.unsqueeze(3)
|
|
|
|
)
|
|
|
|
np_weights = weights.numpy() * (alpha / dim)
|
|
|
|
elif len(down_weight.size()) == 4 and kernel == (
|
|
|
|
3,
|
|
|
|
3,
|
|
|
|
):
|
|
|
|
if mid_weight is not None:
|
|
|
|
# blend for nn.Conv2d 3x3 with CP decomp
|
2023-03-17 00:37:25 +00:00
|
|
|
logger.trace(
|
2023-04-10 03:12:17 +00:00
|
|
|
"composing weights for Conv 3x3 node: %s, %s, %s, %s",
|
2023-03-15 04:32:47 +00:00
|
|
|
down_weight.shape,
|
|
|
|
up_weight.shape,
|
2023-04-10 03:12:17 +00:00
|
|
|
mid_weight.shape,
|
2023-03-15 04:32:47 +00:00
|
|
|
alpha,
|
|
|
|
)
|
2023-04-10 03:12:17 +00:00
|
|
|
weights = torch.zeros(
|
|
|
|
(up_weight.shape[0], down_weight.shape[1], *kernel)
|
|
|
|
)
|
2023-04-21 03:10:49 +00:00
|
|
|
|
2023-04-10 03:12:17 +00:00
|
|
|
for w in range(kernel[0]):
|
|
|
|
for h in range(kernel[1]):
|
|
|
|
weights[:, :, w, h] = (
|
2023-04-10 03:18:06 +00:00
|
|
|
up_weight.squeeze(3).squeeze(2)
|
|
|
|
@ mid_weight[:, :, w, h]
|
2023-04-10 03:31:43 +00:00
|
|
|
) @ down_weight.squeeze(3).squeeze(2)
|
2023-04-10 03:12:17 +00:00
|
|
|
|
2023-03-15 04:32:47 +00:00
|
|
|
np_weights = weights.numpy() * (alpha / dim)
|
2023-04-10 03:12:17 +00:00
|
|
|
else:
|
|
|
|
# blend for nn.Conv2d 3x3
|
2023-03-17 00:37:25 +00:00
|
|
|
logger.trace(
|
2023-04-10 03:12:17 +00:00
|
|
|
"blending weights for Conv 3x3 node: %s, %s, %s",
|
2023-03-15 04:32:47 +00:00
|
|
|
down_weight.shape,
|
|
|
|
up_weight.shape,
|
|
|
|
alpha,
|
|
|
|
)
|
2023-04-21 03:10:49 +00:00
|
|
|
weights = torch.zeros(
|
|
|
|
(up_weight.shape[0], down_weight.shape[1], *kernel)
|
|
|
|
)
|
|
|
|
|
|
|
|
for w in range(kernel[0]):
|
|
|
|
for h in range(kernel[1]):
|
|
|
|
weights[:, :, w, h] = up_weight.squeeze(3).squeeze(
|
|
|
|
2
|
|
|
|
) @ down_weight.squeeze(3).squeeze(2)
|
|
|
|
|
2023-03-15 04:32:47 +00:00
|
|
|
np_weights = weights.numpy() * (alpha / dim)
|
2023-04-10 03:12:17 +00:00
|
|
|
else:
|
|
|
|
logger.warning(
|
|
|
|
"unknown LoRA node type at %s: %s",
|
|
|
|
base_key,
|
|
|
|
up_weight.shape[-2:],
|
|
|
|
)
|
|
|
|
continue
|
2023-03-14 23:00:26 +00:00
|
|
|
|
2023-04-10 03:12:17 +00:00
|
|
|
np_weights *= lora_weight
|
|
|
|
if base_key in blended:
|
2023-04-21 02:26:16 +00:00
|
|
|
logger.trace(
|
|
|
|
"summing weights: %s + %s",
|
|
|
|
blended[base_key].shape,
|
|
|
|
np_weights.shape,
|
|
|
|
)
|
2023-04-21 01:06:43 +00:00
|
|
|
blended[base_key] = sum_weights(blended[base_key], np_weights)
|
2023-04-10 03:12:17 +00:00
|
|
|
else:
|
|
|
|
blended[base_key] = np_weights
|
2023-03-14 23:00:26 +00:00
|
|
|
|
    logger.trace(
        "updating %s of %s initializers: %s",
        len(blended.keys()),
        len(base_model.graph.initializer),
        list(blended.keys()),
    )

    fixed_initializer_names = [
        fix_initializer_name(node.name) for node in base_model.graph.initializer
    ]
    logger.trace("fixed initializer names: %s", fixed_initializer_names)

    fixed_node_names = [fix_node_name(node.name) for node in base_model.graph.node]
    logger.trace("fixed node names: %s", fixed_node_names)

    unmatched_keys = []
    for base_key, weights in blended.items():
        conv_key = base_key + "_Conv"
        gemm_key = base_key + "_Gemm"
        matmul_key = base_key + "_MatMul"

        logger.trace(
            "key %s has conv: %s, matmul: %s",
            base_key,
            conv_key in fixed_node_names,
            matmul_key in fixed_node_names,
        )

        if conv_key in fixed_node_names or gemm_key in fixed_node_names:
            if conv_key in fixed_node_names:
                conv_idx = fixed_node_names.index(conv_key)
                conv_node = base_model.graph.node[conv_idx]
                logger.trace(
                    "found conv node %s using %s", conv_node.name, conv_node.input
                )
            else:
                conv_idx = fixed_node_names.index(gemm_key)
                conv_node = base_model.graph.node[conv_idx]
                logger.trace(
                    "found gemm node %s using %s", conv_node.name, conv_node.input
                )

            # find the weight initializer feeding this node
            weight_name = [n for n in conv_node.input if ".weight" in n][0]
            weight_name = fix_initializer_name(weight_name)

            weight_idx = fixed_initializer_names.index(weight_name)
            weight_node = base_model.graph.initializer[weight_idx]
            logger.trace("found weight initializer: %s", weight_node.name)

            # blend the LoRA delta into the original weights
            onnx_weights = numpy_helper.to_array(weight_node)
            logger.trace(
                "found blended weights for conv: %s, %s",
                onnx_weights.shape,
                weights.shape,
            )

            if onnx_weights.shape[-2:] == (1, 1):
                if weights.shape[-2:] == (1, 1):
                    blended_weights = onnx_weights.squeeze((3, 2)) + weights.squeeze(
                        (3, 2)
                    )
                else:
                    blended_weights = onnx_weights.squeeze((3, 2)) + weights

                blended_weights = np.expand_dims(blended_weights, (2, 3))
            else:
                if onnx_weights.shape != weights.shape:
                    logger.warning(
                        "reshaping weights for mismatched Conv node: %s, %s",
                        onnx_weights.shape,
                        weights.shape,
                    )
                    blended_weights = onnx_weights + weights.reshape(onnx_weights.shape)
                else:
                    blended_weights = onnx_weights + weights

            logger.trace("blended weight shape: %s", blended_weights.shape)

            # replace the original initializer
            updated_node = numpy_helper.from_array(
                blended_weights.astype(onnx_weights.dtype), weight_node.name
            )
            del base_model.graph.initializer[weight_idx]
            base_model.graph.initializer.insert(weight_idx, updated_node)
        elif matmul_key in fixed_node_names:
            weight_idx = fixed_node_names.index(matmul_key)
            weight_node = base_model.graph.node[weight_idx]
            logger.trace(
                "found matmul node %s using %s", weight_node.name, weight_node.input
            )

            # find the MatMul initializer
            matmul_name = [n for n in weight_node.input if "MatMul" in n][0]

            matmul_idx = fixed_initializer_names.index(matmul_name)
            matmul_node = base_model.graph.initializer[matmul_idx]
            logger.trace("found matmul initializer: %s", matmul_node.name)

            # blend the LoRA delta into the original weights
            onnx_weights = numpy_helper.to_array(matmul_node)
            logger.trace(
                "found blended weights for matmul: %s, %s",
                weights.shape,
                onnx_weights.shape,
            )

            # transpose the delta to match the ONNX MatMul initializer layout
            blended_weights = onnx_weights + weights.transpose()
            logger.trace("blended weight shape: %s", blended_weights.shape)

            # replace the original initializer
            updated_node = numpy_helper.from_array(
                blended_weights.astype(onnx_weights.dtype), matmul_node.name
            )
            del base_model.graph.initializer[matmul_idx]
            base_model.graph.initializer.insert(matmul_idx, updated_node)
        else:
            unmatched_keys.append(base_key)

    logger.debug(
        "node counts: %s -> %s, %s -> %s",
        len(fixed_initializer_names),
        len(base_model.graph.initializer),
        len(fixed_node_names),
        len(base_model.graph.node),
    )

    if len(unmatched_keys) > 0:
        logger.warning("could not find nodes for some keys: %s", unmatched_keys)

    return base_model

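# Example invocation (paths and module name are illustrative, not taken from project docs):
#   python -m onnx_web.convert.diffusion.lora --base ./unet/model.onnx --dest ./out \
#       --type unet --lora_models ./lora/a.safetensors ./lora/b.safetensors --lora_weights 0.6 0.4
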

if __name__ == "__main__":
    context = ConversionContext.from_environ()
    parser = ArgumentParser()
    parser.add_argument("--base", type=str)
    parser.add_argument("--dest", type=str)
    parser.add_argument("--type", type=str, choices=["text_encoder", "unet"])
    parser.add_argument("--lora_models", nargs="+", type=str, default=[])
    parser.add_argument("--lora_weights", nargs="+", type=float, default=[])

    args = parser.parse_args()
    logger.info(
        "merging %s into %s with weights: %s",
        args.lora_models,
        args.base,
        args.lora_weights,
    )

    # fill in any missing weights with an equal share
    default_weight = 1.0 / len(args.lora_models)
    while len(args.lora_weights) < len(args.lora_models):
        args.lora_weights.append(default_weight)

    blend_model = blend_loras(
        context,
        args.base,
        list(zip(args.lora_models, args.lora_weights)),
        args.type,
    )
    if args.dest is None or args.dest == "" or args.dest == ":load":
        # convert to external data and load the blended model into memory
        (bare_model, external_data) = buffer_external_data_tensors(blend_model)
        logger.info("saved external data for %s nodes", len(external_data))

        external_names, external_values = zip(*external_data)
        opts = SessionOptions()
        opts.add_external_initializers(list(external_names), list(external_values))
        sess = InferenceSession(
            bare_model.SerializeToString(),
            sess_options=opts,
            providers=["CPUExecutionProvider"],
        )
        logger.info(
            "successfully loaded blended model: %s", [i.name for i in sess.get_inputs()]
        )
    else:
        convert_model_to_external_data(
            blend_model, all_tensors_to_one_file=True, location=f"lora-{args.type}.pb"
        )
        bare_model = write_external_data_tensors(blend_model, args.dest)
        dest_file = path.join(args.dest, f"lora-{args.type}.onnx")

        with open(dest_file, "w+b") as model_file:
            model_file.write(bare_model.SerializeToString())

        logger.info("successfully saved blended model: %s", dest_file)

        check_model(dest_file)

        logger.info("checked blended model")