
fix(api): load tokenizer with textual inversions

Sean Sube 2023-03-01 19:08:31 -06:00
parent 21fc7c5968
commit 4b77a00ca7
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
2 changed files with 6 additions and 0 deletions


@@ -74,6 +74,8 @@ def convert_diffusion_textual_inversion(
         return_tensors="pt",
     )
 
+    tokenizer.save_pretrained(path.join(dest_path, "tokenizer"))
+
     export(
         text_encoder,
         # casting to torch.int32 until the CLIP fix is released: https://github.com/huggingface/transformers/pull/18515/files
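For context, a rough sketch of the conversion step this saved tokenizer belongs to: a textual inversion embedding is loaded, its placeholder token is registered with the CLIP tokenizer, and the tokenizer is then written next to the converted text encoder so the extra token survives the round trip. The base model, token name, embedding file, and output directory below are hypothetical, and the actual onnx-web conversion code may differ in detail.

    # Sketch only: hypothetical paths and token name.
    from os import path

    import torch
    from transformers import CLIPTextModel, CLIPTokenizer

    base_model = "openai/clip-vit-large-patch14"   # hypothetical base encoder
    embeds_file = "learned_embeds.bin"             # hypothetical embedding file
    token = "<example-concept>"                    # hypothetical placeholder token
    dest_path = "/tmp/converted-inversion"         # hypothetical output directory

    tokenizer = CLIPTokenizer.from_pretrained(base_model)
    text_encoder = CLIPTextModel.from_pretrained(base_model)

    # the embedding file maps the trained placeholder token to a learned vector
    loaded = torch.load(embeds_file, map_location="cpu")
    trained_token = list(loaded.keys())[0]
    embeds = loaded[trained_token]

    # register the token, grow the embedding table, and write the learned vector
    tokenizer.add_tokens(token)
    text_encoder.resize_token_embeddings(len(tokenizer))
    token_id = tokenizer.convert_tokens_to_ids(token)
    text_encoder.get_input_embeddings().weight.data[token_id] = embeds

    # saving the tokenizer alongside the converted text encoder is what this
    # commit adds, so the extra token is available again at load time
    tokenizer.save_pretrained(path.join(dest_path, "tokenizer"))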


@@ -21,6 +21,7 @@ from diffusers import (
     PNDMScheduler,
     StableDiffusionPipeline,
 )
+from transformers import CLIPTokenizer
 
 try:
     from diffusers import DEISMultistepScheduler
@@ -200,6 +201,9 @@ def load_pipeline(
             provider=device.ort_provider(),
             sess_options=device.sess_options(),
         )
+        components["tokenizer"] = CLIPTokenizer.from_pretrained(
+            path.join(inversion, "tokenizer"),
+        )
 
     pipe = pipeline.from_pretrained(
         model,
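On the loading side, a rough sketch of what the new component override amounts to, assuming the converted inversion directory from the sketch above. The paths and the choice of OnnxStableDiffusionPipeline are placeholders, and the onnx-web provider and session-option wiring shown in the diff is omitted.

    # Sketch only: hypothetical paths, ONNX pipeline class used for illustration.
    from os import path

    from diffusers import OnnxStableDiffusionPipeline
    from transformers import CLIPTokenizer

    model = "/tmp/converted-model"          # hypothetical converted ONNX model
    inversion = "/tmp/converted-inversion"  # hypothetical inversion directory

    # reload the tokenizer saved during conversion so the placeholder token
    # resolves to the same id the text encoder was exported with
    components = {
        "tokenizer": CLIPTokenizer.from_pretrained(path.join(inversion, "tokenizer")),
    }

    pipe = OnnxStableDiffusionPipeline.from_pretrained(model, **components)
    result = pipe("a photo of <example-concept>", num_inference_steps=20)
    result.images[0].save("out.png")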