diff --git a/api/onnx_web/convert/__main__.py b/api/onnx_web/convert/__main__.py
index 16ac5853..283afd94 100644
--- a/api/onnx_web/convert/__main__.py
+++ b/api/onnx_web/convert/__main__.py
@@ -62,8 +62,8 @@ model_converters: Dict[str, Any] = {
     "img2img": convert_diffusion_diffusers,
     "img2img-sdxl": convert_diffusion_diffusers_xl,
     "inpaint": convert_diffusion_diffusers,
-    "txt2img": convert_diffusion_diffusers,
-    "txt2img-optimum": convert_diffusion_diffusers_optimum,
+    "txt2img": convert_diffusion_diffusers_optimum,
+    "txt2img-legacy": convert_diffusion_diffusers,
     "txt2img-sdxl": convert_diffusion_diffusers_xl,
 }
 
diff --git a/api/onnx_web/convert/diffusion/diffusion.py b/api/onnx_web/convert/diffusion/diffusion.py
index 55eb571f..ddb1aa73 100644
--- a/api/onnx_web/convert/diffusion/diffusion.py
+++ b/api/onnx_web/convert/diffusion/diffusion.py
@@ -818,7 +818,7 @@ def convert_diffusion_diffusers_optimum(
             logger.debug("loading VAE from single tensor file: %s", vae_path)
            pipeline.vae = AutoencoderKL.from_single_file(vae_path)
         else:
-            logger.debug("loading VAE from single tensor file: %s", vae_path)
+            logger.debug("loading pretrained VAE from path: %s", replace_vae)
             pipeline.vae = AutoencoderKL.from_pretrained(replace_vae)
 
     if is_torch_2_0: