
feat(api): add option for HuggingFace token in convert script

Sean Sube 2023-01-21 15:08:19 -06:00
parent 2a6df0f3aa
commit 45a3ddc2a9
1 changed file with 8 additions and 3 deletions


@@ -182,7 +182,7 @@ def onnx_export(
 @torch.no_grad()
-def convert_diffuser(name: str, url: str, opset: int, half: bool):
+def convert_diffuser(name: str, url: str, opset: int, half: bool, token: str):
     '''
     From https://github.com/huggingface/diffusers/blob/main/scripts/convert_stable_diffusion_checkpoint_to_onnx.py
     '''
@@ -201,7 +201,7 @@ def convert_diffuser(name: str, url: str, opset: int, half: bool):
             'Half precision model export is only supported on GPUs with CUDA')
     pipeline = StableDiffusionPipeline.from_pretrained(
-        url, torch_dtype=dtype).to(training_device)
+        url, torch_dtype=dtype, use_auth_token=token).to(training_device)
     output_path = Path(dest_path)
     # TEXT ENCODER
@@ -387,7 +387,7 @@ def load_models(args, models: Models):
         if source[0] in args.skip:
             print('Skipping model: %s' % source[0])
         else:
-            convert_diffuser(*source, args.opset, args.half)
+            convert_diffuser(*source, args.opset, args.half, args.token)
     if args.upscaling:
         for source in models.get('upscaling'):
@@ -429,6 +429,11 @@ def main() -> int:
         type=int,
         help="The version of the ONNX operator set to use.",
     )
+    parser.add_argument(
+        '--token',
+        type=str,
+        help="HuggingFace token with read permissions for downloading models.",
+    )
     args = parser.parse_args()
     print(args)
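
For context, here is a minimal standalone sketch of how the new --token option is wired through: argparse collects the token, convert_diffuser receives it as a parameter, and it is forwarded to StableDiffusionPipeline.from_pretrained as use_auth_token so gated or private Hugging Face repositories can be downloaded. The model entry, the --opset and --half arguments, and the overall script layout below are illustrative assumptions rather than the actual convert script; only the token handling mirrors the commit.

# Sketch only: the model entry and script layout are assumptions, not the real convert script.
from argparse import ArgumentParser

import torch
from diffusers import StableDiffusionPipeline


def convert_diffuser(name: str, url: str, opset: int, half: bool, token: str):
    training_device = 'cuda' if torch.cuda.is_available() else 'cpu'
    dtype = torch.float16 if half else torch.float32

    if half and training_device != 'cuda':
        raise ValueError(
            'Half precision model export is only supported on GPUs with CUDA')

    # The token is forwarded to from_pretrained so gated or private repos can be
    # fetched with a HuggingFace access token that has read permissions.
    pipeline = StableDiffusionPipeline.from_pretrained(
        url, torch_dtype=dtype, use_auth_token=token).to(training_device)

    # ...ONNX export of the pipeline components (text encoder, UNet, VAE) would follow here...
    print('loaded %s (%s) as %s for export with opset %s' %
          (name, url, type(pipeline).__name__, opset))


def main() -> int:
    parser = ArgumentParser()
    parser.add_argument('--opset', default=14, type=int,
                        help="The version of the ONNX operator set to use.")
    parser.add_argument('--half', action='store_true',
                        help="Export models in half precision.")
    parser.add_argument(
        '--token',
        type=str,
        help="HuggingFace token with read permissions for downloading models.",
    )
    args = parser.parse_args()

    # Hypothetical model entry; the real script iterates over a models list instead.
    convert_diffuser('stable-diffusion-onnx-v1-5',
                     'runwayml/stable-diffusion-v1-5',
                     args.opset, args.half, args.token)
    return 0


if __name__ == '__main__':
    main()

Assuming the script is invoked directly (script name and flags other than --token are hypothetical), a run like python convert.py --half --token <hf_token> would pass the token to every diffusion model download.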