diff --git a/README.md b/README.md
index fd01e688..af8a5f10 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,10 @@
 # ONNX Web
 
-This is a web UI for running ONNX models with GPU acceleration locally or on a remote machine.
+This is a web UI for running ONNX models with GPU acceleration or in software, running locally or on a remote machine.
 
-The API runs on both Linux and Windows and provides access to the major functionality of `diffusers`, metadata
-about the available models and accelerators, and the output of previous runs.
+The API runs on both Linux and Windows and provides access to the major functionality of `diffusers`, along with
+metadata about the available models and accelerators, and the output of previous runs. Hardware acceleration is
+supported for AMD and experimental for Nvidia, with a CPU fallback capable of running on laptop-class machines.
 
 The GUI runs in all major browsers, including on mobile devices, and allows you to select the model and accelerator
 being used, along with the prompt and other image parameters. The last few output images are shown, making it easy to
diff --git a/api/onnx_web/serve.py b/api/onnx_web/serve.py
index 6db9320e..7e88879c 100644
--- a/api/onnx_web/serve.py
+++ b/api/onnx_web/serve.py
@@ -43,6 +43,7 @@ last_pipeline_scheduler = None
 platform_providers = {
     'amd': 'DmlExecutionProvider',
     'cpu': 'CPUExecutionProvider',
+    'nvidia': 'CUDAExecutionProvider',
 }
 pipeline_schedulers = {
     'ddim': DDIMScheduler,
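For context, the new `nvidia` entry maps the platform name to ONNX Runtime's CUDA execution provider, alongside the existing DirectML (`amd`) and CPU entries. A minimal sketch of how such a mapping could be used to pick a provider when loading a `diffusers` ONNX pipeline is below; the `load_pipeline` helper and `model_path` argument are illustrative names, not part of this change.

```python
# Sketch only: selecting an ONNX Runtime execution provider by platform name
# and passing it to a diffusers ONNX pipeline. Assumes onnxruntime (or
# onnxruntime-gpu / onnxruntime-directml) and diffusers are installed.
from diffusers import OnnxStableDiffusionPipeline

platform_providers = {
    'amd': 'DmlExecutionProvider',
    'cpu': 'CPUExecutionProvider',
    'nvidia': 'CUDAExecutionProvider',
}


def load_pipeline(model_path: str, platform: str) -> OnnxStableDiffusionPipeline:
    # fall back to the CPU provider when the requested platform is unknown
    provider = platform_providers.get(platform, 'CPUExecutionProvider')
    return OnnxStableDiffusionPipeline.from_pretrained(model_path, provider=provider)
```

Falling back to `CPUExecutionProvider` for unrecognized platforms mirrors the CPU software fallback described in the README text above.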