diff --git a/api/entry.py b/api/entry.py new file mode 100644 index 00000000..e6f67891 --- /dev/null +++ b/api/entry.py @@ -0,0 +1,29 @@ +import os + +def script_method(fn, _rcb=None): + return fn + +def script(obj, optimize=True, _frames_up=0, _rcb=None): + return obj + +import torch.jit +torch.jit.script_method = script_method +torch.jit.script = script + +import multiprocessing + +if __name__ == '__main__': + multiprocessing.freeze_support() + try: + from onnx_web.main import main + app, pool = main() + print("starting workers") + pool.start() + print("starting flask") + app.run("0.0.0.0", 5000, debug=False) + input("press the any key") + pool.join() + except Exception as e: + print(e) + finally: + os.system("pause") diff --git a/docs/setup-guide.md b/docs/setup-guide.md new file mode 100644 index 00000000..4c2c4b59 --- /dev/null +++ b/docs/setup-guide.md @@ -0,0 +1,51 @@ +# Setup Guide + +This guide covers the setup process for onnx-web, including downloading the Windows bundle. + +## Contents + +- [Setup Guide](#setup-guide) + - [Contents](#contents) + - [Windows all-in-one bundle](#windows-all-in-one-bundle) + - [Windows Python installer](#windows-python-installer) + - [Windows Store Python](#windows-store-python) + +## Windows all-in-one bundle + +1. Download the latest ZIP file from TODO +2. Find the ZIP file and `Extract All` to a memorable folder +3. Open the folder where you extracted the files + 1. You can add models to the `models` folder + 2. Your images will be in the `outputs` folder, along with files containing the parameters used to generate them +4. Run the local server using one of the `onnx-web-*.bat` scripts + 1. Run `onnx-web-half.bat` if you are using a GPU and you have < 12GB of VRAM + - `-half` mode is compatible with both AMD and Nvidia GPUs + - `-half` mode is not compatible with CPU mode + 2. 
Run `onnx-web-full.bat` if you are using CPU mode or if you have >= 16GB of VRAM + - Try the `onnx-web-half.bat` script if you encounter out-of-memory errors or generating images is very slow +5. Wait for the models to be downloaded and converted + 1. Most models are distributed in PyTorch format and need to be converted into ONNX format + 2. This only happens once for each model and takes a few minutes +6. Open one of the URLs shown in the logs in your browser + 1. This will typically be http://127.0.0.1:5000?api=http://127.0.0.1:5000 + 2. If you running the server on a different PC and not accessing it from a browser on the same system, use that PC's + IP address instead of 127.0.0.1 + 3. Any modern browser should work, including Chrome, Edge, and Firefox + 4. Mobile browsers also work, but have stricter mixed-content policies + +## Windows Python installer + +1. Install Git +2. Install Python 3.10 +3. Open command prompt +4. Run one of the `setup-*.bat` scripts + 1. Run `setup-amd.bat` if you are using an AMD GPU and DirectML + 2. Run `setup-nvidia.bat` if you are using an Nvidia GPU and CUDA + 3. Run `setup-cpu.bat` if you are planning on only using CPU mode +5. In the future, run `launch.bat` + 1. You should only need to run the setup script once + 2. If you encounter any errors with Python imports, run the setup script again + +## Windows Store Python + +TODO diff --git a/exe/README.txt b/exe/README.txt new file mode 100644 index 00000000..145e5ca0 --- /dev/null +++ b/exe/README.txt @@ -0,0 +1,16 @@ +# onnx-web Windows bundle + +This is the Windows all-in-one bundle for onnx-web, a tool for running Stable Diffusion using ONNX hardware +acceleration: https://github.com/ssube/onnx-web. 
+
+Please check the setup guide for the latest instructions, as this copy may be an older version: https://github.com/ssube/onnx-web/blob/main/docs/setup-guide.md#windows-all-in-one-bundle
+server\onnx-web.exe --diffusion --correction --upscaling --half \ No newline at end of file diff --git a/exe/win10.directml.dir.spec b/exe/win10.directml.dir.spec new file mode 100644 index 00000000..9b49253d --- /dev/null +++ b/exe/win10.directml.dir.spec @@ -0,0 +1,99 @@ +# -*- mode: python ; coding: utf-8 -*- + +from PyInstaller.utils.hooks import collect_data_files, copy_metadata + +import sys + +sys.setrecursionlimit(sys.getrecursionlimit() * 5) + + +block_cipher = None + +dynamic_packages = [ + "filelock", + "numpy", + "packaging", + "onnxruntime", + "onnxruntime-directml", + "regex", + "requests", + "tokenizers", + "tqdm", +] +metadatas = [copy_metadata(pkg) for pkg in dynamic_packages] +metadatas = sum(metadatas, []) + +datas = [ + collect_data_files("basicsr", include_py_files=True, includes=[ + "archs/**", + "data/**", + "losses/**", + "models/**", + "utils/**", + ]), + collect_data_files("realesrgan", include_py_files=True, includes=[ + "archs/**", + "data/**", + "losses/**", + "models/**", + "utils/**", + ]), + collect_data_files("onnxruntime", include_py_files=True, includes=[ + "transformers/**", + ]), + collect_data_files("transformers", include_py_files=True, includes=[ + "**", + ]), +] +datas = sum(datas, []) + +a = Analysis( + ['../api/entry.py'], + pathex=[], + binaries=[], + datas=[ + *metadatas, + *datas, + ], + hiddenimports=['onnxruntime', 'onnxruntime-directml', 'tqdm'], + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + win_no_prefer_redirects=False, + win_private_assemblies=False, + cipher=block_cipher, + noarchive=False, +) +pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) + +exe = EXE( + pyz, + a.scripts, + [], + name='onnx-web', + debug=False, + exclude_binaries=True, + bootloader_ignore_signals=False, + strip=False, + upx=True, + upx_exclude=[], + runtime_tmpdir=None, + console=True, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + 
entitlements_file=None, +) + +dir = COLLECT( + exe, + a.binaries, + a.zipfiles, + a.datas, + strip=False, + upx=True, + upx_exclude=[], + name='server', +) \ No newline at end of file diff --git a/exe/win10.directml.file.spec b/exe/win10.directml.file.spec new file mode 100644 index 00000000..24a7ab8e --- /dev/null +++ b/exe/win10.directml.file.spec @@ -0,0 +1,80 @@ +# -*- mode: python ; coding: utf-8 -*- + +from PyInstaller.utils.hooks import collect_data_files, copy_metadata + +import sys + +sys.setrecursionlimit(sys.getrecursionlimit() * 5) + + +block_cipher = None + +dynamic_packages = [ + "filelock", + "numpy", + "packaging", + "onnxruntime", + "onnxruntime-directml", + "regex", + "requests", + "tokenizers", + "tqdm", +] +metadatas = [copy_metadata(pkg) for pkg in dynamic_packages] +metadatas = sum(metadatas, []) + +datas = [ + collect_data_files("basicsr", include_py_files=True, includes=[ + "archs/**", + "data/**", + "losses/**", + "models/**", + "utils/**", + ]), + collect_data_files("transformers", include_py_files=True, includes=[ + "**", + ]), +] +datas = sum(datas, []) + +a = Analysis( + ['../api/entry.py'], + pathex=[], + binaries=[], + datas=[ + *metadatas, + *datas, + ], + hiddenimports=['onnxruntime', 'onnxruntime-directml', 'tqdm'], + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + win_no_prefer_redirects=False, + win_private_assemblies=False, + cipher=block_cipher, + noarchive=False, +) +pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) + +exe = EXE( + pyz, + a.scripts, + a.binaries, + a.zipfiles, + a.datas, + [], + name='onnx-web', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + upx_exclude=[], + runtime_tmpdir=None, + console=True, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + entitlements_file=None, +)