1
0
Fork 0

feat(exe): add specs and launch scripts for Windows EXE bundle (#305)

This commit is contained in:
Sean Sube 2023-03-30 21:02:23 -05:00
parent 1cd436d643
commit 205ff3e403
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
7 changed files with 291 additions and 0 deletions

29
api/entry.py Normal file
View File

@ -0,0 +1,29 @@
"""Entry point for the onnx-web Windows EXE bundle.

TorchScript compilation cannot run inside a PyInstaller-frozen EXE, so
``torch.jit.script`` and ``torch.jit.script_method`` are replaced with
no-op stubs *before* the server (and the torch-based model code it pulls
in) is imported.
"""
import os
import traceback


def script_method(fn, _rcb=None):
    # No-op stand-in for torch.jit.script_method: return the function unchanged.
    return fn


def script(obj, optimize=True, _frames_up=0, _rcb=None):
    # No-op stand-in for torch.jit.script: return the object unchanged.
    return obj


# Patch torch.jit before onnx_web (and therefore any scripted model code)
# is imported, so scripting silently falls back to eager execution.
import torch.jit
torch.jit.script_method = script_method
torch.jit.script = script

import multiprocessing

if __name__ == '__main__':
    # Required when using multiprocessing from a PyInstaller-frozen EXE.
    multiprocessing.freeze_support()
    try:
        from onnx_web.main import main
        app, pool = main()
        print("starting workers")
        pool.start()
        print("starting flask")
        # app.run blocks until the Flask server exits.
        app.run("0.0.0.0", 5000, debug=False)
        input("press the any key")
        pool.join()
    except Exception:
        # Print the full traceback, not just the message, so bundle users
        # can report errors with enough detail to debug.
        traceback.print_exc()
    finally:
        # Keep the console window open so the user can read the output
        # before it closes.
        os.system("pause")

51
docs/setup-guide.md Normal file
View File

@ -0,0 +1,51 @@
# Setup Guide
This guide covers the setup process for onnx-web, including downloading the Windows bundle.
## Contents
- [Setup Guide](#setup-guide)
- [Contents](#contents)
- [Windows all-in-one bundle](#windows-all-in-one-bundle)
- [Windows Python installer](#windows-python-installer)
- [Windows Store Python](#windows-store-python)
## Windows all-in-one bundle
1. Download the latest ZIP file from TODO
2. Find the ZIP file and `Extract All` to a memorable folder
3. Open the folder where you extracted the files
1. You can add models to the `models` folder
2. Your images will be in the `outputs` folder, along with files containing the parameters used to generate them
4. Run the local server using one of the `onnx-web-*.bat` scripts
1. Run `onnx-web-half.bat` if you are using a GPU and you have < 12GB of VRAM
- `-half` mode is compatible with both AMD and Nvidia GPUs
- `-half` mode is not compatible with CPU mode
2. Run `onnx-web-full.bat` if you are using CPU mode or if you have >= 16GB of VRAM
- Try the `onnx-web-half.bat` script if you encounter out-of-memory errors or generating images is very slow
5. Wait for the models to be downloaded and converted
1. Most models are distributed in PyTorch format and need to be converted into ONNX format
2. This only happens once for each model and takes a few minutes
6. Open one of the URLs shown in the logs in your browser
1. This will typically be http://127.0.0.1:5000?api=http://127.0.0.1:5000
2. If you are running the server on a different PC and are not accessing it from a browser on the same system, use that PC's
IP address instead of 127.0.0.1
3. Any modern browser should work, including Chrome, Edge, and Firefox
4. Mobile browsers also work, but have stricter mixed-content policies
## Windows Python installer
1. Install Git
2. Install Python 3.10
3. Open command prompt
4. Run one of the `setup-*.bat` scripts
1. Run `setup-amd.bat` if you are using an AMD GPU and DirectML
2. Run `setup-nvidia.bat` if you are using an Nvidia GPU and CUDA
3. Run `setup-cpu.bat` if you are planning on only using CPU mode
5. In the future, run `launch.bat`
1. You should only need to run the setup script once
2. If you encounter any errors with Python imports, run the setup script again
## Windows Store Python
TODO

16
exe/README.txt Normal file
View File

@ -0,0 +1,16 @@
# onnx-web Windows bundle
This is the Windows all-in-one bundle for onnx-web, a tool for running Stable Diffusion using ONNX hardware
acceleration: https://github.com/ssube/onnx-web.
Please check the setup guide for the latest instructions, since this file may be from an older version: https://github.com/ssube/onnx-web/blob/main/docs/setup-guide.md#windows-all-in-one-bundle
## Running onnx-web
You can run the local server using one of the launch scripts, onnx-web-full.bat or onnx-web-half.bat. Use the
onnx-web-half.bat script if you are using a GPU and have < 12GB of VRAM. Use the onnx-web-full.bat script if
you are using CPU mode or if you have >= 16GB of VRAM.
The user interface should be available in your browser at http://127.0.0.1:5000?api=http://127.0.0.1:5000. If
your PC uses a different IP address or you are running the server on one PC and using it from another, use that IP
address instead.

7
exe/onnx-web-full.bat Normal file
View File

@ -0,0 +1,7 @@
REM Launch the onnx-web server in full-precision mode (for CPU mode, or GPUs with >= 16GB of VRAM).
REM %~dp0 expands to this script's directory *including* a trailing backslash,
REM so the paths below must not add another one.
set ONNX_WEB_BASE_PATH=%~dp0
set ONNX_WEB_BUNDLE_PATH=%ONNX_WEB_BASE_PATH%client
set ONNX_WEB_MODEL_PATH=%ONNX_WEB_BASE_PATH%models
set ONNX_WEB_OUTPUT_PATH=%ONNX_WEB_BASE_PATH%outputs
@echo Launching onnx-web in full-precision mode...
server\onnx-web.exe --diffusion --correction --upscaling

9
exe/onnx-web-half.bat Normal file
View File

@ -0,0 +1,9 @@
REM Launch the onnx-web server in half-precision mode (for GPUs with < 12GB of VRAM).
REM %~dp0 expands to this script's directory *including* a trailing backslash,
REM so the paths below must not add another one.
set ONNX_WEB_BASE_PATH=%~dp0
set ONNX_WEB_BUNDLE_PATH=%ONNX_WEB_BASE_PATH%client
set ONNX_WEB_MODEL_PATH=%ONNX_WEB_BASE_PATH%models
set ONNX_WEB_OUTPUT_PATH=%ONNX_WEB_BASE_PATH%outputs
REM Block the CPU platform: half-precision mode is not compatible with CPU mode.
set ONNX_WEB_BLOCK_PLATFORMS=cpu
@echo Launching onnx-web in half-precision mode...
server\onnx-web.exe --diffusion --correction --upscaling --half

View File

@ -0,0 +1,99 @@
# -*- mode: python ; coding: utf-8 -*-
"""PyInstaller spec for the onnx-web Windows bundle (one-folder build).

Produces a ``server`` directory containing ``onnx-web.exe`` alongside the
collected binaries and data files (``exclude_binaries=True`` plus COLLECT).
"""
from PyInstaller.utils.hooks import collect_data_files, copy_metadata

import sys

# torch/transformers have very deep import graphs; raise the recursion limit
# so PyInstaller's module analysis does not overflow.
sys.setrecursionlimit(sys.getrecursionlimit() * 5)

block_cipher = None

# Distributions whose importlib metadata must ship in the bundle because it
# is looked up at runtime (version/dependency checks).
dynamic_packages = [
    "filelock",
    "numpy",
    "packaging",
    "onnxruntime",
    "onnxruntime-directml",
    "regex",
    "requests",
    "tokenizers",
    "tqdm",
]

# copy_metadata returns a list of TOC entries per package; flatten them.
metadatas = [copy_metadata(pkg) for pkg in dynamic_packages]
metadatas = sum(metadatas, [])

# Data (and .py) files that static analysis misses because these packages
# load them dynamically at runtime.
datas = [
    collect_data_files("basicsr", include_py_files=True, includes=[
        "archs/**",
        "data/**",
        "losses/**",
        "models/**",
        "utils/**",
    ]),
    collect_data_files("realesrgan", include_py_files=True, includes=[
        "archs/**",
        "data/**",
        "losses/**",
        "models/**",
        "utils/**",
    ]),
    collect_data_files("onnxruntime", include_py_files=True, includes=[
        "transformers/**",
    ]),
    collect_data_files("transformers", include_py_files=True, includes=[
        "**",
    ]),
]
datas = sum(datas, [])

a = Analysis(
    ['../api/entry.py'],
    pathex=[],
    binaries=[],
    datas=[
        *metadatas,
        *datas,
    ],
    # Hidden imports must be importable module names. The onnxruntime-directml
    # wheel still installs the "onnxruntime" module, and hyphens cannot appear
    # in a module name, so the previous "onnxruntime-directml" entry was
    # invalid and has been removed.
    hiddenimports=['onnxruntime', 'tqdm'],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)

pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)

exe = EXE(
    pyz,
    a.scripts,
    [],
    name='onnx-web',
    debug=False,
    # Binaries are collected into the output folder by COLLECT below.
    exclude_binaries=True,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)

# Renamed from `dir`, which shadowed the `dir` builtin.
coll = COLLECT(
    exe,
    a.binaries,
    a.zipfiles,
    a.datas,
    strip=False,
    upx=True,
    upx_exclude=[],
    name='server',
)

View File

@ -0,0 +1,80 @@
# -*- mode: python ; coding: utf-8 -*-
"""PyInstaller spec for the onnx-web Windows bundle (one-file build).

Produces a single self-extracting ``onnx-web`` executable with all binaries
and data bundled into the EXE itself.
"""
from PyInstaller.utils.hooks import collect_data_files, copy_metadata

import sys

# torch/transformers have very deep import graphs; raise the recursion limit
# so PyInstaller's module analysis does not overflow.
sys.setrecursionlimit(sys.getrecursionlimit() * 5)

block_cipher = None

# Distributions whose importlib metadata must ship in the bundle because it
# is looked up at runtime (version/dependency checks).
dynamic_packages = [
    "filelock",
    "numpy",
    "packaging",
    "onnxruntime",
    "onnxruntime-directml",
    "regex",
    "requests",
    "tokenizers",
    "tqdm",
]

# copy_metadata returns a list of TOC entries per package; flatten them.
metadatas = [copy_metadata(pkg) for pkg in dynamic_packages]
metadatas = sum(metadatas, [])

# Data (and .py) files that static analysis misses because these packages
# load them dynamically at runtime.
datas = [
    collect_data_files("basicsr", include_py_files=True, includes=[
        "archs/**",
        "data/**",
        "losses/**",
        "models/**",
        "utils/**",
    ]),
    collect_data_files("transformers", include_py_files=True, includes=[
        "**",
    ]),
]
datas = sum(datas, [])

a = Analysis(
    ['../api/entry.py'],
    pathex=[],
    binaries=[],
    datas=[
        *metadatas,
        *datas,
    ],
    # Hidden imports must be importable module names. The onnxruntime-directml
    # wheel still installs the "onnxruntime" module, and hyphens cannot appear
    # in a module name, so the previous "onnxruntime-directml" entry was
    # invalid and has been removed.
    hiddenimports=['onnxruntime', 'tqdm'],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)

pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)

# One-file mode: binaries, zipfiles, and datas are packed into the EXE.
exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.zipfiles,
    a.datas,
    [],
    name='onnx-web',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)