1
0
Fork 0

fix(api): use Waitress for Windows bundle

This commit is contained in:
Sean Sube 2023-04-10 23:22:32 -05:00
parent 76860f649a
commit 9a2f35e181
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
1 changed file with 19 additions and 5 deletions

View File

@@ -1,5 +1,6 @@
import multiprocessing
import os
import threading
import waitress
import webbrowser
def script_method(fn, _rcb=None):
@@ -15,24 +16,37 @@ torch.jit.script = script
if __name__ == '__main__':
    # Entry point for the bundled (PyInstaller-style) Windows build: convert
    # models, start the worker pool and API server, open the browser, then
    # block until the user presses enter.
    # Required on Windows so a frozen executable's child processes do not
    # re-run the whole script.
    multiprocessing.freeze_support()

    try:
        # convert the models
        from onnx_web.convert.__main__ import main as convert

        print("downloading and converting models to ONNX")
        convert()

        # create the server and load the config
        from onnx_web.main import main

        app, pool = main()

        # launch the image workers
        print("starting image workers")
        pool.start()

        # launch the API server on a daemon thread via Waitress so the main
        # thread stays free to open the browser and wait for user input
        print("starting API server")
        server = waitress.create_server(app, host="0.0.0.0", port=5000)

        thread = threading.Thread(target=server.run)
        thread.daemon = True
        thread.start()

        # launch the user's web browser; the ?api= query tells the client
        # where to find this server
        print("opening web browser")
        url = "http://127.0.0.1:5000"
        webbrowser.open_new_tab(f"{url}?api={url}")

        # wait for enter and exit
        input("press enter to quit")
        server.close()
        # give the server thread a moment to shut down, but do not hang
        thread.join(1.0)

        print("shutting down image workers")
        pool.join()
    except Exception as e:
        # top-level boundary for the bundled app: surface the error in the
        # console window rather than letting the window vanish silently
        print(e)
    finally:
        # Windows-only "Press any key to continue" so the console stays open
        os.system("pause")