
fix(api): additional logging around model load ops

Sean Sube 2023-02-02 23:34:02 -06:00
parent e1ce3d2136
commit ea69753bfd
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
5 changed files with 13 additions and 6 deletions
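All of the new messages are emitted at DEBUG level, so they only appear when logging is configured verbosely. A minimal sketch of surfacing them with Python's standard logging module; the logger name and model path below are illustrative, not the project's actual configuration:

    import logging

    # Assumption: the API's loggers are plain `logging` loggers, as the
    # `logger.debug('... from %s', path)` calls in this diff suggest.
    # Raising the level to DEBUG makes the new model-load messages visible.
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(name)s %(levelname)s %(message)s',
    )

    logger = logging.getLogger('onnx_web.upscale')  # illustrative logger name
    logger.debug('loading GFPGAN model from %s', '/path/to/gfpgan.pth')  # placeholder path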

@@ -42,6 +42,8 @@ def load_gfpgan(ctx: ServerContext, upscale: UpscaleParams, upsampler: Optional[
         logger.info('reusing existing GFPGAN pipeline')
         return last_pipeline_instance
 
+    logger.debug('loading GFPGAN model from %s', face_path)
+
     # TODO: find a way to pass the ONNX model to underlying architectures
     gfpgan = GFPGANer(
         model_path=face_path,

@@ -54,10 +54,13 @@ def load_resrgan(ctx: ServerContext, params: UpscaleParams, tile=0):
         model_path = [model_path, wdn_model_path]
         dni_weight = [params.denoise, 1 - params.denoise]
 
+    logger.debug('loading Real ESRGAN upscale model from %s', model_path)
+
     # TODO: shouldn't need the PTH file
+    model_path_pth = path.join(ctx.model_path, '%s.pth' % params.upscale_model)
     upsampler = RealESRGANer(
         scale=params.scale,
-        model_path=path.join(ctx.model_path, '%s.pth' % params.upscale_model),
+        model_path=model_path_pth,
         dni_weight=dni_weight,
         model=model,
         tile=tile,

@@ -39,8 +39,10 @@ def load_stable_diffusion(ctx: ServerContext, upscale: UpscaleParams):
         return last_pipeline_instance
 
     if upscale.format == 'onnx':
+        logger.debug('loading Stable Diffusion upscale ONNX model from %s, using provider %s', model_path, upscale.provider)
         pipeline = OnnxStableDiffusionUpscalePipeline.from_pretrained(model_path, provider=upscale.provider)
     else:
+        logger.debug('loading Stable Diffusion upscale model from %s, using provider %s', model_path, upscale.provider)
         pipeline = StableDiffusionUpscalePipeline.from_pretrained(model_path, provider=upscale.provider)
 
     last_pipeline_instance = pipeline

@@ -44,7 +44,7 @@ base_models: Models = {
         ('stable-diffusion-onnx-v2-1', 'stabilityai/stable-diffusion-2-1'),
         ('stable-diffusion-onnx-v2-inpainting',
          'stabilityai/stable-diffusion-2-inpainting'),
-        # should be upscaling with a different converter
+        # TODO: should have its own converter
         ('upscaling-stable-diffusion-x4', 'stabilityai/stable-diffusion-x4-upscaler'),
     ],
     'correction': [

@@ -52,15 +52,15 @@ def load_pipeline(pipeline: DiffusionPipeline, model: str, provider: str, schedu
     options = (pipeline, model, provider)
     if last_pipeline_instance != None and last_pipeline_options == options:
-        logger.info('reusing existing diffusion pipeline')
+        logger.debug('reusing existing diffusion pipeline')
         pipe = last_pipeline_instance
     else:
-        logger.info('unloading previous diffusion pipeline')
+        logger.debug('unloading previous diffusion pipeline')
         last_pipeline_instance = None
         last_pipeline_scheduler = None
         run_gc()
 
-        logger.info('loading new diffusion pipeline')
+        logger.debug('loading new diffusion pipeline from %s', model)
         pipe = pipeline.from_pretrained(
             model,
             provider=provider,
@@ -76,7 +76,7 @@ def load_pipeline(pipeline: DiffusionPipeline, model: str, provider: str, schedu
         last_pipeline_scheduler = scheduler
 
     if last_pipeline_scheduler != scheduler:
-        logger.info('loading new diffusion scheduler')
+        logger.debug('loading new diffusion scheduler')
         scheduler = scheduler.from_pretrained(
             model, subfolder='scheduler')
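The two load_pipeline hunks above follow the pipeline cache: a (pipeline, model, provider) tuple is compared against the options used for the previous load, the cached instance is reused when they match, and otherwise the old pipeline is dropped and a new one is loaded and logged. A stripped-down sketch of that pattern with simplified names; the real function also swaps schedulers, tracks last_pipeline_scheduler, and calls run_gc():

    import logging
    from typing import Any, Optional, Tuple

    logger = logging.getLogger(__name__)

    # Simplified module-level cache, mirroring the globals used in the diff.
    last_pipeline_instance: Any = None
    last_pipeline_options: Optional[Tuple[Any, str, str]] = None


    def load_pipeline(pipeline: Any, model: str, provider: str) -> Any:
        """Return the cached pipeline when the options match, else load a new one."""
        global last_pipeline_instance, last_pipeline_options

        options = (pipeline, model, provider)
        if last_pipeline_instance is not None and last_pipeline_options == options:
            logger.debug('reusing existing diffusion pipeline')
            return last_pipeline_instance

        logger.debug('unloading previous diffusion pipeline')
        last_pipeline_instance = None

        logger.debug('loading new diffusion pipeline from %s', model)
        pipe = pipeline.from_pretrained(model, provider=provider)

        last_pipeline_options = options
        last_pipeline_instance = pipe
        return pipe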