
fix(api): use exception level logs

Sean Sube 2023-03-16 22:29:07 -05:00
parent d9bae80778
commit 226710a015
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
11 changed files with 43 additions and 51 deletions
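
The change throughout is one pattern: inside `except` blocks, replace `logger.error("...: %s", err)` with `logger.exception(...)`, which logs at ERROR level and appends the active traceback automatically, so neither the exception object nor `traceback.format_exception` has to be interpolated into the message. A minimal before/after sketch (the `do_work` helper is hypothetical, not from this repo):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)


def do_work():
    # hypothetical stand-in for an S3 upload or model conversion
    raise RuntimeError("boom")


# before: interpolate the exception into the message by hand
try:
    do_work()
except Exception as err:
    logger.error("error running job: %s", err)  # message only, no traceback

# after: logger.exception logs at ERROR level and appends the active
# traceback itself, so the except clause no longer binds the exception
try:
    do_work()
except Exception:
    logger.exception("error running job")

Note that `logger.exception` is only meaningful inside an exception handler, which is where every call site in this commit sits.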

View File

@@ -104,7 +104,7 @@ class ChainPipeline:
         kwargs = stage_kwargs or {}
         kwargs = {**pipeline_kwargs, **kwargs}
-        logger.info(
+        logger.debug(
             "running stage %s on image with dimensions %sx%s, %s",
             name,
             image.width,
@@ -145,7 +145,7 @@ class ChainPipeline:
                 [stage_tile],
             )
         else:
-            logger.info("image within tile size, running stage")
+            logger.debug("image within tile size, running stage")
             image = stage_pipe(
                 job,
                 server,
@@ -156,7 +156,7 @@ class ChainPipeline:
                 **kwargs
             )
-        logger.info(
+        logger.debug(
             "finished stage %s, result size: %sx%s", name, image.width, image.height
         )
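
The per-stage messages move from `info` to `debug` so routine pipeline progress no longer floods the default log. They remain reachable by raising verbosity; a minimal stdlib sketch (the format string is an assumption, not onnx-web's own logging config):

import logging

# raise verbosity so the demoted per-stage messages show up again,
# without touching the pipeline code itself
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(name)s %(levelname)s %(message)s",
)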

View File

@@ -38,7 +38,7 @@ def persist_s3(
     try:
         s3.upload_fileobj(data, bucket, output)
         logger.info("saved image to %s/%s", bucket, output)
-    except Exception as err:
-        logger.error("error saving image to S3: %s", err)
+    except Exception:
+        logger.exception("error saving image to S3")
     return source

View File

@@ -32,7 +32,7 @@ def process_tile_grid(
             idx = (y * tiles_x) + x
             left = x * tile
             top = y * tile
-            logger.info("processing tile %s of %s, %s.%s", idx + 1, total, y, x)
+            logger.debug("processing tile %s of %s, %s.%s", idx + 1, total, y, x)
             tile_image = source.crop((left, top, left + tile, top + tile))
             for filter in filters:
@@ -80,7 +80,7 @@ def process_tile_spiral(
         top = center_y + int(top)
         counter += 1
-        logger.info("processing tile %s of %s, %sx%s", counter, len(tiles), left, top)
+        logger.debug("processing tile %s of %s, %sx%s", counter, len(tiles), left, top)
         # TODO: only valid for scale == 1, resize source for others
         tile_image = image.crop((left, top, left + tile, top + tile))

View File

@@ -3,7 +3,6 @@ from argparse import ArgumentParser
 from logging import getLogger
 from os import makedirs, path
 from sys import exit
-from traceback import format_exception
 from typing import Any, Dict, List, Optional, Tuple
 from urllib.parse import urlparse
@@ -199,8 +198,8 @@ def convert_models(ctx: ConversionContext, args, models: Models):
             try:
                 dest = fetch_model(ctx, name, source, model_format=model_format)
                 logger.info("finished downloading source: %s -> %s", source, dest)
-            except Exception as e:
-                logger.error("error fetching source %s: %s", name, e)
+            except Exception:
+                logger.exception("error fetching source %s", name)
     if args.diffusion and "diffusion" in models:
         for model in models.get("diffusion"):
@@ -246,11 +245,10 @@ def convert_models(ctx: ConversionContext, args, models: Models):
                         base_token=inversion.get("token"),
                     )
-            except Exception as e:
-                logger.error(
-                    "error converting diffusion model %s: %s",
+            except Exception:
+                logger.exception(
+                    "error converting diffusion model %s",
                     name,
-                    format_exception(type(e), e, e.__traceback__),
                 )
     if args.upscaling and "upscaling" in models:
@@ -268,11 +266,10 @@ def convert_models(ctx: ConversionContext, args, models: Models):
                     ctx, name, model["source"], model_format=model_format
                 )
                 convert_upscale_resrgan(ctx, model, source)
-            except Exception as e:
-                logger.error(
-                    "error converting upscaling model %s: %s",
+            except Exception:
+                logger.exception(
+                    "error converting upscaling model %s",
                     name,
-                    format_exception(type(e), e, e.__traceback__),
                 )
     if args.correction and "correction" in models:
@@ -289,11 +286,10 @@ def convert_models(ctx: ConversionContext, args, models: Models):
                     ctx, name, model["source"], model_format=model_format
                 )
                 convert_correction_gfpgan(ctx, model, source)
-            except Exception as e:
-                logger.error(
-                    "error converting correction model %s: %s",
+            except Exception:
+                logger.exception(
+                    "error converting correction model %s",
                     name,
-                    format_exception(type(e), e, e.__traceback__),
                 )
@@ -375,10 +371,10 @@ def main() -> int:
             validate(data, extra_schema)
             logger.info("converting extra models")
             convert_models(ctx, args, data)
-        except ValidationError as err:
-            logger.error("invalid data in extras file: %s", err)
-        except Exception as err:
-            logger.error("error converting extra models: %s", err)
+        except ValidationError:
+            logger.exception("invalid data in extras file")
+        except Exception:
+            logger.exception("error converting extra models")
     return 0

View File

@@ -160,10 +160,10 @@ def blend_loras(
     fixed_initializer_names = [
         fix_initializer_name(node.name) for node in base_model.graph.initializer
     ]
-    # logger.info("fixed initializer names: %s", fixed_initializer_names)
+    logger.trace("fixed initializer names: %s", fixed_initializer_names)
     fixed_node_names = [fix_node_name(node.name) for node in base_model.graph.node]
-    # logger.info("fixed node names: %s", fixed_node_names)
+    logger.trace("fixed node names: %s", fixed_node_names)
     for base_key, weights in blended.items():
         conv_key = base_key + "_Conv"
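
These commented-out debug prints become `logger.trace` calls. `trace` is not part of the stdlib `logging` module, so this relies on a custom TRACE level registered elsewhere in the project. A minimal sketch of how such a level can be added (the names and the numeric value 5 are assumptions, not onnx-web's actual setup):

import logging

TRACE = 5  # below DEBUG (10); assumed value
logging.addLevelName(TRACE, "TRACE")


def trace(self, message, *args, **kwargs):
    # mirror Logger.debug, but at the custom TRACE level
    if self.isEnabledFor(TRACE):
        self._log(TRACE, message, args, **kwargs)


logging.Logger.trace = trace

logging.basicConfig(level=TRACE)
logger = logging.getLogger(__name__)
logger.trace("fixed node names: %s", ["node_1", "node_2"])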

View File

@@ -17,8 +17,6 @@ import json
 import os
 import re
 import shutil
-import sys
-import traceback
 from logging import getLogger
 from typing import Dict, List
@@ -1395,7 +1393,6 @@ def extract_checkpoint(
     image_size = 512 if is_512 else 768
     # Needed for V2 models so we can create the right text encoder.
     upcast_attention = False
-    msg = None
     # Create empty config
     db_config = TrainingConfig(
@@ -1607,15 +1604,13 @@ def extract_checkpoint(
             scheduler=scheduler,
         )
     except Exception:
-        logger.error(
-            "exception setting up output: %s",
-            traceback.format_exception(*sys.exc_info()),
+        logger.exception(
+            "error setting up output",
         )
         pipe = None
     if pipe is None or db_config is None:
-        msg = "pipeline or config is not set, unable to continue."
-        logger.error(msg)
+        logger.error("pipeline or config is not set, unable to continue")
         return
     else:
         logger.info("saving diffusion model")

View File

@@ -133,8 +133,8 @@ def load_extras(context: ServerContext):
             logger.debug("validating extras file %s", data)
             try:
                 validate(data, extra_schema)
-            except ValidationError as err:
-                logger.error("invalid data in extras file: %s", err)
+            except ValidationError:
+                logger.exception("invalid data in extras file")
                 continue
             if "strings" in data:
@@ -166,8 +166,8 @@ def load_extras(context: ServerContext):
                             f"inversion-{inversion_name}"
                         ] = inversion["label"]
-        except Exception as err:
-            logger.error("error loading extras file: %s", err)
+        except Exception:
+            logger.exception("error loading extras file")
     logger.debug("adding labels to strings: %s", labels)
     merge(

View File

@@ -32,6 +32,9 @@ def register_routes(
 def wrap_route(func, *args, **kwargs):
+    """
+    From http://louistiao.me/posts/adding-__name__-and-__doc__-attributes-to-functoolspartial-objects/
+    """
     partial_func = partial(func, *args, **kwargs)
     update_wrapper(partial_func, func)
     return partial_func
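
The added docstring credits the `update_wrapper` trick: a bare `functools.partial` has no `__name__`, and Flask derives default route endpoint names from the view function's `__name__`, so registering partials directly would fail. A quick stdlib demonstration (the `render_page` function and its arguments are hypothetical):

from functools import partial, update_wrapper


def render_page(server, path):
    """Hypothetical view function taking a bound server argument."""
    return f"{server}:{path}"


bound = partial(render_page, "server-1")
# a bare partial has no __name__, which breaks anything that derives
# an endpoint or registry key from the wrapped function
print(hasattr(bound, "__name__"))  # False

wrapped = partial(render_page, "server-1")
update_wrapper(wrapped, render_page)
print(wrapped.__name__)  # 'render_page', copied from the original
print(wrapped("index"))  # 'server-1:index'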

View File

@@ -131,8 +131,8 @@ class DevicePoolExecutor:
                     pass
                 except ValueError:
                     break
-                except Exception as err:
-                    logger.error("error in log worker: %s", err)
+                except Exception:
+                    logger.exception("error in log worker")
         logger_thread = Thread(
             name="onnx-web logger", target=logger_worker, args=(self.logs,), daemon=True
@@ -159,8 +159,8 @@ class DevicePoolExecutor:
                     pass
                 except ValueError:
                     break
-                except Exception as err:
-                    logger.error("error in progress worker: %s", err)
+                except Exception:
+                    logger.exception("error in progress worker")
         progress_thread = Thread(
             name="onnx-web progress",
@@ -189,8 +189,8 @@ class DevicePoolExecutor:
                     pass
                 except ValueError:
                     break
-                except Exception as err:
-                    logger.error("error in finished worker: %s", err)
+                except Exception:
+                    logger.exception("error in finished worker")
         finished_thread = Thread(
             name="onnx-web finished",

View File

@@ -65,7 +65,6 @@ def worker_main(context: WorkerContext, server: ServerContext):
                 logger.error("detected out-of-memory error, exiting: %s", e)
                 exit(EXIT_MEMORY)
             else:
-                logger.error(
-                    "error while running job: %s",
-                    format_exception(type(e), e, e.__traceback__),
+                logger.exception(
+                    "error while running job",
                 )

View File

@@ -342,8 +342,7 @@ def main():
                 logger.warning("test failed: %s", test.name)
                 failed.append(test.name)
         except Exception as e:
-            traceback.print_exception(type(e), e, e.__traceback__)
-            logger.error("error running test for %s: %s", test.name, e)
+            logger.exception("error running test for %s", test.name)
             failed.append(test.name)
     logger.info("%s of %s tests passed", len(passed), len(TEST_DATA))