
fix(api): use exception level logs

Sean Sube 2023-03-16 22:29:07 -05:00
parent d9bae80778
commit 226710a015
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
11 changed files with 43 additions and 51 deletions
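The pattern applied throughout this commit: logging.Logger.exception logs at ERROR level and attaches the active traceback automatically, which replaces both the manual format_exception plumbing and the %s interpolation of the exception object. A minimal standalone sketch of the before/after (the logger name and failing function are illustrative, not from this repo):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("example")

def risky():
    raise ValueError("boom")

try:
    risky()
except Exception as err:
    # before: an ERROR record carrying only the message text
    logger.error("error while running job: %s", err)

try:
    risky()
except Exception:
    # after: same ERROR level, but the traceback is appended
    # automatically; the exception object is no longer needed
    logger.exception("error while running job")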

View File

@@ -104,7 +104,7 @@ class ChainPipeline:
kwargs = stage_kwargs or {}
kwargs = {**pipeline_kwargs, **kwargs}
-logger.info(
+logger.debug(
"running stage %s on image with dimensions %sx%s, %s",
name,
image.width,
@@ -145,7 +145,7 @@ class ChainPipeline:
[stage_tile],
)
else:
logger.info("image within tile size, running stage")
logger.debug("image within tile size, running stage")
image = stage_pipe(
job,
server,
@@ -156,7 +156,7 @@ class ChainPipeline:
**kwargs
)
-logger.info(
+logger.debug(
"finished stage %s, result size: %sx%s", name, image.width, image.height
)
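Demoting the per-stage progress messages from info to debug means they only appear when the logging tree is configured verbosely. A quick sketch of the visibility difference (the logger name is illustrative):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("example.chain")

logger.debug("running stage %s", "upscale")  # suppressed at INFO

logging.getLogger().setLevel(logging.DEBUG)
logger.debug("running stage %s", "upscale")  # emitted once the root allows DEBUG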

View File

@@ -38,7 +38,7 @@ def persist_s3(
try:
s3.upload_fileobj(data, bucket, output)
logger.info("saved image to %s/%s", bucket, output)
-except Exception as err:
-logger.error("error saving image to S3: %s", err)
+except Exception:
+logger.exception("error saving image to S3")
return source
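The S3 stage logs the failure and still returns the source image, so one bad upload does not break the chain; only the logging call changes. A hedged sketch of that shape, assuming boto3 (the bucket and key are placeholders):

import logging
from io import BytesIO

import boto3

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("example.persist")

def persist_s3(data, bucket, output):
    s3 = boto3.client("s3")
    try:
        s3.upload_fileobj(data, bucket, output)
        logger.info("saved image to %s/%s", bucket, output)
    except Exception:
        # the record now includes the full botocore traceback
        logger.exception("error saving image to S3")

persist_s3(BytesIO(b"..."), "example-bucket", "outputs/test.png")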

View File

@@ -32,7 +32,7 @@ def process_tile_grid(
idx = (y * tiles_x) + x
left = x * tile
top = y * tile
logger.info("processing tile %s of %s, %s.%s", idx + 1, total, y, x)
logger.debug("processing tile %s of %s, %s.%s", idx + 1, total, y, x)
tile_image = source.crop((left, top, left + tile, top + tile))
for filter in filters:
@@ -80,7 +80,7 @@ def process_tile_spiral(
top = center_y + int(top)
counter += 1
logger.info("processing tile %s of %s, %sx%s", counter, len(tiles), left, top)
logger.debug("processing tile %s of %s, %sx%s", counter, len(tiles), left, top)
# TODO: only valid for scale == 1, resize source for others
tile_image = image.crop((left, top, left + tile, top + tile))
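The tile indices in those log messages come from plain grid math over the source image. A small PIL sketch of the grid case (image and tile sizes are arbitrary):

from PIL import Image

tile = 256
source = Image.new("RGB", (512, 512))
tiles_x = source.width // tile
tiles_y = source.height // tile
total = tiles_x * tiles_y

for y in range(tiles_y):
    for x in range(tiles_x):
        idx = (y * tiles_x) + x
        left, top = x * tile, y * tile
        # idx + 1 produces the 1-based "tile N of M" wording from the logs
        tile_image = source.crop((left, top, left + tile, top + tile))
        print(f"tile {idx + 1} of {total} at {left},{top}: {tile_image.size}")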

View File

@@ -3,7 +3,6 @@ from argparse import ArgumentParser
from logging import getLogger
from os import makedirs, path
from sys import exit
-from traceback import format_exception
from typing import Any, Dict, List, Optional, Tuple
from urllib.parse import urlparse
@@ -199,8 +198,8 @@ def convert_models(ctx: ConversionContext, args, models: Models):
try:
dest = fetch_model(ctx, name, source, model_format=model_format)
logger.info("finished downloading source: %s -> %s", source, dest)
-except Exception as e:
-logger.error("error fetching source %s: %s", name, e)
+except Exception:
+logger.exception("error fetching source %s", name)
if args.diffusion and "diffusion" in models:
for model in models.get("diffusion"):
@@ -246,11 +245,10 @@ def convert_models(ctx: ConversionContext, args, models: Models):
base_token=inversion.get("token"),
)
-except Exception as e:
-logger.error(
-"error converting diffusion model %s: %s",
+except Exception:
+logger.exception(
+"error converting diffusion model %s",
name,
-format_exception(type(e), e, e.__traceback__),
)
if args.upscaling and "upscaling" in models:
@@ -268,11 +266,10 @@ def convert_models(ctx: ConversionContext, args, models: Models):
ctx, name, model["source"], model_format=model_format
)
convert_upscale_resrgan(ctx, model, source)
-except Exception as e:
-logger.error(
-"error converting upscaling model %s: %s",
+except Exception:
+logger.exception(
+"error converting upscaling model %s",
name,
-format_exception(type(e), e, e.__traceback__),
)
if args.correction and "correction" in models:
@@ -289,11 +286,10 @@ def convert_models(ctx: ConversionContext, args, models: Models):
ctx, name, model["source"], model_format=model_format
)
convert_correction_gfpgan(ctx, model, source)
-except Exception as e:
-logger.error(
-"error converting correction model %s: %s",
+except Exception:
+logger.exception(
+"error converting correction model %s",
name,
-format_exception(type(e), e, e.__traceback__),
)
@@ -375,10 +371,10 @@ def main() -> int:
validate(data, extra_schema)
logger.info("converting extra models")
convert_models(ctx, args, data)
-except ValidationError as err:
-logger.error("invalid data in extras file: %s", err)
-except Exception as err:
-logger.error("error converting extra models: %s", err)
+except ValidationError:
+logger.exception("invalid data in extras file")
+except Exception:
+logger.exception("error converting extra models")
return 0
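Both here and in load_extras below, user-supplied JSON is checked with jsonschema before use, and schema failures now get a traceback too. A short sketch of that validate-then-report flow, assuming the jsonschema package (the schema itself is illustrative):

import logging

from jsonschema import ValidationError, validate

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("example.convert")

schema = {"type": "object", "required": ["diffusion"]}  # illustrative schema

try:
    validate({"upscaling": []}, schema)
except ValidationError:
    # the traceback preserves the schema path of the failing key
    logger.exception("invalid data in extras file")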

View File

@@ -160,10 +160,10 @@ def blend_loras(
fixed_initializer_names = [
fix_initializer_name(node.name) for node in base_model.graph.initializer
]
# logger.info("fixed initializer names: %s", fixed_initializer_names)
logger.trace("fixed initializer names: %s", fixed_initializer_names)
fixed_node_names = [fix_node_name(node.name) for node in base_model.graph.node]
# logger.info("fixed node names: %s", fixed_node_names)
logger.trace("fixed node names: %s", fixed_node_names)
for base_key, weights in blended.items():
conv_key = base_key + "_Conv"
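trace is not a stdlib logging level, so a level below DEBUG has to be registered before logger.trace(...) can resolve. A common recipe for that (a sketch, not necessarily how onnx-web wires it up):

import logging

TRACE = 5  # below DEBUG (10)
logging.addLevelName(TRACE, "TRACE")

def trace(self, msg, *args, **kwargs):
    # route through the standard machinery so level filtering still applies
    if self.isEnabledFor(TRACE):
        self._log(TRACE, msg, args, **kwargs)

logging.Logger.trace = trace

logging.basicConfig(level=TRACE)
logging.getLogger("example").trace("fixed node names: %s", ["a", "b"])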

View File

@@ -17,8 +17,6 @@ import json
import os
import re
import shutil
-import sys
-import traceback
from logging import getLogger
from typing import Dict, List
@@ -1395,7 +1393,6 @@ def extract_checkpoint(
image_size = 512 if is_512 else 768
# Needed for V2 models so we can create the right text encoder.
upcast_attention = False
-msg = None
# Create empty config
db_config = TrainingConfig(
@@ -1607,15 +1604,13 @@ def extract_checkpoint(
scheduler=scheduler,
)
except Exception:
-logger.error(
-"exception setting up output: %s",
-traceback.format_exception(*sys.exc_info()),
+logger.exception(
+"error setting up output",
)
pipe = None
if pipe is None or db_config is None:
msg = "pipeline or config is not set, unable to continue."
logger.error(msg)
logger.error("pipeline or config is not set, unable to continue")
return
else:
logger.info("saving diffusion model")

View File

@@ -133,8 +133,8 @@ def load_extras(context: ServerContext):
logger.debug("validating extras file %s", data)
try:
validate(data, extra_schema)
-except ValidationError as err:
-logger.error("invalid data in extras file: %s", err)
+except ValidationError:
+logger.exception("invalid data in extras file")
continue
if "strings" in data:
@@ -166,8 +166,8 @@ def load_extras(context: ServerContext):
f"inversion-{inversion_name}"
] = inversion["label"]
-except Exception as err:
-logger.error("error loading extras file: %s", err)
+except Exception:
+logger.exception("error loading extras file")
logger.debug("adding labels to strings: %s", labels)
merge(

View File

@@ -32,6 +32,9 @@ def register_routes(
def wrap_route(func, *args, **kwargs):
"""
From http://louistiao.me/posts/adding-__name__-and-__doc__-attributes-to-functoolspartial-objects/
"""
partial_func = partial(func, *args, **kwargs)
update_wrapper(partial_func, func)
return partial_func
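The new docstring credits the trick used by wrap_route: a bare functools.partial has no __name__ or __doc__, which breaks frameworks (such as Flask's route registration) that derive an endpoint name from the view function. A standalone sketch:

from functools import partial, update_wrapper

def wrap_route(func, *args, **kwargs):
    partial_func = partial(func, *args, **kwargs)
    # copy __name__, __doc__, etc. from the wrapped function onto the partial
    update_wrapper(partial_func, func)
    return partial_func

def introspect(server=None):
    """Example route handler."""
    return server

route = wrap_route(introspect, server="ctx")
print(route.__name__)  # "introspect", where a bare partial raises AttributeError
print(route())  # "ctx"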

View File

@@ -131,8 +131,8 @@ class DevicePoolExecutor:
pass
except ValueError:
break
-except Exception as err:
-logger.error("error in log worker: %s", err)
+except Exception:
+logger.exception("error in log worker")
logger_thread = Thread(
name="onnx-web logger", target=logger_worker, args=(self.logs,), daemon=True
@@ -159,8 +159,8 @@ class DevicePoolExecutor:
pass
except ValueError:
break
-except Exception as err:
-logger.error("error in progress worker: %s", err)
+except Exception:
+logger.exception("error in progress worker")
progress_thread = Thread(
name="onnx-web progress",
@@ -189,8 +189,8 @@ class DevicePoolExecutor:
pass
except ValueError:
break
-except Exception as err:
-logger.error("error in finished worker: %s", err)
+except Exception:
+logger.exception("error in finished worker")
finished_thread = Thread(
name="onnx-web finished",

View File

@@ -65,7 +65,6 @@ def worker_main(context: WorkerContext, server: ServerContext):
logger.error("detected out-of-memory error, exiting: %s", e)
exit(EXIT_MEMORY)
else:
-logger.error(
-"error while running job: %s",
-format_exception(type(e), e, e.__traceback__),
+logger.exception(
+"error while running job",
)
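The worker keeps a plain error record for the out-of-memory branch, where the cause is already known and the process exits with a dedicated code, and reserves exception for everything else. A condensed sketch of that split (the string-based OOM check and the EXIT_MEMORY value are illustrative, not onnx-web's actual logic):

import logging
from sys import exit

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("example.worker")
EXIT_MEMORY = 42  # illustrative; the real code is defined elsewhere in onnx-web

def run_job():
    raise RuntimeError("simulated job failure")

try:
    run_job()
except Exception as e:
    if "memory" in str(e).lower():  # illustrative OOM detection
        # no traceback needed: the cause is known and the process exits
        logger.error("detected out-of-memory error, exiting: %s", e)
        exit(EXIT_MEMORY)
    else:
        logger.exception("error while running job")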

View File

@@ -342,8 +342,7 @@ def main():
logger.warning("test failed: %s", test.name)
failed.append(test.name)
except Exception as e:
-traceback.print_exception(type(e), e, e.__traceback__)
-logger.error("error running test for %s: %s", test.name, e)
+logger.exception("error running test for %s", test.name)
failed.append(test.name)
logger.info("%s of %s tests passed", len(passed), len(TEST_DATA))