
more lint, more trace

Author: Sean Sube, 2023-03-16 20:22:20 -05:00
parent 9f0a6f134e
commit 4b832f3d8d
Signed by: ssube
GPG Key ID: 3EED7B957D362AF1
3 changed files with 12 additions and 12 deletions

View File

@@ -47,25 +47,25 @@ def add_logging_level(level_name, level_num, method_name=None):
     # This method was inspired by the answers to Stack Overflow post
     # http://stackoverflow.com/q/2183233/2988730, especially
     # http://stackoverflow.com/a/13638084/2988730
-    def logForLevel(self, message, *args, **kwargs):
-        if self.isEnabledFor(level_num):
-            self._log(level_num, message, args, **kwargs)
+    def log_for_level(self, message, *args, **kwargs):
+        self.log(level_num, message, args, **kwargs)

-    def logToRoot(message, *args, **kwargs):
+    def log_to_root(message, *args, **kwargs):
         logging.log(level_num, message, *args, **kwargs)

     logging.addLevelName(level_num, level_name)
     setattr(logging, level_name, level_num)
-    setattr(logging.getLoggerClass(), method_name, logForLevel)
-    setattr(logging, method_name, logToRoot)
+    setattr(logging.getLoggerClass(), method_name, log_for_level)
+    setattr(logging, method_name, log_to_root)

 # setup logging config before anything else loads
+add_logging_level("TRACE", logging.DEBUG - 5)
 try:
     if path.exists(logging_path):
         with open(logging_path, "r") as f:
             config_logging = safe_load(f)
             dictConfig(config_logging)
-            add_logging_level("TRACE", logging.DEBUG - 5)
 except Exception as err:
     print("error loading logging config: %s" % (err))
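Note on the hunk above: the `add_logging_level("TRACE", logging.DEBUG - 5)` call moves out of the config-loading `try` block, so the custom TRACE level is registered even when the logging config file is missing or fails to parse. For reference, a minimal standalone sketch of what that registration provides, based on the Stack Overflow recipe cited in the comments rather than the project's exact helper:

    import logging

    TRACE = logging.DEBUG - 5  # numeric value 5, one step below DEBUG (10)

    # register the level name and value, as add_logging_level does
    logging.addLevelName(TRACE, "TRACE")
    logging.TRACE = TRACE

    def trace(self, message, *args, **kwargs):
        # standard recipe body: build a record only when TRACE is enabled
        if self.isEnabledFor(TRACE):
            self._log(TRACE, message, args, **kwargs)

    # give every Logger a .trace() method, like the setattr(getLoggerClass(), ...) call
    logging.getLoggerClass().trace = trace

    logging.basicConfig(level=TRACE)
    logging.getLogger("worker").trace("checking in from logger worker thread")

With that in place, the `logger.trace(...)` calls introduced in the other two files resolve to the new level instead of raising AttributeError.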

View File

@@ -119,7 +119,7 @@ class DevicePoolExecutor:
     def create_logger_worker(self) -> None:
         def logger_worker(logs: Queue):
-            logger.info("checking in from logger worker thread")
+            logger.trace("checking in from logger worker thread")
             while True:
                 try:
@@ -144,7 +144,7 @@ class DevicePoolExecutor:
     def create_progress_worker(self) -> None:
         def progress_worker(progress: Queue):
-            logger.info("checking in from progress worker thread")
+            logger.trace("checking in from progress worker thread")
             while True:
                 try:
                     job, device, value = progress.get(timeout=(self.join_timeout / 2))
@@ -175,7 +175,7 @@ class DevicePoolExecutor:
     def create_finished_worker(self) -> None:
         def finished_worker(finished: Queue):
-            logger.info("checking in from finished worker thread")
+            logger.trace("checking in from finished worker thread")
             while True:
                 try:
                     job, device = finished.get(timeout=(self.join_timeout / 2))
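The three hunks above touch the same pattern: each pool helper starts a thread that drains a queue, announcing itself once (now at TRACE rather than INFO) and then polling with a timeout derived from `join_timeout` so shutdown is never blocked indefinitely on an idle queue. A generic sketch of that consumer loop, with the `join_timeout` value chosen only for illustration:

    from multiprocessing import Queue
    from queue import Empty
    from threading import Thread
    from time import sleep

    join_timeout = 10.0  # assumed shutdown timeout, in seconds

    def progress_worker(progress: Queue) -> None:
        print("checking in from progress worker thread")
        while True:
            try:
                # poll with half the join timeout so an idle queue never
                # delays shutdown by more than that
                job, device, value = progress.get(timeout=(join_timeout / 2))
                print("progress for %s on %s: %s" % (job, device, value))
            except Empty:
                pass  # nothing reported this interval; keep polling

    progress = Queue()
    Thread(target=progress_worker, args=(progress,), daemon=True).start()
    progress.put(("job-1", "cuda:0", 10))
    sleep(1)  # let the daemon thread print before the demo exits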
@@ -218,7 +218,7 @@ class DevicePoolExecutor:
         jobs.update([self.pending[d.device].qsize() for d in self.devices])
         queued = jobs.most_common()
-        logger.debug("jobs queued by device: %s", queued)
+        logger.trace("jobs queued by device: %s", queued)
         lowest_count = queued[-1][1]
         lowest_devices = [d[0] for d in queued if d[1] == lowest_count]
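For context, this last hunk sits in the pool's device-selection logic: `jobs` is a `collections.Counter` of queue depths, `most_common()` sorts it in descending order, so the tail entries identify the least-loaded device(s). A simplified sketch of that selection, feeding the counter a hypothetical device-to-depth mapping instead of the live `self.pending[...].qsize()` values:

    from collections import Counter

    # hypothetical pending-queue depths, standing in for
    # self.pending[d.device].qsize() over self.devices
    pending = {"cuda:0": 3, "cuda:1": 1, "cpu": 1}

    jobs = Counter()
    jobs.update(pending)

    queued = jobs.most_common()   # [('cuda:0', 3), ('cuda:1', 1), ('cpu', 1)]
    print("jobs queued by device: %s" % (queued,))

    lowest_count = queued[-1][1]  # depth of the emptiest queue
    lowest_devices = [d[0] for d in queued if d[1] == lowest_count]
    lowest_devices.sort()

    print(lowest_devices[0])      # 'cpu': schedule the next job here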

View File

@@ -23,7 +23,7 @@ def worker_main(context: WorkerContext, server: ServerContext):
     apply_patches(server)
     setproctitle("onnx-web worker: %s" % (context.device.device))
-    logger.info("checking in from worker, %s", get_available_providers())
+    logger.trace("checking in from worker, %s", get_available_providers())
     # make leaking workers easier to recycle
     context.progress.cancel_join_thread()
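On the last line above: by default, a process that has put items on a `multiprocessing.Queue` will not exit until its background feeder thread has flushed those items to the pipe, which can leave a wedged worker hanging around. `cancel_join_thread()` drops that guarantee (possibly losing buffered items) so the worker can be recycled promptly, matching the comment in the diff. A small standalone illustration, not project code:

    from multiprocessing import Process, Queue

    def worker(progress: Queue) -> None:
        # allow this process to exit even if buffered items are not yet
        # flushed; mirrors context.progress.cancel_join_thread()
        progress.cancel_join_thread()
        progress.put(("job-1", "cuda:0", 100))
        # normally exit would wait here for the feeder thread to drain
        # the buffer; with cancel_join_thread() it returns immediately,
        # at the cost of possibly dropping the item above

    if __name__ == "__main__":
        progress = Queue()
        child = Process(target=worker, args=(progress,))
        child.start()
        child.join()
        print("worker exited without waiting on its queue feeder thread")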