2023-01-28 23:09:19 +00:00
|
|
|
from logging import getLogger
|
2023-01-28 05:28:14 +00:00
|
|
|
from os import path
|
2023-11-19 00:13:13 +00:00
|
|
|
from typing import Optional
|
2023-01-28 05:28:14 +00:00
|
|
|
|
2023-02-05 13:53:26 +00:00
|
|
|
from PIL import Image
|
|
|
|
|
2023-02-06 14:07:06 +00:00
|
|
|
from ..params import DeviceParams, ImageParams, StageParams, UpscaleParams
|
2023-07-03 16:33:56 +00:00
|
|
|
from ..server import ModelTypes, ServerContext
|
2023-02-19 02:28:21 +00:00
|
|
|
from ..utils import run_gc
|
2023-02-26 05:49:39 +00:00
|
|
|
from ..worker import WorkerContext
|
2023-11-18 23:18:23 +00:00
|
|
|
from .base import BaseStage
|
2023-11-19 00:08:38 +00:00
|
|
|
from .result import StageResult
|
2023-01-31 14:16:57 +00:00
|
|
|
|
2023-01-28 23:09:19 +00:00
|
|
|
logger = getLogger(__name__)
|
|
|
|
|
2023-01-28 05:28:14 +00:00
|
|
|
|
2023-07-02 23:21:21 +00:00
|
|
|
class CorrectGFPGANStage(BaseStage):
    """Pipeline stage that restores faces in images using a GFPGAN model."""

    def load(
        self,
        server: ServerContext,
        _stage: StageParams,
        upscale: UpscaleParams,
        device: DeviceParams,
    ):
        """Load (or fetch from the server cache) a GFPGANer pipeline.

        The pipeline is keyed on the resolved model path, so repeated calls
        with the same correction model reuse one instance.

        Args:
            server: server context providing the model cache and cache path.
            _stage: stage parameters (unused here, kept for a uniform signature).
            upscale: provides ``correction_model`` and ``face_outscale``.
            device: device the pipeline should run on.

        Returns:
            A ``GFPGANer`` instance, either cached or freshly constructed.
        """
        # must be within the load function for patch to take effect
        # TODO: rewrite and remove
        from gfpgan import GFPGANer

        face_path = path.join(server.cache_path, "%s.pth" % (upscale.correction_model))
        cache_key = (face_path,)
        cache_pipe = server.cache.get(ModelTypes.correction, cache_key)

        if cache_pipe is not None:
            logger.info("reusing existing GFPGAN pipeline")
            return cache_pipe

        logger.debug("loading GFPGAN model from %s", face_path)

        # TODO: find a way to pass the ONNX model to underlying architectures
        gfpgan = GFPGANer(
            arch="clean",
            bg_upsampler=None,
            channel_multiplier=2,
            device=device.torch_str(),
            model_path=face_path,
            upscale=upscale.face_outscale,
        )

        server.cache.set(ModelTypes.correction, cache_key, gfpgan)
        # loading the model can leave large temporaries behind; reclaim them now
        run_gc([device])

        return gfpgan

    def run(
        self,
        worker: WorkerContext,
        server: ServerContext,
        stage: StageParams,
        _params: ImageParams,
        sources: StageResult,
        *,
        upscale: UpscaleParams,
        stage_source: Optional[Image.Image] = None,
        **kwargs,
    ) -> StageResult:
        """Correct faces in each source image with GFPGAN.

        Skips (returning ``sources`` unchanged) when no correction model is
        configured. Extra keyword arguments override fields on ``upscale``
        via ``with_args``.

        Args:
            worker: worker context used to select the execution device.
            server: server context forwarded to :meth:`load`.
            stage: stage parameters forwarded to :meth:`load`.
            _params: image parameters (unused by this stage).
            sources: images to correct, consumed as arrays.
            upscale: upscale/correction parameters, including the model name
                and ``face_strength``.
            stage_source: optional single-image override (unused here;
                kept for a uniform stage signature).

        Returns:
            A new ``StageResult`` with corrected images and the original
            metadata, or ``sources`` itself when no model is given.
        """
        upscale = upscale.with_args(**kwargs)

        # guard clause: nothing to do without a correction model
        if upscale.correction_model is None:
            logger.warning("no face model given, skipping")
            return sources

        logger.info("correcting faces with GFPGAN model: %s", upscale.correction_model)
        device = worker.get_device()
        gfpgan = self.load(server, stage, upscale, device)

        outputs = []
        for source in sources.as_arrays():
            # enhance returns (cropped_faces, restored_faces, pasted_result);
            # only the pasted-back full image is kept
            _cropped, _restored, result = gfpgan.enhance(
                source,
                has_aligned=False,
                only_center_face=False,
                paste_back=True,
                weight=upscale.face_strength,
            )
            outputs.append(result)

        return StageResult.from_arrays(outputs, metadata=sources.metadata)
|