From 29f03a303d206245f253ee722fa96ee17a66e54e Mon Sep 17 00:00:00 2001 From: ndming Date: Wed, 19 Mar 2025 13:46:13 +0100 Subject: [PATCH] Fix inconsistent training results with RGBA/PNG images The training relies on PIL to resize the input images and extracts the resized alpha to mask the rendered image during training. Since PIL pre-multiplies the resized RGB with the resized alpha, the training produces different Gaussian points depending on whether the input gets resized or not. Moreover, the extracted alpha channel from PIL is not perfectly binarized, causing floaters around the edges. --- utils/general_utils.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/utils/general_utils.py b/utils/general_utils.py index 541c082..ca9f62b 100644 --- a/utils/general_utils.py +++ b/utils/general_utils.py @@ -19,6 +19,21 @@ def inverse_sigmoid(x): return torch.log(x/(1-x)) def PILtoTorch(pil_image, resolution): + # When resizing RGBA, PIL pre-multiplies the resulting RGB with the resized alpha channel. This gives + # different training behaviors depending on whether the image is actually resized (via -r flag) or not. + # Moreover, the resized alpha is no longer a perfect binary image due to interpolation, which produces + # a significant amount of floaters along the edges. To fix this, we manually mask the RGB if the input + # is an RGBA, then we forget the alpha channel entirely. The multiplication of the rendered image with + # the alpha_mask during training thus becomes a no-op for RGBA. 
+ if pil_image.mode == 'RGBA': + from PIL import Image + image_np = np.array(pil_image) + rgb_np = image_np[..., :3] + alpha_np = image_np[..., 3:] + masked_rgb_np = (rgb_np / 255.0) * (alpha_np / 255.0) + masked_rgb_np = np.clip(masked_rgb_np, 0.0, 1.0) + pil_image = Image.fromarray((masked_rgb_np * 255).astype(np.uint8)) + resized_image_PIL = pil_image.resize(resolution) resized_image = torch.from_numpy(np.array(resized_image_PIL)) / 255.0 if len(resized_image.shape) == 3: