diff --git a/.gitmodules b/.gitmodules
index d20bef2..dc4f1bc 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -3,7 +3,7 @@
 	url = https://gitlab.inria.fr/bkerbl/simple-knn.git
 [submodule "submodules/diff-gaussian-rasterization"]
 	path = submodules/diff-gaussian-rasterization
-	url = https://github.com/graphdeco-inria/diff-gaussian-rasterization
+	url = https://github.com/jkulhanek/fork-diff-gaussian-rasterization.git
 [submodule "SIBR_viewers"]
 	path = SIBR_viewers
 	url = https://gitlab.inria.fr/sibr/sibr_core.git
diff --git a/gaussian_renderer/__init__.py b/gaussian_renderer/__init__.py
index f74e336..586b2a9 100644
--- a/gaussian_renderer/__init__.py
+++ b/gaussian_renderer/__init__.py
@@ -15,7 +15,7 @@ from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianR
 from scene.gaussian_model import GaussianModel
 from utils.sh_utils import eval_sh
 
-def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None):
+def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None, return_accumulation=False):
     """
     Render the scene.
 
@@ -32,7 +32,6 @@ def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor,
     # Set up rasterization configuration
     tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
     tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
-
     raster_settings = GaussianRasterizationSettings(
         image_height=int(viewpoint_camera.image_height),
         image_width=int(viewpoint_camera.image_width),
@@ -45,7 +44,8 @@
         sh_degree=pc.active_sh_degree,
         campos=viewpoint_camera.camera_center,
         prefiltered=False,
-        debug=pipe.debug
+        debug=pipe.debug,
+        return_accumulation=return_accumulation
     )
 
     rasterizer = GaussianRasterizer(raster_settings=raster_settings)
@@ -82,7 +82,7 @@ def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor,
         colors_precomp = override_color
 
     # Rasterize visible Gaussians to image, obtain their radii (on screen).
-    rendered_image, radii = rasterizer(
+    rendered_image, radii, accumulation = rasterizer(
         means3D = means3D,
         means2D = means2D,
         shs = shs,
@@ -94,7 +94,10 @@
 
     # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
     # They will be excluded from value updates used in the splitting criteria.
-    return {"render": rendered_image,
-            "viewspace_points": screenspace_points,
-            "visibility_filter" : radii > 0,
-            "radii": radii}
+    out = {"render": rendered_image,
+           "viewspace_points": screenspace_points,
+           "visibility_filter" : radii > 0,
+           "radii": radii}
+    if raster_settings.return_accumulation:
+        out["accumulation"] = accumulation
+    return out
diff --git a/submodules/diff-gaussian-rasterization b/submodules/diff-gaussian-rasterization
index 59f5f77..6cf71af 160000
--- a/submodules/diff-gaussian-rasterization
+++ b/submodules/diff-gaussian-rasterization
@@ -1 +1 @@
-Subproject commit 59f5f77e3ddbac3ed9db93ec2cfe99ed6c5d121d
+Subproject commit 6cf71af574a8ac3463b7fcf02dfb54ecf4b684e8
diff --git a/train.py b/train.py
index 36faf0d..46f8a07 100644
--- a/train.py
+++ b/train.py
@@ -56,7 +56,9 @@ def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoi
                 net_image_bytes = None
                 custom_cam, do_training, pipe.convert_SHs_python, pipe.compute_cov3D_python, keep_alive, scaling_modifer = network_gui.receive()
                 if custom_cam != None:
-                    net_image = render(custom_cam, gaussians, pipe, background, scaling_modifer)["render"]
+                    out = render(custom_cam, gaussians, pipe, background, scaling_modifer, return_accumulation=True)
+                    # net_image = out["render"]
+                    net_image = out["accumulation"][None].repeat(3, 1, 1)
                     net_image_bytes = memoryview((torch.clamp(net_image, min=0, max=1.0) * 255).byte().permute(1, 2, 0).contiguous().cpu().numpy())
                 network_gui.send(net_image_bytes, dataset.source_path)
                 if do_training and ((iteration < int(opt.iterations)) or not keep_alive):