Allow rendering accumulation

Jonas Kulhanek 2024-02-07 18:30:03 +01:00
parent 0955231a06
commit 8f8ce1086d
4 changed files with 16 additions and 11 deletions

.gitmodules

@@ -3,7 +3,7 @@
     url = https://gitlab.inria.fr/bkerbl/simple-knn.git
 [submodule "submodules/diff-gaussian-rasterization"]
     path = submodules/diff-gaussian-rasterization
-    url = https://github.com/graphdeco-inria/diff-gaussian-rasterization
+    url = https://github.com/jkulhanek/fork-diff-gaussian-rasterization.git
 [submodule "SIBR_viewers"]
     path = SIBR_viewers
     url = https://gitlab.inria.fr/sibr/sibr_core.git

gaussian_renderer/__init__.py

@@ -15,7 +15,7 @@ from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianR
 from scene.gaussian_model import GaussianModel
 from utils.sh_utils import eval_sh
 
-def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None):
+def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None, return_accumulation=False):
     """
     Render the scene.
 
@@ -32,7 +32,6 @@ def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor,
     # Set up rasterization configuration
     tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
     tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
-
     raster_settings = GaussianRasterizationSettings(
         image_height=int(viewpoint_camera.image_height),
         image_width=int(viewpoint_camera.image_width),
@@ -45,7 +44,8 @@
         sh_degree=pc.active_sh_degree,
         campos=viewpoint_camera.camera_center,
         prefiltered=False,
-        debug=pipe.debug
+        debug=pipe.debug,
+        return_accumulation=return_accumulation
     )
 
     rasterizer = GaussianRasterizer(raster_settings=raster_settings)
@@ -82,7 +82,7 @@ def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor,
         colors_precomp = override_color
 
     # Rasterize visible Gaussians to image, obtain their radii (on screen).
-    rendered_image, radii = rasterizer(
+    rendered_image, radii, accumulation = rasterizer(
         means3D = means3D,
         means2D = means2D,
         shs = shs,
@@ -94,7 +94,10 @@ def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor,
     # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
     # They will be excluded from value updates used in the splitting criteria.
-    return {"render": rendered_image,
+    out = {"render": rendered_image,
             "viewspace_points": screenspace_points,
             "visibility_filter" : radii > 0,
             "radii": radii}
+    if raster_settings.return_accumulation:
+        out["accumulation"] = accumulation
+    return out
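
Callers opt in through the new keyword argument; with the default return_accumulation=False the return value is unchanged. A minimal usage sketch inside code where these objects already exist (the variable names below are illustrative, and the (H, W) shape of the map is inferred from how train.py expands it):

out = render(viewpoint_camera, gaussians, pipe, background, return_accumulation=True)
image = out["render"]                # (3, H, W) rendered image, as before
acc = out["accumulation"]            # per-pixel accumulated alpha, presumably (H, W)
acc_rgb = acc[None].repeat(3, 1, 1)  # broadcast to 3 channels, as train.py does for the viewer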

submodules/diff-gaussian-rasterization

@@ -1 +1 @@
-Subproject commit 59f5f77e3ddbac3ed9db93ec2cfe99ed6c5d121d
+Subproject commit 6cf71af574a8ac3463b7fcf02dfb54ecf4b684e8
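
The submodule bump is what provides the new return_accumulation setting and the extra tensor returned by the rasterizer, so the forked diff-gaussian-rasterization has to be re-synced and reinstalled after pulling this commit. A quick sanity check of the installed package could look like this (a sketch, assuming the fork keeps GaussianRasterizationSettings as a NamedTuple like the upstream package):

from diff_gaussian_rasterization import GaussianRasterizationSettings

# Upstream defines GaussianRasterizationSettings as a NamedTuple; assuming the fork
# keeps that, the presence of the new field can be checked by name.
assert "return_accumulation" in GaussianRasterizationSettings._fields, \
    "installed diff-gaussian-rasterization does not expose return_accumulation"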

train.py

@@ -56,7 +56,9 @@ def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoi
                 net_image_bytes = None
                 custom_cam, do_training, pipe.convert_SHs_python, pipe.compute_cov3D_python, keep_alive, scaling_modifer = network_gui.receive()
                 if custom_cam != None:
-                    net_image = render(custom_cam, gaussians, pipe, background, scaling_modifer)["render"]
+                    out = render(custom_cam, gaussians, pipe, background, scaling_modifer, return_accumulation=True)
+                    # net_image = out["render"]
+                    net_image = out["accumulation"][None].repeat(3, 1, 1)
                     net_image_bytes = memoryview((torch.clamp(net_image, min=0, max=1.0) * 255).byte().permute(1, 2, 0).contiguous().cpu().numpy())
                 network_gui.send(net_image_bytes, dataset.source_path)
                 if do_training and ((iteration < int(opt.iterations)) or not keep_alive):
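
In the hunk above, the single-channel accumulation map is tiled to three channels, presumably because the subsequent memoryview conversion expects a (3, H, W) tensor when building RGB bytes for network_gui. For inspecting the map outside the viewer, a small sketch like the following could be used (not part of this commit; torchvision and the output filename are assumptions):

import torchvision

# `out` as returned by render(..., return_accumulation=True); clamp to [0, 1] and
# save the (H, W) map as a grayscale PNG by adding a channel dimension.
acc = out["accumulation"].clamp(0.0, 1.0)
torchvision.utils.save_image(acc[None], "accumulation.png")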