Mirror of https://github.com/graphdeco-inria/gaussian-splatting (synced 2025-06-26 18:18:11 +00:00)

Commit 3e33ab3cda ("add new comments"), parent f9ef1ea404
@@ -17,22 +17,30 @@ from utils.sh_utils import eval_sh
 def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None):
     """
-    Render the scene.
-    Background tensor (bg_color) must be on GPU!
+    Render the scene: project the Gaussians onto the 2D screen to produce the rendered image.
+    viewpoint_camera: the camera of the current training view
+    pc: the Gaussian model
+    pipe: pipeline-related parameters
+    bg_color: background tensor, must be on the GPU
+    scaling_modifier:
+    override_color:
     """

     # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
+    # Create a zero tensor with the same shape as the input point cloud (the Gaussian model); it records the screen-space positions of the points and is used to obtain gradients with respect to the screen-space coordinates
     screenspace_points = torch.zeros_like(pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda") + 0
     try:
+        # Try to retain the gradient of this tensor so that gradients with respect to the screen-space coordinates are available during backpropagation
         screenspace_points.retain_grad()
     except:
         pass

     # Set up rasterization configuration
+    # Compute the tangents of the horizontal and vertical fields of view; these feed into the rasterization settings
     tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
     tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)

+    # Configure the rasterizer: image size, FoV tangents, background color, view matrix (viewmatrix), projection matrix (projmatrix), etc.
     raster_settings = GaussianRasterizationSettings(
         image_height=int(viewpoint_camera.image_height),
         image_width=int(viewpoint_camera.image_width),
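The added comments above describe the purpose of screenspace_points: it is a non-leaf tensor, so PyTorch discards its gradient after backward() unless retain_grad() is called. A minimal self-contained illustration of that behaviour (not part of the diff; the variable names are illustrative):

import torch

x = torch.zeros(4, 3, requires_grad=True)  # leaf tensor: .grad is kept automatically
y = x + 0                                  # non-leaf tensor: .grad is discarded by default
y.retain_grad()                            # ask autograd to keep the gradient on y as well

y.sum().backward()
print(x.grad.shape, y.grad.shape)          # both torch.Size([4, 3]) thanks to retain_grad()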
@@ -45,18 +53,19 @@ def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor,
         sh_degree=pc.active_sh_degree,
         campos=viewpoint_camera.camera_center,
         prefiltered=False,
-        debug=pipe.debug,
-        clamp_color=True
+        debug=pipe.debug
     )

+    # Create a Gaussian rasterizer object that projects the Gaussians onto the screen
     rasterizer = GaussianRasterizer(raster_settings=raster_settings)

+    # Fetch the 3D positions, screen-space positions, and opacities of the Gaussian model
     means3D = pc.get_xyz
     means2D = screenspace_points
     opacity = pc.get_opacity

-    # If precomputed 3d covariance is provided, use it. If not, then it will be computed from
-    # scaling / rotation by the rasterizer.
+    # If precomputed 3d covariance is provided, use it. If not, then it will be computed from scaling / rotation by the rasterizer.
+    # If a precomputed 3D covariance matrix is provided, use it; otherwise the rasterizer computes it from the scales and rotations
     scales = None
     rotations = None
     cov3D_precomp = None
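The new comment above states that, without a precomputed covariance, the rasterizer derives it from the per-Gaussian scales and rotations. A minimal self-contained sketch of that construction, Sigma = (R S)(R S)^T, with illustrative names (this is not the upstream implementation):

import torch

def covariance_from_scaling_rotation(s: torch.Tensor, R: torch.Tensor) -> torch.Tensor:
    """s: (N, 3) per-axis scales, R: (N, 3, 3) rotation matrices.
    Returns (N, 3, 3) covariances Sigma = (R S)(R S)^T, symmetric positive semi-definite."""
    S = torch.diag_embed(s)        # (N, 3, 3) diagonal scale matrices
    L = R @ S                      # scale-rotation matrix
    return L @ L.transpose(1, 2)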
@@ -68,14 +77,20 @@ def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor,

     # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
     # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
+    # If precomputed colors are provided, use them. Otherwise, if the colors should be precomputed from the spherical harmonics (SHs) in Python, do it here; if not, the rasterizer performs the SH -> RGB conversion
     shs = None
     colors_precomp = None
     if override_color is None:
         if pipe.convert_SHs_python:
+            # Reshape the SH features to (batch_size * num_points, 3, (max_sh_degree + 1) ** 2)
             shs_view = pc.get_features.transpose(1, 2).view(-1, 3, (pc.max_sh_degree+1)**2)
+            # Compute the direction vector from the camera center to each point
             dir_pp = (pc.get_xyz - viewpoint_camera.camera_center.repeat(pc.get_features.shape[0], 1))
+            # Normalize the direction vectors
             dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True)
+            # Evaluate the SH features along the view directions to obtain RGB colors
             sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized)
+            # Shift by 0.5 and clamp at zero so the resulting colors are non-negative
             colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)
         else:
             shs = pc.get_features
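eval_sh (imported at the top of this file) evaluates the spherical-harmonics coefficients along the normalized view directions; the + 0.5 offset re-centers the DC term before the clamp. As a degree-0 illustration only (the constant below is the standard Y_0^0 basis value; this is not the upstream eval_sh):

import torch

C0 = 0.28209479177387814  # Y_0^0 = 1 / (2 * sqrt(pi)), the degree-0 SH basis value

def eval_sh_deg0(sh: torch.Tensor) -> torch.Tensor:
    # At degree 0 the view direction does not matter yet: color = C0 * coefficient
    return C0 * sh[..., 0]

sh = torch.randn(8, 3, 16)                          # (num_points, channels, (max_sh_degree + 1) ** 2)
rgb = torch.clamp_min(eval_sh_deg0(sh) + 0.5, 0.0)  # shift and clamp, as in the renderer
print(rgb.shape)                                    # torch.Size([8, 3])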
@@ -83,6 +98,7 @@ def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor,
         colors_precomp = override_color

     # Rasterize visible Gaussians to image, obtain their radii (on screen).
+    # Invoke the rasterizer to project the Gaussians onto the screen, obtaining the rendered image and the on-screen radius of each Gaussian
     rendered_image, radii = rasterizer(
         means3D = means3D,
         means2D = means2D,
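This hunk is truncated after the rasterizer call; train.py below unpacks the renderer's result by key. A sketch of how such a result dictionary is typically packaged, inferred from those keys (assumed shape, not the verbatim upstream return statement):

import torch

def package_render_output(rendered_image: torch.Tensor,
                          screenspace_points: torch.Tensor,
                          radii: torch.Tensor) -> dict:
    # "visibility_filter" marks Gaussians whose projected radius is non-zero,
    # i.e. those that actually touched the screen for this view.
    return {
        "render": rendered_image,
        "viewspace_points": screenspace_points,
        "visibility_filter": radii > 0,
        "radii": radii,
    }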
train.py (2 lines changed)

@@ -105,7 +105,7 @@ def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoi
         bg = torch.rand((3), device="cuda") if opt.random_background else background

         # Render the image for the current viewpoint
-        render_pkg = render(viewpoint_cam, gaussians, pipe, bg)
+        render_pkg = render(viewpoint_cam, gaussians, pipe, bg, return_depth=True, return_normal=True)
         image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"], render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]

         # Loss
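The # Loss comment marks where the photometric loss is computed. As a reminder of the objective used in the 3D Gaussian Splatting paper, a weighted sum of L1 and D-SSIM, here is a minimal sketch with an externally supplied SSIM value and the paper's default weight (names and signature are illustrative, not the lines that follow in train.py):

import torch

def photometric_loss(image: torch.Tensor, gt_image: torch.Tensor,
                     ssim_value: torch.Tensor, lambda_dssim: float = 0.2) -> torch.Tensor:
    # (1 - lambda) * L1 + lambda * (1 - SSIM), following the 3DGS paper
    l1 = torch.abs(image - gt_image).mean()
    return (1.0 - lambda_dssim) * l1 + lambda_dssim * (1.0 - ssim_value)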
@@ -95,8 +95,8 @@ def strip_lowerdiag(L):
 def strip_symmetric(sym):
     """
     Extract the symmetric part of a covariance matrix
-    :param sym: covariance matrix
-    :return: the symmetric part
+    sym: covariance matrix
+    return: the symmetric part
     """
     return strip_lowerdiag(sym)

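strip_lowerdiag itself is not shown in this diff. A sketch of what such a helper typically does for a batch of symmetric 3x3 matrices, keeping only the six unique entries (assumed implementation, not necessarily the upstream code):

import torch

def strip_lowerdiag(L: torch.Tensor) -> torch.Tensor:
    # Compact (N, 3, 3) symmetric matrices into (N, 6) by keeping the upper triangle
    uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device=L.device)
    uncertainty[:, 0] = L[:, 0, 0]
    uncertainty[:, 1] = L[:, 0, 1]
    uncertainty[:, 2] = L[:, 0, 2]
    uncertainty[:, 3] = L[:, 1, 1]
    uncertainty[:, 4] = L[:, 1, 2]
    uncertainty[:, 5] = L[:, 2, 2]
    return uncertainty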
@@ -129,10 +129,9 @@ def build_rotation(r):
 def build_scaling_rotation(s, r):
     """
     Build the scale-rotation matrix of the 3D Gaussian model
-    :param s: scale parameters
-    :param r: rotation parameters
-    :return: the scale-rotation matrix
+    s: scale parameters
+    r: rotation parameters
+    return: the scale-rotation matrix
     """
     L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device="cuda")  # initialize the scale matrix
     R = build_rotation(r)  # quaternion -> rotation matrix
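build_rotation, referenced by the comment above, converts per-Gaussian quaternions into rotation matrices; its body lies outside this hunk. A self-contained sketch of that standard conversion, assuming a (w, x, y, z) component order (illustrative, not the verbatim upstream function):

import torch

def quat_to_rotmat(q: torch.Tensor) -> torch.Tensor:
    q = q / q.norm(dim=1, keepdim=True)  # normalize to unit quaternions
    w, x, y, z = q[:, 0], q[:, 1], q[:, 2], q[:, 3]
    R = torch.stack([
        1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y),
        2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x),
        2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y),
    ], dim=1).reshape(-1, 3, 3)  # (N, 3, 3) rotation matrices
    return R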