add comments

liuzhi 2024-05-09 20:17:55 +08:00
parent 8b095ba2be
commit 29b974ed47
8 changed files with 380 additions and 102 deletions

run_code.txt (new file, 81 lines added)

@ -0,0 +1,81 @@
Running:
# Train with train/test split
python train.py --source_path <path to COLMAP or NeRF Synthetic dataset> --model_path <output path> --eval
python train.py --source_path /media/liuzhi/b4608ade-d2e0-430d-a40b-f29a8b22cb8c/Dataset/3DGS_Dataset/湘家荡 --model_path output/xiangjiadang --eval --resolution 1
--source_path / -s: path to the source directory of a COLMAP or Synthetic NeRF dataset. A COLMAP-type source contains images/ and sparse/0
--model_path / -m: path where the trained model is stored (default: output/<random>)
--images / -i: alternative subdirectory for COLMAP images (default: images)
--eval: training uses all images by default; add --eval to use a MipNeRF360-style training/test split for evaluation
--resolution / -r: resolution at which images are loaded before training. If 1, 2, 4 or 8, uses the original, 1/2, 1/4 or 1/8 resolution, respectively.
For all other values, rescales the image width to the given number while keeping the aspect ratio;
if unset and the input image width exceeds 1.6K pixels, the width is automatically rescaled to 1.6K
--data_device: where the source image data is placed during training (default: cuda); for large/high-resolution datasets consider cpu, which reduces VRAM usage at the cost of slower training
--white_background / -w: add this flag to use a white background instead of black (default), e.g. for evaluating the NeRF Synthetic dataset
--sh_degree: order of the spherical harmonics, no larger than 3 (default: 3)
--convert_SHs_python: add this flag to compute the forward and backward of the SH coefficients with PyTorch instead of the paper's pipeline
--convert_cov3D_python: add this flag to compute the forward and backward of the 3D covariance with PyTorch instead of the paper's pipeline
--debug: enable debug mode if you run into errors. If the rasterizer fails, a dump file is created that you can forward to us in an issue so we can take a look
--debug_from: debugging is slow; you can specify an iteration (starting from 0) after which debugging becomes active
--iterations: total number of training iterations (default: 30_000)
--ip: IP to start the GUI server on (default: 127.0.0.1)
--port: port for the GUI server (default: 6009)
--test_iterations: space-separated iterations at which the training script computes L1 and PSNR over the test set (default: 7000 30000)
--save_iterations: space-separated iterations at which the training script saves the Gaussian model (default: 7000 30000)
--checkpoint_iterations: space-separated iterations at which to store a checkpoint for continuing training later, saved in the model directory
--start_checkpoint: path to a saved checkpoint to continue training from
--quiet: do not write any text to the standard output pipe
--feature_lr: learning rate of the spherical-harmonics features (default: 0.0025)
--opacity_lr: opacity learning rate (default: 0.05)
--scaling_lr: scale learning rate (default: 0.005)
--rotation_lr: rotation-quaternion learning rate (default: 0.001)
--position_lr_max_steps: number of steps (from 0) over which the position learning rate goes from its initial to its final value (default: 30_000)
--position_lr_init: initial position learning rate (default: 0.00016)
--position_lr_final: final position learning rate (default: 0.0000016)
--position_lr_delay_mult: position learning-rate multiplier (default: 0.01)
--densify_from_iter: iteration at which densification starts (default: 500)
--densify_until_iter: iteration at which densification stops (default: 15_000)
--densify_grad_threshold: limit deciding whether points should be densified, based on their 2D position gradient (default: 0.0002)
--densification_interval: how often to densify (default: 100, i.e. once every 100 iterations)
--opacity_reset_interval: how often to reset opacity (default: 3_000)
--lambda_dssim: influence of SSIM on the total loss, from 0 to 1 (default: 0.2)
--percent_dense: percentage of the scene extent (0--1) a point must exceed to be forcibly densified (default: 0.01)
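# Worked example with these defaults: densification runs from iteration 500 to 15_000 at interval 100, i.e. roughly (15_000 - 500) / 100 = 145 densify/prune passes, with opacity resets every 3_000 iterations.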
#--------------------------------------------------------------------------
Evaluation:
# Generate renderings
python render.py -m <path to trained model>
--model_path / -m: path to the trained model for which renderings are created
--skip_train: skip rendering the training set
--skip_test: skip rendering the test set
--quiet: do not write any text to the standard output pipe
The following parameters are read automatically from the model path used for training, but they can be overridden:
--source_path / -s: path to the source directory of a COLMAP or Synthetic NeRF dataset
--images / -i: alternative subdirectory for COLMAP images (default: images)
--eval
--resolution / -r
--white_background / -w
--convert_SHs_python
--convert_cov3D_python
#--------------------------------------------------------------------------
# Compute error metrics on renderings
python metrics.py -m <path to trained model>
--model_paths / -m: space-separated list of model paths for which metrics should be computed
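#--------------------------------------------------------------------------
# A typical end-to-end run, assuming default settings (paths are placeholders):
python train.py -s /data/my_scene -m output/my_scene --eval
python render.py -m output/my_scene
python metrics.py -m output/my_scene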

scene/__init__.py

@ -19,27 +19,39 @@ from arguments import ModelParams
from utils.camera_utils import cameraList_from_camInfos, camera_to_JSON
class Scene:
"""
The Scene class manages the scene's 3D model: initialization and loading of the camera parameters, the point-cloud data, and the Gaussian model.
"""
gaussians : GaussianModel
def __init__(self, args : ModelParams, gaussians : GaussianModel, load_iteration=None, shuffle=True, resolution_scales=[1.0]):
"""b
:param path: Path to colmap scene main folder.
"""
self.model_path = args.model_path
self.loaded_iter = None
self.gaussians = gaussians
初始化场景对象
:param args: 包含模型路径和源路径等模型参数
:param gaussians: 高斯模型对象用于场景点的3D表示
:param load_iteration: 指定加载模型的迭代次数如果不为None且为-1则在输出文件夹下的point_cloud/文件夹下搜索迭代次数最大的模型且不为-1则加载指定迭代次数的
:param shuffle: 是否在训练前打乱相机列表
:param resolution_scales: 分辨率比例列表用于处理不同分辨率的相机
"""
self.model_path = args.model_path # 模型文件保存路径
self.loaded_iter = None # 已加载的迭代次数
self.gaussians = gaussians # 高斯模型对象
# Check for and load an existing trained model
if load_iteration:
# load_iteration is not None
if load_iteration == -1:
# -1: search the point_cloud/ folder under the output directory for the model with the highest iteration, and record that iteration
self.loaded_iter = searchForMaxIteration(os.path.join(self.model_path, "point_cloud"))
else:
# any other value: load that exact iteration
self.loaded_iter = load_iteration
print("Loading trained model at iteration {}".format(self.loaded_iter))
self.train_cameras = {} # camera parameters used for training
self.test_cameras = {} # camera parameters used for testing
# Determine whether the dataset comes from COLMAP or from Blender, and load the scene info accordingly
if os.path.exists(os.path.join(args.source_path, "sparse")):
scene_info = sceneLoadTypeCallbacks["Colmap"](args.source_path, args.images, args.eval)
elif os.path.exists(os.path.join(args.source_path, "transforms_train.json")):
@ -48,6 +60,7 @@ class Scene:
else:
assert False, "Could not recognize scene type!"
# loaded_iter is None: the model has not been trained yet
if not self.loaded_iter:
with open(scene_info.ply_path, 'rb') as src_file, open(os.path.join(self.model_path, "input.ply") , 'wb') as dest_file:
dest_file.write(src_file.read())
@ -68,6 +81,7 @@ class Scene:
self.cameras_extent = scene_info.nerf_normalization["radius"]
# Load the training and test cameras for every scale in resolution_scales
for resolution_scale in resolution_scales:
print("Loading Training Cameras")
self.train_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.train_cameras, resolution_scale, args)
@ -75,18 +89,29 @@ class Scene:
self.test_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.test_cameras, resolution_scale, args)
if self.loaded_iter:
# Read the already-optimized scene at the loaded iteration directly
self.gaussians.load_ply(os.path.join(self.model_path,
"point_cloud",
"iteration_" + str(self.loaded_iter),
"point_cloud.ply"))
else:
# loaded_iter is None (not trained yet): call GaussianModel.create_from_pcd to build the model from scene_info.point_cloud
self.gaussians.create_from_pcd(scene_info.point_cloud, self.cameras_extent)
def save(self, iteration):
"""
保存当前迭代下的3D高斯模型点云
iteration: 当前的迭代次数
"""
point_cloud_path = os.path.join(self.model_path, "point_cloud/iteration_{}".format(iteration))
self.gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply"))
def getTrainCameras(self, scale=1.0):
"""
获取指定分辨率比例的训练相机列表
scale: 分辨率比例
return: 指定分辨率比例的训练相机列表
"""
return self.train_cameras[scale]
def getTestCameras(self, scale=1.0):

scene/dataset_readers.py

@ -66,32 +66,47 @@ def getNerfppNorm(cam_info):
return {"translate": translate, "radius": radius}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
cam_infos = [] # list collecting the per-camera info
# Iterate over the extrinsics of all cameras
for idx, key in enumerate(cam_extrinsics):
# Live progress display while reading the camera info
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
sys.stdout.flush()
# Fetch the current camera's extrinsics and intrinsics
extr = cam_extrinsics[key] # extrinsics of the current camera
intr = cam_intrinsics[extr.camera_id] # intrinsics found via the camera_id stored in the extrinsics
height = intr.height # image height
width = intr.width # image width
uid = intr.id # unique identifier of the camera
R = np.transpose(qvec2rotmat(extr.qvec)) # convert the quaternion rotation into a rotation matrix R
T = np.array(extr.tvec) # translation vector from the extrinsics
# Compute the field of view (FoV) from the camera's intrinsic model
if intr.model=="SIMPLE_PINHOLE":
# SIMPLE_PINHOLE: a single focal-length parameter
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height) # vertical field of view
FovX = focal2fov(focal_length_x, width) # horizontal field of view
elif intr.model=="PINHOLE":
# PINHOLE: two focal-length parameters
focal_length_x = intr.params[0]
focal_length_y = intr.params[1]
FovY = focal2fov(focal_length_y, height) # vertical FoV from the y focal length
FovX = focal2fov(focal_length_x, width) # horizontal FoV from the x focal length
elif intr.model=="SIMPLE_RADIAL":
# SIMPLE_RADIAL has a single focal length (its params are f, cx, cy, k); the radial distortion is ignored here
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height) # vertical FoV
FovX = focal2fov(focal_length_x, width) # horizontal FoV
else:
# Any other camera model is rejected
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
@ -101,11 +116,16 @@ def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
# Newline once all cameras have been read
sys.stdout.write('\n')
# Return the assembled list of camera info
return cam_infos
def fetchPly(path):
# Read the .ply file
plydata = PlyData.read(path)
# Its 'vertex' element holds the properties 'x', 'y', 'z', 'nx', 'ny', 'nz', 'red', 'green', 'blue'
vertices = plydata['vertex']
positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
@ -129,6 +149,7 @@ def storePly(path, xyz, rgb):
ply_data = PlyData([vertex_element])
ply_data.write(path)
# Try to read the binary camera extrinsics (images.bin) and intrinsics (cameras.bin) produced by COLMAP
def readColmapSceneInfo(path, images, eval, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
@ -136,39 +157,50 @@ def readColmapSceneInfo(path, images, eval, llffhold=8):
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
# If the binary files cannot be read, fall back to the text-format extrinsics and intrinsics
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
# Directory holding the images, defaulting to "images" when not specified
reading_dir = "images" if images == None else images
# Read the camera parameters and convert them into the internal format
cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
# Sort the camera info by image name so the ordering is deterministic
cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name)
# Split the cameras into a training and a test set depending on eval mode;
# in eval mode, every llffhold-th camera (a convention from the LLFF datasets) becomes a test camera
if eval:
train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
else:
# Outside eval mode, all cameras are used for training and the test list stays empty
train_cam_infos = cam_infos
test_cam_infos = []
# Compute the scene-normalization parameters; handling scenes of different sizes and positions makes training more stable
nerf_normalization = getNerfppNorm(train_cam_infos)
# Load the point cloud, preferring the PLY file; if it does not exist, convert the BIN or TXT file and save it as PLY
ply_path = os.path.join(path, "sparse/0/points3D.ply")
bin_path = os.path.join(path, "sparse/0/points3D.bin")
txt_path = os.path.join(path, "sparse/0/points3D.txt")
if not os.path.exists(ply_path):
print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
try:
xyz, rgb, _ = read_points3D_binary(bin_path) # read the sparse point cloud produced by COLMAP from points3D.bin
except:
xyz, rgb, _ = read_points3D_text(txt_path)
storePly(ply_path, xyz, rgb) # convert it into a ply file
try:
pcd = fetchPly(ply_path)
except:
pcd = None
# Assemble the scene info: point cloud, training cameras, test cameras, normalization parameters, and the point-cloud path
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,

scene/gaussian_model.py

@ -24,38 +24,54 @@ from utils.general_utils import strip_symmetric, build_scaling_rotation
class GaussianModel:
def setup_functions(self):
"""
Define and initialize the helper functions used to process the parameters of the 3D Gaussians.
"""
# Function building the covariance matrices of the 3D Gaussians
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
L = build_scaling_rotation(scaling_modifier * scaling, rotation) # L = R @ S, built from the scales, the scale modifier and the rotations
actual_covariance = L @ L.transpose(1, 2) # the actual covariance matrix
symm = strip_symmetric(actual_covariance) # keep only the symmetric (unique) entries
return symm
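# Numeric intuition (not part of the pipeline): with scales s = (sx, sy, sz) and a unit quaternion q,
# L = R(q) @ diag(s), so Sigma = L @ L^T = R S S^T R^T is symmetric positive semi-definite
# by construction, and strip_symmetric only needs to keep its 6 unique entries.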
# Initialize the activation functions
self.scaling_activation = torch.exp # exp keeps the scale parameters positive
self.scaling_inverse_activation = torch.log # inverse of the scale activation; scales are stored in log space
self.covariance_activation = build_covariance_from_scaling_rotation # activation producing the covariance matrix
self.opacity_activation = torch.sigmoid # sigmoid keeps the opacity between 0 and 1
self.inverse_opacity_activation = inverse_sigmoid # inverse of the opacity activation
self.rotation_activation = torch.nn.functional.normalize # normalizes the rotation quaternions
def __init__(self, sh_degree : int):
"""
Initialize the parameters of the 3D Gaussian model.
sh_degree: maximum degree of the spherical harmonics, controlling how complex the color representation can be
"""
# Spherical-harmonics degrees
self.active_sh_degree = 0 # currently active SH degree, starts at 0
self.max_sh_degree = sh_degree # maximum SH degree allowed
# Parameters of the 3D Gaussians
self._xyz = torch.empty(0) # centers (means) of the 3D Gaussians, in world coordinates
self._features_dc = torch.empty(0) # first SH coefficient (DC term), the base color
self._features_rest = torch.empty(0) # remaining SH coefficients, the view-dependent color detail
self._scaling = torch.empty(0) # scale parameters controlling each Gaussian's shape
self._rotation = torch.empty(0) # rotation parameters, a set of quaternions
self._opacity = torch.empty(0) # opacity (pre-sigmoid), controlling visibility
self.max_radii2D = torch.empty(0) # maximum radius of each Gaussian in the 2D projection
self.xyz_gradient_accum = torch.empty(0) # accumulated gradient of the Gaussian centers; a large average marks a Gaussian for densification (cloned if small, split if large)
self.denom = torch.empty(0) # how many times the gradient was accumulated; the denominator when averaging it (denom = denominator)
self.optimizer = None # optimizer adjusting the parameters above (the paper uses Adam; see the pseudocode of Algorithm 1, Appendix B)
self.percent_dense = 0 # hyperparameter controlling how dense the Gaussians become
self.spatial_lr_scale = 0 # the position learning rate is multiplied by this, compensating for applying one rate to scenes of different scales
# Initialize the processing functions
self.setup_functions()
def capture(self):
@ -122,35 +138,71 @@ class GaussianModel:
self.active_sh_degree += 1
def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):
"""
Initialize the model parameters from point-cloud data.
:param pcd: sparse point cloud with point positions and colors
:param spatial_lr_scale: spatial learning-rate scale, affecting the learning rate of the position parameters
"""
# From scene.Scene.__init__ and scene.dataset_readers.SceneInfo.nerf_normalization (i.e. scene.dataset_readers.getNerfppNorm):
# this value is 1.1x the largest distance from any training camera to their mean position (the center);
# judging from the name it scales the learning rate, so one fixed rate behaves well across scenes of different extents
self.spatial_lr_scale = spatial_lr_scale
# Convert the point positions and colors from numpy arrays to PyTorch tensors on the CUDA device
fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda() # 3D coordinates of the sparse points, shape (P, 3)
fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors)).float().cuda()) # DC component of the SH, shape (P, 3)
# RGB2SH(x) = (x - 0.5) / 0.28209479177387814, so pcd.colors evidently ranges from 0 to 1.
# 0.28209479177387814 = 1 / (2*sqrt(pi)) is the value of the DC basis function Y(l=0, m=0)
# Allocate the SH coefficients: 3 color channels, each with (max_sh_degree + 1) ** 2 coefficients
features = torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2)).float().cuda() # (P, 3, 16) for the default degree 3
features[:, :3, 0] = fused_color # store the converted DC coefficients (C0 term)
features[:, 3:, 1:] = 0.0 # the remaining SH coefficients start at 0
# Print the number of initial points
print("Number of points at initialisation : ", fused_point_cloud.shape[0])
# Mean squared distance from each point to its nearest k points, used to set the Gaussian scales; clamped from below at 1e-7.
# distCUDA2 is implemented by SimpleKNN::knn in submodules/simple-knn/simple_knn.cu (KNN = K-Nearest Neighbor)
dist2 = torch.clamp_min(distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()), 0.0000001) # (P,)
# Because the scale activation is exp, what is stored is ln(scale), not the scale itself.
# dist2 is a squared distance, hence the square root.
# repeat(1, 3) makes the initial scale equal in all three directions
scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3) # (P, 3)
# Initialize each point's rotation to the identity quaternion (no rotation)
rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda") # (P, 4)
rots[:, 0] = 1 # real part 1, i.e. no rotation
# Initialize each point's pre-sigmoid opacity to inverse_sigmoid(0.1); inverse_sigmoid(x) = ln(x / (1 - x)).
# Opacities are stored as pre-sigmoid values: inverse_sigmoid(0.1) = -2.197
opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda")) # (P, 1)
# Register the values computed above as trainable parameters
self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True)) # Gaussian centers, (N, 3)
self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(True)) # DC SH coefficients (C0 term) of the 3 color channels, (N, 1, 3)
self._features_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(True)) # higher-order SH coefficients, (N, (max SH degree + 1)^2 - 1, 3)
self._scaling = nn.Parameter(scales.requires_grad_(True)) # scales, (N, 3)
self._rotation = nn.Parameter(rots.requires_grad_(True)) # rotation quaternions, (N, 4)
self._opacity = nn.Parameter(opacities.requires_grad_(True)) # opacity (pre-sigmoid), (N, 1)
self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda") # maximum 2D projected radius per Gaussian, initialized to 0, shape (N,)
def training_setup(self, training_args):
"""
设置训练参数包括初始化用于累积梯度的变量配置优化器以及创建学习率调度器
:param training_args: 包含训练相关参数的对象
"""
# 设置在训练过程中用于密集化处理的3D高斯点的比例
# 控制Gaussian的密度在`densify_and_clone`中被使用
self.percent_dense = training_args.percent_dense
self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
# 初始化用于累积3D高斯中心点位置梯度的张量用于之后判断是否需要对3D高斯进行克隆或切分
self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") # 坐标的累积梯度
self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") # 意义不明
# Per-parameter optimizer configuration: the parameter, its learning rate, and its name
l = [
{'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
{'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"},
@ -160,13 +212,17 @@ class GaussianModel:
{'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"}
]
# Create the optimizer (Adam)
self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)
# Create the learning-rate scheduler that adjusts the learning rate of the Gaussian centers
self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
lr_final=training_args.position_lr_final*self.spatial_lr_scale,
lr_delay_mult=training_args.position_lr_delay_mult,
max_steps=training_args.position_lr_max_steps)
def update_learning_rate(self, iteration):
# Update the learning rate of the Gaussian centers
''' Learning rate scheduling per step '''
for param_group in self.optimizer.param_groups:
if param_group["name"] == "xyz":
@ -174,17 +230,19 @@ class GaussianModel:
param_group['lr'] = lr
return lr
# The model is saved to point_cloud/iteration_xxx/point_cloud.ply; reading it with PlyData.read(), the first element ('vertex') holds 'x', 'y', 'z', 'nx', 'ny', 'nz', 3 'f_dc_x', 45 'f_rest_xx', 'opacity', 3 'scale_x' and 4 'rot_x' properties
def construct_list_of_attributes(self):
# Build the list of property names for the ply file
l = ['x', 'y', 'z', 'nx', 'ny', 'nz'] # nx, ny, nz are normals; they are written as zeros and effectively unused
# All channels except the 3 DC
for i in range(self._features_dc.shape[1] * self._features_dc.shape[2]): # self._features_dc: (N, 1, 3)
l.append('f_dc_{}'.format(i))
for i in range(self._features_rest.shape[1] * self._features_rest.shape[2]): # self._features_rest: (N, (max SH degree + 1)^2 - 1, 3)
l.append('f_rest_{}'.format(i))
l.append('opacity')
for i in range(self._scaling.shape[1]): # shape[1]: 3
l.append('scale_{}'.format(i))
for i in range(self._rotation.shape[1]): # shape[1]: 4
l.append('rot_{}'.format(i))
return l
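# For max_sh_degree = 3 this list has 3 (xyz) + 3 (normals) + 3 (f_dc) + 45 (f_rest) + 1 (opacity) + 3 (scale) + 4 (rot) = 62 attributes per point.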
@ -192,7 +250,7 @@ class GaussianModel:
mkdir_p(os.path.dirname(path))
xyz = self._xyz.detach().cpu().numpy()
normals = np.zeros_like(xyz) # nx, ny, nz: written as zeros, placeholders with no real use
f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
opacities = self._opacity.detach().cpu().numpy()
@ -202,17 +260,21 @@ class GaussianModel:
dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()]
elements = np.empty(xyz.shape[0], dtype=dtype_full)
# Concatenate all values to be saved into one big array
attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1)
elements[:] = list(map(tuple, attributes))
el = PlyElement.describe(elements, 'vertex')
PlyData([el]).write(path)
def reset_opacity(self):
# get_opacity returns the sigmoid-activated value, i.e. the real opacity.
# This caps every opacity at 0.01
opacities_new = inverse_sigmoid(torch.min(self.get_opacity, torch.ones_like(self.get_opacity) * 0.01))
optimizable_tensors = self.replace_tensor_to_optimizer(opacities_new, "opacity") # update the opacities held by the optimizer
self._opacity = optimizable_tensors["opacity"]
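# Per Sec. 5.2 of the paper, periodically pushing opacities toward zero combats floaters near the cameras:
# optimization raises opacity again where it is needed, and pruning removes Gaussians that stay below min_opacity.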
def load_ply(self, path):
# Read a ply file and convert the data into torch.nn.Parameter tensors ready for optimization
plydata = PlyData.read(path)
xyz = np.stack((np.asarray(plydata.elements[0]["x"]),
@ -256,12 +318,14 @@ class GaussianModel:
self.active_sh_degree = self.max_sh_degree
def replace_tensor_to_optimizer(self, tensor, name):
# Replaces the value of the optimizer parameter named `name` with `tensor`.
# Note that the Adam optimizer's state (the momentum and the second-order momentum) has to be adjusted as well
optimizable_tensors = {}
for group in self.optimizer.param_groups:
if group["name"] == name:
stored_state = self.optimizer.state.get(group['params'][0], None)
stored_state["exp_avg"] = torch.zeros_like(tensor)
stored_state["exp_avg_sq"] = torch.zeros_like(tensor)
stored_state["exp_avg"] = torch.zeros_like(tensor) # 把动量清零
stored_state["exp_avg_sq"] = torch.zeros_like(tensor) # 把平方动量清零
del self.optimizer.state[group['params'][0]]
group["params"][0] = nn.Parameter(tensor.requires_grad_(True))
@ -271,6 +335,7 @@ class GaussianModel:
return optimizable_tensors
def _prune_optimizer(self, mask):
# Prune a subset of the parameters, and their momenta, according to `mask`
optimizable_tensors = {}
for group in self.optimizer.param_groups:
stored_state = self.optimizer.state.get(group['params'][0], None)
@ -289,9 +354,11 @@ class GaussianModel:
return optimizable_tensors
def prune_points(self, mask):
# Delete Gaussians and remove all of their attributes
valid_points_mask = ~mask
optimizable_tensors = self._prune_optimizer(valid_points_mask)
# Rebind each pruned parameter
self._xyz = optimizable_tensors["xyz"]
self._features_dc = optimizable_tensors["f_dc"]
self._features_rest = optimizable_tensors["f_rest"]
@ -305,6 +372,7 @@ class GaussianModel:
self.max_radii2D = self.max_radii2D[valid_points_mask]
def cat_tensors_to_optimizer(self, tensors_dict):
# Append the new tensors in the dict to the optimizer's parameter groups
optimizable_tensors = {}
for group in self.optimizer.param_groups:
assert len(group["params"]) == 1
@ -327,6 +395,7 @@ class GaussianModel:
return optimizable_tensors
def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):
# Add new Gaussians: register their attributes with the optimizer
d = {"xyz": new_xyz,
"f_dc": new_features_dc,
"f_rest": new_features_rest,
@ -355,10 +424,19 @@ class GaussianModel:
selected_pts_mask = torch.logical_and(selected_pts_mask,
torch.max(self.get_scaling, dim=1).values > self.percent_dense*scene_extent)
'''
Gaussians are split when they satisfy both conditions:
1. their average gradient is too large;
2. their largest scale along some axis exceeds a threshold.
See Sec. 5.2 of the paper ("On the other hand ..."): a large Gaussian is split into two smaller ones,
whose scales are divided by phi = 1.6 and whose positions are sampled using the original large Gaussian as a probability density function.
'''
stds = self.get_scaling[selected_pts_mask].repeat(N,1)
means =torch.zeros((stds.size(0), 3),device="cuda")
samples = torch.normal(mean=means, std=stds)
rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N,1,1)
# Compute the randomly sampled new centers. bmm: batch matrix-matrix product
new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1)
new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N,1) / (0.8*N))
new_rotation = self._rotation[selected_pts_mask].repeat(N,1)
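# With the default N = 2, dividing the scales by 0.8 * N = 1.6 matches the phi = 1.6 reduction cited above.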
@ -376,6 +454,7 @@ class GaussianModel:
selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= grad_threshold, True, False)
selected_pts_mask = torch.logical_and(selected_pts_mask,
torch.max(self.get_scaling, dim=1).values <= self.percent_dense*scene_extent)
# Select Gaussians whose gradient exceeds `grad_threshold` and whose scales are small (at most self.percent_dense * scene_extent); they are cloned below
new_xyz = self._xyz[selected_pts_mask]
new_features_dc = self._features_dc[selected_pts_mask]
@ -387,21 +466,26 @@ class GaussianModel:
self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation)
def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
grads = self.xyz_gradient_accum / self.denom # compute the average gradient
grads[grads.isnan()] = 0.0
self.densify_and_clone(grads, max_grad, extent) # densify by cloning
self.densify_and_split(grads, max_grad, extent) # densify by splitting
# Next, prune Gaussians that meet any of the following conditions:
# 1. nearly transparent (opacity below min_opacity);
# 2. the maximum 2D radius ever observed from a camera exceeds the screen-size threshold;
# 3. the largest scale along some axis exceeds 0.1 * extent (so very elongated Gaussians are removed too)
prune_mask = (self.get_opacity < min_opacity).squeeze()
if max_screen_size:
big_points_vs = self.max_radii2D > max_screen_size # vs = view space
big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * extent # ws = world space
prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws)
self.prune_points(prune_mask)
torch.cuda.empty_cache()
def add_densification_stats(self, viewspace_point_tensor, update_filter):
# Accumulate the positional gradients and the denominator used for their mean (the number of accumulation steps)
self.xyz_gradient_accum[update_filter] += torch.norm(viewspace_point_tensor.grad[update_filter,:2], dim=-1, keepdim=True)
self.denom[update_filter] += 1
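# viewspace_point_tensor.grad is the screen-space positional gradient supplied by the rasterizer;
# only its x and y components ([:, :2]) contribute to the accumulated norm.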

Submodule (path not shown in this view)
@ -1 +1 @@
Subproject commit 59f5f77e3ddbac3ed9db93ec2cfe99ed6c5d121d
Subproject commit 8ead777020a16604d8b7a1d0bfac2dbb8df962a9

train.py

@ -10,8 +10,10 @@
#
import os
import numpy as np
import torch
from random import randint
from PIL import Image
from utils.loss_utils import l1_loss, ssim
from gaussian_renderer import render, network_gui
import sys
@ -31,24 +33,31 @@ except ImportError:
def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from):
first_iter = 0
tb_writer = prepare_output_and_logger(dataset)
gaussians = GaussianModel(dataset.sh_degree) # create the Gaussian model that represents the scene as a set of 3D Gaussians
scene = Scene(dataset, gaussians) # create the Scene object, loading the dataset and its camera parameters
gaussians.training_setup(opt) # set up the optimizer and learning-rate scheduler for the Gaussian parameters
# If a checkpoint is given, load the model parameters from it and resume the training progress
if checkpoint:
(model_params, first_iter) = torch.load(checkpoint)
gaussians.restore(model_params, opt)
# Background color: white or black, depending on what the dataset requires
bg_color = [1, 1, 1] if dataset.white_background else [0, 0, 0]
background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
# CUDA events used to time each iteration
iter_start = torch.cuda.Event(enable_timing = True)
iter_end = torch.cuda.Event(enable_timing = True)
viewpoint_stack = None
ema_loss_for_log = 0.0
# tqdm progress bar tracking the training progress
progress_bar = tqdm(range(first_iter, opt.iterations), desc="Training progress")
first_iter += 1
for iteration in range(first_iter, opt.iterations + 1):
if network_gui.conn == None:
network_gui.try_connect()
while network_gui.conn != None:
@ -64,25 +73,28 @@ def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoi
except Exception as e:
network_gui.conn = None
iter_start.record() # record the iteration start time
gaussians.update_learning_rate(iteration) # update the learning rate for the current iteration
# Every 1000 iterations, increase the SH degree by one, up to the maximum
if iteration % 1000 == 0:
gaussians.oneupSHdegree()
# Pick a random training camera
if not viewpoint_stack:
viewpoint_stack = scene.getTrainCameras().copy()
viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack)-1))
# If the debug start iteration has been reached, enable debug mode
if (iteration - 1) == debug_from:
pipe.debug = True
# Render: rasterize the image from the camera parameters with the differentiable rasterizer.
# Use a random background color if configured
bg = torch.rand((3), device="cuda") if opt.random_background else background
# Render the image from the current viewpoint
render_pkg = render(viewpoint_cam, gaussians, pipe, bg)
image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"], render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
@ -92,10 +104,10 @@ def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoi
loss = (1.0 - opt.lambda_dssim) * Ll1 + opt.lambda_dssim * (1.0 - ssim(image, gt_image))
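# With the default lambda_dssim = 0.2, the loss above is 0.8 * L1 + 0.2 * (1 - SSIM)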
loss.backward()
iter_end.record() # record the iteration end time
with torch.no_grad():
# Progress bar: update the displayed exponential moving average of the loss
ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log
if iteration % 10 == 0:
progress_bar.set_postfix({"Loss": f"{ema_loss_for_log:.{7}f}"})
@ -103,13 +115,13 @@ def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoi
if iteration == opt.iterations:
progress_bar.close()
# Log and save: periodically record training metrics and save the model
training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, scene, render, (pipe, background))
if (iteration in saving_iterations):
print("\n[ITER {}] Saving Gaussians".format(iteration))
scene.save(iteration)
# Densification: densify and prune the 3D Gaussians within the configured iteration range
if iteration < opt.densify_until_iter:
# Keep track of max radii in image-space for pruning
gaussians.max_radii2D[visibility_filter] = torch.max(gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
@ -122,11 +134,12 @@ def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoi
if iteration % opt.opacity_reset_interval == 0 or (dataset.white_background and iteration == opt.densify_from_iter):
gaussians.reset_opacity()
# Optimizer step, then prepare the next iteration
if iteration < opt.iterations:
gaussians.optimizer.step()
gaussians.optimizer.zero_grad(set_to_none = True)
# Periodically save a checkpoint
if (iteration in checkpoint_iterations):
print("\n[ITER {}] Saving Checkpoint".format(iteration))
torch.save((gaussians.capture(), iteration), scene.model_path + "/chkpnt" + str(iteration) + ".pth")
@ -193,9 +206,11 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
if __name__ == "__main__":
# Set up command line argument parser
parser = ArgumentParser(description="Training script parameters")
lp = ModelParams(parser) # model parameters
op = OptimizationParams(parser) # optimization parameters
pp = PipelineParams(parser) # rendering-pipeline parameters
parser.add_argument('--ip', type=str, default="127.0.0.1")
parser.add_argument('--port', type=int, default=6009)
parser.add_argument('--debug_from', type=int, default=-1)

utils/general_utils.py

@ -43,11 +43,23 @@ def get_expon_lr_func(
:param max_steps: int, the number of steps during optimization.
:return HoF which takes step as input
"""
"""
创建一个学习率调度函数该函数根据训练进度动态调整学习率
:param lr_init: 初始学习率
:param lr_final: 最终学习率
:param lr_delay_steps: 学习率延迟步数在这些步数内学习率将被降低
:param lr_delay_mult: 学习率延迟乘数用于计算初始延迟学习率
:param max_steps: 最大步数用于规范化训练进度
:return: 一个函数根据当前步数返回调整后的学习率
"""
def helper(step):
# For negative steps, or when both the initial and final rates are 0, return 0 (this parameter is not optimized)
if step < 0 or (lr_init == 0.0 and lr_final == 0.0):
# Disable this parameter
return 0.0
# If delay steps are configured, compute the delayed learning-rate factor
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
@ -55,15 +67,23 @@ def get_expon_lr_func(
)
else:
delay_rate = 1.0
# Log-linear interpolation over the training progress: a smooth transition from the initial to the final learning rate
t = np.clip(step / max_steps, 0, 1)
log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
# Return the adjusted learning rate
return delay_rate * log_lerp
return helper
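# A minimal usage sketch (values mirror the position_lr_* defaults listed in run_code.txt):
# sched = get_expon_lr_func(lr_init=0.00016, lr_final=0.0000016, max_steps=30_000)
# sched(0)      -> 0.00016   (t = 0, pure lr_init)
# sched(30_000) -> 0.0000016 (t = 1, pure lr_final)
# sched(15_000) -> sqrt(0.00016 * 0.0000016) ~ 1.6e-5, the geometric mean, since the interpolation is linear in log space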
def strip_lowerdiag(L):
"""
从协方差矩阵中提取六个独立参数
:param L: 协方差矩阵
:return: 六个独立参数组成的张量
"""
uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device="cuda")
# Copy the upper-triangular entries of the covariance matrix
uncertainty[:, 0] = L[:, 0, 0]
uncertainty[:, 1] = L[:, 0, 1]
uncertainty[:, 2] = L[:, 0, 2]
@ -73,9 +93,17 @@ def strip_lowerdiag(L):
return uncertainty
def strip_symmetric(sym):
"""
提取协方差矩阵的对称部分
:param sym: 协方差矩阵
:return: 对称部分
"""
return strip_lowerdiag(sym)
def build_rotation(r):
'''
Normalize the rotation quaternions and convert them into 3x3 rotation matrices.
'''
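# Assumes w-first quaternions (r = [w, x, y, z]), matching COLMAP's qvec convention.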
norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])
q = r / norm[:, None]
@ -99,14 +127,22 @@ def build_rotation(r):
return R
def build_scaling_rotation(s, r):
"""
Build the scale-rotation matrices of the 3D Gaussians.
:param s: scale parameters
:param r: rotation parameters (quaternions)
:return: the scale-rotation matrices L = R @ S
"""
L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device="cuda") # initialize the scale matrices
R = build_rotation(r) # quaternions -> rotation matrices
# Put the scales on the diagonal
L[:,0,0] = s[:,0]
L[:,1,1] = s[:,1]
L[:,2,2] = s[:,2]
L = R @ L # apply the rotation
return L
def safe_state(silent):

utils/sh_utils.py

@ -112,6 +112,11 @@ def eval_sh(deg, sh, dirs):
return result
def RGB2SH(rgb):
"""
将RGB颜色值转换为球谐系数C0项的系数
:param rgb: RGB颜色值
:return: 转换后的球谐系数C0项的系数
"""
return (rgb - 0.5) / C0
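# C0 = 0.28209479177387814 = 1 / (2 * sqrt(pi)), so RGB2SH maps [0, 1] to roughly [-1.77, +1.77]; SH2RGB below is its exact inverse.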
def SH2RGB(sh):