mirror of https://github.com/graphdeco-inria/gaussian-splatting
synced 2025-04-08 15:04:14 +00:00

chore: prepare for pull-request
-> removing debug commentaries
-> removing unused proposed code

The commit reverts the experimental configurable-dtype plumbing (the dtype arguments on GaussianModel.setup_functions, strip_lowerdiag, strip_symmetric and build_scaling_rotation, plus the stored self.dtype) back to hard-coded torch.float, drops the now-unused get_data_dtype import in train.py, and deletes leftover debug comments.

parent b5a5f72eda
commit 39fb001ef0
render.py

@@ -62,5 +62,5 @@ if __name__ == "__main__":

     # Initialize system state (RNG)
     safe_state(args.quiet)

     render_sets(model.extract(args), args.iteration, pipeline.extract(args), args.skip_train, args.skip_test)
scene/gaussian_model.py

@@ -23,11 +23,11 @@ from utils.general_utils import strip_symmetric, build_scaling_rotation

 class GaussianModel:

-    def setup_functions(self, dtype):
+    def setup_functions(self):
         def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
-            L = build_scaling_rotation(scaling_modifier * scaling, rotation, dtype)
+            L = build_scaling_rotation(scaling_modifier * scaling, rotation)
             actual_covariance = L @ L.transpose(1, 2)
-            symm = strip_symmetric(actual_covariance, dtype)
+            symm = strip_symmetric(actual_covariance)
             return symm

         self.scaling_activation = torch.exp
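For context on the hunk above: build_covariance_from_scaling_rotation assembles, for every Gaussian, L = R·S from a quaternion rotation and a per-axis scaling, and keeps the six unique entries of the symmetric covariance Σ = L·Lᵀ. Below is a minimal standalone sketch of the same computation using torch directly; make_covariance is an illustrative name, not a function from the repository.

import torch

def make_covariance(scaling, rotation, scaling_modifier=1.0):
    # scaling:  (N, 3) per-axis scales; rotation: (N, 4) quaternions (w, x, y, z)
    q = torch.nn.functional.normalize(rotation)
    w, x, y, z = q[:, 0], q[:, 1], q[:, 2], q[:, 3]

    # Quaternion -> rotation matrix, one 3x3 matrix per Gaussian
    R = torch.stack([
        1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y),
        2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x),
        2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y),
    ], dim=-1).reshape(-1, 3, 3)

    # Scaling each column of R by s is R @ diag(s); the covariance is L @ L^T
    L = R * (scaling_modifier * scaling).unsqueeze(1)
    cov = L @ L.transpose(1, 2)

    # Keep only the six unique entries of the symmetric 3x3 matrix
    return torch.stack([cov[:, 0, 0], cov[:, 0, 1], cov[:, 0, 2],
                        cov[:, 1, 1], cov[:, 1, 2], cov[:, 2, 2]], dim=-1)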
@@ -41,7 +41,7 @@ class GaussianModel:
         self.rotation_activation = torch.nn.functional.normalize


-    def __init__(self, sh_degree : int, dtype=torch.float32):
+    def __init__(self, sh_degree : int):
         self.active_sh_degree = 0
         self.max_sh_degree = sh_degree
         self._xyz = torch.empty(0)
@@ -56,8 +56,7 @@ class GaussianModel:
         self.optimizer = None
         self.percent_dense = 0
         self.spatial_lr_scale = 0
-        self.dtype = dtype
-        self.setup_functions(dtype)
+        self.setup_functions()

     def capture(self):
         return (
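Together with the previous hunk, this restores the upstream constructor: the model no longer stores a dtype and setup_functions() is called without arguments. At the call site the difference is roughly the following; the float16 value is only an illustration of what the removed parameter allowed.

from scene import GaussianModel  # scene/gaussian_model.py, as imported in train.py

# Before this commit (proposed, now removed): dtype was configurable,
# e.g. GaussianModel(sh_degree=3, dtype=torch.float16).

# After this commit: upstream behaviour, float32 tensors throughout.
gaussians = GaussianModel(sh_degree=3)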
@@ -137,7 +136,7 @@ class GaussianModel:
         rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda")
         rots[:, 0] = 1

-        opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=self.dtype, device="cuda"))
+        opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda"))

         self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True))
         self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(True))
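The opacity initialisation above stores logits rather than probabilities: inverse_sigmoid(0.1) is the value whose sigmoid is 0.1, so the raw _opacity parameter starts near -2.197 and the activated opacity at 0.1. A small sketch of that relationship, assuming inverse_sigmoid is the usual logit log(x / (1 - x)) as defined in utils/general_utils.py:

import torch

def inverse_sigmoid(x):
    # logit: the inverse of torch.sigmoid
    return torch.log(x / (1 - x))

opacities = inverse_sigmoid(0.1 * torch.ones((4, 1)))  # raw values ~ -2.1972
print(torch.sigmoid(opacities))                        # back to ~0.1 after activation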
@@ -145,7 +144,7 @@ class GaussianModel:
         self._scaling = nn.Parameter(scales.requires_grad_(True))
         self._rotation = nn.Parameter(rots.requires_grad_(True))
         self._opacity = nn.Parameter(opacities.requires_grad_(True))
-        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda", dtype=self.dtype)
+        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")

     def training_setup(self, training_args):
         self.percent_dense = training_args.percent_dense
@@ -247,12 +246,12 @@ class GaussianModel:
         for idx, attr_name in enumerate(rot_names):
             rots[:, idx] = np.asarray(plydata.elements[0][attr_name])

-        self._xyz = nn.Parameter(torch.tensor(xyz, dtype=self.dtype, device="cuda").requires_grad_(True))
-        self._features_dc = nn.Parameter(torch.tensor(features_dc, dtype=self.dtype, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
-        self._features_rest = nn.Parameter(torch.tensor(features_extra, dtype=self.dtype, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
-        self._opacity = nn.Parameter(torch.tensor(opacities, dtype=self.dtype, device="cuda").requires_grad_(True))
-        self._scaling = nn.Parameter(torch.tensor(scales, dtype=self.dtype, device="cuda").requires_grad_(True))
-        self._rotation = nn.Parameter(torch.tensor(rots, dtype=self.dtype, device="cuda").requires_grad_(True))
+        self._xyz = nn.Parameter(torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True))
+        self._features_dc = nn.Parameter(torch.tensor(features_dc, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
+        self._features_rest = nn.Parameter(torch.tensor(features_extra, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
+        self._opacity = nn.Parameter(torch.tensor(opacities, dtype=torch.float, device="cuda").requires_grad_(True))
+        self._scaling = nn.Parameter(torch.tensor(scales, dtype=torch.float, device="cuda").requires_grad_(True))
+        self._rotation = nn.Parameter(torch.tensor(rots, dtype=torch.float, device="cuda").requires_grad_(True))

         self.active_sh_degree = self.max_sh_degree

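This load_ply hunk goes back to hard-coding dtype=torch.float (i.e. float32) when turning the arrays read from the PLY file into optimisable parameters. The pattern is identical for every attribute; a minimal sketch with a stand-in numpy array (the array contents are illustrative only, and a CUDA device is assumed, as in the original code):

import numpy as np
import torch
import torch.nn as nn

xyz = np.zeros((100, 3))  # stand-in for the positions parsed from the .ply file

# numpy array -> float32 CUDA tensor -> leaf tensor with gradients -> nn.Parameter
param = nn.Parameter(torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True))
print(param.dtype, param.device, param.requires_grad)  # torch.float32 cuda:0 True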
train.py
@@ -16,7 +16,7 @@ from utils.loss_utils import l1_loss, ssim
 from gaussian_renderer import render, network_gui
 import sys
 from scene import Scene, GaussianModel
-from utils.general_utils import get_data_dtype, safe_state
+from utils.general_utils import safe_state
 import uuid
 from tqdm import tqdm
 from utils.image_utils import psnr
@@ -216,7 +216,6 @@ if __name__ == "__main__":
     # Start GUI server, configure and run training
     network_gui.init(args.ip, args.port)
     torch.autograd.set_detect_anomaly(args.detect_anomaly)
-
     training(lp.extract(args), op.extract(args), pp.extract(args), args.test_iterations, args.save_iterations, args.checkpoint_iterations, args.start_checkpoint, args.debug_from)

     # All done
utils/camera_utils.py

@@ -39,8 +39,6 @@ def loadCam(args, id, cam_info, resolution_scale):
         resolution = (int(orig_w / scale), int(orig_h / scale))

     resized_image_rgb = PILtoTorch(cam_info.image, resolution)

-    # resized_image_rgb = resized_image_rgb.to(get_data_dtype(args.data_dtype))
-
     gt_image = resized_image_rgb[:3, ...]
     loaded_mask = None
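The commented-out line removed here was the remnant of the abandoned proposal to cast each loaded image to a user-selected dtype (args.data_dtype fed through get_data_dtype). A sketch of what that cast amounted to, using a stand-in tensor and a hard-coded torch.float16 purely for illustration; this reconstructs the dropped idea, not code that exists after this commit:

import torch

# Stand-in for the tensor PILtoTorch would return for one camera image
resized_image_rgb = torch.rand(3, 480, 640)

# The abandoned proposal: cast the image to a dtype chosen via a --data_dtype option.
# After this commit the image simply keeps whatever dtype PILtoTorch produced.
resized_image_rgb = resized_image_rgb.to(torch.float16)
gt_image = resized_image_rgb[:3, ...]
print(gt_image.dtype, gt_image.shape)  # torch.float16 torch.Size([3, 480, 640])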
utils/general_utils.py

@@ -20,7 +20,7 @@ def inverse_sigmoid(x):

 def PILtoTorch(pil_image, resolution):
     resized_image_PIL = pil_image.resize(resolution)
-    resized_image = torch.from_numpy(np.array(resized_image_PIL))# / 255.0
+    resized_image = torch.from_numpy(np.array(resized_image_PIL))
     if len(resized_image.shape) == 3:
         return resized_image.permute(2, 0, 1)
     else:
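With the trailing "# / 255.0" comment gone, the conversion shown here no longer divides by 255, so values keep the range and dtype of the source image (uint8 for ordinary 8-bit images). A small usage sketch; the single-channel else branch is filled in from the upstream implementation and the input image is synthetic:

import numpy as np
import torch
from PIL import Image

def PILtoTorch(pil_image, resolution):
    # Mirrors the function after this commit: resize, to tensor, channels-first
    resized_image_PIL = pil_image.resize(resolution)
    resized_image = torch.from_numpy(np.array(resized_image_PIL))
    if len(resized_image.shape) == 3:
        return resized_image.permute(2, 0, 1)
    else:
        # single-channel path (body assumed from the upstream repository)
        return resized_image.unsqueeze(dim=-1).permute(2, 0, 1)

img = Image.new("RGB", (1600, 1200), color=(128, 64, 32))  # stand-in for a dataset image
chw = PILtoTorch(img, (800, 600))
print(chw.shape, chw.dtype)  # torch.Size([3, 600, 800]) torch.uint8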
@@ -61,8 +61,8 @@ def get_expon_lr_func(

     return helper

-def strip_lowerdiag(L, dtype=torch.float32):
-    uncertainty = torch.zeros((L.shape[0], 6), dtype=dtype, device="cuda")
+def strip_lowerdiag(L):
+    uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device="cuda")

     uncertainty[:, 0] = L[:, 0, 0]
     uncertainty[:, 1] = L[:, 0, 1]
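strip_lowerdiag packs each symmetric 3x3 covariance into six floats, with (0,0), (0,1) and (2,2) visible in the two hunks here; the middle entries (0,2), (1,1), (1,2) are taken from the upstream ordering and are an assumption in the sketch below. expand_symmetric is a hypothetical helper, not part of the repository, shown only to make the packing explicit:

import torch

def expand_symmetric(u):
    # u: (N, 6) packed entries in the assumed order (0,0),(0,1),(0,2),(1,1),(1,2),(2,2)
    cov = torch.zeros((u.shape[0], 3, 3), dtype=u.dtype, device=u.device)
    cov[:, 0, 0] = u[:, 0]
    cov[:, 0, 1] = u[:, 1]
    cov[:, 1, 0] = u[:, 1]
    cov[:, 0, 2] = u[:, 2]
    cov[:, 2, 0] = u[:, 2]
    cov[:, 1, 1] = u[:, 3]
    cov[:, 1, 2] = u[:, 4]
    cov[:, 2, 1] = u[:, 4]
    cov[:, 2, 2] = u[:, 5]
    return cov

u = torch.tensor([[4.0, 1.0, 0.5, 3.0, 0.2, 2.0]])
print(expand_symmetric(u))  # full symmetric 3x3 matrix rebuilt from the six values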
@@ -72,8 +72,8 @@ def strip_lowerdiag(L, dtype=torch.float32):
     uncertainty[:, 5] = L[:, 2, 2]
     return uncertainty

-def strip_symmetric(sym, dtype=torch.float32):
-    return strip_lowerdiag(sym, dtype=dtype)
+def strip_symmetric(sym):
+    return strip_lowerdiag(sym)

 def build_rotation(r):
     norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])
@@ -98,8 +98,8 @@ def build_rotation(r):
     R[:, 2, 2] = 1 - 2 * (x*x + y*y)
     return R

-def build_scaling_rotation(s, r, dtype=torch.float32):
-    L = torch.zeros((s.shape[0], 3, 3), dtype=dtype, device="cuda")
+def build_scaling_rotation(s, r):
+    L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device="cuda")
     R = build_rotation(r)

     L[:,0,0] = s[:,0]
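build_rotation, whose tail appears as context in these hunks, normalises batched quaternions and converts them to rotation matrices; judging by rots[:, 0] = 1 earlier in the diff, the convention is scalar-first (w, x, y, z). A quick sanity check of that convention as a standalone reimplementation (not an import from the repository):

import math
import torch

def quat_to_rotmat(q):
    # q: (N, 4) quaternions in assumed (w, x, y, z) order, normalised first
    q = q / q.norm(dim=-1, keepdim=True)
    w, x, y, z = q.unbind(-1)
    return torch.stack([
        1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y),
        2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x),
        2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y),
    ], dim=-1).reshape(-1, 3, 3)

# 90 degree rotation about the z axis: w = cos(45 deg), z = sin(45 deg)
q = torch.tensor([[math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4)]])
print(quat_to_rotmat(q))
# expected, up to floating-point noise: [[0, -1, 0], [1, 0, 0], [0, 0, 1]]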
@@ -139,4 +139,4 @@ def get_data_dtype(dtype):
         return torch.float64
     elif dtype == "float16":
         return torch.float16
     return torch.float32
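This final hunk shows the tail of get_data_dtype, the fork's helper that the rest of the commit stops using: only the float64 and float16 returns and the float32 fallback are visible, so the full helper presumably looks like the sketch below (the leading condition is inferred, not shown in the diff):

import torch

def get_data_dtype(dtype: str) -> torch.dtype:
    # Visible branches: float64, float16, and the float32 fallback.
    # The first condition is an assumption based on the returns shown above.
    if dtype == "float64":
        return torch.float64
    elif dtype == "float16":
        return torch.float16
    return torch.float32

print(get_data_dtype("float16"))        # torch.float16
print(get_data_dtype("anything else"))  # torch.float32 fallback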