mirror of https://github.com/graphdeco-inria/gaussian-splatting
synced 2024-11-25 13:26:47 +00:00
Merge branch 'release' into develop
commit ba677c3820
@@ -71,8 +71,8 @@ The optimizer uses PyTorch and CUDA extensions in a Python environment to produc
 ### Software Requirements
 - Conda (recommended for easy setup)
-- C++ Compiler for PyTorch extensions (we *recommend* Visual Studio 2019 for Windows)
-- CUDA 11 SDK for PyTorch extensions (we used 11.8)
+- C++ Compiler for PyTorch extensions (we used Visual Studio 2019 for Windows)
+- CUDA SDK 11.7+ for PyTorch extensions (we used 11.8, known issues with 11.6)
+- C++ Compiler and CUDA SDK must be compatible
 
 ### Setup
@@ -269,8 +269,8 @@ We provide two interactive viewers for our method: remote and real-time. Our view
 - CUDA-ready GPU with Compute Capability 7.0+ (only for Real-Time Viewer)
 
 ### Software Requirements
-- C++ Compiler (we *recommend* Visual Studio 2019 for Windows)
-- CUDA 11 Developer SDK (we used 11.8)
+- C++ Compiler (we used Visual Studio 2019 for Windows)
+- CUDA SDK 11 (we used 11.8)
 - CMake (recent version, we used 3.24)
 - 7zip (only on Windows)
@@ -72,7 +72,7 @@ class OptimizationParams(ParamGroup):
         self.position_lr_init = 0.00016
         self.position_lr_final = 0.0000016
         self.position_lr_delay_mult = 0.01
-        self.posititon_lr_max_steps = 30_000
+        self.position_lr_max_steps = 30_000
         self.feature_lr = 0.0025
         self.opacity_lr = 0.05
         self.scaling_lr = 0.001
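
The position_lr_* values above feed an exponential (log-linear) learning-rate decay from position_lr_init to position_lr_final over position_lr_max_steps, which the next hunk wires into get_expon_lr_func. The sketch below only illustrates that kind of schedule under stated assumptions (the function and argument names mirror the call in the next hunk; the delay handling is a guess), not the repository's implementation.

import numpy as np

def expon_lr_sketch(lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=30_000):
    """Log-linear interpolation from lr_init to lr_final over max_steps,
    with an optional damped warm-up over the first lr_delay_steps."""
    def schedule(step):
        if lr_delay_steps > 0:
            # Ramp the damping factor from lr_delay_mult up to 1.0.
            delay = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
                0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1))
        else:
            delay = 1.0
        t = np.clip(step / max_steps, 0, 1)
        # exp(lerp(log(lr_init), log(lr_final), t)) decays smoothly in log space.
        return delay * np.exp((1 - t) * np.log(lr_init) + t * np.log(lr_final))
    return schedule

# With the values above: 1.6e-4 at step 0, ~1.6e-5 at step 15_000, 1.6e-6 at step 30_000.
lr_fn = expon_lr_sketch(0.00016, 0.0000016, max_steps=30_000)
print(lr_fn(0), lr_fn(15_000), lr_fn(30_000))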
@@ -126,7 +126,7 @@ class GaussianModel:
         self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
                                                     lr_final=training_args.position_lr_final*self.spatial_lr_scale,
                                                     lr_delay_mult=training_args.position_lr_delay_mult,
-                                                    max_steps=training_args.posititon_lr_max_steps)
+                                                    max_steps=training_args.position_lr_max_steps)
 
     def update_learning_rate(self, iteration):
         ''' Learning rate scheduling per step '''
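
For context, update_learning_rate presumably evaluates the scheduler returned by get_expon_lr_func each iteration and writes the result into the optimizer's parameter group for the Gaussian positions. A minimal sketch of that pattern, assuming an optimizer whose position group carries a "name" tag of "xyz" (that tag and the optimizer layout are assumptions, not shown in this diff):

# Sketch only: assumes self.optimizer was built with param groups carrying a
# "name" key, e.g. {"params": [...], "lr": ..., "name": "xyz"} for the centers.
def update_learning_rate(self, iteration):
    ''' Learning rate scheduling per step '''
    for param_group in self.optimizer.param_groups:
        if param_group.get("name") == "xyz":
            lr = self.xyz_scheduler_args(iteration)  # scheduler built above
            param_group["lr"] = lr
            return lr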
train.py: 6 changed lines
@@ -162,9 +162,9 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
                     images = torch.cat((images, image.unsqueeze(0)), dim=0)
                     gts = torch.cat((gts, gt_image.unsqueeze(0)), dim=0)
                     if tb_writer and (idx < 5):
-                        tb_writer.add_images(config['name'] + "_view_{}/render".format(viewpoint.image_name), image, global_step=iteration)
+                        tb_writer.add_images(config['name'] + "_view_{}/render".format(viewpoint.image_name), image[None], global_step=iteration)
                         if iteration == testing_iterations[0]:
-                            tb_writer.add_images(config['name'] + "_view_{}/ground_truth".format(viewpoint.image_name), gt_image, global_step=iteration)
+                            tb_writer.add_images(config['name'] + "_view_{}/ground_truth".format(viewpoint.image_name), gt_image[None], global_step=iteration)
 
                 l1_test = l1_loss(images, gts)
                 psnr_test = psnr(images, gts).mean()
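
The [None] indexing in this fix adds a leading batch axis: SummaryWriter.add_images expects a batched (N, C, H, W) tensor by default, while a single rendered image is (C, H, W). A small shape-only illustration (the tensors here are made up, not the training code):

import torch

image = torch.rand(3, 256, 256)   # one rendered image: (C, H, W)
batched = image[None]             # same data with a batch axis: (1, C, H, W)
print(image.shape, batched.shape)

# add_images interprets its input as NCHW by default, so the batched form
# is the one to log:
#   from torch.utils.tensorboard import SummaryWriter
#   tb_writer = SummaryWriter("runs/example")
#   tb_writer.add_images("render", batched, global_step=0)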
@@ -204,4 +204,4 @@ if __name__ == "__main__":
     training(lp.extract(args), op.extract(args), pp.extract(args), args.test_iterations, args.save_iterations)
 
     # All done
-    print("\nTraining complete.")
+    print("\nTraining complete.")