Merge branch 'release' into develop

bkerbl 2023-07-11 20:09:42 +02:00
commit ba677c3820
4 changed files with 9 additions and 9 deletions

View File

@@ -71,8 +71,8 @@ The optimizer uses PyTorch and CUDA extensions in a Python environment to produce
 ### Software Requirements
 - Conda (recommended for easy setup)
-- C++ Compiler for PyTorch extensions (we *recommend* Visual Studio 2019 for Windows)
-- CUDA 11 SDK for PyTorch extensions (we used 11.8)
+- C++ Compiler for PyTorch extensions (we used Visual Studio 2019 for Windows)
+- CUDA SDK 11.7+ for PyTorch extensions (we used 11.8, known issues with 11.6)
 - C++ Compiler and CUDA SDK must be compatible

 ### Setup
@@ -269,8 +269,8 @@ We provide two interactive viewers for our method: remote and real-time. Our view
 - CUDA-ready GPU with Compute Capability 7.0+ (only for Real-Time Viewer)

 ### Software Requirements
-- C++ Compiler (we *recommend* Visual Studio 2019 for Windows)
-- CUDA 11 Developer SDK (we used 11.8)
+- C++ Compiler (we used Visual Studio 2019 for Windows)
+- CUDA SDK 11 (we used 11.8)
 - CMake (recent version, we used 3.24)
 - 7zip (only on Windows)
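For context: both requirement hunks above pin the CUDA toolchain, and the PyTorch extensions only build cleanly when the SDK used for compilation matches the toolkit PyTorch itself was built against. A minimal, hedged check (assuming PyTorch is already installed in the conda environment; version strings are illustrative):

```python
import torch

# The CUDA toolkit PyTorch was built against should match the installed SDK
# (11.7+ per the README change above; 11.8 is what the authors used).
print("PyTorch:", torch.__version__)
print("Built against CUDA:", torch.version.cuda)   # e.g. "11.8"
print("GPU visible:", torch.cuda.is_available())
```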

View File

@@ -72,7 +72,7 @@ class OptimizationParams(ParamGroup):
         self.position_lr_init = 0.00016
         self.position_lr_final = 0.0000016
         self.position_lr_delay_mult = 0.01
-        self.posititon_lr_max_steps = 30_000
+        self.position_lr_max_steps = 30_000
         self.feature_lr = 0.0025
         self.opacity_lr = 0.05
         self.scaling_lr = 0.001
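The renamed `position_lr_max_steps` is consumed by the exponential position-learning-rate schedule (see the `get_expon_lr_func` hunk below). As a rough, hedged sketch of the kind of decay these values describe: log-linear interpolation from `position_lr_init` to `position_lr_final` over `position_lr_max_steps` steps. This is not the repository's `get_expon_lr_func` itself, and the delay handled by `position_lr_delay_mult` is omitted:

```python
import math

def position_lr(step, lr_init=0.00016, lr_final=0.0000016, max_steps=30_000):
    # Log-linear interpolation: lr_init at step 0, lr_final at max_steps.
    t = min(max(step / max_steps, 0.0), 1.0)
    return math.exp((1.0 - t) * math.log(lr_init) + t * math.log(lr_final))

# position_lr(0) == 0.00016, position_lr(30_000) == 1.6e-06 (up to float rounding)
```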

View File

@@ -126,7 +126,7 @@ class GaussianModel:
         self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
                                                     lr_final=training_args.position_lr_final*self.spatial_lr_scale,
                                                     lr_delay_mult=training_args.position_lr_delay_mult,
-                                                    max_steps=training_args.posititon_lr_max_steps)
+                                                    max_steps=training_args.position_lr_max_steps)

     def update_learning_rate(self, iteration):
         ''' Learning rate scheduling per step '''
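The schedule stored in `self.xyz_scheduler_args` is presumably queried each iteration by `update_learning_rate` to adjust the position (xyz) parameters. A minimal sketch of such a per-step update, written as a free function for self-containment; the parameter-group name `"xyz"` and the optimizer layout are assumptions not shown in this diff:

```python
import torch

def update_position_lr(optimizer: torch.optim.Optimizer, xyz_scheduler_args, iteration: int):
    """Apply the scheduled learning rate to the (assumed) "xyz" parameter group."""
    for param_group in optimizer.param_groups:
        if param_group.get("name") == "xyz":
            lr = xyz_scheduler_args(iteration)
            param_group["lr"] = lr
            return lr
```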

View File

@@ -162,9 +162,9 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
                     images = torch.cat((images, image.unsqueeze(0)), dim=0)
                     gts = torch.cat((gts, gt_image.unsqueeze(0)), dim=0)
                     if tb_writer and (idx < 5):
-                        tb_writer.add_images(config['name'] + "_view_{}/render".format(viewpoint.image_name), image, global_step=iteration)
+                        tb_writer.add_images(config['name'] + "_view_{}/render".format(viewpoint.image_name), image[None], global_step=iteration)
                         if iteration == testing_iterations[0]:
-                            tb_writer.add_images(config['name'] + "_view_{}/ground_truth".format(viewpoint.image_name), gt_image, global_step=iteration)
+                            tb_writer.add_images(config['name'] + "_view_{}/ground_truth".format(viewpoint.image_name), gt_image[None], global_step=iteration)

                 l1_test = l1_loss(images, gts)
                 psnr_test = psnr(images, gts).mean()
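The `[None]` added in this hunk gives each tensor a leading batch axis: `SummaryWriter.add_images` expects a batched NCHW tensor by default, which is why a single CHW render gets the extra axis here. A small, hedged illustration (the writer and tensor below are placeholders, not the script's `tb_writer` setup):

```python
import torch
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter()          # illustrative writer, logs to the default ./runs directory
image = torch.rand(3, 128, 128)   # one CHW render

# add_images expects NCHW; image[None] has shape (1, 3, 128, 128).
writer.add_images("example_view/render", image[None], global_step=0)
writer.close()
```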
@@ -204,4 +204,4 @@ if __name__ == "__main__":
     training(lp.extract(args), op.extract(args), pp.extract(args), args.test_iterations, args.save_iterations)

     # All done
     print("\nTraining complete.")