From 8943f57b268a4c39f4bf5b65f7c21d68f236cc7c Mon Sep 17 00:00:00 2001
From: bkerbl
Date: Sun, 9 Jul 2023 15:14:31 +0200
Subject: [PATCH] Update

---
 README.md    |  2 +-
 full_eval.py | 12 ++++++------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/README.md b/README.md
index 084b4ca..90f88e7 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@ Bernhard Kerbl*, Georgios Kopanas*, Thomas Leimkühler, George Drettakis (* indi
 | [Webpage](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/) | [Full Paper](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/3d_gaussian_splatting_high.pdf) | [Video](https://youtu.be/T_kXY43VZnk) | [Other GRAPHDECO Publications](http://www-sop.inria.fr/reves/publis/gdindex.php) | [FUNGRAPH project page](https://fungraph.inria.fr)
-[T&T+DB Datasets (650MB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/datasets/input/tandt_db.zip) | [Pre-trained Models (14 GB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/datasets/pretrained/models.zip) | [Viewer Binaries for Windows (60MB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/binaries/viewers.zip) | [Evaluation Images](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/evaluation/images.zip) |
+[T&T+DB Datasets (650MB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/datasets/input/tandt_db.zip) | [Pre-trained Models (14 GB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/datasets/pretrained/models.zip) | [Viewer Binaries for Windows (60MB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/binaries/viewers.zip) | [Evaluation Images (7 GB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/evaluation/images.zip) |
 ![Teaser image](assets/teaser.png)
 
 This repository contains the code associated with the paper "3D Gaussian Splatting for Real-Time Radiance Field Rendering", which can be found [here](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/). We further provide the reference images used to create the error metrics reported in the paper, as well as recently created, pre-trained models.
diff --git a/full_eval.py b/full_eval.py
index 6b5776e..d174ef8 100644
--- a/full_eval.py
+++ b/full_eval.py
@@ -38,18 +38,18 @@ if not args.skip_training or not args.skip_rendering:
 
 if not args.skip_training:
     common_args = " --quiet --eval --test_iterations -1"
-    for scene in tanks_and_temples_scenes:
-        source = args.tanksandtemples + "/" + scene
-        os.system("python train.py -s " + source + " -m " + args.output_path + "/" + scene + common_args)
-    for scene in deep_blending_scenes:
-        source = args.deepblending + "/" + scene
-        os.system("python train.py -s " + source + " -m " + args.output_path + "/" + scene + common_args)
     for scene in mipnerf360_outdoor_scenes:
         source = args.mipnerf360 + "/" + scene
         os.system("python train.py -s " + source + " -i images_4 -m " + args.output_path + "/" + scene + common_args)
     for scene in mipnerf360_indoor_scenes:
         source = args.mipnerf360 + "/" + scene
         os.system("python train.py -s " + source + " -i images_2 -m " + args.output_path + "/" + scene + common_args)
+    for scene in tanks_and_temples_scenes:
+        source = args.tanksandtemples + "/" + scene
+        os.system("python train.py -s " + source + " -m " + args.output_path + "/" + scene + common_args)
+    for scene in deep_blending_scenes:
+        source = args.deepblending + "/" + scene
+        os.system("python train.py -s " + source + " -m " + args.output_path + "/" + scene + common_args)
 
 if not args.skip_rendering:
     all_sources = []
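
Note on the full_eval.py hunk above: with this patch, the training stage launches the MipNeRF360 scenes first (outdoor scenes with the 4x-downsampled images via -i images_4, indoor scenes with -i images_2) and only afterwards the Tanks&Temples and Deep Blending scenes, which train on their default images. The standalone sketch below mirrors that dispatch order; the scene lists and dataset paths are illustrative placeholders, since the real script builds them from its argparse options (--mipnerf360, --tanksandtemples, --deepblending, --output_path).

# Sketch of the per-scene training dispatch in full_eval.py after this patch.
# Scene lists and paths are placeholders standing in for the script's argparse values.
import os

mipnerf360_outdoor_scenes = ["bicycle", "garden"]   # placeholder subset
mipnerf360_indoor_scenes = ["room", "counter"]      # placeholder subset
tanks_and_temples_scenes = ["truck", "train"]
deep_blending_scenes = ["drjohnson", "playroom"]

mipnerf360_path = "datasets/mipnerf360"   # stands in for args.mipnerf360
tandt_path = "datasets/tandt"             # stands in for args.tanksandtemples
db_path = "datasets/db"                   # stands in for args.deepblending
output_path = "eval"                      # stands in for args.output_path

common_args = " --quiet --eval --test_iterations -1"

# MipNeRF360 scenes train first; outdoor scenes use the 4x-downsampled images,
# indoor scenes the 2x-downsampled ones.
for scene in mipnerf360_outdoor_scenes:
    os.system("python train.py -s " + mipnerf360_path + "/" + scene +
              " -i images_4 -m " + output_path + "/" + scene + common_args)
for scene in mipnerf360_indoor_scenes:
    os.system("python train.py -s " + mipnerf360_path + "/" + scene +
              " -i images_2 -m " + output_path + "/" + scene + common_args)

# Tanks&Temples and Deep Blending scenes follow, using their default image set.
for scene in tanks_and_temples_scenes:
    os.system("python train.py -s " + tandt_path + "/" + scene +
              " -m " + output_path + "/" + scene + common_args)
for scene in deep_blending_scenes:
    os.system("python train.py -s " + db_path + "/" + scene +
              " -m " + output_path + "/" + scene + common_args)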