This commit is contained in:
Ammar Qammaz 2024-09-04 15:17:54 +02:00 committed by GitHub
commit 70f89b5328
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 274 additions and 0 deletions

84
docker/Dockerfile Normal file
View File

@ -0,0 +1,84 @@
#FROM tensorflow/tensorflow:latest-gpu
FROM nvidia/cuda:12.2.0-devel-ubuntu22.04

# "ENV key value" is the deprecated legacy form; use key=value.
ENV DEBIAN_FRONTEND=noninteractive

# Build-time configuration: host UID to map the container user to, plus
# (weak, local-use-only) passwords for root and the unprivileged user.
ARG user_id
ARG root_psw="12345678"
ARG user_psw="ok"
ARG user_name=user

# Installs the necessary pkgs.
# NOTE(review): duplicate entries (libglew-dev, libsuitesparse-dev) removed;
# python3/python3-pip added explicitly since the pip upgrade below depends
# on them and the CUDA base image does not guarantee pip is present.
RUN \
echo "**** packages installation ****" \
&& apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/3bf863cc.pub \
&& apt-get update && apt-get dist-upgrade -y && apt-get install -y \
vim \
build-essential \
cmake \
ffmpeg \
imagemagick \
unzip \
freeglut3-dev \
libopencv-dev \
libjpeg-dev \
libpng-dev \
libglew-dev \
libpthread-stubs0-dev \
git \
virtualenv \
sqlite3 \
libsqlite3-dev \
time \
sudo \
wget \
nano \
python3 \
python3-pip \
libboost-program-options-dev \
libboost-filesystem-dev \
libboost-graph-dev \
libboost-regex-dev \
libboost-system-dev \
libboost-test-dev \
libeigen3-dev \
libsuitesparse-dev \
libfreeimage-dev \
libgoogle-glog-dev \
libgflags-dev \
qtbase5-dev \
libqt5opengl5-dev \
libcgal-dev \
libcgal-qt5-dev \
libmetis-dev \
libflann-dev \
libatlas-base-dev \
libcufft10 \
libcusparse11 \
libcublas11 \
libcublaslt11 \
&& echo "**** python pip update ****" \
&& /usr/bin/python3 -m pip install --upgrade pip \
&& echo "**** aliases for l and ll commands creation ****" \
&& printf '#!/bin/sh\nexec ls --color=auto "$@"\n' > /usr/bin/l \
&& printf '#!/bin/sh\nexec ls -lah --color=auto "$@"\n' > /usr/bin/ll \
&& chmod +x /usr/bin/ll /usr/bin/l \
&& echo "**** history-search-backward by pressing F8 ****" \
&& sed -i 's/# "\\e\[5~": history-search-backward/"\\e\[19~": history-search-backward/' /etc/inputrc \
&& echo "**** root password creation ****" \
&& echo "root:${root_psw}" | chpasswd \
&& echo "**** user creation ****" \
&& useradd -m -s /usr/bin/bash -u ${user_id} -G sudo ${user_name} \
&& echo "${user_name}:${user_psw}" | chpasswd \
&& mkdir /home/${user_name}/workspace/ \
&& chown -R ${user_name}:${user_name} /home/${user_name}/

# BUG FIX above: final chown used the literal "user:user", which breaks
# whenever user_name is overridden; it now uses ${user_name} and runs after
# mkdir so one recursive chown covers the workspace too.
USER ${user_name}
WORKDIR /home/${user_name}/

# NOTE(review): disabling ssl verification weakens security; presumably
# needed for a proxied build machine — confirm before shipping.
RUN git config --global http.sslverify false
RUN cd /home/${user_name}/workspace && git clone --recursive https://github.com/graphdeco-inria/gaussian-splatting && cd gaussian-splatting && docker/initialize.sh

44
docker/build_and_deploy.sh Executable file
View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
# This script builds and runs a docker image for local use.
# Although I dislike the use of docker for a myriad of reasons, due to needing
# it to deploy on a particular machine I am adding a docker container builder
# for the repository to automate the process.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$DIR" || exit 1
cd ..
REPOSITORY="$(pwd)"
cd "$DIR" || exit 1

NAME="gaussian-splatting"
dockerfile_pth="$DIR"
mount_pth="$REPOSITORY"

# NOTE(review): removed dead "docker pull tensorflow/tensorflow:latest-gpu" —
# the Dockerfile is based on nvidia/cuda, so that image was never used.

# Build the image, mapping the container user to the invoking user's UID.
docker build \
	-t "$NAME" \
	"$dockerfile_pth" \
	--build-arg user_id="$UID"

# Run detached with GPU access; the repository is mounted as the workspace.
docker run -d \
	--gpus all \
	--shm-size 8G \
	-it \
	--name "$NAME-container" \
	-v "$mount_pth:/home/user/workspace" \
	"$NAME"

docker ps -a

# Use docker's own name filter + quiet mode instead of the fragile
# "docker ps | grep | cut -f1 -d' '" pipeline.
OUR_DOCKER_ID="$(docker ps -aqf "name=$NAME")"
echo "Our docker ID is : $OUR_DOCKER_ID"
# BUG FIX: this previously echoed $OUD_DOCKER_ID (typo) — an empty value.
echo "Attaching it using : docker attach $OUR_DOCKER_ID"
docker attach "$OUR_DOCKER_ID"
exit 0

59
docker/initialize.sh Executable file
View File

@ -0,0 +1,59 @@
#!/bin/bash
# Builds and installs the third-party dependencies (ceres-solver, colmap) and
# the python wheels needed by gaussian-splatting. Meant to run inside the
# container, from the gaussian-splatting checkout.
#git clone https://github.com/NVIDIA/cuda-samples #<- To Test docker Cuda image..
#cd cuda-samples

#also get add-ons
git clone https://github.com/antimatter15/splat
git clone https://github.com/ReshotAI/gaussian-splatting-blender-addon/
git clone https://github.com/francescofugazzi/3dgsconverter #needs scikit-learn

# Every cd is guarded: without the guards, a failed clone would leave the
# script running make / "sudo make install" in the wrong directory.
git clone https://ceres-solver.googlesource.com/ceres-solver
cd ceres-solver || exit 1
#git checkout $(git describe --tags) # Checkout the latest release
mkdir -p build        # -p so a re-run of the script does not abort here
cd build || exit 1
cmake .. -DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF -DCMAKE_CUDA_ARCHITECTURES="60;70;80" #-DUSE_CUDA=OFF
make -j8
sudo make install
cd ../..

git clone https://github.com/colmap/colmap
cd colmap || exit 1
#git checkout dev
mkdir -p build
cd build || exit 1
cmake .. -DCMAKE_CUDA_ARCHITECTURES="60;70;80" #-DCUDA_ENABLED=OFF
make -j8
sudo make install
cd ../..

# Prebuilt wheels for the CUDA rasterizer / knn extensions (python 3.10 ABI).
python3.10 -m pip install plyfile tqdm scikit-learn
python3.10 -m pip install https://huggingface.co/camenduru/gaussian-splatting/resolve/main/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl
python3.10 -m pip install https://huggingface.co/camenduru/gaussian-splatting/resolve/main/simple_knn-0.0.0-cp310-cp310-linux_x86_64.whl
python3.10 -m pip install torchvision

# Convenience symlink so ./run.sh works from the repository root.
ln -s docker/run.sh ./run.sh

#Build viewer
#sudo apt install -y libimgui-dev libglew-dev libassimp-dev libboost-all-dev libgtk-3-dev libopencv-dev libglfw3-dev libavdevice-dev libavcodec-dev libeigen3-dev libxxf86vm-dev libembree-dev
#git clone https://github.com/JayFoxRox/SIBR_viewers
#cd SIBR_viewers
#cmake -Bbuild . -DCMAKE_BUILD_TYPE=Release -DASSIMP_LIBRARY=/usr/lib/x86_64-linux-gnu/libassimp.so
#cmake --build build -j24 --target install
#cd ..

#sudo apt-get -y install cuda
sudo apt -y install nvidia-cuda-toolkit
exit 0

33
docker/run.sh Executable file
View File

@ -0,0 +1,33 @@
#!/bin/bash
# Usage: ./run.sh <video-file>
# Splits the video into frames, runs COLMAP conversion + gaussian-splatting
# training, then converts and cleans the resulting point cloud.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$DIR" || exit 1
#cd ..

# Fail early with a usage message instead of silently creating a "-data" dir;
# all later expansions are quoted so paths with spaces survive.
VIDEO="${1:?Usage: $0 <video-file>}"

mkdir -p "$VIDEO-data/input"
mkdir -p "$VIDEO-data/output"

FPS="5" #<-change this to change framerate

if [ -f "$VIDEO-data/input/0001.jpg" ]
then
 echo "File $VIDEO appears to have already been split .."
else
 ffmpeg -i "$VIDEO" -qscale:v 1 -qmin 1 -vf "fps=$FPS" "$VIDEO-data/input/%04d.jpg"
fi

python3.10 convert.py -s "$VIDEO-data/" --camera SIMPLE_RADIAL --no_gpu #GPU produces worse results (?)
python3.10 train.py -s "$VIDEO-data/" -r 1 --model_path="$VIDEO-data/output/" --position_lr_init 0.000016 --scaling_lr 0.001 --iterations 35000 #Test more training budget

# NOTE(review): training runs 35000 iterations but the converters read
# iteration_30000 — presumably the default save checkpoint; confirm the path.
python3.10 3dgsconverter/3dgsconverter.py -i "$VIDEO-data/output/point_cloud/iteration_30000/point_cloud.ply" -o "$VIDEO-data/output/point_cloud/iteration_30000/output_cc.ply" -f cc --rgb --density_filter --remove_flyers
python3.10 3dgsconverter/3dgsconverter.py -i "$VIDEO-data/output/point_cloud/iteration_30000/output_cc.ply" -o "$VIDEO-data/output/point_cloud/iteration_30000/point_cloud_clean.ply" -f 3dgs

#pack it in
#tar cvfjh "$1.tar.bz2" $1-data/
exit 0

40
docker/setup.sh Executable file
View File

@ -0,0 +1,40 @@
#!/usr/bin/env bash
# This script installs docker from docker's own apt repository so the image
# can be built and run locally, and adds the current user to the docker group.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$DIR" || exit 1
cd ..
REPOSITORY="$(pwd)"
cd "$DIR" || exit 1

#https://docs.nvidia.com/ai-enterprise/deployment-guide/dg-docker.html
sudo apt-get update
sudo apt-get install -y \
	apt-transport-https \
	ca-certificates \
	curl \
	gnupg-agent \
	software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
sudo add-apt-repository \
	"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
	$(lsb_release -cs) \
	stable"
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io
sudo docker run hello-world

#Make sure docker group is ok
sudo groupadd docker
sudo usermod -aG docker "$USER"
# NOTE(review): newgrp spawns an interactive subshell here, so the script only
# reaches 'exit 0' once that shell is exited; logging out and back in also
# makes the new group membership effective.
newgrp docker
exit 0

View File

@ -29,6 +29,10 @@ except ImportError:
TENSORBOARD_FOUND = False
def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from):
bestLossEncountered = 1000000
bestIterationEncountered = 0
goodLossThreshold = 0.07
first_iter = 0
tb_writer = prepare_output_and_logger(dataset)
gaussians = GaussianModel(dataset.sh_degree)
@ -103,6 +107,16 @@ def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoi
if iteration == opt.iterations:
progress_bar.close()
if (bestLossEncountered>ema_loss_for_log) and (goodLossThreshold > ema_loss_for_log):
if (bestIterationEncountered!=0):
print("\n[GOOD ITER {}] Erasing previous best iteration..".format(bestIterationEncountered))
os.system("rm %s/point_cloud/iteration_%u/*.ply && rmdir %s/point_cloud/iteration_%u/"%(scene.model_path,bestIterationEncountered,scene.model_path,bestIterationEncountered))
bestLossEncountered = ema_loss_for_log
bestIterationEncountered = iteration
print("\n[GOOD ITER {}] Now remembering this iteration..".format(iteration))
saving_iterations.append(iteration)
# Log and save
training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, scene, render, (pipe, background))
if (iteration in saving_iterations):