mirror of https://github.com/graphdeco-inria/gaussian-splatting
synced 2025-06-26 18:18:11 +00:00

Commit 2560a16632 ("modified all errors"), parent 48ceb9419b

augment.py (156 lines changed)
@@ -1,5 +1,3 @@
-
-
 import argparse
 import numpy as np
 from tqdm import tqdm
@@ -13,6 +11,7 @@ from utils.aug_utils import *
 
 def augment(colmap_path, image_path, augment_path, camera_order, visibility_aware_culling, compare_center_patch):
     colmap_images, colmap_points3D, colmap_cameras = get_colmap_data(colmap_path)
+    np.seterr(divide='ignore', invalid='ignore')
     sorted_keys = cluster_cameras(colmap_path, camera_order)
 
     points3d = []
@@ -29,11 +28,12 @@ def augment(colmap_path, image_path, augment_path, camera_order, visibility_awar
 
     count = 0
     roots = {}
-    pbar = tqdm(len(sorted_keys))
+    pbar = tqdm(range(len(sorted_keys)))
     for view_idx in pbar:
         view = sorted_keys[view_idx]
         view_root, augmented_count = image_quadtree_augmentation(
             view,
+            image_path,
             colmap_cameras,
             colmap_images,
             colmap_points3D,
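Note on the tqdm fix above: tqdm's first argument must be an iterable, so the old tqdm(len(sorted_keys)) builds a bar that fails with a TypeError as soon as the for loop tries to iterate it, while tqdm(range(len(sorted_keys))) both yields the indices and drives the progress bar. A minimal sketch of the corrected pattern (the stand-in values are illustrative, not from the repo):

    from tqdm import tqdm

    sorted_keys = list(range(100))        # stand-in for the ordered camera keys
    count = 0
    pbar = tqdm(range(len(sorted_keys)))  # iterable, so `for view_idx in pbar` works
    for view_idx in pbar:
        count += 1                        # stand-in for augmented_count
        pbar.set_description(f"{count} points augmented")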
@@ -42,25 +42,25 @@ def augment(colmap_path, image_path, augment_path, camera_order, visibility_awar
             intrinsics_camera,
             rotations_image,
             translations_image,
-            visibility_aware_culling,
+            visibility_aware_culling=visibility_aware_culling,
         )
         count += augmented_count
         pbar.set_description(f"{count} points augmented")
         roots[view] = view_root
 
     for view1_idx in tqdm(range(len(sorted_keys))):
-        for view2_idx in [view_idx + 6,
-                          view_idx + 5,
-                          view_idx + 4,
-                          view_idx + 3,
-                          view_idx + 2,
-                          view_idx + 1,
-                          view_idx - 1,
-                          view_idx - 2,
-                          view_idx - 3,
-                          view_idx - 4,
-                          view_idx - 5,
-                          view_idx - 6]:
+        for view2_idx in [view1_idx + 6,
+                          view1_idx + 5,
+                          view1_idx + 4,
+                          view1_idx + 3,
+                          view1_idx + 2,
+                          view1_idx + 1,
+                          view1_idx - 1,
+                          view1_idx - 2,
+                          view1_idx - 3,
+                          view1_idx - 4,
+                          view1_idx - 5,
+                          view1_idx - 6]:
             if view2_idx > len(sorted_keys) - 1:
                 view2_idx = view2_idx - len(sorted_keys)
             view1 = sorted_keys[view1_idx]
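Two fixes land in the hunk above: visibility_aware_culling is now passed as a keyword argument, and the neighbour loop uses view1_idx instead of view_idx. The latter matters because view_idx is a leftover binding from the earlier augmentation loop, so the old code silently built every neighbour list around the last augmented view rather than the current one. The explicit ±6 offset list plus the manual upper-bound wraparound amounts to modular indexing over the camera ring; a hedged sketch (neighbour_views is a name introduced here, not from the repo):

    # Same neighbour set via modulo arithmetic. Python's negative indexing
    # already wraps the view1_idx - k side, which is why the original code
    # only checks the upper bound.
    def neighbour_views(view1_idx, sorted_keys, window=6):
        n = len(sorted_keys)
        offsets = list(range(window, 0, -1)) + list(range(-1, -window - 1, -1))
        return [sorted_keys[(view1_idx + off) % n] for off in offsets]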
@@ -93,6 +93,8 @@ def augment(colmap_path, image_path, augment_path, camera_order, visibility_awar
                 x, y = view1_sample_points_view2[i]
                 corresponding_node_type = None
                 error = None
+
+                # Case 1: Culling
                 if (view1_sample_points_view2_depth[i] < 0) | \
                    (view1_sample_points_view2[i, 0] < 0) | \
                    (view1_sample_points_view2[i, 0] >= image_view2.shape[1]) | \
@@ -102,65 +104,85 @@ def augment(colmap_path, image_path, augment_path, camera_order, visibility_awar
                     corresponding_node_type = "culled"
                     matching_log.append([view2, corresponding_node_type, error])
                     continue
-                else:
-                    view2_corresponding_node = find_leaf_node(view2_root, x, y)
-                    if view2_corresponding_node is None:
-                        corresponding_node_type = "missing"
-                    else:
-                        if view2_corresponding_node.unoccupied:
-                            if view2_corresponding_node.depth_interpolated:
-                                error = np.linalg.norm(view1_sample_points_view2_depth[i] - view2_corresponding_node.sampled_point_depth)
-                                if error < 0.2 * view2_corresponding_node.sampled_point_depth:
-                                    if compare_center_patch:
-                                        try:
-                                            view1_sample_point_patch = image_view2[int(view1_sample_points_view2[i, 1])-1:\
-                                                int(view1_sample_points_view2[i,1])+2,
-                                                int(view1_sample_points_view2[i, 0])-1:\
-                                                int(view1_sample_points_view2[i,0])+2]
-                                            view2_corresponding_node_patch = image_view2[int(view2_corresponding_node.sampled_point_uv[1])-1:\
-                                                int(view2_corresponding_node.sampled_point_uv[1])+2,
-                                                int(view2_corresponding_node.sampled_point_uv[0])-1:\
-                                                int(view2_corresponding_node.sampled_point_uv[0])+2]
-                                            if compare_local_texture(view1_sample_point_patch, view2_corresponding_node_patch) > 0.5:
-                                                corresponding_node_type = "sampledrejected"
-                                            else:
-                                                corresponding_node_type = "sampled"
-                                        except IndexError:
-                                            corresponding_node_type = "sampledrejected"
-                                    else:
-                                        corresponding_node_type = "sampled"
-                                else:
-                                    corresponding_node_type = "sampledrejected"
-                            else:
-                                corresponding_node_type = "depthrejected"
-                        else:
-                            corresponding_3d_depth = np.array(view2_corresponding_node.points3d_depths)
-                            error = np.linalg.norm(view1_sample_points_view2_depth[i] - corresponding_3d_depth)
-
-                            if np.min(error) < 0.2 * corresponding_3d_depth[np.argmin(error)]:
-                                if compare_center_patch:
-                                    try:
-                                        point_3d_coord = points3d_view2_pixcoord[view2_corresponding_node.points3d_indices[np.argmin[error]]]
-                                        point_3d_patch = image_view2[int(point_3d_coord[1])-1:\
-                                            int(point_3d_coord[1])+2,
-                                            int(point_3d_coord[0])-1:\
-                                            int(point_3d_coord[0])+2]
-                                        view1_sample_point_patch = image_view2[int(view1_sample_points_view2[i, 1])-1:\
-                                            int(view1_sample_points_view2[i,1])+2,
-                                            int(view1_sample_points_view2[i, 0])-1:\
-                                            int(view1_sample_points_view2[i,0])+2]
-                                        if compare_local_texture(view1_sample_point_patch, point_3d_patch) > 0.5:
-                                            corresponding_node_type = "rejectedoccupied3d"
-                                        else:
-                                            corresponding_node_type = "occupied3d"
-                                    except IndexError:
-                                        corresponding_node_type = "rejectedoccupied3d"
-                                else:
-                                    corresponding_node_type = "occupied3d"
-                            else:
-                                corresponding_node_type = "rejectedoccupied3d"
-                    matching_log.append([view2, corresponding_node_type, error])
+                # Case 2: Find corresponding node
+                view2_corresponding_node = find_leaf_node(view2_root, x, y)
+                if view2_corresponding_node is None:
+                    corresponding_node_type = "missing"
+                    matching_log.append([view2, corresponding_node_type, error])
+                    continue
+
+                # Case 3: Process unoccupied node
+                if view2_corresponding_node.unoccupied:
+                    if view2_corresponding_node.depth_interpolated:
+                        error = np.linalg.norm(view1_sample_points_view2_depth[i] - view2_corresponding_node.sampled_point_depth)
+                        if error < 0.2 * view2_corresponding_node.sampled_point_depth:
+                            if compare_center_patch:
+                                try:
+                                    view1_sample_point_patch = image_view2[int(view1_sample_points_view2[i, 1])-1:\
+                                        int(view1_sample_points_view2[i,1])+2,
+                                        int(view1_sample_points_view2[i, 0])-1:\
+                                        int(view1_sample_points_view2[i,0])+2]
+                                    view2_corresponding_node_patch = image_view2[int(view2_corresponding_node.sampled_point_uv[1])-1:\
+                                        int(view2_corresponding_node.sampled_point_uv[1])+2,
+                                        int(view2_corresponding_node.sampled_point_uv[0])-1:\
+                                        int(view2_corresponding_node.sampled_point_uv[0])+2]
+                                    if compare_local_texture(view1_sample_point_patch, view2_corresponding_node_patch) > 0.5:
+                                        corresponding_node_type = "sampledrejected"
+                                    else:
+                                        corresponding_node_type = "sampled"
+                                except IndexError:
+                                    corresponding_node_type = "sampledrejected"
+                            else:
+                                corresponding_node_type = "sampled"
+                        else:
+                            corresponding_node_type = "sampledrejected"
+                    else:
+                        corresponding_node_type = "depthrejected"
+                else:
+                    corresponding_3d_depth = np.array(view2_corresponding_node.points3d_depths)
+                    error = np.linalg.norm(view1_sample_points_view2_depth[i] - corresponding_3d_depth)
+
+                    if np.min(error) < 0.2 * corresponding_3d_depth[np.argmin(error)]:
+                        if compare_center_patch:
+                            try:
+                                point_3d_coord = points3d_view2_pixcoord[view2_corresponding_node.points3d_indices[np.argmin(error)]]
+                                point_3d_patch = image_view2[int(point_3d_coord[1])-1:\
+                                    int(point_3d_coord[1])+2,
+                                    int(point_3d_coord[0])-1:\
+                                    int(point_3d_coord[0])+2]
+                                view1_sample_point_patch = image_view2[int(view1_sample_points_view2[i, 1])-1:\
+                                    int(view1_sample_points_view2[i,1])+2,
+                                    int(view1_sample_points_view2[i, 0])-1:\
+                                    int(view1_sample_points_view2[i,0])+2]
+                                if compare_local_texture(view1_sample_point_patch, point_3d_patch) > 0.5:
+                                    corresponding_node_type = "rejectedoccupied3d"
+                                else:
+                                    corresponding_node_type = "occupied3d"
+                            except IndexError:
+                                corresponding_node_type = "rejectedoccupied3d"
+                        else:
+                            corresponding_node_type = "occupied3d"
+                    else:
+                        corresponding_node_type = "rejectedoccupied3d"
+
+                # Log the result for every case
+                matching_log.append([view2, corresponding_node_type, error])
+
+            node_index = 0
+            view1_leaf_nodes = []
+            gather_leaf_nodes(view1_root, view1_leaf_nodes)
+            for node in view1_leaf_nodes:
+                if node.unoccupied:
+                    if node.depth_interpolated:
+                        node.matching_log[view2] = matching_log[node_index]
+                        if matching_log[node_index][1] in ["depthrejected", "missing", "culled"]:
+                            None
+                        else:
+                            node.inference_count += 1
+                            node.rejection_count += 1 if matching_log[node_index][1] in ["rejectedoccupied3d",
+                                                                                         "sampledrejected"] else 0
+                        node_index += 1
 
     sampled_points_total = []
     sampled_points_rgb_total = []
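The rewrite above keeps the original decision logic (Case 1 culling, Case 2 missing-node handling with an early continue, Case 3 depth agreement within 20 % plus an optional 3x3 centre-patch texture check) but flattens the nesting, fixes np.argmin[error] to np.argmin(error), and funnels every branch into a single matching_log.append. The 3x3 patches are cut with slices like image_view2[y-1:y+2, x-1:x+2] inside try/except IndexError; note that NumPy clips out-of-range slices silently rather than raising, so an explicit bounds check is stricter than the try/except. A hedged helper illustrating that (centre_patch is a name introduced here, not from the repo):

    def centre_patch(img, x, y, r=1):
        # img is an H x W x C array; (x, y) is a pixel coordinate as above.
        x, y = int(x), int(y)
        if x - r < 0 or y - r < 0 or x + r >= img.shape[1] or y + r >= img.shape[0]:
            raise IndexError("patch falls outside the image")
        return img[y - r:y + r + 1, x - r:x + r + 1]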
@@ -178,7 +200,7 @@ def augment(colmap_path, image_path, augment_path, camera_order, visibility_awar
             sampled_points_total.append([node.sampled_point_world])
             sampled_points_rgb_total.append([node.sampled_point_rgb])
             sampled_points_uv_total.append([node.sampled_point_uv])
-            sampled_points_neighbors_uv_total.append([node.sampled_point_neighbors_uv])
+            sampled_points_neighbors_uv_total.append([node.sampled_point_neighbours_uv])
     print("total_Sampled_points: ", len(sampled_points_total))
     xyz = np.concatenate(sampled_points_total, axis=0)
     rgb = np.concatenate(sampled_points_rgb_total, axis=0)
eval_mipnerf360.sh (new file, 64 lines)

@@ -0,0 +1,64 @@
+for scene in bicycle flowers garden room stump
+do
+python augment.py --colmap_path /home/cvnar/disk4tb/360/${scene}/sparse/0 --image_path /home/cvnar/disk4tb/360/${scene}/images_4 \
+                  --augment_path /home/cvnar/disk4tb/360_augmented/${scene}/sparse/0/points3D.bin \
+                  --camera_order covisibility \
+                  --visibility_aware_culling \
+                  --compare_center_patch
+
+python train.py -s /home/cvnar/disk4tb/360/${scene} -m ../experiments/360/${scene} \
+                -i images_4 \
+                --eval
+
+python render.py -m ../experiments/360/${scene} --skip_train
+
+python metrics.py -m ../experiments/360/${scene}
+
+rm /home/cvnar/disk4tb/360_augmented/${scene}/sparse/0/points3D.ply
+
+python train.py -s /home/cvnar/disk4tb/360_augmented/${scene} -m ../experiments/360_augmented/${scene} \
+                -i images_4 \
+                --eval \
+                --bundle_training \
+                --camera_order covisibility \
+                --enable_ds_lap \
+                --lambda_ds 1.2 \
+                --lambda_lap 0.4
+
+python render.py -m ../experiments/360_augmented/${scene} --skip_train
+
+python metrics.py -m ../experiments/360_augmented/${scene}
+done
+
+for scene in bonsai counter kitchen room
+do
+python augment.py --colmap_path /home/cvnar/disk4tb/360/${scene}/sparse/0 --image_path /home/cvnar/disk4tb/360/${scene}/images_2 \
+                  --augment_path /home/cvnar/disk4tb/360_augmented/${scene}_augmented/sparse/0/points3D.bin \
+                  --camera_order covisibility \
+                  --visibility_aware_culling \
+                  --compare_center_patch
+
+python train.py -s /home/cvnar/disk4tb/360/${scene} -m ../experiments/360/${scene} \
+                -i images_2 \
+                --eval
+
+python render.py -m ../experiments/360/${scene} --skip_train
+
+python metrics.py -m ../experiments/360/${scene}
+
+rm /home/cvnar/disk4tb/360_augmented/${scene}/sparse/0/points3D.ply
+
+python train.py -s /home/cvnar/disk4tb/360_augmented/${scene} -m ../experiments/360_augmented/${scene} \
+                -i images_2 \
+                --eval \
+                --bundle_training \
+                --camera_order covisibility \
+                --enable_ds_lap \
+                --lambda_ds 1.2 \
+                --lambda_lap 0.4
+
+python render.py -m ../experiments/360_augmented/${scene} --skip_train
+
+python metrics.py -m ../experiments/360_augmented/${scene}
+done
+
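The script drives augment.py purely through command-line flags and then trains, renders, and evaluates both the original and the augmented scenes. A hedged sketch of the argparse wiring those flags imply, matching the augment(...) signature in the diff above (the actual parser in augment.py may differ):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--colmap_path", required=True)
    parser.add_argument("--image_path", required=True)
    parser.add_argument("--augment_path", required=True)
    parser.add_argument("--camera_order", default="covisibility")
    parser.add_argument("--visibility_aware_culling", action="store_true")
    parser.add_argument("--compare_center_patch", action="store_true")
    args = parser.parse_args()

    augment(args.colmap_path, args.image_path, args.augment_path,
            args.camera_order, args.visibility_aware_culling, args.compare_center_patch)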
train.py (10 lines changed)
@@ -84,7 +84,7 @@ def training(dataset,
     ema_Ll1depth_for_log = 0.0
 
     if bundle_training:
-        sorted_keys = cluster_cameras(dataset.source_path, camera_order)
+        sorted_keys = cluster_cameras(os.path.join(dataset.source_path, 'sparse/0'), camera_order)
         start_indices, cluster_sizes = bundle_start_index_generator(sorted_keys, 20)
         n_interval = 0
 
@@ -125,9 +125,9 @@ def training(dataset,
         if not viewpoint_stack:
             viewpoint_stack = scene.getTrainCameras().copy()
             viewpoint_indices = list(range(len(viewpoint_stack)))
         rand_idx = randint(0, len(viewpoint_indices) - 1)
         viewpoint_cam = viewpoint_stack.pop(rand_idx)
         vind = viewpoint_indices.pop(rand_idx)
 
         # Render
         if (iteration - 1) == debug_from:
@@ -364,8 +364,8 @@ if __name__ == "__main__":
             args.checkpoint_iterations,
             args.start_checkpoint,
             args.debug_from,
-             args.bundle_training,
             args.camera_order,
+             args.bundle_training,
             args.enable_ds_lap,
             args.lambda_ds,
             args.lambda_lap)
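The first hunk pairs with the cluster_cameras signature change further down: the function no longer joins 'sparse/0' itself, so each caller passes the COLMAP model directory explicitly (augment.py already does; train.py now joins at the call site). The last hunk reorders args.bundle_training after args.camera_order, presumably to match the parameter order of training(...). A minimal sketch of the new calling convention, assuming dataset.source_path is the scene root as in train.py:

    import os

    colmap_path = os.path.join(dataset.source_path, 'sparse/0')  # <root>/sparse/0
    sorted_keys = cluster_cameras(colmap_path, camera_order)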
utils/__init__.py (new empty file)
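The empty utils/__init__.py marks utils as an explicit package, which is presumably what the two import fixes below depend on: with the scripts run from the repository root, only the package-qualified module path resolves.

    # before: only resolves with utils/ itself on sys.path
    from colmap_utils import compute_extrinsics, get_colmap_data
    # after: resolves from the repository root
    from utils.colmap_utils import compute_extrinsics, get_colmap_data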
@@ -9,7 +9,7 @@ import rtree
 from shapely.geometry import Point, box
 from collections import defaultdict
 from sklearn.decomposition import PCA
-from colmap_utils import compute_extrinsics, get_colmap_data
+from utils.colmap_utils import compute_extrinsics, get_colmap_data
 from matplotlib import pyplot as plt
 
 class Node:
@@ -3,7 +3,7 @@ import os
 from colmap.scripts.python.read_write_model import *
 import numpy as np
 from collections import defaultdict
-from colmap_utils import compute_extrinsics, compute_intrinsics, get_colmap_data
+from utils.colmap_utils import compute_extrinsics, compute_intrinsics, get_colmap_data
 import cv2
 from sklearn.decomposition import PCA
 
@@ -77,9 +77,9 @@ def create_sequence_from_covisibility_graph(covisibility_graph, min_covisibility
 
     return sequence
 
-def cluster_cameras(model_path, camera_order):
-    colmap_path = os.path.join(model_path, 'sparse/0')
+def cluster_cameras(colmap_path, camera_order):
     colmap_images, colmap_points3D, colmap_cameras = get_colmap_data(colmap_path)
+    print(camera_order)
     if camera_order == 'covisibility':
         covisibility_matrix, id_to_idx, idx_to_id = build_covisibility_matrix(colmap_images, colmap_points3D)
         covisibility_graph = create_covisibility_graph(covisibility_matrix, idx_to_id)
@@ -138,6 +138,8 @@ def cluster_cameras(model_path, camera_order):
         sorted_cam_centers = cam_center_2d[sorted_indices]
         sorted_keys = np.array(key)[sorted_indices]
 
+    print(sorted_keys)
+
     return sorted_keys
 
 def bundle_start_index_generator(sorted_keys, initial_interval):
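For orientation: with camera_order == 'covisibility', cluster_cameras orders cameras by building a covisibility matrix, deriving a graph from it, and extracting a camera sequence. A hedged sketch of what build_covisibility_matrix plausibly computes, counting the 3D points each image pair co-observes (the repo's implementation may differ in details):

    import numpy as np

    def build_covisibility_matrix(colmap_images, colmap_points3D):
        # colmap_points3D values are COLMAP Point3D records whose image_ids
        # field lists every image observing that point (read_write_model.py).
        ids = sorted(colmap_images.keys())
        id_to_idx = {im_id: i for i, im_id in enumerate(ids)}
        idx_to_id = {i: im_id for im_id, i in id_to_idx.items()}
        m = np.zeros((len(ids), len(ids)), dtype=np.int64)
        for pt in colmap_points3D.values():
            seen = np.unique(pt.image_ids)
            for a in seen:
                for b in seen:
                    if a != b:
                        m[id_to_idx[a], id_to_idx[b]] += 1
        return m, id_to_idx, idx_to_id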