mirror of
https://github.com/graphdeco-inria/gaussian-splatting
synced 2025-04-03 04:40:51 +00:00
Merge 2556ffa076
into 54c035f783
This commit is contained in:
commit
3dd08a6a17
@ -261,6 +261,10 @@ python metrics.py -m <path to pre-trained model>
|
||||
|
||||
#### --model_paths / -m
|
||||
Space-separated list of model paths for which metrics should be computed.
|
||||
#### --eval_dir_name / -e
|
||||
Name of the folder containing render results at `<model_paths>/<eval_dir_name>`. (```test``` by default)
|
||||
#### --run_iter
|
||||
If set, read images iteratively (one at a time) to save VRAM, at the cost of longer runtime.
|
||||
</details>
|
||||
<br>
|
||||
|
||||
|
88
metrics.py
88
metrics.py
@ -20,6 +20,7 @@ import json
|
||||
from tqdm import tqdm
|
||||
from utils.image_utils import psnr
|
||||
from argparse import ArgumentParser
|
||||
import gc
|
||||
|
||||
def readImages(renders_dir, gt_dir):
|
||||
renders = []
|
||||
@ -33,7 +34,8 @@ def readImages(renders_dir, gt_dir):
|
||||
image_names.append(fname)
|
||||
return renders, gts, image_names
|
||||
|
||||
def evaluate(model_paths):
|
||||
|
||||
def evaluate(model_paths, eval_dir_name="test"):
|
||||
|
||||
full_dict = {}
|
||||
per_view_dict = {}
|
||||
@ -49,7 +51,7 @@ def evaluate(model_paths):
|
||||
full_dict_polytopeonly[scene_dir] = {}
|
||||
per_view_dict_polytopeonly[scene_dir] = {}
|
||||
|
||||
test_dir = Path(scene_dir) / "test"
|
||||
test_dir = Path(scene_dir) / eval_dir_name
|
||||
|
||||
for method in os.listdir(test_dir):
|
||||
print("Method:", method)
|
||||
@ -92,6 +94,80 @@ def evaluate(model_paths):
|
||||
except:
|
||||
print("Unable to compute metrics for model", scene_dir)
|
||||
|
||||
|
||||
|
||||
def readImage(renders_dir, gt_dir, fname):
    """Load a single render / ground-truth image pair as 1xCxHxW CUDA tensors.

    Only the first three channels are kept, so an alpha channel (if present)
    is dropped.
    """
    def _load(directory):
        img = tf.to_tensor(Image.open(directory / fname))
        # Add a batch dimension, keep RGB only, then move to the GPU.
        return img.unsqueeze(0)[:, :3, :, :].cuda()

    return _load(renders_dir), _load(gt_dir)
|
||||
|
||||
|
||||
def evaluate_iter(model_paths, eval_dir_name="test"):
    """Compute SSIM / PSNR / LPIPS for each scene, one image pair at a time.

    Unlike the batched `evaluate`, only a single render/GT pair is resident on
    the GPU at any moment, trading runtime for a much smaller VRAM footprint
    (this is what `--run_iter` selects).

    Args:
        model_paths: list of scene directories to evaluate.
        eval_dir_name: name of the folder under each scene dir that holds the
            per-method `renders/` and `gt/` subfolders ("test" by default).

    Side effects: writes <scene_dir>/results.json (per-method averages) and
    <scene_dir>/per_view.json (per-image metrics), and prints a summary.
    """
    full_dict = {}
    per_view_dict = {}
    full_dict_polytopeonly = {}
    per_view_dict_polytopeonly = {}
    print("")

    for scene_dir in model_paths:
        try:
            print("Scene:", scene_dir)
            full_dict[scene_dir] = {}
            per_view_dict[scene_dir] = {}
            full_dict_polytopeonly[scene_dir] = {}
            per_view_dict_polytopeonly[scene_dir] = {}

            test_dir = Path(scene_dir) / eval_dir_name

            for method in os.listdir(test_dir):
                print("Method:", method)

                full_dict[scene_dir][method] = {}
                per_view_dict[scene_dir][method] = {}
                full_dict_polytopeonly[scene_dir][method] = {}
                per_view_dict_polytopeonly[scene_dir][method] = {}

                method_dir = test_dir / method
                gt_dir = method_dir / "gt"
                renders_dir = method_dir / "renders"
                image_names = os.listdir(renders_dir)

                ssims = []
                psnrs = []
                lpipss = []

                for fname in tqdm(image_names, desc="Metric evaluation progress"):
                    render, gt = readImage(renders_dir, gt_dir, fname)

                    ssims.append(ssim(render, gt))
                    psnrs.append(psnr(render, gt))
                    lpipss.append(lpips(render, gt, net_type='vgg'))

                    # Free this pair immediately so at most one image pair is
                    # held on the GPU at a time — the whole point of this
                    # iterative variant.
                    del render, gt
                    gc.collect()
                    torch.cuda.empty_cache()

                # Dropped a stray unused second argument (", '.5'") that the
                # original passed to str.format on each of these lines.
                print(" SSIM : {:>12.7f}".format(torch.tensor(ssims).mean()))
                print(" PSNR : {:>12.7f}".format(torch.tensor(psnrs).mean()))
                print(" LPIPS: {:>12.7f}".format(torch.tensor(lpipss).mean()))
                print("")

                full_dict[scene_dir][method].update({"SSIM": torch.tensor(ssims).mean().item(),
                                                     "PSNR": torch.tensor(psnrs).mean().item(),
                                                     "LPIPS": torch.tensor(lpipss).mean().item()})
                per_view_dict[scene_dir][method].update({"SSIM": {name: ssim for ssim, name in zip(torch.tensor(ssims).tolist(), image_names)},
                                                         "PSNR": {name: psnr for psnr, name in zip(torch.tensor(psnrs).tolist(), image_names)},
                                                         "LPIPS": {name: lp for lp, name in zip(torch.tensor(lpipss).tolist(), image_names)}})

            with open(scene_dir + "/results.json", 'w') as fp:
                json.dump(full_dict[scene_dir], fp, indent=True)
            with open(scene_dir + "/per_view.json", 'w') as fp:
                json.dump(per_view_dict[scene_dir], fp, indent=True)
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); still best-effort per scene, but now
        # reports what went wrong.
        except Exception as e:
            print("Unable to compute metrics for model", scene_dir)
            print("  Reason:", e)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Set up command line argument parser
    parser = ArgumentParser(description="Training script parameters")
    parser.add_argument('--model_paths', '-m', required=True, nargs="+", type=str, default=[])
    # Fixed help-string grammar ("folder contains" -> "folder containing").
    parser.add_argument('--eval_dir_name', '-e', type=str, default="test", help="Name of the folder containing render results at <model_paths> / <eval_dir_name>")
    parser.add_argument('--run_iter', action='store_true', default=False, help="Read images iteratively to save VRAM")
    args = parser.parse_args()

    # The scraped diff contained both the old unconditional evaluate() call
    # and the new branch; this is the coherent post-merge version.
    # --run_iter trades runtime for a much smaller VRAM footprint.
    if args.run_iter:
        evaluate_iter(args.model_paths, args.eval_dir_name)
    else:
        evaluate(args.model_paths, args.eval_dir_name)
|
||||
|
Loading…
Reference in New Issue
Block a user