From d7eeefa3e72ea9e89673a54be7c4a0d212cdf215 Mon Sep 17 00:00:00 2001 From: Matt Li Date: Tue, 12 Nov 2024 17:46:01 -0500 Subject: [PATCH] Add `eval_dir` arg to specify the evaluation directory The default directory is test. Sometimes, we also need to evaluate the train-rendered images to compare the implications of different resolution parameters. --- metrics.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/metrics.py b/metrics.py index f7393a4..ab77e46 100644 --- a/metrics.py +++ b/metrics.py @@ -33,7 +33,7 @@ def readImages(renders_dir, gt_dir): image_names.append(fname) return renders, gts, image_names -def evaluate(model_paths): +def evaluate(model_paths, eval_dir): full_dict = {} per_view_dict = {} @@ -49,7 +49,7 @@ def evaluate(model_paths): full_dict_polytopeonly[scene_dir] = {} per_view_dict_polytopeonly[scene_dir] = {} - test_dir = Path(scene_dir) / "test" + test_dir = Path(scene_dir) / eval_dir for method in os.listdir(test_dir): print("Method:", method) @@ -99,5 +99,6 @@ if __name__ == "__main__": # Set up command line argument parser parser = ArgumentParser(description="Training script parameters") parser.add_argument('--model_paths', '-m', required=True, nargs="+", type=str, default=[]) + parser.add_argument('--eval_dir', required=False, default="test", type=str) args = parser.parse_args() - evaluate(args.model_paths) + evaluate(args.model_paths, args.eval_dir)