├── .gitmodules
├── LICENSE.md
├── README.md
├── arguments
│   └── __init__.py
├── convert.py
├── environment.yml
├── full_eval.py
├── gaussian_renderer
│   ├── __init__.py
│   └── network_gui.py
├── lpipsPyTorch
│   ├── __init__.py
│   └── modules
│       ├── lpips.py
│       ├── networks.py
│       └── utils.py
├── metrics.py
├── render.py
├── render_video.py
├── results
│   ├── DeepBlending
│   │   ├── drjohnson.csv
│   │   └── playroom.csv
│   ├── MipNeRF360
│   │   ├── bicycle.csv
│   │   ├── bonsai.csv
│   │   ├── counter.csv
│   │   ├── flowers.csv
│   │   ├── garden.csv
│   │   ├── kitchen.csv
│   │   ├── room.csv
│   │   ├── stump.csv
│   │   └── treehill.csv
│   ├── SyntheticNeRF
│   │   ├── chair.csv
│   │   ├── drums.csv
│   │   ├── ficus.csv
│   │   ├── hotdog.csv
│   │   ├── lego.csv
│   │   ├── materials.csv
│   │   ├── mic.csv
│   │   └── ship.csv
│   └── TanksAndTemples
│       ├── train.csv
│       └── truck.csv
├── scene
│   ├── __init__.py
│   ├── cameras.py
│   ├── colmap_loader.py
│   ├── dataset_readers.py
│   └── gaussian_model.py
├── train.py
└── utils
    ├── camera_utils.py
    ├── general_utils.py
    ├── graphics_utils.py
    ├── image_utils.py
    ├── loss_utils.py
    ├── pose_utils.py
    ├── sh_utils.py
    └── system_utils.py
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "submodules/simple-knn"]
2 | path = submodules/simple-knn
3 | url = https://github.com/camenduru/simple-knn.git
4 | [submodule "submodules/diff-gaussian-rasterization"]
5 | path = submodules/diff-gaussian-rasterization
6 | url = https://github.com/graphdeco-inria/diff-gaussian-rasterization.git
7 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | This repository is built on Gaussian-Splatting.
2 |
3 | Gaussian-Splatting License
4 | ===========================
5 |
6 | **Inria** and **the Max Planck Institut for Informatik (MPII)** hold all the ownership rights on the *Software* named **gaussian-splatting**.
7 | The *Software* is in the process of being registered with the Agence pour la Protection des
8 | Programmes (APP).
9 |
10 | The *Software* is still being developed by the *Licensor*.
11 |
12 | *Licensor*'s goal is to allow the research community to use, test and evaluate
13 | the *Software*.
14 |
15 | ## 1. Definitions
16 |
17 | *Licensee* means any person or entity that uses the *Software* and distributes
18 | its *Work*.
19 |
20 | *Licensor* means the owners of the *Software*, i.e., Inria and MPII
21 |
22 | *Software* means the original work of authorship made available under this
23 | License, i.e., gaussian-splatting.
24 |
25 | *Work* means the *Software* and any additions to or derivative works of the
26 | *Software* that are made available under this License.
27 |
28 |
29 | ## 2. Purpose
30 | This license is intended to define the rights granted to the *Licensee* by
31 | Licensors under the *Software*.
32 |
33 | ## 3. Rights granted
34 |
35 | For the above reasons Licensors have decided to distribute the *Software*.
36 | Licensors grant non-exclusive rights to use the *Software* for research purposes
37 | to research users (both academic and industrial), free of charge, without right
38 | to sublicense. The *Software* may be used "non-commercially", i.e., for research
39 | and/or evaluation purposes only.
40 |
41 | Subject to the terms and conditions of this License, you are granted a
42 | non-exclusive, royalty-free, license to reproduce, prepare derivative works of,
43 | publicly display, publicly perform and distribute its *Work* and any resulting
44 | derivative works in any form.
45 |
46 | ## 4. Limitations
47 |
48 | **4.1 Redistribution.** You may reproduce or distribute the *Work* only if (a) you do
49 | so under this License, (b) you include a complete copy of this License with
50 | your distribution, and (c) you retain without modification any copyright,
51 | patent, trademark, or attribution notices that are present in the *Work*.
52 |
53 | **4.2 Derivative Works.** You may specify that additional or different terms apply
54 | to the use, reproduction, and distribution of your derivative works of the *Work*
55 | ("Your Terms") only if (a) Your Terms provide that the use limitation in
56 | Section 2 applies to your derivative works, and (b) you identify the specific
57 | derivative works that are subject to Your Terms. Notwithstanding Your Terms,
58 | this License (including the redistribution requirements in Section 3.1) will
59 | continue to apply to the *Work* itself.
60 |
61 | **4.3** Any other use without prior consent of Licensors is prohibited. Research
62 | users explicitly acknowledge having received from Licensors all information
63 | allowing them to assess the adequacy of the *Software* for their needs and
64 | to undertake all necessary precautions for its execution and use.
65 |
66 | **4.4** The *Software* is provided both as a compiled library file and as source
67 | code. In case of using the *Software* for a publication or other results obtained
68 | through the use of the *Software*, users are strongly encouraged to cite the
69 | corresponding publications as explained in the documentation of the *Software*.
70 |
71 | ## 5. Disclaimer
72 |
73 | THE USER CANNOT USE, EXPLOIT OR DISTRIBUTE THE *SOFTWARE* FOR COMMERCIAL PURPOSES
74 | WITHOUT PRIOR AND EXPLICIT CONSENT OF LICENSORS. YOU MUST CONTACT INRIA FOR ANY
75 | UNAUTHORIZED USE: stip-sophia.transfert@inria.fr . ANY SUCH ACTION WILL
76 | CONSTITUTE A FORGERY. THIS *SOFTWARE* IS PROVIDED "AS IS" WITHOUT ANY WARRANTIES
77 | OF ANY NATURE AND ANY EXPRESS OR IMPLIED WARRANTIES, WITH REGARDS TO COMMERCIAL
78 | USE, PROFESSIONAL USE, LEGAL OR NOT, OR OTHER, OR COMMERCIALISATION OR
79 | ADAPTATION. UNLESS EXPLICITLY PROVIDED BY LAW, IN NO EVENT, SHALL INRIA OR THE
80 | AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
81 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
82 | GOODS OR SERVICES, LOSS OF USE, DATA, OR PROFITS OR BUSINESS INTERRUPTION)
83 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
84 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING FROM, OUT OF OR
85 | IN CONNECTION WITH THE *SOFTWARE* OR THE USE OR OTHER DEALINGS IN THE *SOFTWARE*.
86 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Compact 3D Gaussian Representation for Radiance Field (CVPR 2024 Highlight)
2 | ### Joo Chan Lee, Daniel Rho, Xiangyu Sun, Jong Hwan Ko, and Eunbyung Park
3 |
4 | ### [[Project Page](https://maincold2.github.io/c3dgs/)] [[Paper(arxiv)](https://arxiv.org/abs/2311.13681)] [[Extended Paper](https://arxiv.org/abs/2408.03822)]
5 |
6 | Our code is based on [3D Gaussian Splatting](https://github.com/graphdeco-inria/gaussian-splatting).
7 |
8 | ## Method Overview
9 |
10 |
11 | We place a specific emphasis on two key objectives: reducing the number of Gaussian points without sacrificing performance, and compressing the Gaussian attributes. To this end, we propose a learnable mask strategy that significantly reduces the number of Gaussians while preserving high performance. In addition, we propose a compact but effective representation of view-dependent color by employing a grid-based neural field rather than relying on spherical harmonics. Finally, we learn codebooks to compactly represent the geometric attributes of Gaussians via residual vector quantization (R-VQ).
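
The learnable masking is applied with a straight-through estimator: the hard 0/1 mask is used in the forward pass while gradients flow through a sigmoid, so the mask remains trainable. A minimal sketch of this step (mirroring the logic in `gaussian_renderer/__init__.py`; the helper name and tensors are illustrative, while the 0.01 threshold and the `_mask` parameter follow that code):

```python
import torch

def apply_learnable_mask(mask_logits, scales, opacity, threshold=0.01):
    """Straight-through binary masking of per-Gaussian scales and opacities."""
    soft = torch.sigmoid(mask_logits)        # (N, 1), differentiable
    hard = (soft > threshold).float()        # (N, 1), binary in the forward pass
    mask = (hard - soft).detach() + soft     # gradients flow through `soft`
    return scales * mask, opacity * mask
```

Gaussians whose mask falls below the threshold contribute nothing to the rendering and can then be pruned (cf. `mask_prune_iter` in `OptimizationParams`).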
12 |
13 | ## Update
14 |
15 | #### Aug. 2024
16 | - We added another post-processing technique: sorting Gaussians in Morton order, as in [Compressed 3DGS](https://github.com/graphdeco-inria/gaussian-splatting). With the --comp and --store_npz options, this achieves a further 10% compression (measured as the size of the point_cloud_pp.npz file); a small sketch follows below.
17 | - We extended compact 3D Gaussian splatting to dynamic scene representation.
18 |
19 | Please check out our [extended paper](https://arxiv.org/abs/2408.03822) and [codes for dynamic scenes](https://github.com/maincold2/Dynamic_C3DGS).
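
For reference, a readable (unoptimized) sketch of Morton-order sorting; the function names are illustrative, and the actual post-processing operates on the stored attribute arrays:

```python
import numpy as np

def morton_code(ix, iy, iz, bits=21):
    """Interleave the bits of three quantized coordinates into one Z-order key."""
    code = 0
    for b in range(bits):
        code |= ((ix >> b) & 1) << (3 * b + 2)
        code |= ((iy >> b) & 1) << (3 * b + 1)
        code |= ((iz >> b) & 1) << (3 * b + 0)
    return code

def morton_order(xyz, bits=21):
    """Return an index that sorts Gaussian centers along a Z-order (Morton) curve."""
    mins, maxs = xyz.min(axis=0), xyz.max(axis=0)
    q = np.floor((xyz - mins) / (maxs - mins + 1e-12) * (2 ** bits - 1)).astype(int)
    return np.argsort([morton_code(x, y, z, bits) for x, y, z in q])
```

Sorting all attributes with this index places spatially close Gaussians next to each other, which makes the subsequent entropy coding (npz/Huffman) more effective.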
20 |
21 |
22 |
23 | #### Apr. 2024
24 | We updated the saving code so that the result file (.npz) reflects the actual storage.
25 | With the --store_npz option, the code generates a point_cloud.npz or point_cloud_pp.npz file (depending on the --comp option) instead of the .ply file. The size of these files is almost identical to the estimates in the 'Storage' file (point_cloud_pp.npz is slightly smaller due to npz compression).
26 |
27 | #### Feb. 2024
28 | We additionally implement straightforward post-processing techniques on the model attributes: 1) Applying 8-bit min-max quantization to opacity and hash grid parameters. 2) Pruning hash grid parameters with values below 0.1. 3) Applying Huffman encoding on the quantized opacity and hash parameters, and R-VQ indices.
29 |
30 | As a result, our model is further downsized by over 40% regardless of the dataset, achieving more than 25x compression relative to 3DGS while maintaining high performance.
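
As a rough illustration of steps 1) and 3), here is a minimal sketch of 8-bit min-max quantization followed by Huffman coding with the `dahuffman` package pinned in `environment.yml` (the helper names are illustrative; this is not the repository's exact `--comp` implementation):

```python
import torch
from dahuffman import HuffmanCodec

def quantize_minmax_8bit(x):
    """Map x linearly to [0, 255] and store it as uint8 plus its (min, max)."""
    x_min, x_max = x.min().item(), x.max().item()
    q = torch.round((x - x_min) / (x_max - x_min + 1e-12) * 255).to(torch.uint8)
    return q, x_min, x_max

def dequantize_8bit(q, x_min, x_max):
    return q.float() / 255 * (x_max - x_min) + x_min

# Example: quantize opacities, then Huffman-encode the uint8 symbols.
opacity = torch.rand(100_000, 1)
q, lo, hi = quantize_minmax_8bit(opacity)
symbols = q.flatten().tolist()
codec = HuffmanCodec.from_data(symbols)
packed = codec.encode(symbols)                           # compressed bytes
restored = dequantize_8bit(torch.tensor(codec.decode(packed), dtype=torch.uint8).view_as(q), lo, hi)
```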
31 |
32 |
33 | ## Setup
34 |
35 | For installation:
36 | ```shell
37 | git clone https://github.com/maincold2/Compact-3DGS.git --recursive
38 | conda env create --file environment.yml
39 | conda activate c3dgs
40 | ```
41 | We used the Mip-NeRF 360, Tanks & Temples, Deep Blending, and NeRF Synthetic datasets.
42 |
43 | ## Running
44 |
45 | ### Real-world scenes (e.g., 360, T&T, and DB)
46 |
47 |
48 | ```shell
49 | python train.py -s <path to COLMAP dataset> --eval
50 | ```
51 | #### --comp
52 | Apply post-processing techniques for compression.
53 | #### --store_npz
54 | Store an .npz file that reflects the actual storage.
55 |
56 | More Command Line Arguments for train.py
57 |
58 | #### --lambda_mask
59 | Weight of the masking loss that controls the number of Gaussians, 0.0005 by default (see `OptimizationParams`)
60 | #### --mask_lr
61 | Learning rate of masking parameter, 0.01 by default
62 | #### --net_lr
63 | Learning rate for the neural field, 0.01 by default
64 | #### --net_lr_step
65 | Step schedule for training the neural field, [5000, 15000, 25000] by default
66 | #### --max_hashmap
67 | Maximum hashmap size (log) of the neural field, 19 by default
68 | #### --rvq_size
69 | Codebook size in each R-VQ stage, 64 by default
70 | #### --rvq_num
71 | The number of R-VQ stages, 6 by default
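
For illustration, these two options map directly onto the `ResidualVQ` module of the `vector-quantize-pytorch` package listed in `environment.yml`; a minimal sketch under that assumption (variable names such as `vq_scale` mirror how the renderer calls it):

```python
import torch
from vector_quantize_pytorch import ResidualVQ

# 6 residual stages (--rvq_num) with 64-entry codebooks (--rvq_size)
vq_scale = ResidualVQ(dim=3, num_quantizers=6, codebook_size=64)

scales = torch.rand(10_000, 3)                 # per-Gaussian scale vectors
quantized, indices, commit_loss = vq_scale(scales.unsqueeze(0))
quantized = quantized.squeeze(0)               # (N, 3) quantized scales
# `indices` holds one small codebook index per stage, so each Gaussian's scale
# is stored as 6 integers plus the shared codebooks.
```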
72 |
73 | #### Refer to other arguments of [3DGS](https://github.com/graphdeco-inria/gaussian-splatting).
74 |
75 |
76 |
77 |
78 |
79 | ### NeRF-synthetic scenes
80 |
81 | Synthetic scenes require slightly different hyperparameters.
82 | ```shell
83 | python train.py -s <path to NeRF Synthetic dataset> --eval --max_hashmap 16 --lambda_mask 4e-3 --mask_lr 1e-3 --net_lr 1e-3 --net_lr_step 25000
84 | ```
85 |
86 | ## Evaluation
87 | ```shell
88 | python render.py -m <path to trained model> --max_hashmap <max_hashmap used in training>
89 | python metrics.py -m <path to trained model>
90 | ```
91 |
92 | ## 3DGS Viewer
93 | The original SIBR interactive viewer of 3DGS cannot support neural fields for view-dependent color. We would like to add support for this shortly if possible.
94 |
95 | Currently, to use the viewer, you have two options: either bypass the neural field for view-dependent color by applying only the masking and the geometry codebook, or train the neural field to represent spherical harmonics without the view direction as input (slightly lower performance). Afterwards, you can save the output in PLY format, as in 3DGS.
96 |
97 | ## BibTeX
98 | ```
99 | @InProceedings{lee2024c3dgs,
100 | author = {Lee, Joo Chan and Rho, Daniel and Sun, Xiangyu and Ko, Jong Hwan and Park, Eunbyung},
101 | title = {Compact 3D Gaussian Representation for Radiance Field},
102 | booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
103 | year = {2024},
104 | pages = {21719-21728}
105 | }
106 | ```
107 |
--------------------------------------------------------------------------------
/arguments/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | from argparse import ArgumentParser, Namespace
13 | import sys
14 | import os
15 |
16 | class GroupParams:
17 | pass
18 |
19 | class ParamGroup:
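    # Every attribute defined by a subclass becomes a command-line argument; names
    # prefixed with "_" additionally get a single-letter shorthand (e.g. -s for
    # _source_path), and bool attributes become store_true flags.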
20 | def __init__(self, parser: ArgumentParser, name : str, fill_none = False):
21 | group = parser.add_argument_group(name)
22 | for key, value in vars(self).items():
23 | shorthand = False
24 | if key.startswith("_"):
25 | shorthand = True
26 | key = key[1:]
27 | t = type(value)
28 | value = value if not fill_none else None
29 | if shorthand:
30 | if t == bool:
31 | group.add_argument("--" + key, ("-" + key[0:1]), default=value, action="store_true")
32 | else:
33 | group.add_argument("--" + key, ("-" + key[0:1]), default=value, type=t)
34 | else:
35 | if t == bool:
36 | group.add_argument("--" + key, default=value, action="store_true")
37 | else:
38 | group.add_argument("--" + key, default=value, type=t)
39 |
40 | def extract(self, args):
41 | group = GroupParams()
42 | for arg in vars(args).items():
43 | if arg[0] in vars(self) or ("_" + arg[0]) in vars(self):
44 | setattr(group, arg[0], arg[1])
45 | return group
46 |
47 | class ModelParams(ParamGroup):
48 | def __init__(self, parser, sentinel=False):
49 | self.sh_degree = 0
50 | self._source_path = ""
51 | self._model_path = ""
52 | self._images = "images"
53 | self._resolution = -1
54 | self._white_background = False
55 | self.data_device = "cuda"
56 | self.eval = False
57 | self.max_hashmap = 19
58 | self.rvq_size = 64
59 | self.rvq_num = 6
60 | super().__init__(parser, "Loading Parameters", sentinel)
61 |
62 | def extract(self, args):
63 | g = super().extract(args)
64 | g.source_path = os.path.abspath(g.source_path)
65 | return g
66 |
67 | class PipelineParams(ParamGroup):
68 | def __init__(self, parser):
69 | self.convert_SHs_python = False
70 | self.compute_cov3D_python = False
71 | self.debug = False
72 | super().__init__(parser, "Pipeline Parameters")
73 |
74 | class OptimizationParams(ParamGroup):
75 | def __init__(self, parser):
76 | self.iterations = 30_000
77 | self.position_lr_init = 0.00016
78 | self.position_lr_final = 0.0000016
79 | self.position_lr_delay_mult = 0.01
80 | self.position_lr_max_steps = 30_000
81 | self.feature_lr = 0.0025
82 | self.opacity_lr = 0.05
83 | self.scaling_lr = 0.005
84 | self.rotation_lr = 0.001
85 | self.percent_dense = 0.01
86 | self.lambda_dssim = 0.2
87 | self.densification_interval = 100
88 | self.opacity_reset_interval = 3000
89 | self.densify_from_iter = 500
90 | self.densify_until_iter = 15_000
91 | self.densify_grad_threshold = 0.0002
92 | self.mask_prune_iter = 1_000
93 | self.rvq_iter = 29_000
94 | self.mask_lr = 0.01
95 | self.net_lr = 0.01
96 | self.net_lr_step = [5_000, 15_000, 25_000]
97 | self.lambda_mask = 0.0005
98 | super().__init__(parser, "Optimization Parameters")
99 |
100 | def get_combined_args(parser : ArgumentParser):
101 | cmdlne_string = sys.argv[1:]
102 | cfgfile_string = "Namespace()"
103 | args_cmdline = parser.parse_args(cmdlne_string)
104 |
105 | try:
106 | cfgfilepath = os.path.join(args_cmdline.model_path, "cfg_args")
107 | print("Looking for config file in", cfgfilepath)
108 | with open(cfgfilepath) as cfg_file:
109 | print("Config file found: {}".format(cfgfilepath))
110 | cfgfile_string = cfg_file.read()
111 | except TypeError:
112 |         print("Config file not found")
113 | pass
114 | args_cfgfile = eval(cfgfile_string)
115 |
116 | merged_dict = vars(args_cfgfile).copy()
117 | for k,v in vars(args_cmdline).items():
118 | if v != None:
119 | merged_dict[k] = v
120 | return Namespace(**merged_dict)
121 |
--------------------------------------------------------------------------------
/convert.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import os
13 | import logging
14 | from argparse import ArgumentParser
15 | import shutil
16 |
17 | # This Python script is based on the shell converter script provided in the MipNerF 360 repository.
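# Typical usage (the path is illustrative; the scene folder must contain an "input"
# subfolder with the raw images):
#   python convert.py -s <path to scene> [--resize] [--no_gpu]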
18 | parser = ArgumentParser("Colmap converter")
19 | parser.add_argument("--no_gpu", action='store_true')
20 | parser.add_argument("--skip_matching", action='store_true')
21 | parser.add_argument("--source_path", "-s", required=True, type=str)
22 | parser.add_argument("--camera", default="OPENCV", type=str)
23 | parser.add_argument("--colmap_executable", default="", type=str)
24 | parser.add_argument("--resize", action="store_true")
25 | parser.add_argument("--magick_executable", default="", type=str)
26 | args = parser.parse_args()
27 | colmap_command = '"{}"'.format(args.colmap_executable) if len(args.colmap_executable) > 0 else "colmap"
28 | magick_command = '"{}"'.format(args.magick_executable) if len(args.magick_executable) > 0 else "magick"
29 | use_gpu = 1 if not args.no_gpu else 0
30 |
31 | if not args.skip_matching:
32 | os.makedirs(args.source_path + "/distorted/sparse", exist_ok=True)
33 |
34 | ## Feature extraction
35 | feat_extracton_cmd = colmap_command + " feature_extractor "\
36 | "--database_path " + args.source_path + "/distorted/database.db \
37 | --image_path " + args.source_path + "/input \
38 | --ImageReader.single_camera 1 \
39 | --ImageReader.camera_model " + args.camera + " \
40 | --SiftExtraction.use_gpu " + str(use_gpu)
41 | exit_code = os.system(feat_extracton_cmd)
42 | if exit_code != 0:
43 | logging.error(f"Feature extraction failed with code {exit_code}. Exiting.")
44 | exit(exit_code)
45 |
46 | ## Feature matching
47 | feat_matching_cmd = colmap_command + " exhaustive_matcher \
48 | --database_path " + args.source_path + "/distorted/database.db \
49 | --SiftMatching.use_gpu " + str(use_gpu)
50 | exit_code = os.system(feat_matching_cmd)
51 | if exit_code != 0:
52 | logging.error(f"Feature matching failed with code {exit_code}. Exiting.")
53 | exit(exit_code)
54 |
55 | ### Bundle adjustment
56 | # The default Mapper tolerance is unnecessarily large,
57 | # decreasing it speeds up bundle adjustment steps.
58 | mapper_cmd = (colmap_command + " mapper \
59 | --database_path " + args.source_path + "/distorted/database.db \
60 | --image_path " + args.source_path + "/input \
61 | --output_path " + args.source_path + "/distorted/sparse \
62 | --Mapper.ba_global_function_tolerance=0.000001")
63 | exit_code = os.system(mapper_cmd)
64 | if exit_code != 0:
65 | logging.error(f"Mapper failed with code {exit_code}. Exiting.")
66 | exit(exit_code)
67 |
68 | ### Image undistortion
69 | ## We need to undistort our images into ideal pinhole intrinsics.
70 | img_undist_cmd = (colmap_command + " image_undistorter \
71 | --image_path " + args.source_path + "/input \
72 | --input_path " + args.source_path + "/distorted/sparse/0 \
73 | --output_path " + args.source_path + "\
74 | --output_type COLMAP")
75 | exit_code = os.system(img_undist_cmd)
76 | if exit_code != 0:
77 |     logging.error(f"Undistortion failed with code {exit_code}. Exiting.")
78 | exit(exit_code)
79 |
80 | files = os.listdir(args.source_path + "/sparse")
81 | os.makedirs(args.source_path + "/sparse/0", exist_ok=True)
82 | # Copy each file from the source directory to the destination directory
83 | for file in files:
84 | if file == '0':
85 | continue
86 | source_file = os.path.join(args.source_path, "sparse", file)
87 | destination_file = os.path.join(args.source_path, "sparse", "0", file)
88 | shutil.move(source_file, destination_file)
89 |
90 | if(args.resize):
91 | print("Copying and resizing...")
92 |
93 | # Resize images.
94 | os.makedirs(args.source_path + "/images_2", exist_ok=True)
95 | os.makedirs(args.source_path + "/images_4", exist_ok=True)
96 | os.makedirs(args.source_path + "/images_8", exist_ok=True)
97 | # Get the list of files in the source directory
98 | files = os.listdir(args.source_path + "/images")
99 | # Copy each file from the source directory to the destination directory
100 | for file in files:
101 | source_file = os.path.join(args.source_path, "images", file)
102 |
103 | destination_file = os.path.join(args.source_path, "images_2", file)
104 | shutil.copy2(source_file, destination_file)
105 | exit_code = os.system(magick_command + " mogrify -resize 50% " + destination_file)
106 | if exit_code != 0:
107 | logging.error(f"50% resize failed with code {exit_code}. Exiting.")
108 | exit(exit_code)
109 |
110 | destination_file = os.path.join(args.source_path, "images_4", file)
111 | shutil.copy2(source_file, destination_file)
112 | exit_code = os.system(magick_command + " mogrify -resize 25% " + destination_file)
113 | if exit_code != 0:
114 | logging.error(f"25% resize failed with code {exit_code}. Exiting.")
115 | exit(exit_code)
116 |
117 | destination_file = os.path.join(args.source_path, "images_8", file)
118 | shutil.copy2(source_file, destination_file)
119 | exit_code = os.system(magick_command + " mogrify -resize 12.5% " + destination_file)
120 | if exit_code != 0:
121 | logging.error(f"12.5% resize failed with code {exit_code}. Exiting.")
122 | exit(exit_code)
123 |
124 | print("Done.")
125 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: c3dgs
2 | channels:
3 | - pytorch
4 | - conda-forge
5 | - defaults
6 | dependencies:
7 | - cudatoolkit=11.6
8 | - plyfile=0.8.1
9 | - python=3.7.13
10 | - pip=22.3.1
11 | - pytorch=1.12.1
12 | - torchaudio=0.12.1
13 | - torchvision=0.13.1
14 | - tqdm
15 | - pip:
16 | - dahuffman==0.4.1
17 | - vector-quantize-pytorch==1.8.1
18 | - git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch
19 | - submodules/diff-gaussian-rasterization
20 | - submodules/simple-knn
--------------------------------------------------------------------------------
/full_eval.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import os
13 | from argparse import ArgumentParser
14 |
15 | mipnerf360_outdoor_scenes = ["bicycle", "flowers", "garden", "stump", "treehill"]
16 | mipnerf360_indoor_scenes = ["room", "counter", "kitchen", "bonsai"]
17 | tanks_and_temples_scenes = ["truck", "train"]
18 | deep_blending_scenes = ["drjohnson", "playroom"]
19 |
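# Typical invocation (dataset roots are illustrative):
#   python full_eval.py --mipnerf360 <dir> --tanksandtemples <dir> --deepblending <dir> --output_path ./eval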
20 | parser = ArgumentParser(description="Full evaluation script parameters")
21 | parser.add_argument("--skip_training", action="store_true")
22 | parser.add_argument("--skip_rendering", action="store_true")
23 | parser.add_argument("--skip_metrics", action="store_true")
24 | parser.add_argument("--output_path", default="./eval")
25 | args, _ = parser.parse_known_args()
26 |
27 | all_scenes = []
28 | all_scenes.extend(mipnerf360_outdoor_scenes)
29 | all_scenes.extend(mipnerf360_indoor_scenes)
30 | all_scenes.extend(tanks_and_temples_scenes)
31 | all_scenes.extend(deep_blending_scenes)
32 |
33 | if not args.skip_training or not args.skip_rendering:
34 | parser.add_argument('--mipnerf360', "-m360", required=True, type=str)
35 | parser.add_argument("--tanksandtemples", "-tat", required=True, type=str)
36 | parser.add_argument("--deepblending", "-db", required=True, type=str)
37 | args = parser.parse_args()
38 |
39 | if not args.skip_training:
40 | common_args = " --quiet --eval --test_iterations -1 "
41 | for scene in mipnerf360_outdoor_scenes:
42 | source = args.mipnerf360 + "/" + scene
43 | os.system("python train.py -s " + source + " -i images_4 -m " + args.output_path + "/" + scene + common_args)
44 | for scene in mipnerf360_indoor_scenes:
45 | source = args.mipnerf360 + "/" + scene
46 | os.system("python train.py -s " + source + " -i images_2 -m " + args.output_path + "/" + scene + common_args)
47 | for scene in tanks_and_temples_scenes:
48 | source = args.tanksandtemples + "/" + scene
49 | os.system("python train.py -s " + source + " -m " + args.output_path + "/" + scene + common_args)
50 | for scene in deep_blending_scenes:
51 | source = args.deepblending + "/" + scene
52 | os.system("python train.py -s " + source + " -m " + args.output_path + "/" + scene + common_args)
53 |
54 | if not args.skip_rendering:
55 | all_sources = []
56 | for scene in mipnerf360_outdoor_scenes:
57 | all_sources.append(args.mipnerf360 + "/" + scene)
58 | for scene in mipnerf360_indoor_scenes:
59 | all_sources.append(args.mipnerf360 + "/" + scene)
60 | for scene in tanks_and_temples_scenes:
61 | all_sources.append(args.tanksandtemples + "/" + scene)
62 | for scene in deep_blending_scenes:
63 | all_sources.append(args.deepblending + "/" + scene)
64 |
65 | common_args = " --quiet --eval --skip_train"
66 | for scene, source in zip(all_scenes, all_sources):
67 | os.system("python render.py --iteration 7000 -s " + source + " -m " + args.output_path + "/" + scene + common_args)
68 | os.system("python render.py --iteration 30000 -s " + source + " -m " + args.output_path + "/" + scene + common_args)
69 |
70 | if not args.skip_metrics:
71 | scenes_string = ""
72 | for scene in all_scenes:
73 | scenes_string += "\"" + args.output_path + "/" + scene + "\" "
74 |
75 | os.system("python metrics.py -m " + scenes_string)
--------------------------------------------------------------------------------
/gaussian_renderer/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | import math
14 | from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
15 | from scene.gaussian_model import GaussianModel
16 | from utils.sh_utils import eval_sh
17 |
18 | def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None, itr=-1, rvq_iter=False):
19 | """
20 | Render the scene.
21 |
22 | Background tensor (bg_color) must be on GPU!
23 | """
24 |
25 | # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
26 | screenspace_points = torch.zeros_like(pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda") + 0
27 | try:
28 | screenspace_points.retain_grad()
29 | except:
30 | pass
31 |
32 | # Set up rasterization configuration
33 | tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
34 | tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
35 |
36 | raster_settings = GaussianRasterizationSettings(
37 | image_height=int(viewpoint_camera.image_height),
38 | image_width=int(viewpoint_camera.image_width),
39 | tanfovx=tanfovx,
40 | tanfovy=tanfovy,
41 | bg=bg_color,
42 | scale_modifier=scaling_modifier,
43 | viewmatrix=viewpoint_camera.world_view_transform,
44 | projmatrix=viewpoint_camera.full_proj_transform,
45 | sh_degree=pc.active_sh_degree,
46 | campos=viewpoint_camera.camera_center,
47 | prefiltered=False,
48 | debug=pipe.debug
49 | )
50 |
51 | rasterizer = GaussianRasterizer(raster_settings=raster_settings)
52 |
53 | means3D = pc.get_xyz
54 | means2D = screenspace_points
55 | cov3D_precomp = None
56 |
57 | if itr == -1:
58 | scales = pc._scaling
59 | rotations = pc._rotation
60 | opacity = pc._opacity
61 |
62 | dir_pp = (means3D - viewpoint_camera.camera_center.repeat(means3D.shape[0], 1))
63 | dir_pp = dir_pp/dir_pp.norm(dim=1, keepdim=True)
64 | shs = pc.mlp_head(torch.cat([pc._feature, pc.direction_encoding(dir_pp)], dim=-1)).unsqueeze(1)
65 |
66 | else:
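        # Straight-through estimator: the binarized mask (sigmoid(_mask) > 0.01) is used
        # in the forward pass, while gradients flow through the sigmoid so the mask
        # parameters remain trainable.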
67 | mask = ((torch.sigmoid(pc._mask) > 0.01).float()- torch.sigmoid(pc._mask)).detach() + torch.sigmoid(pc._mask)
68 | if rvq_iter:
69 | scales = pc.vq_scale(pc.get_scaling.unsqueeze(0))[0]
70 | rotations = pc.vq_rot(pc.get_rotation.unsqueeze(0))[0]
71 | scales = scales.squeeze()*mask
72 | rotations = rotations.squeeze()
73 | opacity = pc.get_opacity*mask
74 |
75 | else:
76 | scales = pc.get_scaling*mask
77 | rotations = pc.get_rotation
78 | opacity = pc.get_opacity*mask
79 |
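        # View-dependent color: contract positions to the unit sphere, query hash-grid
        # features (pc.recolor), append the encoded view direction, and decode SH
        # coefficients with the small MLP head (pc.mlp_head).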
80 | xyz = pc.contract_to_unisphere(means3D.clone().detach(), torch.tensor([-1.0, -1.0, -1.0, 1.0, 1.0, 1.0], device='cuda'))
81 | dir_pp = (means3D - viewpoint_camera.camera_center.repeat(means3D.shape[0], 1))
82 | dir_pp = dir_pp/dir_pp.norm(dim=1, keepdim=True)
83 | shs = pc.mlp_head(torch.cat([pc.recolor(xyz), pc.direction_encoding(dir_pp)], dim=-1)).unsqueeze(1)
84 |
85 | # Rasterize visible Gaussians to image, obtain their radii (on screen).
86 | rendered_image, radii = rasterizer(
87 | means3D = means3D.float(),
88 | means2D = means2D,
89 | shs = shs.float(),
90 | colors_precomp = None,
91 | opacities = opacity,
92 | scales = scales,
93 | rotations = rotations,
94 | cov3D_precomp = None)
95 |
96 | # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
97 | # They will be excluded from value updates used in the splitting criteria.
98 | return {"render": rendered_image,
99 | "viewspace_points": screenspace_points,
100 | "visibility_filter" : radii > 0,
101 | "radii": radii
102 | }
--------------------------------------------------------------------------------
/gaussian_renderer/network_gui.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | import traceback
14 | import socket
15 | import json
16 | from scene.cameras import MiniCam
17 |
18 | host = "127.0.0.1"
19 | port = 6009
20 |
21 | conn = None
22 | addr = None
23 |
24 | listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
25 |
26 | def init(wish_host, wish_port):
27 | global host, port, listener
28 | host = wish_host
29 | port = wish_port
30 | listener.bind((host, port))
31 | listener.listen()
32 | listener.settimeout(0)
33 |
34 | def try_connect():
35 | global conn, addr, listener
36 | try:
37 | conn, addr = listener.accept()
38 | print(f"\nConnected by {addr}")
39 | conn.settimeout(None)
40 | except Exception as inst:
41 | pass
42 |
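# Wire format: each message is a 4-byte little-endian payload length followed by the
# payload itself (UTF-8 JSON for messages read from the viewer).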
43 | def read():
44 | global conn
45 | messageLength = conn.recv(4)
46 | messageLength = int.from_bytes(messageLength, 'little')
47 | message = conn.recv(messageLength)
48 | return json.loads(message.decode("utf-8"))
49 |
50 | def send(message_bytes, verify):
51 | global conn
52 | if message_bytes != None:
53 | conn.sendall(message_bytes)
54 | conn.sendall(len(verify).to_bytes(4, 'little'))
55 | conn.sendall(bytes(verify, 'ascii'))
56 |
57 | def receive():
58 | message = read()
59 |
60 | width = message["resolution_x"]
61 | height = message["resolution_y"]
62 |
63 | if width != 0 and height != 0:
64 | try:
65 | do_training = bool(message["train"])
66 | fovy = message["fov_y"]
67 | fovx = message["fov_x"]
68 | znear = message["z_near"]
69 | zfar = message["z_far"]
70 | do_shs_python = bool(message["shs_python"])
71 | do_rot_scale_python = bool(message["rot_scale_python"])
72 | keep_alive = bool(message["keep_alive"])
73 | scaling_modifier = message["scaling_modifier"]
74 | world_view_transform = torch.reshape(torch.tensor(message["view_matrix"]), (4, 4)).cuda()
75 | world_view_transform[:,1] = -world_view_transform[:,1]
76 | world_view_transform[:,2] = -world_view_transform[:,2]
77 | full_proj_transform = torch.reshape(torch.tensor(message["view_projection_matrix"]), (4, 4)).cuda()
78 | full_proj_transform[:,1] = -full_proj_transform[:,1]
79 | custom_cam = MiniCam(width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform)
80 | except Exception as e:
81 | print("")
82 | traceback.print_exc()
83 | raise e
84 | return custom_cam, do_training, do_shs_python, do_rot_scale_python, keep_alive, scaling_modifier
85 | else:
86 | return None, None, None, None, None, None
--------------------------------------------------------------------------------
/lpipsPyTorch/__init__.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from .modules.lpips import LPIPS
4 |
5 |
6 | def lpips(x: torch.Tensor,
7 | y: torch.Tensor,
8 | net_type: str = 'alex',
9 | version: str = '0.1'):
10 | r"""Function that measures
11 | Learned Perceptual Image Patch Similarity (LPIPS).
12 |
13 | Arguments:
14 | x, y (torch.Tensor): the input tensors to compare.
15 | net_type (str): the network type to compare the features:
16 | 'alex' | 'squeeze' | 'vgg'. Default: 'alex'.
17 | version (str): the version of LPIPS. Default: 0.1.
18 | """
19 | device = x.device
20 | criterion = LPIPS(net_type, version).to(device)
21 | return criterion(x, y)
22 |
--------------------------------------------------------------------------------
/lpipsPyTorch/modules/lpips.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from .networks import get_network, LinLayers
5 | from .utils import get_state_dict
6 |
7 |
8 | class LPIPS(nn.Module):
9 | r"""Creates a criterion that measures
10 | Learned Perceptual Image Patch Similarity (LPIPS).
11 |
12 | Arguments:
13 | net_type (str): the network type to compare the features:
14 | 'alex' | 'squeeze' | 'vgg'. Default: 'alex'.
15 | version (str): the version of LPIPS. Default: 0.1.
16 | """
17 | def __init__(self, net_type: str = 'alex', version: str = '0.1'):
18 |
19 | assert version in ['0.1'], 'v0.1 is only supported now'
20 |
21 | super(LPIPS, self).__init__()
22 |
23 | # pretrained network
24 | self.net = get_network(net_type)
25 |
26 | # linear layers
27 | self.lin = LinLayers(self.net.n_channels_list)
28 | self.lin.load_state_dict(get_state_dict(net_type, version))
29 |
30 | def forward(self, x: torch.Tensor, y: torch.Tensor):
31 | feat_x, feat_y = self.net(x), self.net(y)
32 |
33 | diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)]
34 | res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)]
35 |
36 | return torch.sum(torch.cat(res, 0), 0, True)
37 |
--------------------------------------------------------------------------------
/lpipsPyTorch/modules/networks.py:
--------------------------------------------------------------------------------
1 | from typing import Sequence
2 |
3 | from itertools import chain
4 |
5 | import torch
6 | import torch.nn as nn
7 | from torchvision import models
8 |
9 | from .utils import normalize_activation
10 |
11 |
12 | def get_network(net_type: str):
13 | if net_type == 'alex':
14 | return AlexNet()
15 | elif net_type == 'squeeze':
16 | return SqueezeNet()
17 | elif net_type == 'vgg':
18 | return VGG16()
19 | else:
20 | raise NotImplementedError('choose net_type from [alex, squeeze, vgg].')
21 |
22 |
23 | class LinLayers(nn.ModuleList):
24 | def __init__(self, n_channels_list: Sequence[int]):
25 | super(LinLayers, self).__init__([
26 | nn.Sequential(
27 | nn.Identity(),
28 | nn.Conv2d(nc, 1, 1, 1, 0, bias=False)
29 | ) for nc in n_channels_list
30 | ])
31 |
32 | for param in self.parameters():
33 | param.requires_grad = False
34 |
35 |
36 | class BaseNet(nn.Module):
37 | def __init__(self):
38 | super(BaseNet, self).__init__()
39 |
40 | # register buffer
41 | self.register_buffer(
42 | 'mean', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
43 | self.register_buffer(
44 | 'std', torch.Tensor([.458, .448, .450])[None, :, None, None])
45 |
46 | def set_requires_grad(self, state: bool):
47 | for param in chain(self.parameters(), self.buffers()):
48 | param.requires_grad = state
49 |
50 | def z_score(self, x: torch.Tensor):
51 | return (x - self.mean) / self.std
52 |
53 | def forward(self, x: torch.Tensor):
54 | x = self.z_score(x)
55 |
56 | output = []
57 | for i, (_, layer) in enumerate(self.layers._modules.items(), 1):
58 | x = layer(x)
59 | if i in self.target_layers:
60 | output.append(normalize_activation(x))
61 | if len(output) == len(self.target_layers):
62 | break
63 | return output
64 |
65 |
66 | class SqueezeNet(BaseNet):
67 | def __init__(self):
68 | super(SqueezeNet, self).__init__()
69 |
70 | self.layers = models.squeezenet1_1(True).features
71 | self.target_layers = [2, 5, 8, 10, 11, 12, 13]
72 | self.n_channels_list = [64, 128, 256, 384, 384, 512, 512]
73 |
74 | self.set_requires_grad(False)
75 |
76 |
77 | class AlexNet(BaseNet):
78 | def __init__(self):
79 | super(AlexNet, self).__init__()
80 |
81 | self.layers = models.alexnet(True).features
82 | self.target_layers = [2, 5, 8, 10, 12]
83 | self.n_channels_list = [64, 192, 384, 256, 256]
84 |
85 | self.set_requires_grad(False)
86 |
87 |
88 | class VGG16(BaseNet):
89 | def __init__(self):
90 | super(VGG16, self).__init__()
91 |
92 | self.layers = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1).features
93 | self.target_layers = [4, 9, 16, 23, 30]
94 | self.n_channels_list = [64, 128, 256, 512, 512]
95 |
96 | self.set_requires_grad(False)
97 |
--------------------------------------------------------------------------------
/lpipsPyTorch/modules/utils.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 |
3 | import torch
4 |
5 |
6 | def normalize_activation(x, eps=1e-10):
7 | norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True))
8 | return x / (norm_factor + eps)
9 |
10 |
11 | def get_state_dict(net_type: str = 'alex', version: str = '0.1'):
12 | # build url
13 | url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \
14 | + f'master/lpips/weights/v{version}/{net_type}.pth'
15 |
16 | # download
17 | old_state_dict = torch.hub.load_state_dict_from_url(
18 | url, progress=True,
19 | map_location=None if torch.cuda.is_available() else torch.device('cpu')
20 | )
21 |
22 | # rename keys
23 | new_state_dict = OrderedDict()
24 | for key, val in old_state_dict.items():
25 | new_key = key
26 | new_key = new_key.replace('lin', '')
27 | new_key = new_key.replace('model.', '')
28 | new_state_dict[new_key] = val
29 |
30 | return new_state_dict
31 |
--------------------------------------------------------------------------------
/metrics.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | from pathlib import Path
13 | import os
14 | from PIL import Image
15 | import torch
16 | import torchvision.transforms.functional as tf
17 | from utils.loss_utils import ssim
18 | from lpipsPyTorch import lpips
19 | import json
20 | from tqdm import tqdm
21 | from utils.image_utils import psnr
22 | from argparse import ArgumentParser
23 |
24 | def readImages(renders_dir, gt_dir):
25 | renders = []
26 | gts = []
27 | image_names = []
28 | for fname in os.listdir(renders_dir):
29 | render = Image.open(renders_dir / fname)
30 | gt = Image.open(gt_dir / fname)
31 | renders.append(tf.to_tensor(render).unsqueeze(0)[:, :3, :, :].cuda())
32 | gts.append(tf.to_tensor(gt).unsqueeze(0)[:, :3, :, :].cuda())
33 | image_names.append(fname)
34 | return renders, gts, image_names
35 |
36 | def evaluate(model_paths):
37 |
38 | full_dict = {}
39 | per_view_dict = {}
40 | full_dict_polytopeonly = {}
41 | per_view_dict_polytopeonly = {}
42 | print("")
43 |
44 | for scene_dir in model_paths:
45 | try:
46 | print("Scene:", scene_dir)
47 | full_dict[scene_dir] = {}
48 | per_view_dict[scene_dir] = {}
49 | full_dict_polytopeonly[scene_dir] = {}
50 | per_view_dict_polytopeonly[scene_dir] = {}
51 |
52 | test_dir = Path(scene_dir) / "test"
53 |
54 | for method in os.listdir(test_dir):
55 | print("Method:", method)
56 |
57 | full_dict[scene_dir][method] = {}
58 | per_view_dict[scene_dir][method] = {}
59 | full_dict_polytopeonly[scene_dir][method] = {}
60 | per_view_dict_polytopeonly[scene_dir][method] = {}
61 |
62 | method_dir = test_dir / method
63 | gt_dir = method_dir/ "gt"
64 | renders_dir = method_dir / "renders"
65 | renders, gts, image_names = readImages(renders_dir, gt_dir)
66 |
67 | ssims = []
68 | psnrs = []
69 | lpipss = []
70 |
71 | for idx in tqdm(range(len(renders)), desc="Metric evaluation progress"):
72 | ssims.append(ssim(renders[idx], gts[idx]))
73 | psnrs.append(psnr(renders[idx], gts[idx]))
74 | lpipss.append(lpips(renders[idx], gts[idx], net_type='vgg'))
75 |
76 | print(" SSIM : {:>12.7f}".format(torch.tensor(ssims).mean(), ".5"))
77 | print(" PSNR : {:>12.7f}".format(torch.tensor(psnrs).mean(), ".5"))
78 | print(" LPIPS: {:>12.7f}".format(torch.tensor(lpipss).mean(), ".5"))
79 | print("")
80 |
81 | full_dict[scene_dir][method].update({"SSIM": torch.tensor(ssims).mean().item(),
82 | "PSNR": torch.tensor(psnrs).mean().item(),
83 | "LPIPS": torch.tensor(lpipss).mean().item()})
84 | per_view_dict[scene_dir][method].update({"SSIM": {name: ssim for ssim, name in zip(torch.tensor(ssims).tolist(), image_names)},
85 | "PSNR": {name: psnr for psnr, name in zip(torch.tensor(psnrs).tolist(), image_names)},
86 | "LPIPS": {name: lp for lp, name in zip(torch.tensor(lpipss).tolist(), image_names)}})
87 |
88 | with open(scene_dir + "/results.json", 'w') as fp:
89 | json.dump(full_dict[scene_dir], fp, indent=True)
90 | with open(scene_dir + "/per_view.json", 'w') as fp:
91 | json.dump(per_view_dict[scene_dir], fp, indent=True)
92 | except:
93 | print("Unable to compute metrics for model", scene_dir)
94 |
95 | if __name__ == "__main__":
96 | device = torch.device("cuda:0")
97 | torch.cuda.set_device(device)
98 |
99 | # Set up command line argument parser
100 | parser = ArgumentParser(description="Training script parameters")
101 | parser.add_argument('--model_paths', '-m', required=True, nargs="+", type=str, default=[])
102 | args = parser.parse_args()
103 | evaluate(args.model_paths)
104 |
--------------------------------------------------------------------------------
/render.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | from scene import Scene
14 | import os
15 | from tqdm import tqdm
16 | from os import makedirs
17 | from gaussian_renderer import render
18 | import torchvision
19 | from utils.general_utils import safe_state
20 | from argparse import ArgumentParser
21 | from arguments import ModelParams, PipelineParams, get_combined_args
22 | from gaussian_renderer import GaussianModel
23 |
24 | def render_set(model_path, name, iteration, views, gaussians, pipeline, background):
25 | render_path = os.path.join(model_path, name, "ours_{}".format(iteration), "renders")
26 | gts_path = os.path.join(model_path, name, "ours_{}".format(iteration), "gt")
27 |
28 | makedirs(render_path, exist_ok=True)
29 | makedirs(gts_path, exist_ok=True)
30 |
31 | for idx, view in enumerate(tqdm(views, desc="Rendering progress")):
32 | rendering = render(view, gaussians, pipeline, background)["render"]
33 | gt = view.original_image[0:3, :, :]
34 | torchvision.utils.save_image(rendering, os.path.join(render_path, '{0:05d}'.format(idx) + ".png"))
35 | torchvision.utils.save_image(gt, os.path.join(gts_path, '{0:05d}'.format(idx) + ".png"))
36 |
37 | def render_sets(dataset : ModelParams, iteration : int, pipeline : PipelineParams, skip_train : bool, skip_test : bool):
38 | with torch.no_grad():
39 | gaussians = GaussianModel(dataset)
40 | scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False)
41 |
42 | gaussians.precompute()
43 |
44 | bg_color = [1,1,1] if dataset.white_background else [0, 0, 0]
45 | background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
46 |
47 | if not skip_train:
48 | render_set(dataset.model_path, "train", scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background)
49 |
50 | if not skip_test:
51 | render_set(dataset.model_path, "test", scene.loaded_iter, scene.getTestCameras(), gaussians, pipeline, background)
52 |
53 | if __name__ == "__main__":
54 | # Set up command line argument parser
55 | parser = ArgumentParser(description="Testing script parameters")
56 | model = ModelParams(parser, sentinel=True)
57 | pipeline = PipelineParams(parser)
58 | parser.add_argument("--iteration", default=-1, type=int)
59 | parser.add_argument("--skip_train", action="store_true")
60 | parser.add_argument("--skip_test", action="store_true")
61 | parser.add_argument("--quiet", action="store_true")
62 |
63 | args = get_combined_args(parser)
64 | print("Rendering " + args.model_path)
65 |
66 | # Initialize system state (RNG)
67 | safe_state(args.quiet)
68 |
69 | render_sets(model.extract(args), args.iteration, pipeline.extract(args), args.skip_train, args.skip_test)
--------------------------------------------------------------------------------
/render_video.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | from scene import Scene
14 | import os
15 | from tqdm import tqdm
16 | import numpy as np
17 | from os import makedirs
18 | from gaussian_renderer import render
19 | import torchvision
20 | from utils.general_utils import safe_state
21 | from argparse import ArgumentParser
22 | from arguments import ModelParams, PipelineParams, get_combined_args
23 | from gaussian_renderer import GaussianModel
24 | # from icecream import ic  # optional debugging helper, not listed in environment.yml
25 | import copy
26 |
27 | from utils.graphics_utils import getWorld2View2
28 | from utils.pose_utils import generate_ellipse_path, generate_spherical_sample_path, generate_spiral_path, generate_spherify_path, gaussian_poses, circular_poses
29 | # import stepfun
30 |
31 |
32 |
33 | def render_set(model_path, name, iteration, views, gaussians, pipeline, background):
34 | render_path = os.path.join(model_path, name, "ours_{}".format(iteration), "renders")
35 | gts_path = os.path.join(model_path, name, "ours_{}".format(iteration), "gt")
36 |
37 | makedirs(render_path, exist_ok=True)
38 | makedirs(gts_path, exist_ok=True)
39 |
40 | for idx, view in enumerate(tqdm(views, desc="Rendering progress")):
41 | rendering = render(view, gaussians, pipeline, background)["render"]
42 | gt = view.original_image[0:3, :, :]
43 | torchvision.utils.save_image(rendering, os.path.join(render_path, '{0:05d}'.format(idx) + ".png"))
44 | torchvision.utils.save_image(gt, os.path.join(gts_path, '{0:05d}'.format(idx) + ".png"))
45 |
46 |
47 | # xy circular
48 | def render_circular_video(model_path, iteration, views, gaussians, pipeline, background, radius=0.5, n_frames=240):
49 | render_path = os.path.join(model_path, 'circular', "ours_{}".format(iteration))
50 | os.makedirs(render_path, exist_ok=True)
51 | makedirs(render_path, exist_ok=True)
52 | # view = views[0]
53 | for idx in range(n_frames):
54 | view = copy.deepcopy(views[13])
55 | angle = 2 * np.pi * idx / n_frames
56 | cam = circular_poses(view, radius, angle)
57 | rendering = render(cam, gaussians, pipeline, background)["render"]
58 | torchvision.utils.save_image(rendering, os.path.join(render_path, '{0:05d}'.format(idx) + ".png"))
59 |
60 |
61 | def render_video(model_path, iteration, views, gaussians, pipeline, background):
62 | render_path = os.path.join(model_path, 'video', "ours_{}".format(iteration))
63 | makedirs(render_path, exist_ok=True)
64 | view = views[0]
65 | # render_path_spiral
66 | # render_path_spherical
67 | for idx, pose in enumerate(tqdm(generate_ellipse_path(views,n_frames=600), desc="Rendering progress")):
68 | view.world_view_transform = torch.tensor(getWorld2View2(pose[:3, :3].T, pose[:3, 3], view.trans, view.scale)).transpose(0, 1).cuda()
69 | view.full_proj_transform = (view.world_view_transform.unsqueeze(0).bmm(view.projection_matrix.unsqueeze(0))).squeeze(0)
70 | view.camera_center = view.world_view_transform.inverse()[3, :3]
71 | rendering = render(view, gaussians, pipeline, background)["render"]
72 | torchvision.utils.save_image(rendering, os.path.join(render_path, '{0:05d}'.format(idx) + ".png"))
73 |
74 |
75 | def gaussian_render(model_path, iteration, views, gaussians, pipeline, background, args):
76 | views = views[:10] #take the first 10 views and check gaussian view point
77 | render_path = os.path.join(model_path, 'video', "gaussians_{}_std{}".format(iteration, args.std))
78 | makedirs(render_path, exist_ok=True)
79 |
80 | for i, view in enumerate(views):
81 | rendering = render(view, gaussians, pipeline, background)["render"]
82 | sub_path = os.path.join(render_path,"view_"+str(i))
83 | makedirs(sub_path ,exist_ok=True)
84 | torchvision.utils.save_image(rendering, os.path.join(sub_path, "gt"+'{0:05d}'.format(i) + ".png"))
85 | for j in range(10):
86 | n_view = copy.deepcopy(view)
87 |             g_view = gaussian_poses(n_view, args.mean, args.std)
88 | rendering = render(g_view, gaussians, pipeline, background)["render"]
89 | torchvision.utils.save_image(rendering, os.path.join(sub_path, '{0:05d}'.format(j) + ".png"))
90 |
91 |
92 | def render_sets(dataset : ModelParams, iteration : int, pipeline : PipelineParams, skip_train : bool, skip_test : bool, video: bool, circular:bool, radius: float, args):
93 | with torch.no_grad():
94 | gaussians = GaussianModel(dataset)
95 | scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False)
96 |
97 | gaussians.precompute()
98 |
99 | bg_color = [1,1,1] if dataset.white_background else [0, 0, 0]
100 | background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
101 |
102 | if not skip_train:
103 | render_set(dataset.model_path, "train", scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background)
104 |
105 | if not skip_test:
106 | render_set(dataset.model_path, "test", scene.loaded_iter, scene.getTestCameras(), gaussians, pipeline, background)
107 | if circular:
108 | render_circular_video(dataset.model_path, scene.loaded_iter, scene.getTestCameras(), gaussians, pipeline, background,radius)
109 | # by default generate ellipse path, other options include spiral, circular, or other generate_xxx_path function from utils.pose_utils
110 | # Modify trajectory function in render_video's enumerate
111 | if video:
112 | render_video(dataset.model_path, scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background)
113 | #sample virtual view
114 | if args.gaussians:
115 | gaussian_render(dataset.model_path, scene.loaded_iter, scene.getTestCameras(), gaussians, pipeline, background, args)
116 |
117 |
118 | if __name__ == "__main__":
119 | # Set up command line argument parser
120 | parser = ArgumentParser(description="Testing script parameters")
121 | model = ModelParams(parser, sentinel=True)
122 | pipeline = PipelineParams(parser)
123 | parser.add_argument("--iteration", default=-1, type=int)
124 | parser.add_argument("--skip_train", action="store_true")
125 | parser.add_argument("--skip_test", action="store_true")
126 | parser.add_argument("--quiet", action="store_true")
127 | parser.add_argument("--video", action="store_true")
128 | parser.add_argument("--circular", action="store_true")
129 | parser.add_argument("--radius", default=5, type=float)
130 | parser.add_argument("--gaussians", action="store_true")
131 | parser.add_argument("--mean", default=0, type=float)
132 | parser.add_argument("--std", default=0.03, type=float)
133 | args = get_combined_args(parser)
134 | print("Rendering " + args.model_path)
135 |
136 | # Initialize system state (RNG)
137 | safe_state(args.quiet)
138 |
139 | render_sets(model.extract(args), args.iteration, pipeline.extract(args), args.skip_train, args.skip_test, args.video, args.circular, args.radius, args)
--------------------------------------------------------------------------------
/results/DeepBlending/drjohnson.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,29.2641716,0.9000701,0.2578339,47979841,1339005
3 | w/ PP,29.1633472 ,0.8993502 ,0.2573153 ,25739989,1339005
--------------------------------------------------------------------------------
/results/DeepBlending/playroom.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,30.3238049,0.9021857,0.2581305,38448757,778353
3 | w/ PP,30.2980682 ,0.9004922 ,0.2593208 ,17492977,778353
--------------------------------------------------------------------------------
/results/MipNeRF360/bicycle.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,24.7704182,0.7226102,0.2864661,62985469,2221689
3 | w/ PP,24.7274151,0.7224735,0.2844863,38315366,2221689
--------------------------------------------------------------------------------
/results/MipNeRF360/bonsai.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,32.0812378,0.9386276,0.1926254,35434572,601048
3 | w/ PP,31.9813766,0.9368871,0.1932859,16398658 ,601048
--------------------------------------------------------------------------------
/results/MipNeRF360/counter.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,28.7091751,0.9023723,0.2054972,34340180,536672
3 | w/ PP,28.6346874,0.9009139,0.2056362,15223889 ,536672
--------------------------------------------------------------------------------
/results/MipNeRF360/flowers.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,20.8930435,0.555903,0.3987166,51151922,1525598
3 | w/ PP,20.8905315,0.5539153,0.399207026,32049406 ,1525598
--------------------------------------------------------------------------------
/results/MipNeRF360/garden.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,26.8082218,0.831819,0.1607804,62780109,2209609
3 | w/ PP,26.7240162,0.8310673,0.1582346,43256956 ,2209609
--------------------------------------------------------------------------------
/results/MipNeRF360/kitchen.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,30.4778423,0.9193306,0.1312616,44446612,1131168
3 | w/ PP,30.4825935,0.9194022,0.1304301,24394343 ,1131168
--------------------------------------------------------------------------------
/results/MipNeRF360/room.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,30.8819199,0.9191906,0.2093959,34212068,529136
3 | w/ PP,30.8827095,0.9184549,0.2088341,15013116 ,529136
--------------------------------------------------------------------------------
/results/MipNeRF360/stump.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,26.4638596,0.756788,0.2782411,54662269,1732089
3 | w/ PP,26.3067513,0.7535672,0.2796539,33831154 ,1732089
--------------------------------------------------------------------------------
/results/MipNeRF360/treehill.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,22.6450615,0.6377673,0.3627161,59326338,2006446
3 | w/ PP,22.671278,0.6367845,0.36341,39079209,2006446
--------------------------------------------------------------------------------
/results/SyntheticNeRF/chair.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,34.911171,0.9862,0.0125751,6110690,153570
3 | w/ PP,34.5797844,0.9850635,0.0133816,2891271,153570
--------------------------------------------------------------------------------
/results/SyntheticNeRF/drums.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,26.1796284,0.9530293,0.0412945,6536455,178615
3 | w/ PP,26.0142574,0.9513033,0.0420193,3250117,178615
--------------------------------------------------------------------------------
/results/SyntheticNeRF/ficus.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,35.43647,0.9866442,0.0133926,4926470,83910
3 | w/ PP,35.0561562,0.9866442,0.0133926,1918502,83910
--------------------------------------------------------------------------------
/results/SyntheticNeRF/hotdog.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,37.3823586,0.9839702,0.0225346,4591298,64194
3 | w/ PP,36.7135887,0.9826865,0.0234271,1664588,64194
--------------------------------------------------------------------------------
/results/SyntheticNeRF/lego.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,35.4798012,0.9812493,0.0181895,6421042,171826
3 | w/ PP,34.9605331,0.9791891,0.0195022,3183639,171826
--------------------------------------------------------------------------------
/results/SyntheticNeRF/materials.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,29.9688034,0.9579973,0.0419459,5322196,107188
3 | w/ PP,29.0439186,0.9544277,0.0195022,2312623,107188
--------------------------------------------------------------------------------
/results/SyntheticNeRF/mic.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,35.8115273,0.9913212,0.0079311,4452255,56015
3 | w/ PP,35.5709839,0.9908366,0.0083826,1487552,56015
--------------------------------------------------------------------------------
/results/SyntheticNeRF/ship.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,31.5086517,0.9049225,0.1134113,6023514,148442
3 | w/ PP,31.061182,0.9026996,0.1148738,3052476,148442
--------------------------------------------------------------------------------
/results/TanksAndTemples/train.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,21.5572186,0.7916417,0.2396799,37294134,710434
3 | w/ PP,21.6193301,0.7916245,0.2401230,17521782,710434
--------------------------------------------------------------------------------
/results/TanksAndTemples/truck.csv:
--------------------------------------------------------------------------------
1 | Submethod,PSNR,SSIM,LPIPS,Size [Bytes],#Gaussians
2 | w/o PP,25.0735264,0.8705746,0.163094,41573442,962158
3 | w/ PP,25.0247765,0.8698489,0.1630282,20254537,962158
--------------------------------------------------------------------------------
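
All per-scene CSVs above share the same header (Submethod, PSNR, SSIM, LPIPS, Size [Bytes], #Gaussians), so they are easy to aggregate. A minimal sketch, not part of the repository, assuming it is run from the repository root:

import csv
import glob

rows = []
for path in sorted(glob.glob("results/*/*.csv")):
    with open(path, newline="") as f:
        for row in csv.DictReader(f):
            row["scene"] = path          # remember which dataset/scene the row came from
            rows.append(row)

# average PSNR and total storage for the post-processed ("w/ PP") variant
pp = [r for r in rows if r["Submethod"].strip() == "w/ PP"]
mean_psnr = sum(float(r["PSNR"]) for r in pp) / len(pp)
total_mb = sum(int(r["Size [Bytes]"]) for r in pp) / 10**6
print(f"w/ PP: mean PSNR {mean_psnr:.2f}, total size {total_mb:.1f} MB")
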
/scene/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import os
13 | import random
14 | import json
15 | import torch
16 | from utils.system_utils import searchForMaxIteration
17 | from scene.dataset_readers import sceneLoadTypeCallbacks
18 | from scene.gaussian_model import GaussianModel
19 | from arguments import ModelParams
20 | from utils.camera_utils import cameraList_from_camInfos, camera_to_JSON
21 |
22 | class Scene:
23 |
24 | gaussians : GaussianModel
25 |
26 | def __init__(self, args : ModelParams, gaussians : GaussianModel, load_iteration=None, shuffle=True, resolution_scales=[1.0]):
27 | """
28 |         :param args: model parameters; args.source_path points to the COLMAP scene main folder.
29 | """
30 | self.model_path = args.model_path
31 | self.loaded_iter = None
32 | self.gaussians = gaussians
33 |
34 | if load_iteration:
35 | if load_iteration == -1:
36 | self.loaded_iter = searchForMaxIteration(os.path.join(self.model_path, "point_cloud"))
37 | else:
38 | self.loaded_iter = load_iteration
39 | print("Loading trained model at iteration {}".format(self.loaded_iter))
40 |
41 | self.train_cameras = {}
42 | self.test_cameras = {}
43 |
44 | if os.path.exists(os.path.join(args.source_path, "sparse")):
45 | scene_info = sceneLoadTypeCallbacks["Colmap"](args.source_path, args.images, args.eval)
46 | elif os.path.exists(os.path.join(args.source_path, "transforms_train.json")):
47 | print("Found transforms_train.json file, assuming Blender data set!")
48 | scene_info = sceneLoadTypeCallbacks["Blender"](args.source_path, args.white_background, args.eval)
49 | else:
50 | assert False, "Could not recognize scene type!"
51 |
52 | if not self.loaded_iter:
53 | with open(scene_info.ply_path, 'rb') as src_file, open(os.path.join(self.model_path, "input.ply") , 'wb') as dest_file:
54 | dest_file.write(src_file.read())
55 | json_cams = []
56 | camlist = []
57 | if scene_info.test_cameras:
58 | camlist.extend(scene_info.test_cameras)
59 | if scene_info.train_cameras:
60 | camlist.extend(scene_info.train_cameras)
61 | for id, cam in enumerate(camlist):
62 | json_cams.append(camera_to_JSON(id, cam))
63 | with open(os.path.join(self.model_path, "cameras.json"), 'w') as file:
64 | json.dump(json_cams, file)
65 |
66 | if shuffle:
67 | random.shuffle(scene_info.train_cameras) # Multi-res consistent random shuffling
68 | random.shuffle(scene_info.test_cameras) # Multi-res consistent random shuffling
69 |
70 | self.cameras_extent = scene_info.nerf_normalization["radius"]
71 |
72 | for resolution_scale in resolution_scales:
73 | print("Loading Training Cameras")
74 | self.train_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.train_cameras, resolution_scale, args)
75 | print("Loading Test Cameras")
76 | self.test_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.test_cameras, resolution_scale, args)
77 |
78 | if self.loaded_iter:
79 | self.gaussians.load_model(os.path.join(self.model_path,
80 | "point_cloud",
81 | "iteration_" + str(self.loaded_iter),
82 | "point_cloud"))
83 | else:
84 | self.gaussians.create_from_pcd(scene_info.point_cloud, self.cameras_extent)
85 |
86 | def save(self, iteration, compress=False, store=False):
87 | point_cloud_path = os.path.join(self.model_path, "point_cloud/iteration_{}".format(iteration))
88 | if store:
89 | if compress:
90 | self.gaussians.save_npz_pp(os.path.join(point_cloud_path, "point_cloud"))
91 | else:
92 | self.gaussians.save_npz(os.path.join(point_cloud_path, "point_cloud"))
93 | else:
94 | self.gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply"))
95 | torch.save(torch.nn.ModuleList([self.gaussians.recolor, self.gaussians.mlp_head]).state_dict(), os.path.join(point_cloud_path, "point_cloud.pth"))
96 |
97 | def getTrainCameras(self, scale=1.0):
98 | return self.train_cameras[scale]
99 |
100 | def getTestCameras(self, scale=1.0):
101 | return self.test_cameras[scale]
--------------------------------------------------------------------------------
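
The Scene class above is the entry point used by train.py and render.py. A minimal driving sketch, not part of the repository; it assumes that ModelParams in arguments/__init__.py follows the usual 3DGS pattern of registering command-line flags (including -s/--source_path and -m/--model_path), returning the parsed group via extract(), and exposing the rvq_size/rvq_num/max_hashmap fields that GaussianModel.__init__ reads. The paths are placeholders.

from argparse import ArgumentParser

from arguments import ModelParams
from scene import Scene, GaussianModel

parser = ArgumentParser()
lp = ModelParams(parser)                      # registers -s/--source_path, -m/--model_path, ...
args = parser.parse_args(["-s", "data/garden", "-m", "output/garden"])
model_args = lp.extract(args)

gaussians = GaussianModel(model_args)         # builds RVQ codebooks, hash grid and view-dependent MLP
scene = Scene(model_args, gaussians)          # picks the COLMAP or Blender loader, reads cameras + points
scene.save(30000, compress=True, store=True)  # writes point_cloud_pp.npz via GaussianModel.save_npz_pp
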
/scene/cameras.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | from torch import nn
14 | import numpy as np
15 | from utils.graphics_utils import getWorld2View2, getProjectionMatrix
16 |
17 | class Camera(nn.Module):
18 | def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,
19 | image_name, uid,
20 | trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda"
21 | ):
22 | super(Camera, self).__init__()
23 |
24 | self.uid = uid
25 | self.colmap_id = colmap_id
26 | self.R = R
27 | self.T = T
28 | self.FoVx = FoVx
29 | self.FoVy = FoVy
30 | self.image_name = image_name
31 |
32 | try:
33 | self.data_device = torch.device(data_device)
34 | except Exception as e:
35 | print(e)
36 | print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" )
37 | self.data_device = torch.device("cuda")
38 |
39 | self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
40 | self.image_width = self.original_image.shape[2]
41 | self.image_height = self.original_image.shape[1]
42 |
43 | if gt_alpha_mask is not None:
44 | self.original_image *= gt_alpha_mask.to(self.data_device)
45 | else:
46 | self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)
47 |
48 | self.zfar = 100.0
49 | self.znear = 0.01
50 |
51 | self.trans = trans
52 | self.scale = scale
53 |
54 | self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
55 | self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda()
56 | self.full_proj_transform = (self.world_view_transform.unsqueeze(0).bmm(self.projection_matrix.unsqueeze(0))).squeeze(0)
57 | self.camera_center = self.world_view_transform.inverse()[3, :3]
58 |
59 | class MiniCam:
60 | def __init__(self, width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform):
61 | self.image_width = width
62 | self.image_height = height
63 | self.FoVy = fovy
64 | self.FoVx = fovx
65 | self.znear = znear
66 | self.zfar = zfar
67 | self.world_view_transform = world_view_transform
68 | self.full_proj_transform = full_proj_transform
69 | view_inv = torch.inverse(self.world_view_transform)
70 | self.camera_center = view_inv[3][:3]
71 |
72 |
--------------------------------------------------------------------------------
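
Both matrices stored on Camera/MiniCam are kept transposed, so points are treated as row vectors and multiplied from the left; camera_center is likewise read from the last row of the inverted view matrix. A small sketch, not part of the repository, of projecting world-space points with the precomputed full_proj_transform:

import torch

def project_to_ndc(cam, xyz_world):
    """xyz_world: (N, 3) tensor on the same device as the camera matrices."""
    ones = torch.ones_like(xyz_world[:, :1])
    p_hom = torch.cat([xyz_world, ones], dim=1)             # (N, 4) homogeneous row vectors
    p_clip = p_hom @ cam.full_proj_transform                # row-vector convention: multiply from the left
    return p_clip[:, :3] / p_clip[:, 3:4].clamp(min=1e-7)   # perspective divide -> normalized device coords
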
/scene/colmap_loader.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import numpy as np
13 | import collections
14 | import struct
15 |
16 | CameraModel = collections.namedtuple(
17 | "CameraModel", ["model_id", "model_name", "num_params"])
18 | Camera = collections.namedtuple(
19 | "Camera", ["id", "model", "width", "height", "params"])
20 | BaseImage = collections.namedtuple(
21 | "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
22 | Point3D = collections.namedtuple(
23 | "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])
24 | CAMERA_MODELS = {
25 | CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3),
26 | CameraModel(model_id=1, model_name="PINHOLE", num_params=4),
27 | CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4),
28 | CameraModel(model_id=3, model_name="RADIAL", num_params=5),
29 | CameraModel(model_id=4, model_name="OPENCV", num_params=8),
30 | CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8),
31 | CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12),
32 | CameraModel(model_id=7, model_name="FOV", num_params=5),
33 | CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4),
34 | CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5),
35 | CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12)
36 | }
37 | CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model)
38 | for camera_model in CAMERA_MODELS])
39 | CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model)
40 | for camera_model in CAMERA_MODELS])
41 |
42 |
43 | def qvec2rotmat(qvec):
44 | return np.array([
45 | [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
46 | 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
47 | 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
48 | [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
49 | 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
50 | 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
51 | [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
52 | 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
53 | 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
54 |
55 | def rotmat2qvec(R):
56 | Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
57 | K = np.array([
58 | [Rxx - Ryy - Rzz, 0, 0, 0],
59 | [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
60 | [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
61 | [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
62 | eigvals, eigvecs = np.linalg.eigh(K)
63 | qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
64 | if qvec[0] < 0:
65 | qvec *= -1
66 | return qvec
67 |
68 | class Image(BaseImage):
69 | def qvec2rotmat(self):
70 | return qvec2rotmat(self.qvec)
71 |
72 | def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
73 | """Read and unpack the next bytes from a binary file.
74 | :param fid:
75 | :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
76 | :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
77 | :param endian_character: Any of {@, =, <, >, !}
78 | :return: Tuple of read and unpacked values.
79 | """
80 | data = fid.read(num_bytes)
81 | return struct.unpack(endian_character + format_char_sequence, data)
82 |
83 | def read_points3D_text(path):
84 | """
85 | see: src/base/reconstruction.cc
86 | void Reconstruction::ReadPoints3DText(const std::string& path)
87 | void Reconstruction::WritePoints3DText(const std::string& path)
88 | """
89 | xyzs = None
90 | rgbs = None
91 | errors = None
92 | num_points = 0
93 | with open(path, "r") as fid:
94 | while True:
95 | line = fid.readline()
96 | if not line:
97 | break
98 | line = line.strip()
99 | if len(line) > 0 and line[0] != "#":
100 | num_points += 1
101 |
102 |
103 | xyzs = np.empty((num_points, 3))
104 | rgbs = np.empty((num_points, 3))
105 | errors = np.empty((num_points, 1))
106 | count = 0
107 | with open(path, "r") as fid:
108 | while True:
109 | line = fid.readline()
110 | if not line:
111 | break
112 | line = line.strip()
113 | if len(line) > 0 and line[0] != "#":
114 | elems = line.split()
115 | xyz = np.array(tuple(map(float, elems[1:4])))
116 | rgb = np.array(tuple(map(int, elems[4:7])))
117 | error = np.array(float(elems[7]))
118 | xyzs[count] = xyz
119 | rgbs[count] = rgb
120 | errors[count] = error
121 | count += 1
122 |
123 | return xyzs, rgbs, errors
124 |
125 | def read_points3D_binary(path_to_model_file):
126 | """
127 | see: src/base/reconstruction.cc
128 | void Reconstruction::ReadPoints3DBinary(const std::string& path)
129 | void Reconstruction::WritePoints3DBinary(const std::string& path)
130 | """
131 |
132 |
133 | with open(path_to_model_file, "rb") as fid:
134 | num_points = read_next_bytes(fid, 8, "Q")[0]
135 |
136 | xyzs = np.empty((num_points, 3))
137 | rgbs = np.empty((num_points, 3))
138 | errors = np.empty((num_points, 1))
139 |
140 | for p_id in range(num_points):
141 | binary_point_line_properties = read_next_bytes(
142 | fid, num_bytes=43, format_char_sequence="QdddBBBd")
143 | xyz = np.array(binary_point_line_properties[1:4])
144 | rgb = np.array(binary_point_line_properties[4:7])
145 | error = np.array(binary_point_line_properties[7])
146 | track_length = read_next_bytes(
147 | fid, num_bytes=8, format_char_sequence="Q")[0]
148 | track_elems = read_next_bytes(
149 | fid, num_bytes=8*track_length,
150 | format_char_sequence="ii"*track_length)
151 | xyzs[p_id] = xyz
152 | rgbs[p_id] = rgb
153 | errors[p_id] = error
154 | return xyzs, rgbs, errors
155 |
156 | def read_intrinsics_text(path):
157 | """
158 | Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
159 | """
160 | cameras = {}
161 | with open(path, "r") as fid:
162 | while True:
163 | line = fid.readline()
164 | if not line:
165 | break
166 | line = line.strip()
167 | if len(line) > 0 and line[0] != "#":
168 | elems = line.split()
169 | camera_id = int(elems[0])
170 | model = elems[1]
171 |                 assert model == "PINHOLE", "While the loader supports other types, the rest of the code assumes PINHOLE"
172 | width = int(elems[2])
173 | height = int(elems[3])
174 | params = np.array(tuple(map(float, elems[4:])))
175 | cameras[camera_id] = Camera(id=camera_id, model=model,
176 | width=width, height=height,
177 | params=params)
178 | return cameras
179 |
180 | def read_extrinsics_binary(path_to_model_file):
181 | """
182 | see: src/base/reconstruction.cc
183 | void Reconstruction::ReadImagesBinary(const std::string& path)
184 | void Reconstruction::WriteImagesBinary(const std::string& path)
185 | """
186 | images = {}
187 | with open(path_to_model_file, "rb") as fid:
188 | num_reg_images = read_next_bytes(fid, 8, "Q")[0]
189 | for _ in range(num_reg_images):
190 | binary_image_properties = read_next_bytes(
191 | fid, num_bytes=64, format_char_sequence="idddddddi")
192 | image_id = binary_image_properties[0]
193 | qvec = np.array(binary_image_properties[1:5])
194 | tvec = np.array(binary_image_properties[5:8])
195 | camera_id = binary_image_properties[8]
196 | image_name = ""
197 | current_char = read_next_bytes(fid, 1, "c")[0]
198 | while current_char != b"\x00": # look for the ASCII 0 entry
199 | image_name += current_char.decode("utf-8")
200 | current_char = read_next_bytes(fid, 1, "c")[0]
201 | num_points2D = read_next_bytes(fid, num_bytes=8,
202 | format_char_sequence="Q")[0]
203 | x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
204 | format_char_sequence="ddq"*num_points2D)
205 | xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
206 | tuple(map(float, x_y_id_s[1::3]))])
207 | point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
208 | images[image_id] = Image(
209 | id=image_id, qvec=qvec, tvec=tvec,
210 | camera_id=camera_id, name=image_name,
211 | xys=xys, point3D_ids=point3D_ids)
212 | return images
213 |
214 |
215 | def read_intrinsics_binary(path_to_model_file):
216 | """
217 | see: src/base/reconstruction.cc
218 | void Reconstruction::WriteCamerasBinary(const std::string& path)
219 | void Reconstruction::ReadCamerasBinary(const std::string& path)
220 | """
221 | cameras = {}
222 | with open(path_to_model_file, "rb") as fid:
223 | num_cameras = read_next_bytes(fid, 8, "Q")[0]
224 | for _ in range(num_cameras):
225 | camera_properties = read_next_bytes(
226 | fid, num_bytes=24, format_char_sequence="iiQQ")
227 | camera_id = camera_properties[0]
228 | model_id = camera_properties[1]
229 | model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
230 | width = camera_properties[2]
231 | height = camera_properties[3]
232 | num_params = CAMERA_MODEL_IDS[model_id].num_params
233 | params = read_next_bytes(fid, num_bytes=8*num_params,
234 | format_char_sequence="d"*num_params)
235 | cameras[camera_id] = Camera(id=camera_id,
236 | model=model_name,
237 | width=width,
238 | height=height,
239 | params=np.array(params))
240 | assert len(cameras) == num_cameras
241 | return cameras
242 |
243 |
244 | def read_extrinsics_text(path):
245 | """
246 | Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
247 | """
248 | images = {}
249 | with open(path, "r") as fid:
250 | while True:
251 | line = fid.readline()
252 | if not line:
253 | break
254 | line = line.strip()
255 | if len(line) > 0 and line[0] != "#":
256 | elems = line.split()
257 | image_id = int(elems[0])
258 | qvec = np.array(tuple(map(float, elems[1:5])))
259 | tvec = np.array(tuple(map(float, elems[5:8])))
260 | camera_id = int(elems[8])
261 | image_name = elems[9]
262 | elems = fid.readline().split()
263 | xys = np.column_stack([tuple(map(float, elems[0::3])),
264 | tuple(map(float, elems[1::3]))])
265 | point3D_ids = np.array(tuple(map(int, elems[2::3])))
266 | images[image_id] = Image(
267 | id=image_id, qvec=qvec, tvec=tvec,
268 | camera_id=camera_id, name=image_name,
269 | xys=xys, point3D_ids=point3D_ids)
270 | return images
271 |
272 |
273 | def read_colmap_bin_array(path):
274 | """
275 | Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_dense.py
276 |
277 | :param path: path to the colmap binary file.
278 |     :return: nd array with the floating point values in the file
279 | """
280 | with open(path, "rb") as fid:
281 | width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1,
282 | usecols=(0, 1, 2), dtype=int)
283 | fid.seek(0)
284 | num_delimiter = 0
285 | byte = fid.read(1)
286 | while True:
287 | if byte == b"&":
288 | num_delimiter += 1
289 | if num_delimiter >= 3:
290 | break
291 | byte = fid.read(1)
292 | array = np.fromfile(fid, np.float32)
293 | array = array.reshape((width, height, channels), order="F")
294 | return np.transpose(array, (1, 0, 2)).squeeze()
295 |
--------------------------------------------------------------------------------
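
A short sketch, not part of the repository, showing how these readers are typically combined to recover a camera pose from a COLMAP sparse reconstruction (the model directory is a placeholder):

import numpy as np

from scene.colmap_loader import read_extrinsics_binary, read_intrinsics_binary, qvec2rotmat

model_dir = "data/garden/sparse/0"                       # placeholder path
cameras = read_intrinsics_binary(f"{model_dir}/cameras.bin")
images = read_extrinsics_binary(f"{model_dir}/images.bin")

img = next(iter(images.values()))
R = qvec2rotmat(img.qvec)                                # world-to-camera rotation
t = np.asarray(img.tvec)
center = -R.T @ t                                        # camera position in world coordinates
print(cameras[img.camera_id].model, img.name, center)
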
/scene/dataset_readers.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import os
13 | import sys
14 | from PIL import Image
15 | from typing import NamedTuple
16 | from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
17 | read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
18 | from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
19 | import numpy as np
20 | import json
21 | from pathlib import Path
22 | from plyfile import PlyData, PlyElement
23 | from utils.sh_utils import SH2RGB
24 | from scene.gaussian_model import BasicPointCloud
25 |
26 | class CameraInfo(NamedTuple):
27 | uid: int
28 | R: np.array
29 | T: np.array
30 | FovY: np.array
31 | FovX: np.array
32 | image: np.array
33 | image_path: str
34 | image_name: str
35 | width: int
36 | height: int
37 |
38 | class SceneInfo(NamedTuple):
39 | point_cloud: BasicPointCloud
40 | train_cameras: list
41 | test_cameras: list
42 | nerf_normalization: dict
43 | ply_path: str
44 |
45 | def getNerfppNorm(cam_info):
46 | def get_center_and_diag(cam_centers):
47 | cam_centers = np.hstack(cam_centers)
48 | avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
49 | center = avg_cam_center
50 | dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
51 | diagonal = np.max(dist)
52 | return center.flatten(), diagonal
53 |
54 | cam_centers = []
55 |
56 | for cam in cam_info:
57 | W2C = getWorld2View2(cam.R, cam.T)
58 | C2W = np.linalg.inv(W2C)
59 | cam_centers.append(C2W[:3, 3:4])
60 |
61 | center, diagonal = get_center_and_diag(cam_centers)
62 | radius = diagonal * 1.1
63 |
64 | translate = -center
65 |
66 | return {"translate": translate, "radius": radius}
67 |
68 | def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
69 | cam_infos = []
70 | for idx, key in enumerate(cam_extrinsics):
71 | sys.stdout.write('\r')
72 |         # overwrite the same console line with the current progress
73 | sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
74 | sys.stdout.flush()
75 |
76 | extr = cam_extrinsics[key]
77 | intr = cam_intrinsics[extr.camera_id]
78 | height = intr.height
79 | width = intr.width
80 |
81 | uid = intr.id
82 | R = np.transpose(qvec2rotmat(extr.qvec))
83 | T = np.array(extr.tvec)
84 |
85 | if intr.model=="SIMPLE_PINHOLE":
86 | focal_length_x = intr.params[0]
87 | FovY = focal2fov(focal_length_x, height)
88 | FovX = focal2fov(focal_length_x, width)
89 | elif intr.model=="PINHOLE":
90 | focal_length_x = intr.params[0]
91 | focal_length_y = intr.params[1]
92 | FovY = focal2fov(focal_length_y, height)
93 | FovX = focal2fov(focal_length_x, width)
94 | else:
95 | assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
96 |
97 | image_path = os.path.join(images_folder, os.path.basename(extr.name))
98 | image_name = os.path.basename(image_path).split(".")[0]
99 | image = Image.open(image_path)
100 |
101 | cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
102 | image_path=image_path, image_name=image_name, width=width, height=height)
103 | cam_infos.append(cam_info)
104 | sys.stdout.write('\n')
105 | return cam_infos
106 |
107 | def fetchPly(path):
108 | plydata = PlyData.read(path)
109 | vertices = plydata['vertex']
110 | positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
111 | colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
112 | normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
113 | return BasicPointCloud(points=positions, colors=colors, normals=normals)
114 |
115 | def storePly(path, xyz, rgb):
116 | # Define the dtype for the structured array
117 | dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
118 | ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
119 | ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
120 |
121 | normals = np.zeros_like(xyz)
122 |
123 | elements = np.empty(xyz.shape[0], dtype=dtype)
124 | attributes = np.concatenate((xyz, normals, rgb), axis=1)
125 | elements[:] = list(map(tuple, attributes))
126 |
127 | # Create the PlyData object and write to file
128 | vertex_element = PlyElement.describe(elements, 'vertex')
129 | ply_data = PlyData([vertex_element])
130 | ply_data.write(path)
131 |
132 | def readColmapSceneInfo(path, images, eval, llffhold=8):
133 | try:
134 | cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
135 | cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
136 | cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
137 | cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
138 | except:
139 | cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
140 | cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
141 | cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
142 | cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
143 |
144 |     reading_dir = "images" if images is None else images
145 | cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
146 | cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name)
147 |
148 | if eval:
149 | train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
150 | test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
151 | else:
152 | train_cam_infos = cam_infos
153 | test_cam_infos = []
154 |
155 | nerf_normalization = getNerfppNorm(train_cam_infos)
156 |
157 | ply_path = os.path.join(path, "sparse/0/points3D.ply")
158 | bin_path = os.path.join(path, "sparse/0/points3D.bin")
159 | txt_path = os.path.join(path, "sparse/0/points3D.txt")
160 | if not os.path.exists(ply_path):
161 |         print("Converting points3D.bin to .ply; this will only happen the first time you open the scene.")
162 | try:
163 | xyz, rgb, _ = read_points3D_binary(bin_path)
164 | except:
165 | xyz, rgb, _ = read_points3D_text(txt_path)
166 | storePly(ply_path, xyz, rgb)
167 | try:
168 | pcd = fetchPly(ply_path)
169 | except:
170 | pcd = None
171 |
172 | scene_info = SceneInfo(point_cloud=pcd,
173 | train_cameras=train_cam_infos,
174 | test_cameras=test_cam_infos,
175 | nerf_normalization=nerf_normalization,
176 | ply_path=ply_path)
177 | return scene_info
178 |
179 | def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
180 | cam_infos = []
181 |
182 | with open(os.path.join(path, transformsfile)) as json_file:
183 | contents = json.load(json_file)
184 | fovx = contents["camera_angle_x"]
185 |
186 | frames = contents["frames"]
187 | for idx, frame in enumerate(frames):
188 | cam_name = os.path.join(path, frame["file_path"] + extension)
189 |
190 | # NeRF 'transform_matrix' is a camera-to-world transform
191 | c2w = np.array(frame["transform_matrix"])
192 | # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
193 | c2w[:3, 1:3] *= -1
194 |
195 | # get the world-to-camera transform and set R, T
196 | w2c = np.linalg.inv(c2w)
197 | R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
198 | T = w2c[:3, 3]
199 |
200 | image_path = os.path.join(path, cam_name)
201 | image_name = Path(cam_name).stem
202 | image = Image.open(image_path)
203 |
204 | im_data = np.array(image.convert("RGBA"))
205 |
206 | bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
207 |
208 | norm_data = im_data / 255.0
209 | arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
210 | image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")
211 |
212 | fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1])
213 | FovY = fovy
214 | FovX = fovx
215 |
216 | cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
217 | image_path=image_path, image_name=image_name, width=image.size[0], height=image.size[1]))
218 |
219 | return cam_infos
220 |
221 | def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
222 | print("Reading Training Transforms")
223 | train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension)
224 | print("Reading Test Transforms")
225 | test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension)
226 |
227 | if not eval:
228 | train_cam_infos.extend(test_cam_infos)
229 | test_cam_infos = []
230 |
231 | nerf_normalization = getNerfppNorm(train_cam_infos)
232 |
233 | ply_path = os.path.join(path, "points3d.ply")
234 | if not os.path.exists(ply_path):
235 | # Since this data set has no colmap data, we start with random points
236 | num_pts = 100_000
237 | print(f"Generating random point cloud ({num_pts})...")
238 |
239 | # We create random points inside the bounds of the synthetic Blender scenes
240 | xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
241 | shs = np.random.random((num_pts, 3)) / 255.0
242 | pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
243 |
244 | storePly(ply_path, xyz, SH2RGB(shs) * 255)
245 | try:
246 | pcd = fetchPly(ply_path)
247 | except:
248 | pcd = None
249 |
250 | scene_info = SceneInfo(point_cloud=pcd,
251 | train_cameras=train_cam_infos,
252 | test_cameras=test_cam_infos,
253 | nerf_normalization=nerf_normalization,
254 | ply_path=ply_path)
255 | return scene_info
256 |
257 | sceneLoadTypeCallbacks = {
258 | "Colmap": readColmapSceneInfo,
259 | "Blender" : readNerfSyntheticInfo
260 | }
--------------------------------------------------------------------------------
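
The two loaders are only reached through the sceneLoadTypeCallbacks table; Scene.__init__ decides between them by probing for a sparse/ folder or a transforms_train.json. A minimal sketch, not part of the repository, mirroring that dispatch (paths are placeholders):

import os

from scene.dataset_readers import sceneLoadTypeCallbacks

source_path = "data/lego"                                # placeholder path
if os.path.exists(os.path.join(source_path, "sparse")):
    info = sceneLoadTypeCallbacks["Colmap"](source_path, None, True)    # default "images" dir, eval split on
else:
    info = sceneLoadTypeCallbacks["Blender"](source_path, True, True)   # white background, eval split on

print(len(info.train_cameras), "train /", len(info.test_cameras), "test cameras,",
      "radius", info.nerf_normalization["radius"])
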
/scene/gaussian_model.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | import numpy as np
14 | from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation
15 | from torch import nn
16 | import os
17 | from utils.system_utils import mkdir_p
18 | from plyfile import PlyData, PlyElement
19 | from utils.sh_utils import RGB2SH
20 | from simple_knn._C import distCUDA2
21 | from utils.graphics_utils import BasicPointCloud
22 | from utils.general_utils import strip_symmetric, build_scaling_rotation
23 | from utils.general_utils import splitBy3, mortonEncode
24 |
25 | from vector_quantize_pytorch import VectorQuantize, ResidualVQ
26 | import tinycudann as tcnn
27 |
28 | from dahuffman import HuffmanCodec
29 | from dahuffman.huffmancodec import PrefixCodec
30 | import math
31 | from einops import reduce
32 |
33 | class GaussianModel:
34 |
35 | def setup_functions(self):
36 | def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
37 | L = build_scaling_rotation(scaling_modifier * scaling, rotation)
38 | actual_covariance = L @ L.transpose(1, 2)
39 | symm = strip_symmetric(actual_covariance)
40 | return symm
41 |
42 | self.scaling_activation = torch.exp
43 | self.scaling_inverse_activation = torch.log
44 |
45 | self.covariance_activation = build_covariance_from_scaling_rotation
46 |
47 | self.opacity_activation = torch.sigmoid
48 | self.inverse_opacity_activation = inverse_sigmoid
49 |
50 | self.rotation_activation = torch.nn.functional.normalize
51 |
52 |
53 | def __init__(self, model):
54 | self.active_sh_degree = 0
55 | self.max_sh_degree = 0
56 | self._xyz = torch.empty(0)
57 | self._scaling = torch.empty(0)
58 | self._rotation = torch.empty(0)
59 | self._opacity = torch.empty(0)
60 | self._mask = torch.empty(0)
61 | self.max_radii2D = torch.empty(0)
62 | self.xyz_gradient_accum = torch.empty(0)
63 | self.denom = torch.empty(0)
64 | self.optimizer = None
65 | self.percent_dense = 0
66 | self.spatial_lr_scale = 0
67 | self.setup_functions()
68 |
69 | self.vq_scale = ResidualVQ(dim = 3, codebook_size = model.rvq_size, num_quantizers = model.rvq_num, commitment_weight = 0., kmeans_init = True, kmeans_iters = 1, ema_update = False, learnable_codebook=True, in_place_codebook_optimizer=lambda *args, **kwargs: torch.optim.Adam(*args, **kwargs, lr=0.0001)).cuda()
70 | self.vq_rot = ResidualVQ(dim = 4, codebook_size = model.rvq_size, num_quantizers = model.rvq_num, commitment_weight = 0., kmeans_init = True, kmeans_iters = 1, ema_update = False, learnable_codebook=True, in_place_codebook_optimizer=lambda *args, **kwargs: torch.optim.Adam(*args, **kwargs, lr=0.0001)).cuda()
71 | self.rvq_bit = math.log2(model.rvq_size)
72 | self.rvq_num = model.rvq_num
73 | self.recolor = tcnn.Encoding(
74 | n_input_dims=3,
75 | encoding_config={
76 | "otype": "HashGrid",
77 | "n_levels": 16,
78 | "n_features_per_level": 2,
79 | "log2_hashmap_size": model.max_hashmap,
80 | "base_resolution": 16,
81 | "per_level_scale": 1.447,
82 | },
83 | )
84 | self.direction_encoding = tcnn.Encoding(
85 | n_input_dims=3,
86 | encoding_config={
87 | "otype": "SphericalHarmonics",
88 | "degree": 3
89 | },
90 | )
91 | self.mlp_head = tcnn.Network(
92 | n_input_dims=(self.direction_encoding.n_output_dims+self.recolor.n_output_dims),
93 | n_output_dims=3,
94 | network_config={
95 | "otype": "FullyFusedMLP",
96 | "activation": "ReLU",
97 | "output_activation": "None",
98 | "n_neurons": 64,
99 | "n_hidden_layers": 2,
100 | },
101 | )
102 |
103 | def capture(self):
104 | return (
105 | self.active_sh_degree,
106 | self._xyz,
107 | self._scaling,
108 | self._rotation,
109 | self._opacity,
110 | self.max_radii2D,
111 | self.xyz_gradient_accum,
112 | self.denom,
113 | self.optimizer.state_dict(),
114 | self.spatial_lr_scale,
115 | )
116 |
117 | def restore(self, model_args, training_args):
118 | (self.active_sh_degree,
119 | self._xyz,
120 | self._scaling,
121 | self._rotation,
122 | self._opacity,
123 | self.max_radii2D,
124 | xyz_gradient_accum,
125 | denom,
126 | opt_dict,
127 | self.spatial_lr_scale) = model_args
128 | self.training_setup(training_args)
129 | self.xyz_gradient_accum = xyz_gradient_accum
130 | self.denom = denom
131 | self.optimizer.load_state_dict(opt_dict)
132 |
133 | @property
134 | def get_scaling(self):
135 | return self.scaling_activation(self._scaling)
136 |
137 | @property
138 | def get_rotation(self):
139 | return self.rotation_activation(self._rotation)
140 |
141 | @property
142 | def get_xyz(self):
143 | return self._xyz
144 |
145 | @property
146 | def get_opacity(self):
147 | return self.opacity_activation(self._opacity)
148 |
149 | def get_covariance(self, scaling_modifier = 1):
150 | return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
151 |
152 | def oneupSHdegree(self):
153 | if self.active_sh_degree < self.max_sh_degree:
154 | self.active_sh_degree += 1
155 |
156 | def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):
157 | self.spatial_lr_scale = spatial_lr_scale
158 | fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda()
159 | fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors)).float().cuda())
160 | features = torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2)).float().cuda()
161 | features[:, :3, 0 ] = fused_color
162 | features[:, 3:, 1:] = 0.0
163 |
164 | print("Number of points at initialisation : ", fused_point_cloud.shape[0])
165 |
166 | dist2 = torch.clamp_min(distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()), 0.0000001)
167 | scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3)
168 | rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda")
169 | rots[:, 0] = 1
170 |
171 | opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda"))
172 |
173 | self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True))
174 | self._scaling = nn.Parameter(scales.requires_grad_(True))
175 | self._rotation = nn.Parameter(rots.requires_grad_(True))
176 | self._opacity = nn.Parameter(opacities.requires_grad_(True))
177 | self._mask = nn.Parameter(torch.ones((fused_point_cloud.shape[0], 1), device="cuda").requires_grad_(True))
178 | self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
179 |
180 | def training_setup(self, training_args):
181 | self.percent_dense = training_args.percent_dense
182 | self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
183 | self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
184 |
185 | other_params = []
186 | for params in self.recolor.parameters():
187 | other_params.append(params)
188 | for params in self.mlp_head.parameters():
189 | other_params.append(params)
190 |
191 | l = [
192 | {'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
193 | {'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"},
194 | {'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"},
195 | {'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"},
196 | {'params': [self._mask], 'lr': training_args.mask_lr, "name": "mask"}
197 | ]
198 |
199 | self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)
200 | self.optimizer_net = torch.optim.Adam(other_params, lr=training_args.net_lr, eps=1e-15)
201 | self.scheduler_net = torch.optim.lr_scheduler.ChainedScheduler(
202 | [
203 | torch.optim.lr_scheduler.LinearLR(
204 | self.optimizer_net, start_factor=0.01, total_iters=100
205 | ),
206 | torch.optim.lr_scheduler.MultiStepLR(
207 | self.optimizer_net,
208 | milestones=training_args.net_lr_step,
209 | gamma=0.33,
210 | ),
211 | ]
212 | )
213 | self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
214 | lr_final=training_args.position_lr_final*self.spatial_lr_scale,
215 | lr_delay_mult=training_args.position_lr_delay_mult,
216 | max_steps=training_args.position_lr_max_steps)
217 |
218 | def update_learning_rate(self, iteration):
219 | ''' Learning rate scheduling per step '''
220 | for param_group in self.optimizer.param_groups:
221 | if param_group["name"] == "xyz":
222 | lr = self.xyz_scheduler_args(iteration)
223 | param_group['lr'] = lr
224 | return lr
225 |
226 | def construct_list_of_attributes(self):
227 | l = ['x', 'y', 'z', 'nx', 'ny', 'nz']
228 | l.append('opacity')
229 | for i in range(self._scaling.shape[1]):
230 | l.append('scale_{}'.format(i))
231 | for i in range(self._rotation.shape[1]):
232 | l.append('rot_{}'.format(i))
233 | return l
234 |
235 | def save_ply(self, path):
236 | mkdir_p(os.path.dirname(path))
237 |
238 | xyz = self._xyz.detach().cpu().numpy()
239 | normals = np.zeros_like(xyz)
240 | opacities = self._opacity.detach().cpu().numpy()
241 | scale = self._scaling.detach().cpu().numpy()
242 | rotation = self._rotation.detach().cpu().numpy()
243 |
244 | dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()]
245 |
246 | elements = np.empty(xyz.shape[0], dtype=dtype_full)
247 | attributes = np.concatenate((xyz, normals, opacities, scale, rotation), axis=1)
248 | elements[:] = list(map(tuple, attributes))
249 | el = PlyElement.describe(elements, 'vertex')
250 | PlyData([el]).write(path)
251 |
252 | def save_npz(self, path):
253 | mkdir_p(os.path.dirname(path))
254 |
255 | save_dict = dict()
256 |
257 | save_dict["xyz"] = self._xyz.detach().cpu().half().numpy()
258 | save_dict["opacity"] = self._opacity.detach().cpu().half().numpy()
259 | save_dict["scale"] = np.packbits(np.unpackbits(self.sca_idx.unsqueeze(-1).cpu().numpy().astype(np.uint8), axis=-1, count=int(self.rvq_bit), bitorder='little').flatten(), axis=None)
260 | save_dict["rotation"] = np.packbits(np.unpackbits(self.rot_idx.unsqueeze(-1).cpu().numpy().astype(np.uint8), axis=-1, count=int(self.rvq_bit), bitorder='little').flatten(), axis=None)
261 | save_dict["hash"] = self.recolor.params.cpu().half().numpy()
262 | save_dict["mlp"] = self.mlp_head.params.cpu().half().numpy()
263 | save_dict["codebook_scale"] = self.vq_scale.cpu().state_dict()
264 | save_dict["codebook_rotation"] = self.vq_rot.cpu().state_dict()
265 | save_dict["rvq_info"] = np.array([int(self.rvq_num), int(self.rvq_bit)])
266 |
267 | np.savez(path, **save_dict)
268 |
269 | def save_npz_pp(self, path):
270 | mkdir_p(os.path.dirname(path))
271 |
272 | save_dict = dict()
273 |
274 | save_dict["xyz"] = self._xyz.detach().cpu().half().numpy()
275 | save_dict["opacity"] = np.frombuffer(self.huf_opa, dtype=np.uint8)
276 | save_dict["scale"] = np.frombuffer(self.huf_sca, dtype=np.uint8)
277 | save_dict["rotation"] = np.frombuffer(self.huf_rot, dtype=np.uint8)
278 | save_dict["hash"] = np.frombuffer(self.huf_hash, dtype=np.uint8)
279 | save_dict["mlp"] = self.mlp_head.params.cpu().half().numpy()
280 | save_dict["huftable_opacity"] = self.tab_opa
281 | save_dict["huftable_scale"] = self.tab_sca
282 | save_dict["huftable_rotation"] = self.tab_rot
283 | save_dict["huftable_hash"] = self.tab_hash
284 | save_dict["codebook_scale"] = self.vq_scale.cpu().state_dict()
285 | save_dict["codebook_rotation"] = self.vq_rot.cpu().state_dict()
286 | save_dict["minmax_opacity"] = self.minmax_opa.numpy()
287 | save_dict["minmax_hash"] = self.minmax_hash.numpy()
288 | save_dict["rvq_info"] = np.array([int(self.rvq_num), int(self.rvq_bit)])
289 |
290 | np.savez_compressed(path+"_pp", **save_dict)
291 |
292 | def reset_opacity(self):
293 | opacities_new = inverse_sigmoid(torch.min(self.get_opacity, torch.ones_like(self.get_opacity)*0.01))
294 | optimizable_tensors = self.replace_tensor_to_optimizer(opacities_new, "opacity")
295 | self._opacity = optimizable_tensors["opacity"]
296 |
297 | def load_model(self, path):
298 | if os.path.isfile(path + '_pp.npz'):
299 | path = path + '_pp.npz'
300 | print("Loading ", path)
301 | load_dict = np.load(path, allow_pickle=True)
302 |
303 | codec = PrefixCodec(load_dict["huftable_opacity"].item())
304 | opacity = torch.tensor(codec.decode(load_dict["opacity"]))
305 |
306 | codec = PrefixCodec(load_dict["huftable_scale"].item())
307 | scale = codec.decode(load_dict["scale"])
308 |
309 | codec = PrefixCodec(load_dict["huftable_rotation"].item())
310 | rotation = codec.decode(load_dict["rotation"])
311 |
312 | codec = PrefixCodec(load_dict["huftable_hash"].item())
313 | hashgrid = torch.tensor(codec.decode(load_dict["hash"]))
314 |
315 | opacity = (float(load_dict["minmax_opacity"][1]) - float(load_dict["minmax_opacity"][0]))*opacity/255.0 + float(load_dict["minmax_opacity"][0])
316 | hashgrid = (float(load_dict["minmax_hash"][1]) - float(load_dict["minmax_hash"][0]))*hashgrid/255.0 + float(load_dict["minmax_hash"][0])
317 |
318 | self.vq_scale.load_state_dict(load_dict["codebook_scale"].item())
319 | self.vq_rot.load_state_dict(load_dict["codebook_rotation"].item())
320 | scale_codes = self.vq_scale.get_codes_from_indices(torch.tensor(scale).cuda().reshape(-1,1,load_dict["rvq_info"][0]))
321 | scale = self.vq_scale.project_out(reduce(scale_codes, 'q ... -> ...', 'sum'))
322 | rotation_codes = self.vq_rot.get_codes_from_indices(torch.tensor(rotation).cuda().reshape(-1,1,load_dict["rvq_info"][0]))
323 | rotation = self.vq_rot.project_out(reduce(rotation_codes, 'q ... -> ...', 'sum'))
324 |
325 | self._xyz = nn.Parameter(torch.from_numpy(load_dict["xyz"]).cuda().float().requires_grad_(True))
326 | self._opacity = nn.Parameter(opacity.cuda().reshape(-1,1).float().requires_grad_(True))
327 | self._scaling = nn.Parameter(scale.squeeze(1).requires_grad_(True))
328 | self._rotation = nn.Parameter(rotation.squeeze(1).requires_grad_(True))
329 | self.recolor.params = nn.Parameter(hashgrid.cuda().half().requires_grad_(True))
330 | self.mlp_head.params = nn.Parameter(torch.from_numpy(load_dict["mlp"]).cuda().half().requires_grad_(True))
331 | elif os.path.isfile(path + '.npz'):
332 | path = path + '.npz'
333 | print("Loading ", path)
334 | load_dict = np.load(path, allow_pickle=True)
335 |
336 | scale = np.packbits(np.unpackbits(load_dict["scale"], axis=None)[:load_dict["xyz"].shape[0]*load_dict["rvq_info"][0]*load_dict["rvq_info"][1]].reshape(-1, load_dict["rvq_info"][1]), axis=-1, bitorder='little')
337 | rotation = np.packbits(np.unpackbits(load_dict["rotation"], axis=None)[:load_dict["xyz"].shape[0]*load_dict["rvq_info"][0]*load_dict["rvq_info"][1]].reshape(-1, load_dict["rvq_info"][1]), axis=-1, bitorder='little')
338 |
339 | self.vq_scale.load_state_dict(load_dict["codebook_scale"].item())
340 | self.vq_rot.load_state_dict(load_dict["codebook_rotation"].item())
341 | scale_codes = self.vq_scale.get_codes_from_indices(torch.from_numpy(scale).cuda().reshape(-1,1,load_dict["rvq_info"][0]).long())
342 | scale = self.vq_scale.project_out(reduce(scale_codes, 'q ... -> ...', 'sum'))
343 | rotation_codes = self.vq_rot.get_codes_from_indices(torch.from_numpy(rotation).cuda().reshape(-1,1,load_dict["rvq_info"][0]).long())
344 | rotation = self.vq_rot.project_out(reduce(rotation_codes, 'q ... -> ...', 'sum'))
345 |
346 | self._xyz = nn.Parameter(torch.from_numpy(load_dict["xyz"]).cuda().float().requires_grad_(True))
347 | self._opacity = nn.Parameter(torch.from_numpy(load_dict["opacity"]).reshape(-1,1).cuda().float().requires_grad_(True))
348 | self._scaling = nn.Parameter(scale.squeeze(1).requires_grad_(True))
349 | self._rotation = nn.Parameter(rotation.squeeze(1).requires_grad_(True))
350 | self.recolor.params = nn.Parameter(torch.from_numpy(load_dict["hash"]).cuda().half().requires_grad_(True))
351 | self.mlp_head.params = nn.Parameter(torch.from_numpy(load_dict["mlp"]).cuda().half().requires_grad_(True))
352 | else:
353 | self.load_ply(path)
354 |
355 | def load_ply(self, path):
356 | print("Loading ", path+".ply")
357 | plydata = PlyData.read(path+".ply")
358 |
359 | xyz = np.stack((np.asarray(plydata.elements[0]["x"]),
360 | np.asarray(plydata.elements[0]["y"]),
361 | np.asarray(plydata.elements[0]["z"])), axis=1)
362 | opacities = np.asarray(plydata.elements[0]["opacity"])[..., np.newaxis]
363 |
364 | scale_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("scale_")]
365 | scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1]))
366 | scales = np.zeros((xyz.shape[0], len(scale_names)))
367 | for idx, attr_name in enumerate(scale_names):
368 | scales[:, idx] = np.asarray(plydata.elements[0][attr_name])
369 |
370 | rot_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("rot")]
371 | rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1]))
372 | rots = np.zeros((xyz.shape[0], len(rot_names)))
373 | for idx, attr_name in enumerate(rot_names):
374 | rots[:, idx] = np.asarray(plydata.elements[0][attr_name])
375 |
376 | self._xyz = nn.Parameter(torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True))
377 | self._opacity = nn.Parameter(torch.tensor(opacities, dtype=torch.float, device="cuda").requires_grad_(True))
378 | self._scaling = nn.Parameter(torch.tensor(scales, dtype=torch.float, device="cuda").requires_grad_(True))
379 | self._rotation = nn.Parameter(torch.tensor(rots, dtype=torch.float, device="cuda").requires_grad_(True))
380 |
381 | self.active_sh_degree = self.max_sh_degree
382 |
383 | torch.nn.ModuleList([self.recolor, self.mlp_head]).load_state_dict(torch.load(path +".pth"))
384 |
385 | def replace_tensor_to_optimizer(self, tensor, name):
386 | optimizable_tensors = {}
387 | for group in self.optimizer.param_groups:
388 | if group["name"] == name:
389 | stored_state = self.optimizer.state.get(group['params'][0], None)
390 | stored_state["exp_avg"] = torch.zeros_like(tensor)
391 | stored_state["exp_avg_sq"] = torch.zeros_like(tensor)
392 |
393 | del self.optimizer.state[group['params'][0]]
394 | group["params"][0] = nn.Parameter(tensor.requires_grad_(True))
395 | self.optimizer.state[group['params'][0]] = stored_state
396 |
397 | optimizable_tensors[group["name"]] = group["params"][0]
398 | return optimizable_tensors
399 |
400 | def _prune_optimizer(self, mask):
401 | optimizable_tensors = {}
402 | for group in self.optimizer.param_groups:
403 | stored_state = self.optimizer.state.get(group['params'][0], None)
404 | if stored_state is not None:
405 | stored_state["exp_avg"] = stored_state["exp_avg"][mask]
406 | stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask]
407 |
408 | del self.optimizer.state[group['params'][0]]
409 | group["params"][0] = nn.Parameter((group["params"][0][mask].requires_grad_(True)))
410 | self.optimizer.state[group['params'][0]] = stored_state
411 |
412 | optimizable_tensors[group["name"]] = group["params"][0]
413 | else:
414 | group["params"][0] = nn.Parameter(group["params"][0][mask].requires_grad_(True))
415 | optimizable_tensors[group["name"]] = group["params"][0]
416 | return optimizable_tensors
417 |
418 | def prune_points(self, mask):
419 | valid_points_mask = ~mask
420 | optimizable_tensors = self._prune_optimizer(valid_points_mask)
421 |
422 | self._xyz = optimizable_tensors["xyz"]
423 | self._opacity = optimizable_tensors["opacity"]
424 | self._scaling = optimizable_tensors["scaling"]
425 | self._rotation = optimizable_tensors["rotation"]
426 | self._mask = optimizable_tensors["mask"]
427 |
428 | self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]
429 |
430 | self.denom = self.denom[valid_points_mask]
431 | self.max_radii2D = self.max_radii2D[valid_points_mask]
432 |
433 | def cat_tensors_to_optimizer(self, tensors_dict):
434 | optimizable_tensors = {}
435 | for group in self.optimizer.param_groups:
436 | assert len(group["params"]) == 1
437 | extension_tensor = tensors_dict[group["name"]]
438 | stored_state = self.optimizer.state.get(group['params'][0], None)
439 | if stored_state is not None:
440 |
441 | stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0)
442 | stored_state["exp_avg_sq"] = torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0)
443 |
444 | del self.optimizer.state[group['params'][0]]
445 | group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
446 | self.optimizer.state[group['params'][0]] = stored_state
447 |
448 | optimizable_tensors[group["name"]] = group["params"][0]
449 | else:
450 | group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
451 | optimizable_tensors[group["name"]] = group["params"][0]
452 |
453 | return optimizable_tensors
454 |
455 | def densification_postfix(self, new_xyz, new_opacities, new_scaling, new_rotation, new_mask):
456 | d = {"xyz": new_xyz,
457 | "opacity": new_opacities,
458 | "scaling" : new_scaling,
459 | "rotation" : new_rotation,
460 | "mask": new_mask}
461 |
462 | optimizable_tensors = self.cat_tensors_to_optimizer(d)
463 | self._xyz = optimizable_tensors["xyz"]
464 | self._opacity = optimizable_tensors["opacity"]
465 | self._scaling = optimizable_tensors["scaling"]
466 | self._rotation = optimizable_tensors["rotation"]
467 | self._mask = optimizable_tensors["mask"]
468 |
469 | self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
470 | self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
471 | self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
472 |
473 | def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
474 | n_init_points = self.get_xyz.shape[0]
475 | # Extract points that satisfy the gradient condition
476 | padded_grad = torch.zeros((n_init_points), device="cuda")
477 | padded_grad[:grads.shape[0]] = grads.squeeze()
478 | selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)
479 | selected_pts_mask = torch.logical_and(selected_pts_mask,
480 | torch.max(self.get_scaling, dim=1).values > self.percent_dense*scene_extent)
481 |
482 | stds = self.get_scaling[selected_pts_mask].repeat(N,1)
483 |         means = torch.zeros((stds.size(0), 3), device="cuda")
484 | samples = torch.normal(mean=means, std=stds)
485 | rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N,1,1)
486 | new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1)
487 | new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N,1) / (0.8*N))
488 | new_rotation = self._rotation[selected_pts_mask].repeat(N,1)
489 | new_opacity = self._opacity[selected_pts_mask].repeat(N,1)
490 | new_mask = self._mask[selected_pts_mask].repeat(N,1)
491 |
492 | self.densification_postfix(new_xyz, new_opacity, new_scaling, new_rotation, new_mask)
493 |
494 | prune_filter = torch.cat((selected_pts_mask, torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool)))
495 | self.prune_points(prune_filter)
496 |
497 | def densify_and_clone(self, grads, grad_threshold, scene_extent):
498 | # Extract points that satisfy the gradient condition
499 | selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= grad_threshold, True, False)
500 | selected_pts_mask = torch.logical_and(selected_pts_mask,
501 | torch.max(self.get_scaling, dim=1).values <= self.percent_dense*scene_extent)
502 |
503 | new_xyz = self._xyz[selected_pts_mask]
504 | new_opacities = self._opacity[selected_pts_mask]
505 | new_scaling = self._scaling[selected_pts_mask]
506 | new_rotation = self._rotation[selected_pts_mask]
507 | new_mask = self._mask[selected_pts_mask]
508 |
509 | self.densification_postfix(new_xyz, new_opacities, new_scaling, new_rotation, new_mask)
510 |
511 | def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
512 | grads = self.xyz_gradient_accum / self.denom
513 | grads[grads.isnan()] = 0.0
514 |
515 | self.densify_and_clone(grads, max_grad, extent)
516 | self.densify_and_split(grads, max_grad, extent)
517 |
518 | prune_mask = torch.logical_or((torch.sigmoid(self._mask) <= 0.01).squeeze(),(self.get_opacity < min_opacity).squeeze())
519 | if max_screen_size:
520 | big_points_vs = self.max_radii2D > max_screen_size
521 | big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * extent
522 | prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws)
523 | self.prune_points(prune_mask)
524 | torch.cuda.empty_cache()
525 |
526 | def mask_prune(self):
527 | prune_mask = (torch.sigmoid(self._mask) <= 0.01).squeeze()
528 | self.prune_points(prune_mask)
529 | torch.cuda.empty_cache()
530 |
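# post_quant (below) performs per-tensor min-max quantization to 8 bits and returns
# the dequantized tensor, the integer codes, and the (min, max) pair needed to invert
# the mapping. Illustrative sketch, assuming a GaussianModel instance `gm` (hypothetical name):
#   >>> x = torch.tensor([0.0, 0.25, 1.0])
#   >>> out, quant, minmax = gm.post_quant(x)
#   >>> quant    # tensor([  0.,  64., 255.])
#   >>> minmax   # tensor([0., 1.])
# With prune=True, entries with magnitude <= 0.1 are zeroed before quantization.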
531 | def post_quant(self, param, prune=False):
532 | max_val = torch.amax(param)
533 | min_val = torch.amin(param)
534 | if prune:
535 | param = param*(torch.abs(param) > 0.1)
536 | param = (param - min_val)/(max_val - min_val)
537 | quant = torch.round(param * 255.0)
538 | out = (max_val - min_val)*quant/255.0 + min_val
539 | return torch.nn.Parameter(out), quant, torch.tensor([min_val, max_val])
540 |
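# huffman_encode (below) builds a Huffman code over the symbol histogram and reports
# the expected storage as sum(freq * codeword_length) bits, converted to MB via
# /8/10**6; the encoded byte stream and the code table are returned so the indices
# can be decoded later.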
541 | def huffman_encode(self, param):
542 | input_code_list = param.view(-1).tolist()
543 | unique, counts = np.unique(input_code_list, return_counts=True)
544 | num_freq = dict(zip(unique, counts))
545 |
546 | codec = HuffmanCodec.from_data(input_code_list)
547 |
548 | sym_bit_dict = {}
549 | for k, v in codec.get_code_table().items():
550 | sym_bit_dict[k] = v[0]
551 | total_bits = 0
552 | for num, freq in num_freq.items():
553 | total_bits += freq * sym_bit_dict[num]
554 | total_mb = total_bits/8/10**6
555 |
556 | return total_mb, codec.encode(input_code_list), codec.get_code_table()
557 |
558 | def final_prune(self, compress=False):
559 | prune_mask = (torch.sigmoid(self._mask) <= 0.01).squeeze()
560 | self.prune_points(prune_mask)
561 | if compress:
562 | self.sort_morton()
563 |
564 | for m in self.vq_scale.layers:
565 | m.training = False
566 | for m in self.vq_rot.layers:
567 | m.training = False
568 |
569 | self._xyz = self._xyz.clone().half().float()
570 | self._scaling, self.sca_idx, _ = self.vq_scale(self.get_scaling.unsqueeze(1))
571 | self._rotation, self.rot_idx, _ = self.vq_rot(self.get_rotation.unsqueeze(1))
572 | self._scaling = self._scaling.squeeze()
573 | self._rotation = self._rotation.squeeze()
574 |
575 | position_mb = self._xyz.shape[0]*3*16/8/10**6
576 | scale_mb = self._xyz.shape[0]*self.rvq_bit*self.rvq_num/8/10**6 + 2**self.rvq_bit*self.rvq_num*3*32/8/10**6
577 | rotation_mb = self._xyz.shape[0]*self.rvq_bit*self.rvq_num/8/10**6 + 2**self.rvq_bit*self.rvq_num*4*32/8/10**6
578 | opacity_mb = self._xyz.shape[0]*16/8/10**6
579 | hash_mb = self.recolor.params.shape[0]*16/8/10**6
580 | mlp_mb = self.mlp_head.params.shape[0]*16/8/10**6
581 | sum_mb = position_mb+scale_mb+rotation_mb+opacity_mb+hash_mb+mlp_mb
582 |
583 | mb_str = "Storage\nposition: "+str(position_mb)+"\nscale: "+str(scale_mb)+"\nrotation: "+str(rotation_mb)+"\nopacity: "+str(opacity_mb)+"\nhash: "+str(hash_mb)+"\nmlp: "+str(mlp_mb)+"\ntotal: "+str(sum_mb)+" MB"
584 |
585 | if compress:
586 | self._opacity, self.quant_opa, self.minmax_opa = self.post_quant(self.get_opacity)
587 | self.recolor.params, self.quant_hash, self.minmax_hash = self.post_quant(self.recolor.params, True)
588 |
589 | scale_mb, self.huf_sca, self.tab_sca = self.huffman_encode(self.sca_idx)
590 | scale_mb += 2**self.rvq_bit*self.rvq_num*3*32/8/10**6
591 | rotation_mb, self.huf_rot, self.tab_rot = self.huffman_encode(self.rot_idx)
592 | rotation_mb += 2**self.rvq_bit*self.rvq_num*4*32/8/10**6
593 | opacity_mb, self.huf_opa, self.tab_opa = self.huffman_encode(self.quant_opa)
594 | hash_mb, self.huf_hash, self.tab_hash = self.huffman_encode(self.quant_hash)
595 | mlp_mb = self.mlp_head.params.shape[0]*16/8/10**6
596 | sum_mb = position_mb+scale_mb+rotation_mb+opacity_mb+hash_mb+mlp_mb
597 |
598 | mb_str = mb_str + f"\n\nAfter PP\nposition: {position_mb}\nscale: {scale_mb}\nrotation: {rotation_mb}\nopacity: {opacity_mb}\nhash: {hash_mb}\nmlp: {mlp_mb}\ntotal: {sum_mb} MB"
599 | else:
600 | self._opacity = self.get_opacity.clone().half().float()
601 | torch.cuda.empty_cache()
602 | return mb_str
603 |
604 | def precompute(self):
605 | xyz = self.contract_to_unisphere(self.get_xyz.half(), torch.tensor([-1.0, -1.0, -1.0, 1.0, 1.0, 1.0], device='cuda'))
606 | self._feature = self.recolor(xyz)
607 | torch.cuda.empty_cache()
608 |
609 | def add_densification_stats(self, viewspace_point_tensor, update_filter):
610 | self.xyz_gradient_accum[update_filter] += torch.norm(viewspace_point_tensor.grad[update_filter,:2], dim=-1, keepdim=True)
611 | self.denom[update_filter] += 1
612 |
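# contract_to_unisphere (below) applies the scene contraction used for the hash-grid
# query: coordinates are normalised to the aabb, points inside the unit ball are kept
# as-is, and a point at distance m > 1 is pulled to radius (2 - 1/m). Worked example
# (illustrative): a point with mag = 4 lands at radius 2 - 1/4 = 1.75 < 2, and the
# final x / 4 + 0.5 maps the contracted [-2, 2] range into [0, 1] for precompute().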
613 | def contract_to_unisphere(self,
614 | x: torch.Tensor,
615 | aabb: torch.Tensor,
616 | ord: int = 2,
617 | eps: float = 1e-6,
618 | derivative: bool = False,
619 | ):
620 | aabb_min, aabb_max = torch.split(aabb, 3, dim=-1)
621 | x = (x - aabb_min) / (aabb_max - aabb_min)
622 | x = x * 2 - 1  # map the aabb to [-1, 1]
623 | mag = torch.linalg.norm(x, ord=ord, dim=-1, keepdim=True)
624 | mask = mag.squeeze(-1) > 1
625 |
626 | if derivative:
627 | dev = (2 * mag - 1) / mag**2 + 2 * x**2 * (
628 | 1 / mag**3 - (2 * mag - 1) / mag**4
629 | )
630 | dev[~mask] = 1.0
631 | dev = torch.clamp(dev, min=eps)
632 | return dev
633 | else:
634 | x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
635 | x = x / 4 + 0.5  # map the contracted (-inf, inf) range into [0, 1]
636 | return x
637 |
638 | def sort_morton(self):
639 | with torch.no_grad():
640 | xyz_q = (
641 | (2**21 - 1)
642 | * (self._xyz - self._xyz.min(0).values)
643 | / (self._xyz.max(0).values - self._xyz.min(0).values)
644 | ).long()
645 | order = mortonEncode(xyz_q).sort().indices
646 |
647 | self._xyz = nn.Parameter(self._xyz[order], requires_grad=True)
648 | self._opacity = nn.Parameter(self._opacity[order], requires_grad=True)
649 | self._scaling = nn.Parameter(self._scaling[order], requires_grad=True)
650 | self._rotation = nn.Parameter(self._rotation[order], requires_grad=True)
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import os
13 | import torch
14 | from random import randint
15 | from utils.loss_utils import l1_loss, ssim
16 | from gaussian_renderer import render, network_gui
17 | import sys
18 | from scene import Scene, GaussianModel
19 | from utils.general_utils import safe_state
20 | import uuid
21 | from tqdm import tqdm
22 | from utils.image_utils import psnr
23 | from argparse import ArgumentParser, Namespace
24 | from arguments import ModelParams, PipelineParams, OptimizationParams
25 | try:
26 | from torch.utils.tensorboard import SummaryWriter
27 | TENSORBOARD_FOUND = True
28 | except ImportError:
29 | TENSORBOARD_FOUND = False
30 |
31 | def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from, comp, store_npz):
32 | first_iter = 0
33 | tb_writer = prepare_output_and_logger(dataset)
34 | gaussians = GaussianModel(dataset)
35 | scene = Scene(dataset, gaussians)
36 | gaussians.training_setup(opt)
37 | if checkpoint:
38 | (model_params, first_iter) = torch.load(checkpoint)
39 | gaussians.restore(model_params, opt)
40 |
41 | bg_color = [1, 1, 1] if dataset.white_background else [0, 0, 0]
42 | background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
43 |
44 | iter_start = torch.cuda.Event(enable_timing = True)
45 | iter_end = torch.cuda.Event(enable_timing = True)
46 |
47 | viewpoint_stack = None
48 | ema_loss_for_log = 0.0
49 | progress_bar = tqdm(range(first_iter, opt.iterations), desc="Training progress")
50 | first_iter += 1
51 | for iteration in range(first_iter, opt.iterations + 1):
52 | if network_gui.conn is None:
53 | network_gui.try_connect()
54 | while network_gui.conn is not None:
55 | try:
56 | net_image_bytes = None
57 | custom_cam, do_training, pipe.convert_SHs_python, pipe.compute_cov3D_python, keep_alive, scaling_modifer = network_gui.receive()
58 | if custom_cam is not None:
59 | net_image = render(custom_cam, gaussians, pipe, background, scaling_modifer)["render"]
60 | net_image_bytes = memoryview((torch.clamp(net_image, min=0, max=1.0) * 255).byte().permute(1, 2, 0).contiguous().cpu().numpy())
61 | network_gui.send(net_image_bytes, dataset.source_path)
62 | if do_training and ((iteration < int(opt.iterations)) or not keep_alive):
63 | break
64 | except Exception as e:
65 | network_gui.conn = None
66 |
67 | iter_start.record()
68 |
69 | gaussians.update_learning_rate(iteration)
70 |
71 | # Every 1000 iterations we increase the SH degree up to the configured maximum
72 | if iteration % 1000 == 0:
73 | gaussians.oneupSHdegree()
74 |
75 | # Pick a random Camera
76 | if not viewpoint_stack:
77 | viewpoint_stack = scene.getTrainCameras().copy()
78 | viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack)-1))
79 |
80 | # Render
81 | if (iteration - 1) == debug_from:
82 | pipe.debug = True
83 | if iteration <= opt.rvq_iter:
84 | render_pkg = render(viewpoint_cam, gaussians, pipe, background, itr=iteration, rvq_iter=False)
85 | else:
86 | render_pkg = render(viewpoint_cam, gaussians, pipe, background, itr=iteration, rvq_iter=True)
87 | image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"], render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
88 |
89 | # Loss
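# Objective below: (1 - lambda_dssim) * L1 + lambda_dssim * (1 - SSIM)
# + lambda_mask * mean(sigmoid(mask)); the last term pushes Gaussian masks toward
# zero so that masked points can later be pruned (sigmoid(mask) <= 0.01).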
90 | gt_image = viewpoint_cam.original_image.cuda()
91 | Ll1 = l1_loss(image, gt_image)
92 | loss = (1.0 - opt.lambda_dssim) * Ll1 + opt.lambda_dssim * (1.0 - ssim(image, gt_image)) + opt.lambda_mask*torch.mean((torch.sigmoid(gaussians._mask)))
93 | loss.backward()
94 |
95 | iter_end.record()
96 |
97 | with torch.no_grad():
98 | # Progress bar
99 | ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log
100 | if iteration % 10 == 0:
101 | progress_bar.set_postfix({"Loss": f"{ema_loss_for_log:.{7}f}"})
102 | progress_bar.update(10)
103 | if iteration == opt.iterations:
104 | progress_bar.close()
105 |
106 | if iteration == opt.iterations:
107 | storage = gaussians.final_prune(compress=comp)
108 | with open(os.path.join(dataset.model_path, "storage"), 'w') as c:  # use the local config instead of the global args
109 | c.write(storage)
110 | gaussians.precompute()
111 |
112 | # Log and save
113 | training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, scene, render, (pipe, background))
114 | if (iteration in saving_iterations):
115 | print("\n[ITER {}] Saving Gaussians".format(iteration))
116 | scene.save(iteration, compress=comp, store=store_npz)
117 |
118 | # Densification
119 | if iteration < opt.densify_until_iter:
120 | # Keep track of max radii in image-space for pruning
121 | gaussians.max_radii2D[visibility_filter] = torch.max(gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
122 | gaussians.add_densification_stats(viewspace_point_tensor, visibility_filter)
123 |
124 | if iteration > opt.densify_from_iter and iteration % opt.densification_interval == 0:
125 | size_threshold = 20 if iteration > opt.opacity_reset_interval else None
126 | gaussians.densify_and_prune(opt.densify_grad_threshold, 0.005, scene.cameras_extent, size_threshold)
127 |
128 | if iteration % opt.opacity_reset_interval == 0 or (dataset.white_background and iteration == opt.densify_from_iter):
129 | gaussians.reset_opacity()
130 | else:
131 | if iteration % opt.mask_prune_iter == 0:
132 | gaussians.mask_prune()
133 |
134 | # Optimizer step
135 | if iteration < opt.iterations:
136 | gaussians.optimizer.step()
137 | gaussians.optimizer.zero_grad(set_to_none = True)
138 | gaussians.optimizer_net.step()
139 | gaussians.optimizer_net.zero_grad(set_to_none = True)
140 | gaussians.scheduler_net.step()
141 | if (iteration in checkpoint_iterations):
142 | print("\n[ITER {}] Saving Checkpoint".format(iteration))
143 | torch.save((gaussians.capture(), iteration), scene.model_path + "/chkpnt" + str(iteration) + ".pth")
144 |
145 | def prepare_output_and_logger(args):
146 | if not args.model_path:
147 | if os.getenv('OAR_JOB_ID'):
148 | unique_str=os.getenv('OAR_JOB_ID')
149 | else:
150 | unique_str = str(uuid.uuid4())
151 | args.model_path = os.path.join("./output/", unique_str[0:10])
152 |
153 | # Set up output folder
154 | print("Output folder: {}".format(args.model_path))
155 | os.makedirs(args.model_path, exist_ok = True)
156 | with open(os.path.join(args.model_path, "cfg_args"), 'w') as cfg_log_f:
157 | cfg_log_f.write(str(Namespace(**vars(args))))
158 |
159 | # Create Tensorboard writer
160 | tb_writer = None
161 | if TENSORBOARD_FOUND:
162 | tb_writer = SummaryWriter(args.model_path)
163 | else:
164 | print("Tensorboard not available: not logging progress")
165 | return tb_writer
166 |
167 | def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, scene : Scene, renderFunc, renderArgs):
168 | if tb_writer:
169 | tb_writer.add_scalar('train_loss_patches/l1_loss', Ll1.item(), iteration)
170 | tb_writer.add_scalar('train_loss_patches/total_loss', loss.item(), iteration)
171 | tb_writer.add_scalar('iter_time', elapsed, iteration)
172 |
173 | # Report test and samples of training set
174 | if iteration in testing_iterations:
175 | torch.cuda.empty_cache()
176 | validation_configs = ({'name': 'test', 'cameras' : scene.getTestCameras()},
177 | {'name': 'train', 'cameras' : [scene.getTrainCameras()[idx % len(scene.getTrainCameras())] for idx in range(5, 30, 5)]})
178 |
179 | for config in validation_configs:
180 | if config['cameras'] and len(config['cameras']) > 0:
181 | l1_test = 0.0
182 | psnr_test = 0.0
183 | for idx, viewpoint in enumerate(config['cameras']):
184 | image = torch.clamp(renderFunc(viewpoint, scene.gaussians, *renderArgs)["render"], 0.0, 1.0)
185 | gt_image = torch.clamp(viewpoint.original_image.to("cuda"), 0.0, 1.0)
186 | if tb_writer and (idx < 5):
187 | tb_writer.add_images(config['name'] + "_view_{}/render".format(viewpoint.image_name), image[None], global_step=iteration)
188 | if iteration == testing_iterations[0]:
189 | tb_writer.add_images(config['name'] + "_view_{}/ground_truth".format(viewpoint.image_name), gt_image[None], global_step=iteration)
190 | l1_test += l1_loss(image, gt_image).mean().double()
191 | psnr_test += psnr(image, gt_image).mean().double()
192 | psnr_test /= len(config['cameras'])
193 | l1_test /= len(config['cameras'])
194 | print("\n[ITER {}] Evaluating {}: L1 {} PSNR {}".format(iteration, config['name'], l1_test, psnr_test))
195 | if tb_writer:
196 | tb_writer.add_scalar(config['name'] + '/loss_viewpoint - l1_loss', l1_test, iteration)
197 | tb_writer.add_scalar(config['name'] + '/loss_viewpoint - psnr', psnr_test, iteration)
198 |
199 | if tb_writer:
200 | tb_writer.add_histogram("scene/opacity_histogram", scene.gaussians.get_opacity, iteration)
201 | tb_writer.add_scalar('total_points', scene.gaussians.get_xyz.shape[0], iteration)
202 | torch.cuda.empty_cache()
203 |
204 | if __name__ == "__main__":
205 | # Set up command line argument parser
206 | parser = ArgumentParser(description="Training script parameters")
207 | lp = ModelParams(parser)
208 | op = OptimizationParams(parser)
209 | pp = PipelineParams(parser)
210 | parser.add_argument('--ip', type=str, default="127.0.0.1")
211 | parser.add_argument('--port', type=int, default=6009)
212 | parser.add_argument('--debug_from', type=int, default=-1)
213 | parser.add_argument('--detect_anomaly', action='store_true', default=False)
214 | parser.add_argument("--test_iterations", nargs="+", type=int, default=[30_000])
215 | parser.add_argument("--save_iterations", nargs="+", type=int, default=[30_000])
216 | parser.add_argument("--quiet", action="store_true")
217 | parser.add_argument("--checkpoint_iterations", nargs="+", type=int, default=[])
218 | parser.add_argument("--start_checkpoint", type=str, default = None)
219 | parser.add_argument("--comp", action="store_true")
220 | parser.add_argument("--store_npz", action="store_true")
221 |
222 | args = parser.parse_args(sys.argv[1:])
223 | args.save_iterations.append(args.iterations)
224 |
225 | print("Optimizing " + args.model_path)
226 |
227 | # Initialize system state (RNG)
228 | safe_state(args.quiet)
229 |
230 | # Start GUI server, configure and run training
231 | network_gui.init(args.ip, args.port)
232 | torch.autograd.set_detect_anomaly(args.detect_anomaly)
233 |
234 | training(lp.extract(args), op.extract(args), pp.extract(args), args.test_iterations, args.save_iterations, args.checkpoint_iterations, args.start_checkpoint, args.debug_from, args.comp, args.store_npz)
235 |
236 | # All done
237 | print("\nTraining complete.")
238 |
--------------------------------------------------------------------------------
/utils/camera_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | from scene.cameras import Camera
13 | import numpy as np
14 | from utils.general_utils import PILtoTorch
15 | from utils.graphics_utils import fov2focal
16 |
17 | WARNED = False
18 |
19 | def loadCam(args, id, cam_info, resolution_scale):
20 | orig_w, orig_h = cam_info.image.size
21 |
22 | if args.resolution in [1, 2, 4, 8]:
23 | resolution = round(orig_w/(resolution_scale * args.resolution)), round(orig_h/(resolution_scale * args.resolution))
24 | else: # should be a type that converts to float
25 | if args.resolution == -1:
26 | if orig_w > 1600:
27 | global WARNED
28 | if not WARNED:
29 | print("[ INFO ] Encountered quite large input images (>1.6K pixels width), rescaling to 1.6K.\n "
30 | "If this is not desired, please explicitly specify '--resolution/-r' as 1")
31 | WARNED = True
32 | global_down = orig_w / 1600
33 | else:
34 | global_down = 1
35 | else:
36 | global_down = orig_w / args.resolution
37 |
38 | scale = float(global_down) * float(resolution_scale)
39 | resolution = (int(orig_w / scale), int(orig_h / scale))
40 |
41 | resized_image_rgb = PILtoTorch(cam_info.image, resolution)
42 |
43 | gt_image = resized_image_rgb[:3, ...]
44 | loaded_mask = None
45 |
46 | if resized_image_rgb.shape[1] == 4:
47 | loaded_mask = resized_image_rgb[3:4, ...]
48 |
49 | return Camera(colmap_id=cam_info.uid, R=cam_info.R, T=cam_info.T,
50 | FoVx=cam_info.FovX, FoVy=cam_info.FovY,
51 | image=gt_image, gt_alpha_mask=loaded_mask,
52 | image_name=cam_info.image_name, uid=id, data_device=args.data_device)
53 |
54 | def cameraList_from_camInfos(cam_infos, resolution_scale, args):
55 | camera_list = []
56 |
57 | for id, c in enumerate(cam_infos):
58 | camera_list.append(loadCam(args, id, c, resolution_scale))
59 |
60 | return camera_list
61 |
62 | def camera_to_JSON(id, camera : Camera):
63 | Rt = np.zeros((4, 4))
64 | Rt[:3, :3] = camera.R.transpose()
65 | Rt[:3, 3] = camera.T
66 | Rt[3, 3] = 1.0
67 |
68 | W2C = np.linalg.inv(Rt)
69 | pos = W2C[:3, 3]
70 | rot = W2C[:3, :3]
71 | serializable_array_2d = [x.tolist() for x in rot]
72 | camera_entry = {
73 | 'id' : id,
74 | 'img_name' : camera.image_name,
75 | 'width' : camera.width,
76 | 'height' : camera.height,
77 | 'position': pos.tolist(),
78 | 'rotation': serializable_array_2d,
79 | 'fy' : fov2focal(camera.FovY, camera.height),
80 | 'fx' : fov2focal(camera.FovX, camera.width)
81 | }
82 | return camera_entry
83 |
--------------------------------------------------------------------------------
/utils/general_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | import sys
14 | from datetime import datetime
15 | import numpy as np
16 | import random
17 |
18 | def inverse_sigmoid(x):
19 | return torch.log(x/(1-x))
20 |
21 | def PILtoTorch(pil_image, resolution):
22 | resized_image_PIL = pil_image.resize(resolution)
23 | resized_image = torch.from_numpy(np.array(resized_image_PIL)) / 255.0
24 | if len(resized_image.shape) == 3:
25 | return resized_image.permute(2, 0, 1)
26 | else:
27 | return resized_image.unsqueeze(dim=-1).permute(2, 0, 1)
28 |
29 | def get_expon_lr_func(
30 | lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000
31 | ):
32 | """
33 | Copied from Plenoxels
34 |
35 | Continuous learning rate decay function. Adapted from JaxNeRF
36 | The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
37 | is log-linearly interpolated elsewhere (equivalent to exponential decay).
38 | If lr_delay_steps>0 then the learning rate will be scaled by some smooth
39 | function of lr_delay_mult, such that the initial learning rate is
40 | lr_init*lr_delay_mult at the beginning of optimization but will be eased back
41 | to the normal learning rate when steps>lr_delay_steps.
42 | :param lr_init, lr_final, lr_delay_steps, lr_delay_mult: see the description above.
43 | :param max_steps: int, the number of steps during optimization.
44 | :return HoF which takes step as input
45 | """
46 |
47 | def helper(step):
48 | if step < 0 or (lr_init == 0.0 and lr_final == 0.0):
49 | # Disable this parameter
50 | return 0.0
51 | if lr_delay_steps > 0:
52 | # A kind of reverse cosine decay.
53 | delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
54 | 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)
55 | )
56 | else:
57 | delay_rate = 1.0
58 | t = np.clip(step / max_steps, 0, 1)
59 | log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
60 | return delay_rate * log_lerp
61 |
62 | return helper
63 |
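# Usage sketch for get_expon_lr_func (illustrative values only):
#   >>> lr_fn = get_expon_lr_func(lr_init=1e-2, lr_final=1e-4, max_steps=30_000)
#   >>> lr_fn(0)         # 1e-2
#   >>> lr_fn(15_000)    # 1e-3 (log-linear midpoint)
#   >>> lr_fn(30_000)    # 1e-4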
64 | def strip_lowerdiag(L):
65 | uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device="cuda")
66 |
67 | uncertainty[:, 0] = L[:, 0, 0]
68 | uncertainty[:, 1] = L[:, 0, 1]
69 | uncertainty[:, 2] = L[:, 0, 2]
70 | uncertainty[:, 3] = L[:, 1, 1]
71 | uncertainty[:, 4] = L[:, 1, 2]
72 | uncertainty[:, 5] = L[:, 2, 2]
73 | return uncertainty
74 |
75 | def strip_symmetric(sym):
76 | return strip_lowerdiag(sym)
77 |
78 | def build_rotation(r):
79 | norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])
80 |
81 | q = r / norm[:, None]
82 |
83 | R = torch.zeros((q.size(0), 3, 3), device='cuda')
84 |
85 | r = q[:, 0]
86 | x = q[:, 1]
87 | y = q[:, 2]
88 | z = q[:, 3]
89 |
90 | R[:, 0, 0] = 1 - 2 * (y*y + z*z)
91 | R[:, 0, 1] = 2 * (x*y - r*z)
92 | R[:, 0, 2] = 2 * (x*z + r*y)
93 | R[:, 1, 0] = 2 * (x*y + r*z)
94 | R[:, 1, 1] = 1 - 2 * (x*x + z*z)
95 | R[:, 1, 2] = 2 * (y*z - r*x)
96 | R[:, 2, 0] = 2 * (x*z - r*y)
97 | R[:, 2, 1] = 2 * (y*z + r*x)
98 | R[:, 2, 2] = 1 - 2 * (x*x + y*y)
99 | return R
100 |
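# Note: build_rotation expects quaternions in (w, x, y, z) order and normalises them
# first, so unnormalised inputs are accepted; the identity quaternion [1, 0, 0, 0]
# maps to the 3x3 identity matrix.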
101 | def build_scaling_rotation(s, r):
102 | L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device="cuda")
103 | R = build_rotation(r)
104 |
105 | L[:,0,0] = s[:,0]
106 | L[:,1,1] = s[:,1]
107 | L[:,2,2] = s[:,2]
108 |
109 | L = R @ L
110 | return L
111 |
112 | def safe_state(silent):
113 | old_f = sys.stdout
114 | class F:
115 | def __init__(self, silent):
116 | self.silent = silent
117 |
118 | def write(self, x):
119 | if not self.silent:
120 | if x.endswith("\n"):
121 | old_f.write(x.replace("\n", " [{}]\n".format(str(datetime.now().strftime("%d/%m %H:%M:%S")))))
122 | else:
123 | old_f.write(x)
124 |
125 | def flush(self):
126 | old_f.flush()
127 |
128 | sys.stdout = F(silent)
129 |
130 | random.seed(0)
131 | np.random.seed(0)
132 | torch.manual_seed(0)
133 | torch.cuda.set_device(torch.device("cuda:0"))
134 |
135 |
136 | def splitBy3(a):
137 | x = a & 0x1FFFFF # we only look at the first 21 bits
138 | x = (x | x << 32) & 0x1F00000000FFFF
139 | x = (x | x << 16) & 0x1F0000FF0000FF
140 | x = (x | x << 8) & 0x100F00F00F00F00F
141 | x = (x | x << 4) & 0x10C30C30C30C30C3
142 | x = (x | x << 2) & 0x1249249249249249
143 | return x
144 |
145 |
146 | def mortonEncode(pos: torch.Tensor) -> torch.Tensor:
147 | x, y, z = pos.unbind(-1)
148 | answer = torch.zeros(len(pos), dtype=torch.long, device=pos.device)
149 | answer |= splitBy3(x) | splitBy3(y) << 1 | splitBy3(z) << 2
150 | return answer
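# Illustrative example: splitBy3 spreads the low 21 bits of a coordinate so that
# mortonEncode can interleave x, y and z bit-by-bit (x -> bit 0, y -> bit 1, z -> bit 2, ...):
#   >>> mortonEncode(torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
#   tensor([1, 2, 4])
# sort_morton() in gaussian_model.py uses these codes to reorder the Gaussians along a
# space-filling curve before the post-processing quantization/Huffman step.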
--------------------------------------------------------------------------------
/utils/graphics_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | import math
14 | import numpy as np
15 | from typing import NamedTuple
16 |
17 | class BasicPointCloud(NamedTuple):
18 | points : np.array
19 | colors : np.array
20 | normals : np.array
21 |
22 | def geom_transform_points(points, transf_matrix):
23 | P, _ = points.shape
24 | ones = torch.ones(P, 1, dtype=points.dtype, device=points.device)
25 | points_hom = torch.cat([points, ones], dim=1)
26 | points_out = torch.matmul(points_hom, transf_matrix.unsqueeze(0))
27 |
28 | denom = points_out[..., 3:] + 0.0000001
29 | return (points_out[..., :3] / denom).squeeze(dim=0)
30 |
31 | def getWorld2View(R, t):
32 | Rt = np.zeros((4, 4))
33 | Rt[:3, :3] = R.transpose()
34 | Rt[:3, 3] = t
35 | Rt[3, 3] = 1.0
36 | return np.float32(Rt)
37 |
38 | def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
39 | Rt = np.zeros((4, 4))
40 | Rt[:3, :3] = R.transpose()
41 | Rt[:3, 3] = t
42 | Rt[3, 3] = 1.0
43 |
44 | C2W = np.linalg.inv(Rt)
45 | cam_center = C2W[:3, 3]
46 | cam_center = (cam_center + translate) * scale
47 | C2W[:3, 3] = cam_center
48 | Rt = np.linalg.inv(C2W)
49 | return np.float32(Rt)
50 |
51 | def getProjectionMatrix(znear, zfar, fovX, fovY):
52 | tanHalfFovY = math.tan((fovY / 2))
53 | tanHalfFovX = math.tan((fovX / 2))
54 |
55 | top = tanHalfFovY * znear
56 | bottom = -top
57 | right = tanHalfFovX * znear
58 | left = -right
59 |
60 | P = torch.zeros(4, 4)
61 |
62 | z_sign = 1.0
63 |
64 | P[0, 0] = 2.0 * znear / (right - left)
65 | P[1, 1] = 2.0 * znear / (top - bottom)
66 | P[0, 2] = (right + left) / (right - left)
67 | P[1, 2] = (top + bottom) / (top - bottom)
68 | P[3, 2] = z_sign
69 | P[2, 2] = z_sign * zfar / (zfar - znear)
70 | P[2, 3] = -(zfar * znear) / (zfar - znear)
71 | return P
72 |
73 | def fov2focal(fov, pixels):
74 | return pixels / (2 * math.tan(fov / 2))
75 |
76 | def focal2fov(focal, pixels):
77 | return 2*math.atan(pixels/(2*focal))
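# Worked example (illustrative): a 90-degree FoV over an 800-pixel-wide image gives
# fov2focal(math.pi / 2, 800) = 800 / (2 * tan(45 deg)) = 400 pixels, and
# focal2fov(400, 800) recovers pi / 2; the two functions are inverses of each other.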
--------------------------------------------------------------------------------
/utils/image_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 |
14 | def mse(img1, img2):
15 | return (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)
16 |
17 | def psnr(img1, img2):
18 | mse = (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)
19 | return 20 * torch.log10(1.0 / torch.sqrt(mse))
20 |
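# Worked example (illustrative): for images in [0, 1], an MSE of 1e-3 gives
# PSNR = 20 * log10(1 / sqrt(1e-3)) = 10 * log10(1e3) = 30 dB; halving the MSE
# adds about 3 dB.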
--------------------------------------------------------------------------------
/utils/loss_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | import torch.nn.functional as F
14 | from torch.autograd import Variable
15 | from math import exp
16 |
17 | def l1_loss(network_output, gt):
18 | return torch.abs((network_output - gt)).mean()
19 |
20 | def l2_loss(network_output, gt):
21 | return ((network_output - gt) ** 2).mean()
22 |
23 | def gaussian(window_size, sigma):
24 | gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
25 | return gauss / gauss.sum()
26 |
27 | def create_window(window_size, channel):
28 | _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
29 | _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
30 | window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
31 | return window
32 |
33 | def ssim(img1, img2, window_size=11, size_average=True):
34 | channel = img1.size(-3)
35 | window = create_window(window_size, channel)
36 |
37 | if img1.is_cuda:
38 | window = window.cuda(img1.get_device())
39 | window = window.type_as(img1)
40 |
41 | return _ssim(img1, img2, window, window_size, channel, size_average)
42 |
43 | def _ssim(img1, img2, window, window_size, channel, size_average=True):
44 | mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
45 | mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
46 |
47 | mu1_sq = mu1.pow(2)
48 | mu2_sq = mu2.pow(2)
49 | mu1_mu2 = mu1 * mu2
50 |
51 | sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
52 | sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
53 | sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
54 |
55 | C1 = 0.01 ** 2
56 | C2 = 0.03 ** 2
57 |
58 | ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
59 |
60 | if size_average:
61 | return ssim_map.mean()
62 | else:
63 | return ssim_map.mean(1).mean(1).mean(1)
64 |
65 |
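# Note: C1 = 0.01**2 and C2 = 0.03**2 are the usual SSIM stabilisation constants
# (K1 = 0.01, K2 = 0.03) for a dynamic range of 1.0, matching the [0, 1] image
# tensors used during training.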
--------------------------------------------------------------------------------
/utils/pose_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | from icecream import ic
4 | from utils.graphics_utils import getWorld2View2
5 |
6 |
7 | def normalize(x):
8 | return x / np.linalg.norm(x)
9 |
10 | def viewmatrix(z, up, pos):
11 | vec2 = normalize(z)
12 | vec1_avg = up
13 | vec0 = normalize(np.cross(vec1_avg, vec2))
14 | vec1 = normalize(np.cross(vec2, vec0))
15 | m = np.stack([vec0, vec1, vec2, pos], 1)
16 | return m
17 |
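# Note: viewmatrix returns a 3x4 pose whose columns are the camera x, y and z axes
# followed by the camera position, with the (normalised) z argument used as the
# viewing direction.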
18 | def poses_avg(poses):
19 | hwf = poses[0, :3, -1:]
20 |
21 | center = poses[:, :3, 3].mean(0)
22 | vec2 = normalize(poses[:, :3, 2].sum(0))
23 | up = poses[:, :3, 1].sum(0)
24 | c2w = np.concatenate([viewmatrix(vec2, up, center), hwf], 1)
25 |
26 | return c2w
27 |
28 | def get_focal(camera):
29 | focal = camera.FoVx
30 | return focal
31 |
32 | def poses_avg_fixed_center(poses):
33 | hwf = poses[0, :3, -1:]
34 | center = poses[:, :3, 3].mean(0)
35 | vec2 = [1, 0, 0]
36 | up = [0, 0, 1]
37 | c2w = np.concatenate([viewmatrix(vec2, up, center), hwf], 1)
38 | return c2w
39 |
40 | def integrate_weights_np(w):
41 | """Compute the cumulative sum of w, assuming all weight vectors sum to 1.
42 |
43 | The output's size on the last dimension is one greater than that of the input,
44 | because we're computing the integral corresponding to the endpoints of a step
45 | function, not the integral of the interior/bin values.
46 |
47 | Args:
48 | w: Tensor, which will be integrated along the last axis. This is assumed to
49 | sum to 1 along the last axis, and this function will (silently) break if
50 | that is not the case.
51 |
52 | Returns:
53 | cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
54 | """
55 | cw = np.minimum(1, np.cumsum(w[..., :-1], axis=-1))
56 | shape = cw.shape[:-1] + (1,)
57 | # Ensure that the CDF starts with exactly 0 and ends with exactly 1.
58 | cw0 = np.concatenate([np.zeros(shape), cw,
59 | np.ones(shape)], axis=-1)
60 | return cw0
61 |
62 | def invert_cdf_np(u, t, w_logits):
63 | """Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
64 | # Compute the PDF and CDF for each weight vector.
65 | w = np.exp(w_logits) / np.exp(w_logits).sum(axis=-1, keepdims=True)
66 | cw = integrate_weights_np(w)
67 | # Interpolate into the inverse CDF.
68 | interp_fn = np.interp
69 | t_new = interp_fn(u, cw, t)
70 | return t_new
71 |
72 | def sample_np(rand,
73 | t,
74 | w_logits,
75 | num_samples,
76 | single_jitter=False,
77 | deterministic_center=False):
78 | """
79 | numpy version of sample()
80 | """
81 | eps = np.finfo(np.float32).eps
82 |
83 | # Draw uniform samples.
84 | if not rand:
85 | if deterministic_center:
86 | pad = 1 / (2 * num_samples)
87 | u = np.linspace(pad, 1. - pad - eps, num_samples)
88 | else:
89 | u = np.linspace(0, 1. - eps, num_samples)
90 | u = np.broadcast_to(u, t.shape[:-1] + (num_samples,))
91 | else:
92 | # `u` is in [0, 1) --- it can be zero, but it can never be 1.
93 | u_max = eps + (1 - eps) / num_samples
94 | max_jitter = (1 - u_max) / (num_samples - 1) - eps
95 | d = 1 if single_jitter else num_samples
96 | u = np.linspace(0, 1 - u_max, num_samples) + \
97 | np.random.rand(*t.shape[:-1], d) * max_jitter
98 |
99 | return invert_cdf_np(u, t, w_logits)
100 |
101 |
102 |
103 | def focus_point_fn(poses):
104 | """Calculate nearest point to all focal axes in poses."""
105 | directions, origins = poses[:, :3, 2:3], poses[:, :3, 3:4]
106 | m = np.eye(3) - directions * np.transpose(directions, [0, 2, 1])
107 | mt_m = np.transpose(m, [0, 2, 1]) @ m
108 | focus_pt = np.linalg.inv(mt_m.mean(0)) @ (mt_m @ origins).mean(0)[:, 0]
109 | return focus_pt
110 |
111 |
112 | def average_pose(poses: np.ndarray) -> np.ndarray:
113 | """New pose using average position, z-axis, and up vector of input poses."""
114 | position = poses[:, :3, 3].mean(0)
115 | z_axis = poses[:, :3, 2].mean(0)
116 | up = poses[:, :3, 1].mean(0)
117 | cam2world = viewmatrix(z_axis, up, position)
118 | return cam2world
119 |
120 | from typing import Tuple
121 | def recenter_poses(poses: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
122 | """Recenter poses around the origin."""
123 | cam2world = average_pose(poses)
124 | transform = np.linalg.inv(pad_poses(cam2world))
125 | poses = transform @ pad_poses(poses)
126 | return unpad_poses(poses), transform
127 |
128 |
129 | NEAR_STRETCH = .9 # Push forward near bound for forward facing render path.
130 | FAR_STRETCH = 5. # Push back far bound for forward facing render path.
131 | FOCUS_DISTANCE = .75 # Relative weighting of near, far bounds for render path.
132 | def generate_spiral_path(views, bounds,
133 | n_frames: int = 180,
134 | n_rots: int = 2,
135 | zrate: float = .5) -> np.ndarray:
136 | """Calculates a forward facing spiral path for rendering."""
137 | # Find a reasonable 'focus depth' for this dataset as a weighted average
138 | # of conservative near and far bounds in disparity space.
139 | poses = []
140 | for view in views:
141 | tmp_view = np.eye(4)
142 | tmp_view[:3] = np.concatenate([view.R.T, view.T[:, None]], 1)
143 | tmp_view = np.linalg.inv(tmp_view)
144 | tmp_view[:, 1:3] *= -1
145 | poses.append(tmp_view)
146 | poses = np.stack(poses, 0)
147 |
148 | print(poses.shape)
149 | bounds = bounds.repeat(poses.shape[0], 0) #np.array([[ 16.21311152, 153.86329729]])
150 | scale = 1. / (bounds.min() * .75)
151 | poses[:, :3, 3] *= scale
152 | bounds *= scale
153 | # Recenter poses.
154 | # tmp, _ = recenter_poses(poses)
155 | # poses[:, :3, :3] = tmp[:, :3, :3] @ np.diag(np.array([1, -1, -1]))
156 |
157 | near_bound = bounds.min() * NEAR_STRETCH
158 | far_bound = bounds.max() * FAR_STRETCH
159 | # All cameras will point towards the world space point (0, 0, -focal).
160 | focal = 1 / (((1 - FOCUS_DISTANCE) / near_bound + FOCUS_DISTANCE / far_bound))
161 |
162 | # Get radii for spiral path using 90th percentile of camera positions.
163 | positions = poses[:, :3, 3]
164 | radii = np.percentile(np.abs(positions), 90, 0)
165 | radii = np.concatenate([radii, [1.]])
166 |
167 | # Generate poses for spiral path.
168 | render_poses = []
169 | cam2world = average_pose(poses)
170 | up = poses[:, :3, 1].mean(0)
171 | for theta in np.linspace(0., 2. * np.pi * n_rots, n_frames, endpoint=False):
172 | t = radii * [np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.]
173 | position = cam2world @ t
174 | lookat = cam2world @ [0, 0, -focal, 1.]
175 | z_axis = position - lookat
176 | render_pose = np.eye(4)
177 | render_pose[:3] = viewmatrix(z_axis, up, position)
178 | render_pose[:3, 1:3] *= -1
179 | render_poses.append(np.linalg.inv(render_pose))
180 | render_poses = np.stack(render_poses, axis=0)
181 | return render_poses
182 |
183 |
184 | def render_path_spiral(views, focal=50, zrate=0.5, rots=2, N=10):
185 | poses = []
186 | for view in views:
187 | tmp_view = np.eye(4)
188 | tmp_view[:3] = np.concatenate([view.R.T, view.T[:, None]], 1)
189 | tmp_view = np.linalg.inv(tmp_view)
190 | tmp_view[:, 1:3] *= -1
191 | poses.append(tmp_view)
192 | poses = np.stack(poses, 0)
193 | # poses = np.stack([np.concatenate([view.R.T, view.T[:, None]], 1) for view in views], 0)
194 | c2w = poses_avg(poses)
195 | up = normalize(poses[:, :3, 1].sum(0))
196 |
197 | # Get radii for spiral path
198 | rads = np.percentile(np.abs(poses[:, :3, 3]), 90, 0)
199 | render_poses = []
200 | rads = np.array(list(rads) + [1.0])
201 |
202 | for theta in np.linspace(0.0, 2.0 * np.pi * rots, N + 1)[:-1]:
203 | c = np.dot(
204 | c2w[:3, :4],
205 | np.array([np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.0]) * rads,
206 | )
207 | z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.0])))
208 | render_pose = np.eye(4)
209 | render_pose[:3] = viewmatrix(z, up, c)
210 | render_pose[:3, 1:3] *= -1
211 | render_poses.append(np.linalg.inv(render_pose))
212 | return render_poses
213 |
214 | def pad_poses(p):
215 | """Pad [..., 3, 4] pose matrices with a homogeneous bottom row [0,0,0,1]."""
216 | bottom = np.broadcast_to([0, 0, 0, 1.], p[..., :1, :4].shape)
217 | return np.concatenate([p[..., :3, :4], bottom], axis=-2)
218 |
219 |
220 | def unpad_poses(p):
221 | """Remove the homogeneous bottom row from [..., 4, 4] pose matrices."""
222 | return p[..., :3, :4]
223 |
224 | def transform_poses_pca(poses):
225 | """Transforms poses so principal components lie on XYZ axes.
226 |
227 | Args:
228 | poses: a (N, 3, 4) array containing the cameras' camera to world transforms.
229 |
230 | Returns:
231 | A tuple (poses, transform), with the transformed poses and the applied
232 | camera_to_world transforms.
233 | """
234 | t = poses[:, :3, 3]
235 | t_mean = t.mean(axis=0)
236 | t = t - t_mean
237 |
238 | eigval, eigvec = np.linalg.eig(t.T @ t)
239 | # Sort eigenvectors in order of largest to smallest eigenvalue.
240 | inds = np.argsort(eigval)[::-1]
241 | eigvec = eigvec[:, inds]
242 | rot = eigvec.T
243 | if np.linalg.det(rot) < 0:
244 | rot = np.diag(np.array([1, 1, -1])) @ rot
245 |
246 | transform = np.concatenate([rot, rot @ -t_mean[:, None]], -1)
247 | poses_recentered = unpad_poses(transform @ pad_poses(poses))
248 | transform = np.concatenate([transform, np.eye(4)[3:]], axis=0)
249 |
250 | # Flip coordinate system if z component of y-axis is negative
251 | if poses_recentered.mean(axis=0)[2, 1] < 0:
252 | poses_recentered = np.diag(np.array([1, -1, -1])) @ poses_recentered
253 | transform = np.diag(np.array([1, -1, -1, 1])) @ transform
254 |
255 | # Just make sure it's in the [-1, 1]^3 cube
256 | scale_factor = 1. / np.max(np.abs(poses_recentered[:, :3, 3]))
257 | poses_recentered[:, :3, 3] *= scale_factor
258 | transform = np.diag(np.array([scale_factor] * 3 + [1])) @ transform
259 | return poses_recentered, transform
260 |
261 | def generate_ellipse_path(views, n_frames=600, const_speed=True, z_variation=0., z_phase=0.):
262 | poses = []
263 | for view in views:
264 | tmp_view = np.eye(4)
265 | tmp_view[:3] = np.concatenate([view.R.T, view.T[:, None]], 1)
266 | tmp_view = np.linalg.inv(tmp_view)
267 | tmp_view[:, 1:3] *= -1
268 | poses.append(tmp_view)
269 | poses = np.stack(poses, 0)
270 | poses, transform = transform_poses_pca(poses)
271 |
272 |
273 | # Calculate the focal point for the path (cameras point toward this).
274 | center = focus_point_fn(poses)
275 | offset = np.array([center[0] , center[1], center[2]*0 ])
276 | # Calculate scaling for ellipse axes based on input camera positions.
277 | sc = np.percentile(np.abs(poses[:, :3, 3] - offset), 90, axis=0)
278 |
279 | # Use ellipse that is symmetric about the focal point in xy.
280 | low = -sc + offset
281 | high = sc + offset
282 | # Optional height variation need not be symmetric
283 | z_low = np.percentile((poses[:, :3, 3]), 10, axis=0)
284 | z_high = np.percentile((poses[:, :3, 3]), 90, axis=0)
285 |
286 |
287 | def get_positions(theta):
288 | # Interpolate between bounds with trig functions to get ellipse in x-y.
289 | # Optionally also interpolate in z to change camera height along path.
290 | return np.stack([
291 | (low[0] + (high - low)[0] * (np.cos(theta) * .5 + .5)),
292 | (low[1] + (high - low)[1] * (np.sin(theta) * .5 + .5)),
293 | z_variation * (z_low[2] + (z_high - z_low)[2] *
294 | (np.cos(theta + 2 * np.pi * z_phase) * .5 + .5)),
295 | ], -1)
296 |
297 | theta = np.linspace(0, 2. * np.pi, n_frames + 1, endpoint=True)
298 | positions = get_positions(theta)
299 |
300 | if const_speed:
301 | # Resample theta angles so that the velocity is closer to constant.
302 | lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
303 | theta = sample_np(None, theta, np.log(lengths), n_frames + 1)
304 | positions = get_positions(theta)
305 |
306 | # Throw away duplicated last position.
307 | positions = positions[:-1]
308 |
309 | # Set path's up vector to axis closest to average of input pose up vectors.
310 | avg_up = poses[:, :3, 1].mean(0)
311 | avg_up = avg_up / np.linalg.norm(avg_up)
312 | ind_up = np.argmax(np.abs(avg_up))
313 | up = np.eye(3)[ind_up] * np.sign(avg_up[ind_up])
314 |
315 | render_poses = []
316 | for p in positions:
317 | render_pose = np.eye(4)
318 | render_pose[:3] = viewmatrix(p - center, up, p)
319 | render_pose = np.linalg.inv(transform) @ render_pose
320 | render_pose[:3, 1:3] *= -1
321 | render_poses.append(np.linalg.inv(render_pose))
322 | return render_poses
323 |
324 |
325 | def generate_spherify_path(views):
326 | poses = []
327 | for view in views:
328 | tmp_view = np.eye(4)
329 | tmp_view[:3] = np.concatenate([view.R.T, view.T[:, None]], 1)
330 | tmp_view = np.linalg.inv(tmp_view)
331 | tmp_view[:, 1:3] *= -1
332 | poses.append(tmp_view)
333 | poses = np.stack(poses, 0)
334 |
335 | p34_to_44 = lambda p: np.concatenate(
336 | [p, np.tile(np.reshape(np.eye(4)[-1, :], [1, 1, 4]), [p.shape[0], 1, 1])], 1
337 | )
338 |
339 | rays_d = poses[:, :3, 2:3]
340 | rays_o = poses[:, :3, 3:4]
341 |
342 | def min_line_dist(rays_o, rays_d):
343 | A_i = np.eye(3) - rays_d * np.transpose(rays_d, [0, 2, 1])
344 | b_i = -A_i @ rays_o
345 | pt_mindist = np.squeeze(
346 | -np.linalg.inv((np.transpose(A_i, [0, 2, 1]) @ A_i).mean(0)) @ (b_i).mean(0)
347 | )
348 | return pt_mindist
349 |
350 | pt_mindist = min_line_dist(rays_o, rays_d)
351 |
352 | center = pt_mindist
353 | up = (poses[:, :3, 3] - center).mean(0)
354 |
355 | vec0 = normalize(up)
356 | vec1 = normalize(np.cross([0.1, 0.2, 0.3], vec0))
357 | vec2 = normalize(np.cross(vec0, vec1))
358 | pos = center
359 | c2w = np.stack([vec1, vec2, vec0, pos], 1)
360 |
361 | poses_reset = np.linalg.inv(p34_to_44(c2w[None])) @ p34_to_44(poses[:, :3, :4])
362 |
363 | rad = np.sqrt(np.mean(np.sum(np.square(poses_reset[:, :3, 3]), -1)))
364 |
365 | sc = 1.0 / rad
366 | poses_reset[:, :3, 3] *= sc
367 | rad *= sc
368 |
369 | centroid = np.mean(poses_reset[:, :3, 3], 0)
370 | zh = centroid[2]
371 | radcircle = np.sqrt(rad**2 - zh**2)
372 | new_poses = []
373 |
374 | for th in np.linspace(0.0, 2.0 * np.pi, 120):
375 | camorigin = np.array([radcircle * np.cos(th), radcircle * np.sin(th), zh])
376 | up = np.array([0, 0, -1.0])
377 |
378 | vec2 = normalize(camorigin)
379 | vec0 = normalize(np.cross(vec2, up))
380 | vec1 = normalize(np.cross(vec2, vec0))
381 | pos = camorigin
382 | p = np.stack([vec0, vec1, vec2, pos], 1)
383 |
384 | render_pose = np.eye(4)
385 | render_pose[:3] = p
386 | #render_pose[:3, 1:3] *= -1
387 | new_poses.append(render_pose)
388 |
389 | new_poses = np.stack(new_poses, 0)
390 | return new_poses
391 |
392 | # def gaussian_poses(viewpoint_cam, mean =0, std_dev = 0.03):
393 | # translate_x = np.random.normal(mean, std_dev)
394 | # translate_y = np.random.normal(mean, std_dev)
395 | # translate_z = np.random.normal(mean, std_dev)
396 | # translate = np.array([translate_x, translate_y, translate_z])
397 | # viewpoint_cam.world_view_transform = torch.tensor(getWorld2View2(viewpoint_cam.R, viewpoint_cam.T, translate)).transpose(0, 1).cuda()
398 | # viewpoint_cam.full_proj_transform = (viewpoint_cam.world_view_transform.unsqueeze(0).bmm(viewpoint_cam.projection_matrix.unsqueeze(0))).squeeze(0)
399 | # viewpoint_cam.camera_center = viewpoint_cam.world_view_transform.inverse()[3, :3]
400 | # return viewpoint_cam
401 |
402 | def get_rotation_matrix(axis, angle):
403 | """
404 | Create a rotation matrix for a given axis (x, y, or z) and angle.
405 | """
406 | axis = axis.lower()
407 | cos_angle = np.cos(angle)
408 | sin_angle = np.sin(angle)
409 |
410 | if axis == 'x':
411 | return np.array([
412 | [1, 0, 0],
413 | [0, cos_angle, -sin_angle],
414 | [0, sin_angle, cos_angle]
415 | ])
416 | elif axis == 'y':
417 | return np.array([
418 | [cos_angle, 0, sin_angle],
419 | [0, 1, 0],
420 | [-sin_angle, 0, cos_angle]
421 | ])
422 | elif axis == 'z':
423 | return np.array([
424 | [cos_angle, -sin_angle, 0],
425 | [sin_angle, cos_angle, 0],
426 | [0, 0, 1]
427 | ])
428 | else:
429 | raise ValueError("Invalid axis. Choose from 'x', 'y', 'z'.")
430 |
431 |
432 |
433 | def gaussian_poses(viewpoint_cam, mean=0, std_dev_translation=0.03, std_dev_rotation=0.01):
434 | # Translation Perturbation
435 | translate_x = np.random.normal(mean, std_dev_translation)
436 | translate_y = np.random.normal(mean, std_dev_translation)
437 | translate_z = np.random.normal(mean, std_dev_translation)
438 | translate = np.array([translate_x, translate_y, translate_z])
439 |
440 | # Rotation Perturbation
441 | angle_x = np.random.normal(mean, std_dev_rotation)
442 | angle_y = np.random.normal(mean, std_dev_rotation)
443 | angle_z = np.random.normal(mean, std_dev_rotation)
444 |
445 | rot_x = get_rotation_matrix('x', angle_x)
446 | rot_y = get_rotation_matrix('y', angle_y)
447 | rot_z = get_rotation_matrix('z', angle_z)
448 |
449 | # Combined Rotation Matrix
450 | combined_rot = np.matmul(rot_z, np.matmul(rot_y, rot_x))
451 |
452 | # Apply Rotation to Camera
453 | rotated_R = np.matmul(viewpoint_cam.R, combined_rot)
454 |
455 | # Update Camera Transformation
456 | viewpoint_cam.world_view_transform = torch.tensor(getWorld2View2(rotated_R, viewpoint_cam.T, translate)).transpose(0, 1).cuda()
457 | viewpoint_cam.full_proj_transform = (viewpoint_cam.world_view_transform.unsqueeze(0).bmm(viewpoint_cam.projection_matrix.unsqueeze(0))).squeeze(0)
458 | viewpoint_cam.camera_center = viewpoint_cam.world_view_transform.inverse()[3, :3]
459 |
460 | return viewpoint_cam
461 |
462 |
463 |
464 | def circular_poses(viewpoint_cam, radius, angle=0.0):
465 | translate_x = radius * np.cos(angle)
466 | translate_y = radius * np.sin(angle)
467 | translate_z = 0
468 | translate = np.array([translate_x, translate_y, translate_z])
469 | viewpoint_cam.world_view_transform = torch.tensor(getWorld2View2(viewpoint_cam.R, viewpoint_cam.T, translate)).transpose(0, 1).cuda()
470 | viewpoint_cam.full_proj_transform = (viewpoint_cam.world_view_transform.unsqueeze(0).bmm(viewpoint_cam.projection_matrix.unsqueeze(0))).squeeze(0)
471 | viewpoint_cam.camera_center = viewpoint_cam.world_view_transform.inverse()[3, :3]
472 |
473 | return viewpoint_cam
474 |
475 | def generate_spherical_sample_path(views, azimuthal_rots=1, polar_rots=0.75, N=10):
476 | poses = []
477 | for view in views:
478 | tmp_view = np.eye(4)
479 | tmp_view[:3] = np.concatenate([view.R.T, view.T[:, None]], 1)
480 | tmp_view = np.linalg.inv(tmp_view)
481 | tmp_view[:, 1:3] *= -1
482 | poses.append(tmp_view)
483 | focal = get_focal(view)
484 | poses = np.stack(poses, 0)
485 | # ic(min_focal, max_focal)
486 |
487 | c2w = poses_avg(poses)
488 | up = normalize(poses[:, :3, 1].sum(0))
489 | rads = np.percentile(np.abs(poses[:, :3, 3]), 90, 0)
490 | rads = np.array(list(rads) + [1.0])
491 | ic(rads)
492 | render_poses = []
493 | focal_range = np.linspace(0.5, 3, N **2+1)
494 | index = 0
495 | # Modify this loop to include phi
496 | for theta in np.linspace(0.0, 2.0 * np.pi * azimuthal_rots, N + 1)[:-1]:
497 | for phi in np.linspace(0.0, np.pi * polar_rots, N + 1)[:-1]:
498 | # Modify these lines to use spherical coordinates for c
499 | c = np.dot(
500 | c2w[:3, :4],
501 | rads * np.array([
502 | np.sin(phi) * np.cos(theta),
503 | np.sin(phi) * np.sin(theta),
504 | np.cos(phi),
505 | 1.0
506 | ])
507 | )
508 |
509 | z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal_range[index], 1.0])))
510 | render_pose = np.eye(4)
511 | render_pose[:3] = viewmatrix(z, up, c)
512 | render_pose[:3, 1:3] *= -1
513 | render_poses.append(np.linalg.inv(render_pose))
514 | index += 1
515 | return render_poses
516 |
517 |
518 | def generate_spiral_path(views, focal=1.5, zrate=0, rots=1, N=600):  # shadows the earlier generate_spiral_path defined above
519 | poses = []
520 | focal = 0
521 | for view in views:
522 | tmp_view = np.eye(4)
523 | tmp_view[:3] = np.concatenate([view.R.T, view.T[:, None]], 1)
524 | tmp_view = np.linalg.inv(tmp_view)
525 | tmp_view[:, 1:3] *= -1
526 | poses.append(tmp_view)
527 | focal += get_focal(views[0])
528 | poses = np.stack(poses, 0)
529 |
530 |
531 | c2w = poses_avg(poses)
532 | up = normalize(poses[:, :3, 1].sum(0))
533 |
534 | # Get radii for spiral path
535 | rads = np.percentile(np.abs(poses[:, :3, 3]), 90, 0)
536 | render_poses = []
537 |
538 | rads = np.array(list(rads) + [1.0])
539 | focal /= len(views)
540 |
541 | for theta in np.linspace(0.0, 2.0 * np.pi * rots, N + 1)[:-1]:
542 | c = np.dot(
543 | c2w[:3, :4],
544 | np.array([np.cos(theta), -np.sin(theta),-np.sin(theta * zrate), 1.0]) * rads,
545 | )
546 | z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.0])))
547 |
548 | render_pose = np.eye(4)
549 | render_pose[:3] = viewmatrix(z, up, c)
550 | render_pose[:3, 1:3] *= -1
551 | render_poses.append(np.linalg.inv(render_pose))
552 | return render_poses
--------------------------------------------------------------------------------
/utils/sh_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2021 The PlenOctree Authors.
2 | # Redistribution and use in source and binary forms, with or without
3 | # modification, are permitted provided that the following conditions are met:
4 | #
5 | # 1. Redistributions of source code must retain the above copyright notice,
6 | # this list of conditions and the following disclaimer.
7 | #
8 | # 2. Redistributions in binary form must reproduce the above copyright notice,
9 | # this list of conditions and the following disclaimer in the documentation
10 | # and/or other materials provided with the distribution.
11 | #
12 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
14 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15 | # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
16 | # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17 | # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18 | # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19 | # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20 | # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22 | # POSSIBILITY OF SUCH DAMAGE.
23 |
24 | import torch
25 |
26 | C0 = 0.28209479177387814
27 | C1 = 0.4886025119029199
28 | C2 = [
29 | 1.0925484305920792,
30 | -1.0925484305920792,
31 | 0.31539156525252005,
32 | -1.0925484305920792,
33 | 0.5462742152960396
34 | ]
35 | C3 = [
36 | -0.5900435899266435,
37 | 2.890611442640554,
38 | -0.4570457994644658,
39 | 0.3731763325901154,
40 | -0.4570457994644658,
41 | 1.445305721320277,
42 | -0.5900435899266435
43 | ]
44 | C4 = [
45 | 2.5033429417967046,
46 | -1.7701307697799304,
47 | 0.9461746957575601,
48 | -0.6690465435572892,
49 | 0.10578554691520431,
50 | -0.6690465435572892,
51 | 0.47308734787878004,
52 | -1.7701307697799304,
53 | 0.6258357354491761,
54 | ]
55 |
56 |
57 | def eval_sh(deg, sh, dirs):
58 | """
59 | Evaluate spherical harmonics at unit directions
60 | using hardcoded SH polynomials.
61 | Works with torch/np/jnp.
62 | ... Can be 0 or more batch dimensions.
63 | Args:
64 | deg: int SH deg. Currently, 0-4 supported
65 | sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2]
66 | dirs: jnp.ndarray unit directions [..., 3]
67 | Returns:
68 | [..., C]
69 | """
70 | assert deg <= 4 and deg >= 0
71 | coeff = (deg + 1) ** 2
72 | assert sh.shape[-1] >= coeff
73 |
74 | result = C0 * sh[..., 0]
75 | if deg > 0:
76 | x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]
77 | result = (result -
78 | C1 * y * sh[..., 1] +
79 | C1 * z * sh[..., 2] -
80 | C1 * x * sh[..., 3])
81 |
82 | if deg > 1:
83 | xx, yy, zz = x * x, y * y, z * z
84 | xy, yz, xz = x * y, y * z, x * z
85 | result = (result +
86 | C2[0] * xy * sh[..., 4] +
87 | C2[1] * yz * sh[..., 5] +
88 | C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] +
89 | C2[3] * xz * sh[..., 7] +
90 | C2[4] * (xx - yy) * sh[..., 8])
91 |
92 | if deg > 2:
93 | result = (result +
94 | C3[0] * y * (3 * xx - yy) * sh[..., 9] +
95 | C3[1] * xy * z * sh[..., 10] +
96 | C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] +
97 | C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] +
98 | C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] +
99 | C3[5] * z * (xx - yy) * sh[..., 14] +
100 | C3[6] * x * (xx - 3 * yy) * sh[..., 15])
101 |
102 | if deg > 3:
103 | result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] +
104 | C4[1] * yz * (3 * xx - yy) * sh[..., 17] +
105 | C4[2] * xy * (7 * zz - 1) * sh[..., 18] +
106 | C4[3] * yz * (7 * zz - 3) * sh[..., 19] +
107 | C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] +
108 | C4[5] * xz * (7 * zz - 3) * sh[..., 21] +
109 | C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] +
110 | C4[7] * xz * (xx - 3 * yy) * sh[..., 23] +
111 | C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24])
112 | return result
113 |
114 | def RGB2SH(rgb):
115 | return (rgb - 0.5) / C0
116 |
117 | def SH2RGB(sh):
118 | return sh * C0 + 0.5
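# Note (illustrative): RGB2SH and SH2RGB are exact inverses; with C0 ~= 0.2821, a
# mid-grey value of 0.5 maps to a zero DC coefficient, so the degree-0 band stores
# colour as an offset around 0.5.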
--------------------------------------------------------------------------------
/utils/system_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | from errno import EEXIST
13 | from os import makedirs, path
14 | import os
15 |
16 | def mkdir_p(folder_path):
17 | # Creates a directory. Equivalent to using mkdir -p on the command line.
18 | try:
19 | makedirs(folder_path)
20 | except OSError as exc: # Python >2.5
21 | if exc.errno == EEXIST and path.isdir(folder_path):
22 | pass
23 | else:
24 | raise
25 |
26 | def searchForMaxIteration(folder):
27 | saved_iters = [int(fname.split("_")[-1]) for fname in os.listdir(folder)]
28 | return max(saved_iters)
29 |
--------------------------------------------------------------------------------