├── assets ├── better.png ├── select.png ├── teaser.png ├── worse.png ├── logo_mpi.png ├── logo_uca.png ├── logo_inria.png ├── logo_graphdeco.png └── logo_mpi.svg ├── .gitignore ├── environment.yml ├── .gitmodules ├── utils ├── image_utils.py ├── system_utils.py ├── graphics_utils.py ├── loss_utils.py ├── camera_utils.py ├── general_utils.py └── sh_utils.py ├── lpipsPyTorch ├── __init__.py └── modules │ ├── utils.py │ ├── lpips.py │ └── networks.py ├── scene ├── cameras.py ├── __init__.py ├── dataset_readers.py ├── colmap_loader.py └── gaussian_model.py ├── render.py ├── gaussian_renderer ├── network_gui.py └── __init__.py ├── full_eval.py ├── arguments └── __init__.py ├── metrics.py ├── LICENSE.md ├── convert.py ├── train.py └── README.md /assets/better.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shumash/gaussian-splatting/HEAD/assets/better.png -------------------------------------------------------------------------------- /assets/select.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shumash/gaussian-splatting/HEAD/assets/select.png -------------------------------------------------------------------------------- /assets/teaser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shumash/gaussian-splatting/HEAD/assets/teaser.png -------------------------------------------------------------------------------- /assets/worse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shumash/gaussian-splatting/HEAD/assets/worse.png -------------------------------------------------------------------------------- /assets/logo_mpi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shumash/gaussian-splatting/HEAD/assets/logo_mpi.png -------------------------------------------------------------------------------- /assets/logo_uca.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shumash/gaussian-splatting/HEAD/assets/logo_uca.png -------------------------------------------------------------------------------- /assets/logo_inria.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shumash/gaussian-splatting/HEAD/assets/logo_inria.png -------------------------------------------------------------------------------- /assets/logo_graphdeco.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shumash/gaussian-splatting/HEAD/assets/logo_graphdeco.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | .vscode 3 | output 4 | build 5 | diff_rasterization/diff_rast.egg-info 6 | diff_rasterization/dist 7 | tensorboard_3d 8 | screenshots -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: gaussian_splatting 2 | channels: 3 | - pytorch 4 | - conda-forge 5 | - defaults 6 | dependencies: 7 | - cudatoolkit=11.6 8 | - plyfile 9 | - python=3.7.13 10 | - pip=22.3.1 11 | - pytorch=1.12.1 12 | - 
torchaudio=0.12.1 13 | - torchvision=0.13.1 14 | - tqdm 15 | - pip: 16 | - submodules/diff-gaussian-rasterization 17 | - submodules/simple-knn 18 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "submodules/simple-knn"] 2 | path = submodules/simple-knn 3 | url = https://gitlab.inria.fr/bkerbl/simple-knn.git 4 | [submodule "submodules/diff-gaussian-rasterization"] 5 | path = submodules/diff-gaussian-rasterization 6 | url = https://github.com/graphdeco-inria/diff-gaussian-rasterization 7 | [submodule "SIBR_viewers"] 8 | path = SIBR_viewers 9 | url = https://gitlab.inria.fr/sibr/sibr_core.git 10 | -------------------------------------------------------------------------------- /utils/image_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import torch 13 | 14 | def mse(img1, img2): 15 | return (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True) 16 | 17 | def psnr(img1, img2): 18 | mse = (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True) 19 | return 20 * torch.log10(1.0 / torch.sqrt(mse)) 20 | -------------------------------------------------------------------------------- /lpipsPyTorch/__init__.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .modules.lpips import LPIPS 4 | 5 | 6 | def lpips(x: torch.Tensor, 7 | y: torch.Tensor, 8 | net_type: str = 'alex', 9 | version: str = '0.1'): 10 | r"""Function that measures 11 | Learned Perceptual Image Patch Similarity (LPIPS). 12 | 13 | Arguments: 14 | x, y (torch.Tensor): the input tensors to compare. 15 | net_type (str): the network type to compare the features: 16 | 'alex' | 'squeeze' | 'vgg'. Default: 'alex'. 17 | version (str): the version of LPIPS. Default: 0.1. 18 | """ 19 | device = x.device 20 | criterion = LPIPS(net_type, version).to(device) 21 | return criterion(x, y) 22 | -------------------------------------------------------------------------------- /utils/system_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | from errno import EEXIST 13 | from os import makedirs, path 14 | import os 15 | 16 | def mkdir_p(folder_path): 17 | # Creates a directory. 
equivalent to using mkdir -p on the command line 18 | try: 19 | makedirs(folder_path) 20 | except OSError as exc: # Python >2.5 21 | if exc.errno == EEXIST and path.isdir(folder_path): 22 | pass 23 | else: 24 | raise 25 | 26 | def searchForMaxIteration(folder): 27 | saved_iters = [int(fname.split("_")[-1]) for fname in os.listdir(folder)] 28 | return max(saved_iters) 29 | -------------------------------------------------------------------------------- /lpipsPyTorch/modules/utils.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | import torch 4 | 5 | 6 | def normalize_activation(x, eps=1e-10): 7 | norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True)) 8 | return x / (norm_factor + eps) 9 | 10 | 11 | def get_state_dict(net_type: str = 'alex', version: str = '0.1'): 12 | # build url 13 | url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \ 14 | + f'master/lpips/weights/v{version}/{net_type}.pth' 15 | 16 | # download 17 | old_state_dict = torch.hub.load_state_dict_from_url( 18 | url, progress=True, 19 | map_location=None if torch.cuda.is_available() else torch.device('cpu') 20 | ) 21 | 22 | # rename keys 23 | new_state_dict = OrderedDict() 24 | for key, val in old_state_dict.items(): 25 | new_key = key 26 | new_key = new_key.replace('lin', '') 27 | new_key = new_key.replace('model.', '') 28 | new_state_dict[new_key] = val 29 | 30 | return new_state_dict 31 | -------------------------------------------------------------------------------- /lpipsPyTorch/modules/lpips.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from .networks import get_network, LinLayers 5 | from .utils import get_state_dict 6 | 7 | 8 | class LPIPS(nn.Module): 9 | r"""Creates a criterion that measures 10 | Learned Perceptual Image Patch Similarity (LPIPS). 11 | 12 | Arguments: 13 | net_type (str): the network type to compare the features: 14 | 'alex' | 'squeeze' | 'vgg'. Default: 'alex'. 15 | version (str): the version of LPIPS. Default: 0.1. 16 | """ 17 | def __init__(self, net_type: str = 'alex', version: str = '0.1'): 18 | 19 | assert version in ['0.1'], 'v0.1 is only supported now' 20 | 21 | super(LPIPS, self).__init__() 22 | 23 | # pretrained network 24 | self.net = get_network(net_type) 25 | 26 | # linear layers 27 | self.lin = LinLayers(self.net.n_channels_list) 28 | self.lin.load_state_dict(get_state_dict(net_type, version)) 29 | 30 | def forward(self, x: torch.Tensor, y: torch.Tensor): 31 | feat_x, feat_y = self.net(x), self.net(y) 32 | 33 | diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)] 34 | res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)] 35 | 36 | return torch.sum(torch.cat(res, 0), 0, True) 37 | -------------------------------------------------------------------------------- /utils/graphics_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 
8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import torch 13 | import math 14 | import numpy as np 15 | from typing import NamedTuple 16 | 17 | class BasicPointCloud(NamedTuple): 18 | points : np.array 19 | colors : np.array 20 | normals : np.array 21 | 22 | def geom_transform_points(points, transf_matrix): 23 | P, _ = points.shape 24 | ones = torch.ones(P, 1, dtype=points.dtype, device=points.device) 25 | points_hom = torch.cat([points, ones], dim=1) 26 | points_out = torch.matmul(points_hom, transf_matrix.unsqueeze(0)) 27 | 28 | denom = points_out[..., 3:] + 0.0000001 29 | return (points_out[..., :3] / denom).squeeze(dim=0) 30 | 31 | def getWorld2View(R, t): 32 | Rt = np.zeros((4, 4)) 33 | Rt[:3, :3] = R.transpose() 34 | Rt[:3, 3] = t 35 | Rt[3, 3] = 1.0 36 | return np.float32(Rt) 37 | 38 | def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0): 39 | Rt = np.zeros((4, 4)) 40 | Rt[:3, :3] = R.transpose() 41 | Rt[:3, 3] = t 42 | Rt[3, 3] = 1.0 43 | 44 | C2W = np.linalg.inv(Rt) 45 | cam_center = C2W[:3, 3] 46 | cam_center = (cam_center + translate) * scale 47 | C2W[:3, 3] = cam_center 48 | Rt = np.linalg.inv(C2W) 49 | return np.float32(Rt) 50 | 51 | def getProjectionMatrix(znear, zfar, fovX, fovY): 52 | tanHalfFovY = math.tan((fovY / 2)) 53 | tanHalfFovX = math.tan((fovX / 2)) 54 | 55 | top = tanHalfFovY * znear 56 | bottom = -top 57 | right = tanHalfFovX * znear 58 | left = -right 59 | 60 | P = torch.zeros(4, 4) 61 | 62 | z_sign = 1.0 63 | 64 | P[0, 0] = 2.0 * znear / (right - left) 65 | P[1, 1] = 2.0 * znear / (top - bottom) 66 | P[0, 2] = (right + left) / (right - left) 67 | P[1, 2] = (top + bottom) / (top - bottom) 68 | P[3, 2] = z_sign 69 | P[2, 2] = z_sign * zfar / (zfar - znear) 70 | P[2, 3] = -(zfar * znear) / (zfar - znear) 71 | return P 72 | 73 | def fov2focal(fov, pixels): 74 | return pixels / (2 * math.tan(fov / 2)) 75 | 76 | def focal2fov(focal, pixels): 77 | return 2*math.atan(pixels/(2*focal)) -------------------------------------------------------------------------------- /utils/loss_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 
8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import torch 13 | import torch.nn.functional as F 14 | from torch.autograd import Variable 15 | from math import exp 16 | 17 | def l1_loss(network_output, gt): 18 | return torch.abs((network_output - gt)).mean() 19 | 20 | def l2_loss(network_output, gt): 21 | return ((network_output - gt) ** 2).mean() 22 | 23 | def gaussian(window_size, sigma): 24 | gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)]) 25 | return gauss / gauss.sum() 26 | 27 | def create_window(window_size, channel): 28 | _1D_window = gaussian(window_size, 1.5).unsqueeze(1) 29 | _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) 30 | window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous()) 31 | return window 32 | 33 | def ssim(img1, img2, window_size=11, size_average=True): 34 | channel = img1.size(-3) 35 | window = create_window(window_size, channel) 36 | 37 | if img1.is_cuda: 38 | window = window.cuda(img1.get_device()) 39 | window = window.type_as(img1) 40 | 41 | return _ssim(img1, img2, window, window_size, channel, size_average) 42 | 43 | def _ssim(img1, img2, window, window_size, channel, size_average=True): 44 | mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel) 45 | mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel) 46 | 47 | mu1_sq = mu1.pow(2) 48 | mu2_sq = mu2.pow(2) 49 | mu1_mu2 = mu1 * mu2 50 | 51 | sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq 52 | sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq 53 | sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2 54 | 55 | C1 = 0.01 ** 2 56 | C2 = 0.03 ** 2 57 | 58 | ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) 59 | 60 | if size_average: 61 | return ssim_map.mean() 62 | else: 63 | return ssim_map.mean(1).mean(1).mean(1) 64 | 65 | -------------------------------------------------------------------------------- /scene/cameras.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 
8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import torch 13 | from torch import nn 14 | import numpy as np 15 | from utils.graphics_utils import getWorld2View2, getProjectionMatrix 16 | 17 | class Camera(nn.Module): 18 | def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask, 19 | image_name, uid, 20 | trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda" 21 | ): 22 | super(Camera, self).__init__() 23 | 24 | self.uid = uid 25 | self.colmap_id = colmap_id 26 | self.R = R 27 | self.T = T 28 | self.FoVx = FoVx 29 | self.FoVy = FoVy 30 | self.image_name = image_name 31 | 32 | try: 33 | self.data_device = torch.device(data_device) 34 | except Exception as e: 35 | print(e) 36 | print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" ) 37 | self.data_device = torch.device("cuda") 38 | 39 | self.original_image = image.clamp(0.0, 1.0).to(self.data_device) 40 | self.image_width = self.original_image.shape[2] 41 | self.image_height = self.original_image.shape[1] 42 | 43 | if gt_alpha_mask is not None: 44 | self.original_image *= gt_alpha_mask.to(self.data_device) 45 | else: 46 | self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device) 47 | 48 | self.zfar = 100.0 49 | self.znear = 0.01 50 | 51 | self.trans = trans 52 | self.scale = scale 53 | 54 | self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda() 55 | self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda() 56 | self.full_proj_transform = (self.world_view_transform.unsqueeze(0).bmm(self.projection_matrix.unsqueeze(0))).squeeze(0) 57 | self.camera_center = self.world_view_transform.inverse()[3, :3] 58 | 59 | class MiniCam: 60 | def __init__(self, width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform): 61 | self.image_width = width 62 | self.image_height = height 63 | self.FoVy = fovy 64 | self.FoVx = fovx 65 | self.znear = znear 66 | self.zfar = zfar 67 | self.world_view_transform = world_view_transform 68 | self.full_proj_transform = full_proj_transform 69 | view_inv = torch.inverse(self.world_view_transform) 70 | self.camera_center = view_inv[3][:3] 71 | 72 | -------------------------------------------------------------------------------- /render.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 
8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import torch 13 | from scene import Scene 14 | import os 15 | from tqdm import tqdm 16 | from os import makedirs 17 | from gaussian_renderer import render 18 | import torchvision 19 | from utils.general_utils import safe_state 20 | from argparse import ArgumentParser 21 | from arguments import ModelParams, PipelineParams, get_combined_args 22 | from gaussian_renderer import GaussianModel 23 | 24 | def render_set(model_path, name, iteration, views, gaussians, pipeline, background): 25 | render_path = os.path.join(model_path, name, "ours_{}".format(iteration), "renders") 26 | gts_path = os.path.join(model_path, name, "ours_{}".format(iteration), "gt") 27 | 28 | makedirs(render_path, exist_ok=True) 29 | makedirs(gts_path, exist_ok=True) 30 | 31 | for idx, view in enumerate(tqdm(views, desc="Rendering progress")): 32 | rendering = render(view, gaussians, pipeline, background)["render"] 33 | gt = view.original_image[0:3, :, :] 34 | torchvision.utils.save_image(rendering, os.path.join(render_path, '{0:05d}'.format(idx) + ".png")) 35 | torchvision.utils.save_image(gt, os.path.join(gts_path, '{0:05d}'.format(idx) + ".png")) 36 | 37 | def render_sets(dataset : ModelParams, iteration : int, pipeline : PipelineParams, skip_train : bool, skip_test : bool): 38 | with torch.no_grad(): 39 | gaussians = GaussianModel(dataset.sh_degree) 40 | scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False) 41 | 42 | bg_color = [1,1,1] if dataset.white_background else [0, 0, 0] 43 | background = torch.tensor(bg_color, dtype=torch.float32, device="cuda") 44 | 45 | if not skip_train: 46 | render_set(dataset.model_path, "train", scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background) 47 | 48 | if not skip_test: 49 | render_set(dataset.model_path, "test", scene.loaded_iter, scene.getTestCameras(), gaussians, pipeline, background) 50 | 51 | if __name__ == "__main__": 52 | # Set up command line argument parser 53 | parser = ArgumentParser(description="Testing script parameters") 54 | model = ModelParams(parser, sentinel=True) 55 | pipeline = PipelineParams(parser) 56 | parser.add_argument("--iteration", default=-1, type=int) 57 | parser.add_argument("--skip_train", action="store_true") 58 | parser.add_argument("--skip_test", action="store_true") 59 | parser.add_argument("--quiet", action="store_true") 60 | args = get_combined_args(parser) 61 | print("Rendering " + args.model_path) 62 | 63 | # Initialize system state (RNG) 64 | safe_state(args.quiet) 65 | 66 | render_sets(model.extract(args), args.iteration, pipeline.extract(args), args.skip_train, args.skip_test) -------------------------------------------------------------------------------- /gaussian_renderer/network_gui.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 
8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import torch 13 | import traceback 14 | import socket 15 | import json 16 | from scene.cameras import MiniCam 17 | 18 | host = "127.0.0.1" 19 | port = 6009 20 | 21 | conn = None 22 | addr = None 23 | 24 | listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 25 | 26 | def init(wish_host, wish_port): 27 | global host, port, listener 28 | host = wish_host 29 | port = wish_port 30 | listener.bind((host, port)) 31 | listener.listen() 32 | listener.settimeout(0) 33 | 34 | def try_connect(): 35 | global conn, addr, listener 36 | try: 37 | conn, addr = listener.accept() 38 | print(f"\nConnected by {addr}") 39 | conn.settimeout(None) 40 | except Exception as inst: 41 | pass 42 | 43 | def read(): 44 | global conn 45 | messageLength = conn.recv(4) 46 | messageLength = int.from_bytes(messageLength, 'little') 47 | message = conn.recv(messageLength) 48 | return json.loads(message.decode("utf-8")) 49 | 50 | def send(message_bytes, verify): 51 | global conn 52 | if message_bytes != None: 53 | conn.sendall(message_bytes) 54 | conn.sendall(len(verify).to_bytes(4, 'little')) 55 | conn.sendall(bytes(verify, 'ascii')) 56 | 57 | def receive(): 58 | message = read() 59 | 60 | width = message["resolution_x"] 61 | height = message["resolution_y"] 62 | 63 | if width != 0 and height != 0: 64 | try: 65 | do_training = bool(message["train"]) 66 | fovy = message["fov_y"] 67 | fovx = message["fov_x"] 68 | znear = message["z_near"] 69 | zfar = message["z_far"] 70 | do_shs_python = bool(message["shs_python"]) 71 | do_rot_scale_python = bool(message["rot_scale_python"]) 72 | keep_alive = bool(message["keep_alive"]) 73 | scaling_modifier = message["scaling_modifier"] 74 | world_view_transform = torch.reshape(torch.tensor(message["view_matrix"]), (4, 4)).cuda() 75 | world_view_transform[:,1] = -world_view_transform[:,1] 76 | world_view_transform[:,2] = -world_view_transform[:,2] 77 | full_proj_transform = torch.reshape(torch.tensor(message["view_projection_matrix"]), (4, 4)).cuda() 78 | full_proj_transform[:,1] = -full_proj_transform[:,1] 79 | custom_cam = MiniCam(width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform) 80 | except Exception as e: 81 | print("") 82 | traceback.print_exc() 83 | raise e 84 | return custom_cam, do_training, do_shs_python, do_rot_scale_python, keep_alive, scaling_modifier 85 | else: 86 | return None, None, None, None, None, None -------------------------------------------------------------------------------- /utils/camera_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 
8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | from scene.cameras import Camera 13 | import numpy as np 14 | from utils.general_utils import PILtoTorch 15 | from utils.graphics_utils import fov2focal 16 | 17 | WARNED = False 18 | 19 | def loadCam(args, id, cam_info, resolution_scale): 20 | orig_w, orig_h = cam_info.image.size 21 | 22 | if args.resolution in [1, 2, 4, 8]: 23 | resolution = round(orig_w/(resolution_scale * args.resolution)), round(orig_h/(resolution_scale * args.resolution)) 24 | else: # should be a type that converts to float 25 | if args.resolution == -1: 26 | if orig_w > 1600: 27 | global WARNED 28 | if not WARNED: 29 | print("[ INFO ] Encountered quite large input images (>1.6K pixels width), rescaling to 1.6K.\n " 30 | "If this is not desired, please explicitly specify '--resolution/-r' as 1") 31 | WARNED = True 32 | global_down = orig_w / 1600 33 | else: 34 | global_down = 1 35 | else: 36 | global_down = orig_w / args.resolution 37 | 38 | scale = float(global_down) * float(resolution_scale) 39 | resolution = (int(orig_w / scale), int(orig_h / scale)) 40 | 41 | resized_image_rgb = PILtoTorch(cam_info.image, resolution) 42 | 43 | gt_image = resized_image_rgb[:3, ...] 44 | loaded_mask = None 45 | 46 | if resized_image_rgb.shape[1] == 4: 47 | loaded_mask = resized_image_rgb[3:4, ...] 48 | 49 | return Camera(colmap_id=cam_info.uid, R=cam_info.R, T=cam_info.T, 50 | FoVx=cam_info.FovX, FoVy=cam_info.FovY, 51 | image=gt_image, gt_alpha_mask=loaded_mask, 52 | image_name=cam_info.image_name, uid=id, data_device=args.data_device) 53 | 54 | def cameraList_from_camInfos(cam_infos, resolution_scale, args): 55 | camera_list = [] 56 | 57 | for id, c in enumerate(cam_infos): 58 | camera_list.append(loadCam(args, id, c, resolution_scale)) 59 | 60 | return camera_list 61 | 62 | def camera_to_JSON(id, camera : Camera): 63 | Rt = np.zeros((4, 4)) 64 | Rt[:3, :3] = camera.R.transpose() 65 | Rt[:3, 3] = camera.T 66 | Rt[3, 3] = 1.0 67 | 68 | W2C = np.linalg.inv(Rt) 69 | pos = W2C[:3, 3] 70 | rot = W2C[:3, :3] 71 | serializable_array_2d = [x.tolist() for x in rot] 72 | camera_entry = { 73 | 'id' : id, 74 | 'img_name' : camera.image_name, 75 | 'width' : camera.width, 76 | 'height' : camera.height, 77 | 'position': pos.tolist(), 78 | 'rotation': serializable_array_2d, 79 | 'fy' : fov2focal(camera.FovY, camera.height), 80 | 'fx' : fov2focal(camera.FovX, camera.width) 81 | } 82 | return camera_entry 83 | -------------------------------------------------------------------------------- /lpipsPyTorch/modules/networks.py: -------------------------------------------------------------------------------- 1 | from typing import Sequence 2 | 3 | from itertools import chain 4 | 5 | import torch 6 | import torch.nn as nn 7 | from torchvision import models 8 | 9 | from .utils import normalize_activation 10 | 11 | 12 | def get_network(net_type: str): 13 | if net_type == 'alex': 14 | return AlexNet() 15 | elif net_type == 'squeeze': 16 | return SqueezeNet() 17 | elif net_type == 'vgg': 18 | return VGG16() 19 | else: 20 | raise NotImplementedError('choose net_type from [alex, squeeze, vgg].') 21 | 22 | 23 | class LinLayers(nn.ModuleList): 24 | def __init__(self, n_channels_list: Sequence[int]): 25 | super(LinLayers, self).__init__([ 26 | nn.Sequential( 27 | nn.Identity(), 28 | nn.Conv2d(nc, 1, 1, 1, 0, bias=False) 29 | ) for nc in n_channels_list 30 | ]) 31 | 32 | for param in self.parameters(): 33 | param.requires_grad = False 34 | 35 | 36 | class BaseNet(nn.Module): 37 | def 
__init__(self): 38 | super(BaseNet, self).__init__() 39 | 40 | # register buffer 41 | self.register_buffer( 42 | 'mean', torch.Tensor([-.030, -.088, -.188])[None, :, None, None]) 43 | self.register_buffer( 44 | 'std', torch.Tensor([.458, .448, .450])[None, :, None, None]) 45 | 46 | def set_requires_grad(self, state: bool): 47 | for param in chain(self.parameters(), self.buffers()): 48 | param.requires_grad = state 49 | 50 | def z_score(self, x: torch.Tensor): 51 | return (x - self.mean) / self.std 52 | 53 | def forward(self, x: torch.Tensor): 54 | x = self.z_score(x) 55 | 56 | output = [] 57 | for i, (_, layer) in enumerate(self.layers._modules.items(), 1): 58 | x = layer(x) 59 | if i in self.target_layers: 60 | output.append(normalize_activation(x)) 61 | if len(output) == len(self.target_layers): 62 | break 63 | return output 64 | 65 | 66 | class SqueezeNet(BaseNet): 67 | def __init__(self): 68 | super(SqueezeNet, self).__init__() 69 | 70 | self.layers = models.squeezenet1_1(True).features 71 | self.target_layers = [2, 5, 8, 10, 11, 12, 13] 72 | self.n_channels_list = [64, 128, 256, 384, 384, 512, 512] 73 | 74 | self.set_requires_grad(False) 75 | 76 | 77 | class AlexNet(BaseNet): 78 | def __init__(self): 79 | super(AlexNet, self).__init__() 80 | 81 | self.layers = models.alexnet(True).features 82 | self.target_layers = [2, 5, 8, 10, 12] 83 | self.n_channels_list = [64, 192, 384, 256, 256] 84 | 85 | self.set_requires_grad(False) 86 | 87 | 88 | class VGG16(BaseNet): 89 | def __init__(self): 90 | super(VGG16, self).__init__() 91 | 92 | self.layers = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1).features 93 | self.target_layers = [4, 9, 16, 23, 30] 94 | self.n_channels_list = [64, 128, 256, 512, 512] 95 | 96 | self.set_requires_grad(False) 97 | -------------------------------------------------------------------------------- /full_eval.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 
8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import os 13 | from argparse import ArgumentParser 14 | 15 | mipnerf360_outdoor_scenes = ["bicycle", "flowers", "garden", "stump", "treehill"] 16 | mipnerf360_indoor_scenes = ["room", "counter", "kitchen", "bonsai"] 17 | tanks_and_temples_scenes = ["truck", "train"] 18 | deep_blending_scenes = ["drjohnson", "playroom"] 19 | 20 | parser = ArgumentParser(description="Full evaluation script parameters") 21 | parser.add_argument("--skip_training", action="store_true") 22 | parser.add_argument("--skip_rendering", action="store_true") 23 | parser.add_argument("--skip_metrics", action="store_true") 24 | parser.add_argument("--output_path", default="./eval") 25 | args, _ = parser.parse_known_args() 26 | 27 | all_scenes = [] 28 | all_scenes.extend(mipnerf360_outdoor_scenes) 29 | all_scenes.extend(mipnerf360_indoor_scenes) 30 | all_scenes.extend(tanks_and_temples_scenes) 31 | all_scenes.extend(deep_blending_scenes) 32 | 33 | if not args.skip_training or not args.skip_rendering: 34 | parser.add_argument('--mipnerf360', "-m360", required=True, type=str) 35 | parser.add_argument("--tanksandtemples", "-tat", required=True, type=str) 36 | parser.add_argument("--deepblending", "-db", required=True, type=str) 37 | args = parser.parse_args() 38 | 39 | if not args.skip_training: 40 | common_args = " --quiet --eval --test_iterations -1 " 41 | for scene in mipnerf360_outdoor_scenes: 42 | source = args.mipnerf360 + "/" + scene 43 | os.system("python train.py -s " + source + " -i images_4 -m " + args.output_path + "/" + scene + common_args) 44 | for scene in mipnerf360_indoor_scenes: 45 | source = args.mipnerf360 + "/" + scene 46 | os.system("python train.py -s " + source + " -i images_2 -m " + args.output_path + "/" + scene + common_args) 47 | for scene in tanks_and_temples_scenes: 48 | source = args.tanksandtemples + "/" + scene 49 | os.system("python train.py -s " + source + " -m " + args.output_path + "/" + scene + common_args) 50 | for scene in deep_blending_scenes: 51 | source = args.deepblending + "/" + scene 52 | os.system("python train.py -s " + source + " -m " + args.output_path + "/" + scene + common_args) 53 | 54 | if not args.skip_rendering: 55 | all_sources = [] 56 | for scene in mipnerf360_outdoor_scenes: 57 | all_sources.append(args.mipnerf360 + "/" + scene) 58 | for scene in mipnerf360_indoor_scenes: 59 | all_sources.append(args.mipnerf360 + "/" + scene) 60 | for scene in tanks_and_temples_scenes: 61 | all_sources.append(args.tanksandtemples + "/" + scene) 62 | for scene in deep_blending_scenes: 63 | all_sources.append(args.deepblending + "/" + scene) 64 | 65 | common_args = " --quiet --eval --skip_train" 66 | for scene, source in zip(all_scenes, all_sources): 67 | os.system("python render.py --iteration 7000 -s " + source + " -m " + args.output_path + "/" + scene + common_args) 68 | os.system("python render.py --iteration 30000 -s " + source + " -m " + args.output_path + "/" + scene + common_args) 69 | 70 | if not args.skip_metrics: 71 | scenes_string = "" 72 | for scene in all_scenes: 73 | scenes_string += "\"" + args.output_path + "/" + scene + "\" " 74 | 75 | os.system("python metrics.py -m " + scenes_string) -------------------------------------------------------------------------------- /gaussian_renderer/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights 
reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import torch 13 | import math 14 | from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer 15 | from scene.gaussian_model import GaussianModel 16 | from utils.sh_utils import eval_sh 17 | 18 | def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None): 19 | """ 20 | Render the scene. 21 | 22 | Background tensor (bg_color) must be on GPU! 23 | """ 24 | 25 | # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means 26 | screenspace_points = torch.zeros_like(pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda") + 0 27 | try: 28 | screenspace_points.retain_grad() 29 | except: 30 | pass 31 | 32 | # Set up rasterization configuration 33 | tanfovx = math.tan(viewpoint_camera.FoVx * 0.5) 34 | tanfovy = math.tan(viewpoint_camera.FoVy * 0.5) 35 | 36 | raster_settings = GaussianRasterizationSettings( 37 | image_height=int(viewpoint_camera.image_height), 38 | image_width=int(viewpoint_camera.image_width), 39 | tanfovx=tanfovx, 40 | tanfovy=tanfovy, 41 | bg=bg_color, 42 | scale_modifier=scaling_modifier, 43 | viewmatrix=viewpoint_camera.world_view_transform, 44 | projmatrix=viewpoint_camera.full_proj_transform, 45 | sh_degree=pc.active_sh_degree, 46 | campos=viewpoint_camera.camera_center, 47 | prefiltered=False, 48 | debug=pipe.debug 49 | ) 50 | 51 | rasterizer = GaussianRasterizer(raster_settings=raster_settings) 52 | 53 | means3D = pc.get_xyz 54 | means2D = screenspace_points 55 | opacity = pc.get_opacity 56 | 57 | # If precomputed 3d covariance is provided, use it. If not, then it will be computed from 58 | # scaling / rotation by the rasterizer. 59 | scales = None 60 | rotations = None 61 | cov3D_precomp = None 62 | if pipe.compute_cov3D_python: 63 | cov3D_precomp = pc.get_covariance(scaling_modifier) 64 | else: 65 | scales = pc.get_scaling 66 | rotations = pc.get_rotation 67 | 68 | # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors 69 | # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer. 70 | shs = None 71 | colors_precomp = None 72 | if override_color is None: 73 | if pipe.convert_SHs_python: 74 | shs_view = pc.get_features.transpose(1, 2).view(-1, 3, (pc.max_sh_degree+1)**2) 75 | dir_pp = (pc.get_xyz - viewpoint_camera.camera_center.repeat(pc.get_features.shape[0], 1)) 76 | dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True) 77 | sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized) 78 | colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0) 79 | else: 80 | shs = pc.get_features 81 | else: 82 | colors_precomp = override_color 83 | 84 | # Rasterize visible Gaussians to image, obtain their radii (on screen). 85 | rendered_image, radii = rasterizer( 86 | means3D = means3D, 87 | means2D = means2D, 88 | shs = shs, 89 | colors_precomp = colors_precomp, 90 | opacities = opacity, 91 | scales = scales, 92 | rotations = rotations, 93 | cov3D_precomp = cov3D_precomp) 94 | 95 | # Those Gaussians that were frustum culled or had a radius of 0 were not visible. 96 | # They will be excluded from value updates used in the splitting criteria. 
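    # The training loop in train.py reads "viewspace_points", "visibility_filter" and "radii" from this
    # dictionary to accumulate the per-Gaussian densification statistics between densification steps.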
97 | return {"render": rendered_image, 98 | "viewspace_points": screenspace_points, 99 | "visibility_filter" : radii > 0, 100 | "radii": radii} 101 | -------------------------------------------------------------------------------- /arguments/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | from argparse import ArgumentParser, Namespace 13 | import sys 14 | import os 15 | 16 | class GroupParams: 17 | pass 18 | 19 | class ParamGroup: 20 | def __init__(self, parser: ArgumentParser, name : str, fill_none = False): 21 | group = parser.add_argument_group(name) 22 | for key, value in vars(self).items(): 23 | shorthand = False 24 | if key.startswith("_"): 25 | shorthand = True 26 | key = key[1:] 27 | t = type(value) 28 | value = value if not fill_none else None 29 | if shorthand: 30 | if t == bool: 31 | group.add_argument("--" + key, ("-" + key[0:1]), default=value, action="store_true") 32 | else: 33 | group.add_argument("--" + key, ("-" + key[0:1]), default=value, type=t) 34 | else: 35 | if t == bool: 36 | group.add_argument("--" + key, default=value, action="store_true") 37 | else: 38 | group.add_argument("--" + key, default=value, type=t) 39 | 40 | def extract(self, args): 41 | group = GroupParams() 42 | for arg in vars(args).items(): 43 | if arg[0] in vars(self) or ("_" + arg[0]) in vars(self): 44 | setattr(group, arg[0], arg[1]) 45 | return group 46 | 47 | class ModelParams(ParamGroup): 48 | def __init__(self, parser, sentinel=False): 49 | self.sh_degree = 3 50 | self._source_path = "" 51 | self._model_path = "" 52 | self._images = "images" 53 | self._resolution = -1 54 | self._white_background = False 55 | self.data_device = "cuda" 56 | self.eval = False 57 | super().__init__(parser, "Loading Parameters", sentinel) 58 | 59 | def extract(self, args): 60 | g = super().extract(args) 61 | g.source_path = os.path.abspath(g.source_path) 62 | return g 63 | 64 | class PipelineParams(ParamGroup): 65 | def __init__(self, parser): 66 | self.convert_SHs_python = False 67 | self.compute_cov3D_python = False 68 | self.debug = False 69 | super().__init__(parser, "Pipeline Parameters") 70 | 71 | class OptimizationParams(ParamGroup): 72 | def __init__(self, parser): 73 | self.iterations = 30_000 74 | self.position_lr_init = 0.00016 75 | self.position_lr_final = 0.0000016 76 | self.position_lr_delay_mult = 0.01 77 | self.position_lr_max_steps = 30_000 78 | self.feature_lr = 0.0025 79 | self.opacity_lr = 0.05 80 | self.scaling_lr = 0.005 81 | self.rotation_lr = 0.001 82 | self.percent_dense = 0.01 83 | self.lambda_dssim = 0.2 84 | self.densification_interval = 100 85 | self.opacity_reset_interval = 3000 86 | self.densify_from_iter = 500 87 | self.densify_until_iter = 15_000 88 | self.densify_grad_threshold = 0.0002 89 | self.random_background = False 90 | super().__init__(parser, "Optimization Parameters") 91 | 92 | def get_combined_args(parser : ArgumentParser): 93 | cmdlne_string = sys.argv[1:] 94 | cfgfile_string = "Namespace()" 95 | args_cmdline = parser.parse_args(cmdlne_string) 96 | 97 | try: 98 | cfgfilepath = os.path.join(args_cmdline.model_path, "cfg_args") 99 | print("Looking for config file in", cfgfilepath) 100 | with 
open(cfgfilepath) as cfg_file: 101 | print("Config file found: {}".format(cfgfilepath)) 102 | cfgfile_string = cfg_file.read() 103 | except TypeError: 104 | print("Config file not found at") 105 | pass 106 | args_cfgfile = eval(cfgfile_string) 107 | 108 | merged_dict = vars(args_cfgfile).copy() 109 | for k,v in vars(args_cmdline).items(): 110 | if v != None: 111 | merged_dict[k] = v 112 | return Namespace(**merged_dict) 113 | -------------------------------------------------------------------------------- /scene/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import os 13 | import random 14 | import json 15 | from utils.system_utils import searchForMaxIteration 16 | from scene.dataset_readers import sceneLoadTypeCallbacks 17 | from scene.gaussian_model import GaussianModel 18 | from arguments import ModelParams 19 | from utils.camera_utils import cameraList_from_camInfos, camera_to_JSON 20 | 21 | class Scene: 22 | 23 | gaussians : GaussianModel 24 | 25 | def __init__(self, args : ModelParams, gaussians : GaussianModel, load_iteration=None, shuffle=True, resolution_scales=[1.0]): 26 | """b 27 | :param path: Path to colmap scene main folder. 28 | """ 29 | self.model_path = args.model_path 30 | self.loaded_iter = None 31 | self.gaussians = gaussians 32 | 33 | if load_iteration: 34 | if load_iteration == -1: 35 | self.loaded_iter = searchForMaxIteration(os.path.join(self.model_path, "point_cloud")) 36 | else: 37 | self.loaded_iter = load_iteration 38 | print("Loading trained model at iteration {}".format(self.loaded_iter)) 39 | 40 | self.train_cameras = {} 41 | self.test_cameras = {} 42 | 43 | if os.path.exists(os.path.join(args.source_path, "sparse")): 44 | scene_info = sceneLoadTypeCallbacks["Colmap"](args.source_path, args.images, args.eval) 45 | elif os.path.exists(os.path.join(args.source_path, "transforms_train.json")): 46 | print("Found transforms_train.json file, assuming Blender data set!") 47 | scene_info = sceneLoadTypeCallbacks["Blender"](args.source_path, args.white_background, args.eval) 48 | else: 49 | assert False, "Could not recognize scene type!" 
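        # Fresh run (no checkpoint iteration was loaded): keep a copy of the input point cloud as input.ply
        # and write all train/test camera poses to cameras.json in the model directory.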
50 | 51 | if not self.loaded_iter: 52 | with open(scene_info.ply_path, 'rb') as src_file, open(os.path.join(self.model_path, "input.ply") , 'wb') as dest_file: 53 | dest_file.write(src_file.read()) 54 | json_cams = [] 55 | camlist = [] 56 | if scene_info.test_cameras: 57 | camlist.extend(scene_info.test_cameras) 58 | if scene_info.train_cameras: 59 | camlist.extend(scene_info.train_cameras) 60 | for id, cam in enumerate(camlist): 61 | json_cams.append(camera_to_JSON(id, cam)) 62 | with open(os.path.join(self.model_path, "cameras.json"), 'w') as file: 63 | json.dump(json_cams, file) 64 | 65 | if shuffle: 66 | random.shuffle(scene_info.train_cameras) # Multi-res consistent random shuffling 67 | random.shuffle(scene_info.test_cameras) # Multi-res consistent random shuffling 68 | 69 | self.cameras_extent = scene_info.nerf_normalization["radius"] 70 | 71 | for resolution_scale in resolution_scales: 72 | print("Loading Training Cameras") 73 | self.train_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.train_cameras, resolution_scale, args) 74 | print("Loading Test Cameras") 75 | self.test_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.test_cameras, resolution_scale, args) 76 | 77 | if self.loaded_iter: 78 | self.gaussians.load_ply(os.path.join(self.model_path, 79 | "point_cloud", 80 | "iteration_" + str(self.loaded_iter), 81 | "point_cloud.ply")) 82 | else: 83 | self.gaussians.create_from_pcd(scene_info.point_cloud, self.cameras_extent) 84 | 85 | def save(self, iteration): 86 | point_cloud_path = os.path.join(self.model_path, "point_cloud/iteration_{}".format(iteration)) 87 | self.gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply")) 88 | 89 | def getTrainCameras(self, scale=1.0): 90 | return self.train_cameras[scale] 91 | 92 | def getTestCameras(self, scale=1.0): 93 | return self.test_cameras[scale] -------------------------------------------------------------------------------- /metrics.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 
8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | from pathlib import Path 13 | import os 14 | from PIL import Image 15 | import torch 16 | import torchvision.transforms.functional as tf 17 | from utils.loss_utils import ssim 18 | from lpipsPyTorch import lpips 19 | import json 20 | from tqdm import tqdm 21 | from utils.image_utils import psnr 22 | from argparse import ArgumentParser 23 | 24 | def readImages(renders_dir, gt_dir): 25 | renders = [] 26 | gts = [] 27 | image_names = [] 28 | for fname in os.listdir(renders_dir): 29 | render = Image.open(renders_dir / fname) 30 | gt = Image.open(gt_dir / fname) 31 | renders.append(tf.to_tensor(render).unsqueeze(0)[:, :3, :, :].cuda()) 32 | gts.append(tf.to_tensor(gt).unsqueeze(0)[:, :3, :, :].cuda()) 33 | image_names.append(fname) 34 | return renders, gts, image_names 35 | 36 | def evaluate(model_paths): 37 | 38 | full_dict = {} 39 | per_view_dict = {} 40 | full_dict_polytopeonly = {} 41 | per_view_dict_polytopeonly = {} 42 | print("") 43 | 44 | for scene_dir in model_paths: 45 | try: 46 | print("Scene:", scene_dir) 47 | full_dict[scene_dir] = {} 48 | per_view_dict[scene_dir] = {} 49 | full_dict_polytopeonly[scene_dir] = {} 50 | per_view_dict_polytopeonly[scene_dir] = {} 51 | 52 | test_dir = Path(scene_dir) / "test" 53 | 54 | for method in os.listdir(test_dir): 55 | print("Method:", method) 56 | 57 | full_dict[scene_dir][method] = {} 58 | per_view_dict[scene_dir][method] = {} 59 | full_dict_polytopeonly[scene_dir][method] = {} 60 | per_view_dict_polytopeonly[scene_dir][method] = {} 61 | 62 | method_dir = test_dir / method 63 | gt_dir = method_dir/ "gt" 64 | renders_dir = method_dir / "renders" 65 | renders, gts, image_names = readImages(renders_dir, gt_dir) 66 | 67 | ssims = [] 68 | psnrs = [] 69 | lpipss = [] 70 | 71 | for idx in tqdm(range(len(renders)), desc="Metric evaluation progress"): 72 | ssims.append(ssim(renders[idx], gts[idx])) 73 | psnrs.append(psnr(renders[idx], gts[idx])) 74 | lpipss.append(lpips(renders[idx], gts[idx], net_type='vgg')) 75 | 76 | print(" SSIM : {:>12.7f}".format(torch.tensor(ssims).mean(), ".5")) 77 | print(" PSNR : {:>12.7f}".format(torch.tensor(psnrs).mean(), ".5")) 78 | print(" LPIPS: {:>12.7f}".format(torch.tensor(lpipss).mean(), ".5")) 79 | print("") 80 | 81 | full_dict[scene_dir][method].update({"SSIM": torch.tensor(ssims).mean().item(), 82 | "PSNR": torch.tensor(psnrs).mean().item(), 83 | "LPIPS": torch.tensor(lpipss).mean().item()}) 84 | per_view_dict[scene_dir][method].update({"SSIM": {name: ssim for ssim, name in zip(torch.tensor(ssims).tolist(), image_names)}, 85 | "PSNR": {name: psnr for psnr, name in zip(torch.tensor(psnrs).tolist(), image_names)}, 86 | "LPIPS": {name: lp for lp, name in zip(torch.tensor(lpipss).tolist(), image_names)}}) 87 | 88 | with open(scene_dir + "/results.json", 'w') as fp: 89 | json.dump(full_dict[scene_dir], fp, indent=True) 90 | with open(scene_dir + "/per_view.json", 'w') as fp: 91 | json.dump(per_view_dict[scene_dir], fp, indent=True) 92 | except: 93 | print("Unable to compute metrics for model", scene_dir) 94 | 95 | if __name__ == "__main__": 96 | device = torch.device("cuda:0") 97 | torch.cuda.set_device(device) 98 | 99 | # Set up command line argument parser 100 | parser = ArgumentParser(description="Training script parameters") 101 | parser.add_argument('--model_paths', '-m', required=True, nargs="+", type=str, default=[]) 102 | args = parser.parse_args() 103 | evaluate(args.model_paths) 104 | 
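A minimal sketch of how the metric helpers above (ssim, psnr, lpips) can be exercised on a single render/ground-truth pair outside the full evaluate() loop. The image paths are placeholders and a CUDA device is assumed, as in metrics.py; only the three imported functions come from the repository.

# Hypothetical one-off metric check; the paths below are illustrative only.
import torchvision.transforms.functional as tf
from PIL import Image

from utils.loss_utils import ssim
from utils.image_utils import psnr
from lpipsPyTorch import lpips

render = tf.to_tensor(Image.open("renders/00000.png")).unsqueeze(0)[:, :3, :, :].cuda()
gt = tf.to_tensor(Image.open("gt/00000.png")).unsqueeze(0)[:, :3, :, :].cuda()

print("SSIM :", ssim(render, gt).item())                   # scalar (size_average=True)
print("PSNR :", psnr(render, gt).mean().item())            # one value per image in the batch
print("LPIPS:", lpips(render, gt, net_type='vgg').item())  # downloads pretrained weights on first use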
-------------------------------------------------------------------------------- /utils/general_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import torch 13 | import sys 14 | from datetime import datetime 15 | import numpy as np 16 | import random 17 | 18 | def inverse_sigmoid(x): 19 | return torch.log(x/(1-x)) 20 | 21 | def PILtoTorch(pil_image, resolution): 22 | resized_image_PIL = pil_image.resize(resolution) 23 | resized_image = torch.from_numpy(np.array(resized_image_PIL)) / 255.0 24 | if len(resized_image.shape) == 3: 25 | return resized_image.permute(2, 0, 1) 26 | else: 27 | return resized_image.unsqueeze(dim=-1).permute(2, 0, 1) 28 | 29 | def get_expon_lr_func( 30 | lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000 31 | ): 32 | """ 33 | Copied from Plenoxels 34 | 35 | Continuous learning rate decay function. Adapted from JaxNeRF 36 | The returned rate is lr_init when step=0 and lr_final when step=max_steps, and 37 | is log-linearly interpolated elsewhere (equivalent to exponential decay). 38 | If lr_delay_steps>0 then the learning rate will be scaled by some smooth 39 | function of lr_delay_mult, such that the initial learning rate is 40 | lr_init*lr_delay_mult at the beginning of optimization but will be eased back 41 | to the normal learning rate when steps>lr_delay_steps. 42 | :param conf: config subtree 'lr' or similar 43 | :param max_steps: int, the number of steps during optimization. 44 | :return HoF which takes step as input 45 | """ 46 | 47 | def helper(step): 48 | if step < 0 or (lr_init == 0.0 and lr_final == 0.0): 49 | # Disable this parameter 50 | return 0.0 51 | if lr_delay_steps > 0: 52 | # A kind of reverse cosine decay. 
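            # delay_rate ramps from lr_delay_mult at step 0 up to 1.0 at lr_delay_steps via a half-sine;
            # the log-linear interpolation between lr_init and lr_final below is then scaled by it.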
53 | delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin( 54 | 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1) 55 | ) 56 | else: 57 | delay_rate = 1.0 58 | t = np.clip(step / max_steps, 0, 1) 59 | log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) 60 | return delay_rate * log_lerp 61 | 62 | return helper 63 | 64 | def strip_lowerdiag(L): 65 | uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device="cuda") 66 | 67 | uncertainty[:, 0] = L[:, 0, 0] 68 | uncertainty[:, 1] = L[:, 0, 1] 69 | uncertainty[:, 2] = L[:, 0, 2] 70 | uncertainty[:, 3] = L[:, 1, 1] 71 | uncertainty[:, 4] = L[:, 1, 2] 72 | uncertainty[:, 5] = L[:, 2, 2] 73 | return uncertainty 74 | 75 | def strip_symmetric(sym): 76 | return strip_lowerdiag(sym) 77 | 78 | def build_rotation(r): 79 | norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3]) 80 | 81 | q = r / norm[:, None] 82 | 83 | R = torch.zeros((q.size(0), 3, 3), device='cuda') 84 | 85 | r = q[:, 0] 86 | x = q[:, 1] 87 | y = q[:, 2] 88 | z = q[:, 3] 89 | 90 | R[:, 0, 0] = 1 - 2 * (y*y + z*z) 91 | R[:, 0, 1] = 2 * (x*y - r*z) 92 | R[:, 0, 2] = 2 * (x*z + r*y) 93 | R[:, 1, 0] = 2 * (x*y + r*z) 94 | R[:, 1, 1] = 1 - 2 * (x*x + z*z) 95 | R[:, 1, 2] = 2 * (y*z - r*x) 96 | R[:, 2, 0] = 2 * (x*z - r*y) 97 | R[:, 2, 1] = 2 * (y*z + r*x) 98 | R[:, 2, 2] = 1 - 2 * (x*x + y*y) 99 | return R 100 | 101 | def build_scaling_rotation(s, r): 102 | L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device="cuda") 103 | R = build_rotation(r) 104 | 105 | L[:,0,0] = s[:,0] 106 | L[:,1,1] = s[:,1] 107 | L[:,2,2] = s[:,2] 108 | 109 | L = R @ L 110 | return L 111 | 112 | def safe_state(silent): 113 | old_f = sys.stdout 114 | class F: 115 | def __init__(self, silent): 116 | self.silent = silent 117 | 118 | def write(self, x): 119 | if not self.silent: 120 | if x.endswith("\n"): 121 | old_f.write(x.replace("\n", " [{}]\n".format(str(datetime.now().strftime("%d/%m %H:%M:%S"))))) 122 | else: 123 | old_f.write(x) 124 | 125 | def flush(self): 126 | old_f.flush() 127 | 128 | sys.stdout = F(silent) 129 | 130 | random.seed(0) 131 | np.random.seed(0) 132 | torch.manual_seed(0) 133 | torch.cuda.set_device(torch.device("cuda:0")) 134 | -------------------------------------------------------------------------------- /utils/sh_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 The PlenOctree Authors. 2 | # Redistribution and use in source and binary forms, with or without 3 | # modification, are permitted provided that the following conditions are met: 4 | # 5 | # 1. Redistributions of source code must retain the above copyright notice, 6 | # this list of conditions and the following disclaimer. 7 | # 8 | # 2. Redistributions in binary form must reproduce the above copyright notice, 9 | # this list of conditions and the following disclaimer in the documentation 10 | # and/or other materials provided with the distribution. 11 | # 12 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 13 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 14 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 15 | # ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 16 | # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 18 | # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 19 | # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 20 | # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 21 | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 22 | # POSSIBILITY OF SUCH DAMAGE. 23 | 24 | import torch 25 | 26 | C0 = 0.28209479177387814 27 | C1 = 0.4886025119029199 28 | C2 = [ 29 | 1.0925484305920792, 30 | -1.0925484305920792, 31 | 0.31539156525252005, 32 | -1.0925484305920792, 33 | 0.5462742152960396 34 | ] 35 | C3 = [ 36 | -0.5900435899266435, 37 | 2.890611442640554, 38 | -0.4570457994644658, 39 | 0.3731763325901154, 40 | -0.4570457994644658, 41 | 1.445305721320277, 42 | -0.5900435899266435 43 | ] 44 | C4 = [ 45 | 2.5033429417967046, 46 | -1.7701307697799304, 47 | 0.9461746957575601, 48 | -0.6690465435572892, 49 | 0.10578554691520431, 50 | -0.6690465435572892, 51 | 0.47308734787878004, 52 | -1.7701307697799304, 53 | 0.6258357354491761, 54 | ] 55 | 56 | 57 | def eval_sh(deg, sh, dirs): 58 | """ 59 | Evaluate spherical harmonics at unit directions 60 | using hardcoded SH polynomials. 61 | Works with torch/np/jnp. 62 | ... Can be 0 or more batch dimensions. 63 | Args: 64 | deg: int SH deg. Currently, 0-3 supported 65 | sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2] 66 | dirs: jnp.ndarray unit directions [..., 3] 67 | Returns: 68 | [..., C] 69 | """ 70 | assert deg <= 4 and deg >= 0 71 | coeff = (deg + 1) ** 2 72 | assert sh.shape[-1] >= coeff 73 | 74 | result = C0 * sh[..., 0] 75 | if deg > 0: 76 | x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3] 77 | result = (result - 78 | C1 * y * sh[..., 1] + 79 | C1 * z * sh[..., 2] - 80 | C1 * x * sh[..., 3]) 81 | 82 | if deg > 1: 83 | xx, yy, zz = x * x, y * y, z * z 84 | xy, yz, xz = x * y, y * z, x * z 85 | result = (result + 86 | C2[0] * xy * sh[..., 4] + 87 | C2[1] * yz * sh[..., 5] + 88 | C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] + 89 | C2[3] * xz * sh[..., 7] + 90 | C2[4] * (xx - yy) * sh[..., 8]) 91 | 92 | if deg > 2: 93 | result = (result + 94 | C3[0] * y * (3 * xx - yy) * sh[..., 9] + 95 | C3[1] * xy * z * sh[..., 10] + 96 | C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] + 97 | C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] + 98 | C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] + 99 | C3[5] * z * (xx - yy) * sh[..., 14] + 100 | C3[6] * x * (xx - 3 * yy) * sh[..., 15]) 101 | 102 | if deg > 3: 103 | result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] + 104 | C4[1] * yz * (3 * xx - yy) * sh[..., 17] + 105 | C4[2] * xy * (7 * zz - 1) * sh[..., 18] + 106 | C4[3] * yz * (7 * zz - 3) * sh[..., 19] + 107 | C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] + 108 | C4[5] * xz * (7 * zz - 3) * sh[..., 21] + 109 | C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] + 110 | C4[7] * xz * (xx - 3 * yy) * sh[..., 23] + 111 | C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24]) 112 | return result 113 | 114 | def RGB2SH(rgb): 115 | return (rgb - 0.5) / C0 116 | 117 | def SH2RGB(sh): 118 | return sh * C0 + 0.5 -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Gaussian-Splatting License 2 | =========================== 3 | 4 | **Inria** 
and **the Max Planck Institut for Informatik (MPII)** hold all the ownership rights on the *Software* named **gaussian-splatting**. 5 | The *Software* is in the process of being registered with the Agence pour la Protection des 6 | Programmes (APP). 7 | 8 | The *Software* is still being developed by the *Licensor*. 9 | 10 | *Licensor*'s goal is to allow the research community to use, test and evaluate 11 | the *Software*. 12 | 13 | ## 1. Definitions 14 | 15 | *Licensee* means any person or entity that uses the *Software* and distributes 16 | its *Work*. 17 | 18 | *Licensor* means the owners of the *Software*, i.e Inria and MPII 19 | 20 | *Software* means the original work of authorship made available under this 21 | License ie gaussian-splatting. 22 | 23 | *Work* means the *Software* and any additions to or derivative works of the 24 | *Software* that are made available under this License. 25 | 26 | 27 | ## 2. Purpose 28 | This license is intended to define the rights granted to the *Licensee* by 29 | Licensors under the *Software*. 30 | 31 | ## 3. Rights granted 32 | 33 | For the above reasons Licensors have decided to distribute the *Software*. 34 | Licensors grant non-exclusive rights to use the *Software* for research purposes 35 | to research users (both academic and industrial), free of charge, without right 36 | to sublicense.. The *Software* may be used "non-commercially", i.e., for research 37 | and/or evaluation purposes only. 38 | 39 | Subject to the terms and conditions of this License, you are granted a 40 | non-exclusive, royalty-free, license to reproduce, prepare derivative works of, 41 | publicly display, publicly perform and distribute its *Work* and any resulting 42 | derivative works in any form. 43 | 44 | ## 4. Limitations 45 | 46 | **4.1 Redistribution.** You may reproduce or distribute the *Work* only if (a) you do 47 | so under this License, (b) you include a complete copy of this License with 48 | your distribution, and (c) you retain without modification any copyright, 49 | patent, trademark, or attribution notices that are present in the *Work*. 50 | 51 | **4.2 Derivative Works.** You may specify that additional or different terms apply 52 | to the use, reproduction, and distribution of your derivative works of the *Work* 53 | ("Your Terms") only if (a) Your Terms provide that the use limitation in 54 | Section 2 applies to your derivative works, and (b) you identify the specific 55 | derivative works that are subject to Your Terms. Notwithstanding Your Terms, 56 | this License (including the redistribution requirements in Section 3.1) will 57 | continue to apply to the *Work* itself. 58 | 59 | **4.3** Any other use without of prior consent of Licensors is prohibited. Research 60 | users explicitly acknowledge having received from Licensors all information 61 | allowing to appreciate the adequacy between of the *Software* and their needs and 62 | to undertake all necessary precautions for its execution and use. 63 | 64 | **4.4** The *Software* is provided both as a compiled library file and as source 65 | code. In case of using the *Software* for a publication or other results obtained 66 | through the use of the *Software*, users are strongly encouraged to cite the 67 | corresponding publications as explained in the documentation of the *Software*. 68 | 69 | ## 5. Disclaimer 70 | 71 | THE USER CANNOT USE, EXPLOIT OR DISTRIBUTE THE *SOFTWARE* FOR COMMERCIAL PURPOSES 72 | WITHOUT PRIOR AND EXPLICIT CONSENT OF LICENSORS. 
YOU MUST CONTACT INRIA FOR ANY 73 | UNAUTHORIZED USE: stip-sophia.transfert@inria.fr . ANY SUCH ACTION WILL 74 | CONSTITUTE A FORGERY. THIS *SOFTWARE* IS PROVIDED "AS IS" WITHOUT ANY WARRANTIES 75 | OF ANY NATURE AND ANY EXPRESS OR IMPLIED WARRANTIES, WITH REGARDS TO COMMERCIAL 76 | USE, PROFESSIONNAL USE, LEGAL OR NOT, OR OTHER, OR COMMERCIALISATION OR 77 | ADAPTATION. UNLESS EXPLICITLY PROVIDED BY LAW, IN NO EVENT, SHALL INRIA OR THE 78 | AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 79 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE 80 | GOODS OR SERVICES, LOSS OF USE, DATA, OR PROFITS OR BUSINESS INTERRUPTION) 81 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 82 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING FROM, OUT OF OR 83 | IN CONNECTION WITH THE *SOFTWARE* OR THE USE OR OTHER DEALINGS IN THE *SOFTWARE*. 84 | 85 | ## 6. Files subject to permissive licenses 86 | The contents of the file ```utils/loss_utils.py``` are based on publicly available code authored by Evan Su, which falls under the permissive MIT license. 87 | 88 | Title: pytorch-ssim\ 89 | Project code: https://github.com/Po-Hsun-Su/pytorch-ssim\ 90 | Copyright Evan Su, 2017\ 91 | License: https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/LICENSE.txt (MIT) -------------------------------------------------------------------------------- /convert.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import os 13 | import logging 14 | from argparse import ArgumentParser 15 | import shutil 16 | 17 | # This Python script is based on the shell converter script provided in the MipNerF 360 repository. 18 | parser = ArgumentParser("Colmap converter") 19 | parser.add_argument("--no_gpu", action='store_true') 20 | parser.add_argument("--skip_matching", action='store_true') 21 | parser.add_argument("--source_path", "-s", required=True, type=str) 22 | parser.add_argument("--camera", default="OPENCV", type=str) 23 | parser.add_argument("--colmap_executable", default="", type=str) 24 | parser.add_argument("--resize", action="store_true") 25 | parser.add_argument("--magick_executable", default="", type=str) 26 | args = parser.parse_args() 27 | colmap_command = '"{}"'.format(args.colmap_executable) if len(args.colmap_executable) > 0 else "colmap" 28 | magick_command = '"{}"'.format(args.magick_executable) if len(args.magick_executable) > 0 else "magick" 29 | use_gpu = 1 if not args.no_gpu else 0 30 | 31 | if not args.skip_matching: 32 | os.makedirs(args.source_path + "/distorted/sparse", exist_ok=True) 33 | 34 | ## Feature extraction 35 | feat_extracton_cmd = colmap_command + " feature_extractor "\ 36 | "--database_path " + args.source_path + "/distorted/database.db \ 37 | --image_path " + args.source_path + "/input \ 38 | --ImageReader.single_camera 1 \ 39 | --ImageReader.camera_model " + args.camera + " \ 40 | --SiftExtraction.use_gpu " + str(use_gpu) 41 | exit_code = os.system(feat_extracton_cmd) 42 | if exit_code != 0: 43 | logging.error(f"Feature extraction failed with code {exit_code}. 
Exiting.") 44 | exit(exit_code) 45 | 46 | ## Feature matching 47 | feat_matching_cmd = colmap_command + " exhaustive_matcher \ 48 | --database_path " + args.source_path + "/distorted/database.db \ 49 | --SiftMatching.use_gpu " + str(use_gpu) 50 | exit_code = os.system(feat_matching_cmd) 51 | if exit_code != 0: 52 | logging.error(f"Feature matching failed with code {exit_code}. Exiting.") 53 | exit(exit_code) 54 | 55 | ### Bundle adjustment 56 | # The default Mapper tolerance is unnecessarily large, 57 | # decreasing it speeds up bundle adjustment steps. 58 | mapper_cmd = (colmap_command + " mapper \ 59 | --database_path " + args.source_path + "/distorted/database.db \ 60 | --image_path " + args.source_path + "/input \ 61 | --output_path " + args.source_path + "/distorted/sparse \ 62 | --Mapper.ba_global_function_tolerance=0.000001") 63 | exit_code = os.system(mapper_cmd) 64 | if exit_code != 0: 65 | logging.error(f"Mapper failed with code {exit_code}. Exiting.") 66 | exit(exit_code) 67 | 68 | ### Image undistortion 69 | ## We need to undistort our images into ideal pinhole intrinsics. 70 | img_undist_cmd = (colmap_command + " image_undistorter \ 71 | --image_path " + args.source_path + "/input \ 72 | --input_path " + args.source_path + "/distorted/sparse/0 \ 73 | --output_path " + args.source_path + "\ 74 | --output_type COLMAP") 75 | exit_code = os.system(img_undist_cmd) 76 | if exit_code != 0: 77 | logging.error(f"Mapper failed with code {exit_code}. Exiting.") 78 | exit(exit_code) 79 | 80 | files = os.listdir(args.source_path + "/sparse") 81 | os.makedirs(args.source_path + "/sparse/0", exist_ok=True) 82 | # Copy each file from the source directory to the destination directory 83 | for file in files: 84 | if file == '0': 85 | continue 86 | source_file = os.path.join(args.source_path, "sparse", file) 87 | destination_file = os.path.join(args.source_path, "sparse", "0", file) 88 | shutil.move(source_file, destination_file) 89 | 90 | if(args.resize): 91 | print("Copying and resizing...") 92 | 93 | # Resize images. 94 | os.makedirs(args.source_path + "/images_2", exist_ok=True) 95 | os.makedirs(args.source_path + "/images_4", exist_ok=True) 96 | os.makedirs(args.source_path + "/images_8", exist_ok=True) 97 | # Get the list of files in the source directory 98 | files = os.listdir(args.source_path + "/images") 99 | # Copy each file from the source directory to the destination directory 100 | for file in files: 101 | source_file = os.path.join(args.source_path, "images", file) 102 | 103 | destination_file = os.path.join(args.source_path, "images_2", file) 104 | shutil.copy2(source_file, destination_file) 105 | exit_code = os.system(magick_command + " mogrify -resize 50% " + destination_file) 106 | if exit_code != 0: 107 | logging.error(f"50% resize failed with code {exit_code}. Exiting.") 108 | exit(exit_code) 109 | 110 | destination_file = os.path.join(args.source_path, "images_4", file) 111 | shutil.copy2(source_file, destination_file) 112 | exit_code = os.system(magick_command + " mogrify -resize 25% " + destination_file) 113 | if exit_code != 0: 114 | logging.error(f"25% resize failed with code {exit_code}. Exiting.") 115 | exit(exit_code) 116 | 117 | destination_file = os.path.join(args.source_path, "images_8", file) 118 | shutil.copy2(source_file, destination_file) 119 | exit_code = os.system(magick_command + " mogrify -resize 12.5% " + destination_file) 120 | if exit_code != 0: 121 | logging.error(f"12.5% resize failed with code {exit_code}. 
Exiting.") 122 | exit(exit_code) 123 | 124 | print("Done.") 125 | -------------------------------------------------------------------------------- /scene/dataset_readers.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import os 13 | import sys 14 | from PIL import Image 15 | from typing import NamedTuple 16 | from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \ 17 | read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text 18 | from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal 19 | import numpy as np 20 | import json 21 | from pathlib import Path 22 | from plyfile import PlyData, PlyElement 23 | from utils.sh_utils import SH2RGB 24 | from scene.gaussian_model import BasicPointCloud 25 | 26 | class CameraInfo(NamedTuple): 27 | uid: int 28 | R: np.array 29 | T: np.array 30 | FovY: np.array 31 | FovX: np.array 32 | image: np.array 33 | image_path: str 34 | image_name: str 35 | width: int 36 | height: int 37 | 38 | class SceneInfo(NamedTuple): 39 | point_cloud: BasicPointCloud 40 | train_cameras: list 41 | test_cameras: list 42 | nerf_normalization: dict 43 | ply_path: str 44 | 45 | def getNerfppNorm(cam_info): 46 | def get_center_and_diag(cam_centers): 47 | cam_centers = np.hstack(cam_centers) 48 | avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) 49 | center = avg_cam_center 50 | dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) 51 | diagonal = np.max(dist) 52 | return center.flatten(), diagonal 53 | 54 | cam_centers = [] 55 | 56 | for cam in cam_info: 57 | W2C = getWorld2View2(cam.R, cam.T) 58 | C2W = np.linalg.inv(W2C) 59 | cam_centers.append(C2W[:3, 3:4]) 60 | 61 | center, diagonal = get_center_and_diag(cam_centers) 62 | radius = diagonal * 1.1 63 | 64 | translate = -center 65 | 66 | return {"translate": translate, "radius": radius} 67 | 68 | def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): 69 | cam_infos = [] 70 | for idx, key in enumerate(cam_extrinsics): 71 | sys.stdout.write('\r') 72 | # the exact output you're looking for: 73 | sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) 74 | sys.stdout.flush() 75 | 76 | extr = cam_extrinsics[key] 77 | intr = cam_intrinsics[extr.camera_id] 78 | height = intr.height 79 | width = intr.width 80 | 81 | uid = intr.id 82 | R = np.transpose(qvec2rotmat(extr.qvec)) 83 | T = np.array(extr.tvec) 84 | 85 | if intr.model=="SIMPLE_PINHOLE": 86 | focal_length_x = intr.params[0] 87 | FovY = focal2fov(focal_length_x, height) 88 | FovX = focal2fov(focal_length_x, width) 89 | elif intr.model=="PINHOLE": 90 | focal_length_x = intr.params[0] 91 | focal_length_y = intr.params[1] 92 | FovY = focal2fov(focal_length_y, height) 93 | FovX = focal2fov(focal_length_x, width) 94 | else: 95 | assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" 
96 | 97 | image_path = os.path.join(images_folder, os.path.basename(extr.name)) 98 | image_name = os.path.basename(image_path).split(".")[0] 99 | image = Image.open(image_path) 100 | 101 | cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, 102 | image_path=image_path, image_name=image_name, width=width, height=height) 103 | cam_infos.append(cam_info) 104 | sys.stdout.write('\n') 105 | return cam_infos 106 | 107 | def fetchPly(path): 108 | plydata = PlyData.read(path) 109 | vertices = plydata['vertex'] 110 | positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T 111 | colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0 112 | normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T 113 | return BasicPointCloud(points=positions, colors=colors, normals=normals) 114 | 115 | def storePly(path, xyz, rgb): 116 | # Define the dtype for the structured array 117 | dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), 118 | ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), 119 | ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] 120 | 121 | normals = np.zeros_like(xyz) 122 | 123 | elements = np.empty(xyz.shape[0], dtype=dtype) 124 | attributes = np.concatenate((xyz, normals, rgb), axis=1) 125 | elements[:] = list(map(tuple, attributes)) 126 | 127 | # Create the PlyData object and write to file 128 | vertex_element = PlyElement.describe(elements, 'vertex') 129 | ply_data = PlyData([vertex_element]) 130 | ply_data.write(path) 131 | 132 | def readColmapSceneInfo(path, images, eval, llffhold=8): 133 | try: 134 | cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin") 135 | cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin") 136 | cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file) 137 | cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file) 138 | except: 139 | cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") 140 | cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") 141 | cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) 142 | cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) 143 | 144 | reading_dir = "images" if images == None else images 145 | cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir)) 146 | cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name) 147 | 148 | if eval: 149 | train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0] 150 | test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0] 151 | else: 152 | train_cam_infos = cam_infos 153 | test_cam_infos = [] 154 | 155 | nerf_normalization = getNerfppNorm(train_cam_infos) 156 | 157 | ply_path = os.path.join(path, "sparse/0/points3D.ply") 158 | bin_path = os.path.join(path, "sparse/0/points3D.bin") 159 | txt_path = os.path.join(path, "sparse/0/points3D.txt") 160 | if not os.path.exists(ply_path): 161 | print("Converting point3d.bin to .ply, will happen only the first time you open the scene.") 162 | try: 163 | xyz, rgb, _ = read_points3D_binary(bin_path) 164 | except: 165 | xyz, rgb, _ = read_points3D_text(txt_path) 166 | storePly(ply_path, xyz, rgb) 167 | try: 168 | pcd = fetchPly(ply_path) 169 | except: 170 | pcd = None 171 | 172 | scene_info = SceneInfo(point_cloud=pcd, 173 | train_cameras=train_cam_infos, 174 | test_cameras=test_cam_infos, 175 | nerf_normalization=nerf_normalization, 176 | 
ply_path=ply_path) 177 | return scene_info 178 | 179 | def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"): 180 | cam_infos = [] 181 | 182 | with open(os.path.join(path, transformsfile)) as json_file: 183 | contents = json.load(json_file) 184 | fovx = contents["camera_angle_x"] 185 | 186 | frames = contents["frames"] 187 | for idx, frame in enumerate(frames): 188 | cam_name = os.path.join(path, frame["file_path"] + extension) 189 | 190 | # NeRF 'transform_matrix' is a camera-to-world transform 191 | c2w = np.array(frame["transform_matrix"]) 192 | # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward) 193 | c2w[:3, 1:3] *= -1 194 | 195 | # get the world-to-camera transform and set R, T 196 | w2c = np.linalg.inv(c2w) 197 | R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code 198 | T = w2c[:3, 3] 199 | 200 | image_path = os.path.join(path, cam_name) 201 | image_name = Path(cam_name).stem 202 | image = Image.open(image_path) 203 | 204 | im_data = np.array(image.convert("RGBA")) 205 | 206 | bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0]) 207 | 208 | norm_data = im_data / 255.0 209 | arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4]) 210 | image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB") 211 | 212 | fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1]) 213 | FovY = fovy 214 | FovX = fovx 215 | 216 | cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image, 217 | image_path=image_path, image_name=image_name, width=image.size[0], height=image.size[1])) 218 | 219 | return cam_infos 220 | 221 | def readNerfSyntheticInfo(path, white_background, eval, extension=".png"): 222 | print("Reading Training Transforms") 223 | train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension) 224 | print("Reading Test Transforms") 225 | test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension) 226 | 227 | if not eval: 228 | train_cam_infos.extend(test_cam_infos) 229 | test_cam_infos = [] 230 | 231 | nerf_normalization = getNerfppNorm(train_cam_infos) 232 | 233 | ply_path = os.path.join(path, "points3d.ply") 234 | if not os.path.exists(ply_path): 235 | # Since this data set has no colmap data, we start with random points 236 | num_pts = 100_000 237 | print(f"Generating random point cloud ({num_pts})...") 238 | 239 | # We create random points inside the bounds of the synthetic Blender scenes 240 | xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3 241 | shs = np.random.random((num_pts, 3)) / 255.0 242 | pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3))) 243 | 244 | storePly(ply_path, xyz, SH2RGB(shs) * 255) 245 | try: 246 | pcd = fetchPly(ply_path) 247 | except: 248 | pcd = None 249 | 250 | scene_info = SceneInfo(point_cloud=pcd, 251 | train_cameras=train_cam_infos, 252 | test_cameras=test_cam_infos, 253 | nerf_normalization=nerf_normalization, 254 | ply_path=ply_path) 255 | return scene_info 256 | 257 | sceneLoadTypeCallbacks = { 258 | "Colmap": readColmapSceneInfo, 259 | "Blender" : readNerfSyntheticInfo 260 | } -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights 
reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import os 13 | import torch 14 | from random import randint 15 | from utils.loss_utils import l1_loss, ssim 16 | from gaussian_renderer import render, network_gui 17 | import sys 18 | from scene import Scene, GaussianModel 19 | from utils.general_utils import safe_state 20 | import uuid 21 | from tqdm import tqdm 22 | from utils.image_utils import psnr 23 | from argparse import ArgumentParser, Namespace 24 | from arguments import ModelParams, PipelineParams, OptimizationParams 25 | try: 26 | from torch.utils.tensorboard import SummaryWriter 27 | TENSORBOARD_FOUND = True 28 | except ImportError: 29 | TENSORBOARD_FOUND = False 30 | 31 | def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from): 32 | first_iter = 0 33 | tb_writer = prepare_output_and_logger(dataset) 34 | gaussians = GaussianModel(dataset.sh_degree) 35 | scene = Scene(dataset, gaussians) 36 | gaussians.training_setup(opt) 37 | if checkpoint: 38 | (model_params, first_iter) = torch.load(checkpoint) 39 | gaussians.restore(model_params, opt) 40 | 41 | bg_color = [1, 1, 1] if dataset.white_background else [0, 0, 0] 42 | background = torch.tensor(bg_color, dtype=torch.float32, device="cuda") 43 | 44 | iter_start = torch.cuda.Event(enable_timing = True) 45 | iter_end = torch.cuda.Event(enable_timing = True) 46 | 47 | viewpoint_stack = None 48 | ema_loss_for_log = 0.0 49 | progress_bar = tqdm(range(first_iter, opt.iterations), desc="Training progress") 50 | first_iter += 1 51 | for iteration in range(first_iter, opt.iterations + 1): 52 | if network_gui.conn == None: 53 | network_gui.try_connect() 54 | while network_gui.conn != None: 55 | try: 56 | net_image_bytes = None 57 | custom_cam, do_training, pipe.convert_SHs_python, pipe.compute_cov3D_python, keep_alive, scaling_modifer = network_gui.receive() 58 | if custom_cam != None: 59 | net_image = render(custom_cam, gaussians, pipe, background, scaling_modifer)["render"] 60 | net_image_bytes = memoryview((torch.clamp(net_image, min=0, max=1.0) * 255).byte().permute(1, 2, 0).contiguous().cpu().numpy()) 61 | network_gui.send(net_image_bytes, dataset.source_path) 62 | if do_training and ((iteration < int(opt.iterations)) or not keep_alive): 63 | break 64 | except Exception as e: 65 | network_gui.conn = None 66 | 67 | iter_start.record() 68 | 69 | gaussians.update_learning_rate(iteration) 70 | 71 | # Every 1000 its we increase the levels of SH up to a maximum degree 72 | if iteration % 1000 == 0: 73 | gaussians.oneupSHdegree() 74 | 75 | # Pick a random Camera 76 | if not viewpoint_stack: 77 | viewpoint_stack = scene.getTrainCameras().copy() 78 | viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack)-1)) 79 | 80 | # Render 81 | if (iteration - 1) == debug_from: 82 | pipe.debug = True 83 | 84 | bg = torch.rand((3), device="cuda") if opt.random_background else background 85 | 86 | render_pkg = render(viewpoint_cam, gaussians, pipe, bg) 87 | image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"], render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"] 88 | 89 | # Loss 90 | gt_image = viewpoint_cam.original_image.cuda() 91 | Ll1 = l1_loss(image, gt_image) 92 | loss = (1.0 - opt.lambda_dssim) * Ll1 + opt.lambda_dssim * (1.0 - ssim(image, gt_image)) 93 | 
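# Note added for clarity: the objective mixes an L1 photometric term with a D-SSIM term,
# loss = (1 - lambda) * L1 + lambda * (1 - SSIM), weighted by opt.lambda_dssim
# (0.2 by default), before backpropagating through the differentiable rasterizer.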
loss.backward() 94 | 95 | iter_end.record() 96 | 97 | with torch.no_grad(): 98 | # Progress bar 99 | ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log 100 | if iteration % 10 == 0: 101 | progress_bar.set_postfix({"Loss": f"{ema_loss_for_log:.{7}f}"}) 102 | progress_bar.update(10) 103 | if iteration == opt.iterations: 104 | progress_bar.close() 105 | 106 | # Log and save 107 | training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, scene, render, (pipe, background)) 108 | if (iteration in saving_iterations): 109 | print("\n[ITER {}] Saving Gaussians".format(iteration)) 110 | scene.save(iteration) 111 | 112 | # Densification 113 | if iteration < opt.densify_until_iter: 114 | # Keep track of max radii in image-space for pruning 115 | gaussians.max_radii2D[visibility_filter] = torch.max(gaussians.max_radii2D[visibility_filter], radii[visibility_filter]) 116 | gaussians.add_densification_stats(viewspace_point_tensor, visibility_filter) 117 | 118 | if iteration > opt.densify_from_iter and iteration % opt.densification_interval == 0: 119 | size_threshold = 20 if iteration > opt.opacity_reset_interval else None 120 | gaussians.densify_and_prune(opt.densify_grad_threshold, 0.005, scene.cameras_extent, size_threshold) 121 | 122 | if iteration % opt.opacity_reset_interval == 0 or (dataset.white_background and iteration == opt.densify_from_iter): 123 | gaussians.reset_opacity() 124 | 125 | # Optimizer step 126 | if iteration < opt.iterations: 127 | gaussians.optimizer.step() 128 | gaussians.optimizer.zero_grad(set_to_none = True) 129 | 130 | if (iteration in checkpoint_iterations): 131 | print("\n[ITER {}] Saving Checkpoint".format(iteration)) 132 | torch.save((gaussians.capture(), iteration), scene.model_path + "/chkpnt" + str(iteration) + ".pth") 133 | 134 | def prepare_output_and_logger(args): 135 | if not args.model_path: 136 | if os.getenv('OAR_JOB_ID'): 137 | unique_str=os.getenv('OAR_JOB_ID') 138 | else: 139 | unique_str = str(uuid.uuid4()) 140 | args.model_path = os.path.join("./output/", unique_str[0:10]) 141 | 142 | # Set up output folder 143 | print("Output folder: {}".format(args.model_path)) 144 | os.makedirs(args.model_path, exist_ok = True) 145 | with open(os.path.join(args.model_path, "cfg_args"), 'w') as cfg_log_f: 146 | cfg_log_f.write(str(Namespace(**vars(args)))) 147 | 148 | # Create Tensorboard writer 149 | tb_writer = None 150 | if TENSORBOARD_FOUND: 151 | tb_writer = SummaryWriter(args.model_path) 152 | else: 153 | print("Tensorboard not available: not logging progress") 154 | return tb_writer 155 | 156 | def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, scene : Scene, renderFunc, renderArgs): 157 | if tb_writer: 158 | tb_writer.add_scalar('train_loss_patches/l1_loss', Ll1.item(), iteration) 159 | tb_writer.add_scalar('train_loss_patches/total_loss', loss.item(), iteration) 160 | tb_writer.add_scalar('iter_time', elapsed, iteration) 161 | 162 | # Report test and samples of training set 163 | if iteration in testing_iterations: 164 | torch.cuda.empty_cache() 165 | validation_configs = ({'name': 'test', 'cameras' : scene.getTestCameras()}, 166 | {'name': 'train', 'cameras' : [scene.getTrainCameras()[idx % len(scene.getTrainCameras())] for idx in range(5, 30, 5)]}) 167 | 168 | for config in validation_configs: 169 | if config['cameras'] and len(config['cameras']) > 0: 170 | l1_test = 0.0 171 | psnr_test = 0.0 172 | for idx, viewpoint in enumerate(config['cameras']): 173 | 
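# Note added for clarity: each evaluation viewpoint is rendered and clamped to [0, 1]
# alongside its ground-truth image, the first few pairs are logged to TensorBoard,
# and L1 / PSNR are accumulated so the averages reported below cover the whole set.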
image = torch.clamp(renderFunc(viewpoint, scene.gaussians, *renderArgs)["render"], 0.0, 1.0) 174 | gt_image = torch.clamp(viewpoint.original_image.to("cuda"), 0.0, 1.0) 175 | if tb_writer and (idx < 5): 176 | tb_writer.add_images(config['name'] + "_view_{}/render".format(viewpoint.image_name), image[None], global_step=iteration) 177 | if iteration == testing_iterations[0]: 178 | tb_writer.add_images(config['name'] + "_view_{}/ground_truth".format(viewpoint.image_name), gt_image[None], global_step=iteration) 179 | l1_test += l1_loss(image, gt_image).mean().double() 180 | psnr_test += psnr(image, gt_image).mean().double() 181 | psnr_test /= len(config['cameras']) 182 | l1_test /= len(config['cameras']) 183 | print("\n[ITER {}] Evaluating {}: L1 {} PSNR {}".format(iteration, config['name'], l1_test, psnr_test)) 184 | if tb_writer: 185 | tb_writer.add_scalar(config['name'] + '/loss_viewpoint - l1_loss', l1_test, iteration) 186 | tb_writer.add_scalar(config['name'] + '/loss_viewpoint - psnr', psnr_test, iteration) 187 | 188 | if tb_writer: 189 | tb_writer.add_histogram("scene/opacity_histogram", scene.gaussians.get_opacity, iteration) 190 | tb_writer.add_scalar('total_points', scene.gaussians.get_xyz.shape[0], iteration) 191 | torch.cuda.empty_cache() 192 | 193 | if __name__ == "__main__": 194 | # Set up command line argument parser 195 | parser = ArgumentParser(description="Training script parameters") 196 | lp = ModelParams(parser) 197 | op = OptimizationParams(parser) 198 | pp = PipelineParams(parser) 199 | parser.add_argument('--ip', type=str, default="127.0.0.1") 200 | parser.add_argument('--port', type=int, default=6009) 201 | parser.add_argument('--debug_from', type=int, default=-1) 202 | parser.add_argument('--detect_anomaly', action='store_true', default=False) 203 | parser.add_argument("--test_iterations", nargs="+", type=int, default=[7_000, 30_000]) 204 | parser.add_argument("--save_iterations", nargs="+", type=int, default=[7_000, 30_000]) 205 | parser.add_argument("--quiet", action="store_true") 206 | parser.add_argument("--checkpoint_iterations", nargs="+", type=int, default=[]) 207 | parser.add_argument("--start_checkpoint", type=str, default = None) 208 | args = parser.parse_args(sys.argv[1:]) 209 | args.save_iterations.append(args.iterations) 210 | 211 | print("Optimizing " + args.model_path) 212 | 213 | # Initialize system state (RNG) 214 | safe_state(args.quiet) 215 | 216 | # Start GUI server, configure and run training 217 | network_gui.init(args.ip, args.port) 218 | torch.autograd.set_detect_anomaly(args.detect_anomaly) 219 | training(lp.extract(args), op.extract(args), pp.extract(args), args.test_iterations, args.save_iterations, args.checkpoint_iterations, args.start_checkpoint, args.debug_from) 220 | 221 | # All done 222 | print("\nTraining complete.") 223 | -------------------------------------------------------------------------------- /scene/colmap_loader.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 
8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import numpy as np 13 | import collections 14 | import struct 15 | 16 | CameraModel = collections.namedtuple( 17 | "CameraModel", ["model_id", "model_name", "num_params"]) 18 | Camera = collections.namedtuple( 19 | "Camera", ["id", "model", "width", "height", "params"]) 20 | BaseImage = collections.namedtuple( 21 | "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"]) 22 | Point3D = collections.namedtuple( 23 | "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"]) 24 | CAMERA_MODELS = { 25 | CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), 26 | CameraModel(model_id=1, model_name="PINHOLE", num_params=4), 27 | CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), 28 | CameraModel(model_id=3, model_name="RADIAL", num_params=5), 29 | CameraModel(model_id=4, model_name="OPENCV", num_params=8), 30 | CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), 31 | CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), 32 | CameraModel(model_id=7, model_name="FOV", num_params=5), 33 | CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), 34 | CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), 35 | CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12) 36 | } 37 | CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model) 38 | for camera_model in CAMERA_MODELS]) 39 | CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model) 40 | for camera_model in CAMERA_MODELS]) 41 | 42 | 43 | def qvec2rotmat(qvec): 44 | return np.array([ 45 | [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2, 46 | 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], 47 | 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]], 48 | [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], 49 | 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2, 50 | 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]], 51 | [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], 52 | 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], 53 | 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]]) 54 | 55 | def rotmat2qvec(R): 56 | Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat 57 | K = np.array([ 58 | [Rxx - Ryy - Rzz, 0, 0, 0], 59 | [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0], 60 | [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0], 61 | [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0 62 | eigvals, eigvecs = np.linalg.eigh(K) 63 | qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)] 64 | if qvec[0] < 0: 65 | qvec *= -1 66 | return qvec 67 | 68 | class Image(BaseImage): 69 | def qvec2rotmat(self): 70 | return qvec2rotmat(self.qvec) 71 | 72 | def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): 73 | """Read and unpack the next bytes from a binary file. 74 | :param fid: 75 | :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. 76 | :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. 77 | :param endian_character: Any of {@, =, <, >, !} 78 | :return: Tuple of read and unpacked values. 
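    Illustrative example (added): read_next_bytes(fid, 8, "Q") reads and unpacks one
    little-endian unsigned 64-bit integer, which is how the record counts below are read.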
79 | """ 80 | data = fid.read(num_bytes) 81 | return struct.unpack(endian_character + format_char_sequence, data) 82 | 83 | def read_points3D_text(path): 84 | """ 85 | see: src/base/reconstruction.cc 86 | void Reconstruction::ReadPoints3DText(const std::string& path) 87 | void Reconstruction::WritePoints3DText(const std::string& path) 88 | """ 89 | xyzs = None 90 | rgbs = None 91 | errors = None 92 | num_points = 0 93 | with open(path, "r") as fid: 94 | while True: 95 | line = fid.readline() 96 | if not line: 97 | break 98 | line = line.strip() 99 | if len(line) > 0 and line[0] != "#": 100 | num_points += 1 101 | 102 | 103 | xyzs = np.empty((num_points, 3)) 104 | rgbs = np.empty((num_points, 3)) 105 | errors = np.empty((num_points, 1)) 106 | count = 0 107 | with open(path, "r") as fid: 108 | while True: 109 | line = fid.readline() 110 | if not line: 111 | break 112 | line = line.strip() 113 | if len(line) > 0 and line[0] != "#": 114 | elems = line.split() 115 | xyz = np.array(tuple(map(float, elems[1:4]))) 116 | rgb = np.array(tuple(map(int, elems[4:7]))) 117 | error = np.array(float(elems[7])) 118 | xyzs[count] = xyz 119 | rgbs[count] = rgb 120 | errors[count] = error 121 | count += 1 122 | 123 | return xyzs, rgbs, errors 124 | 125 | def read_points3D_binary(path_to_model_file): 126 | """ 127 | see: src/base/reconstruction.cc 128 | void Reconstruction::ReadPoints3DBinary(const std::string& path) 129 | void Reconstruction::WritePoints3DBinary(const std::string& path) 130 | """ 131 | 132 | 133 | with open(path_to_model_file, "rb") as fid: 134 | num_points = read_next_bytes(fid, 8, "Q")[0] 135 | 136 | xyzs = np.empty((num_points, 3)) 137 | rgbs = np.empty((num_points, 3)) 138 | errors = np.empty((num_points, 1)) 139 | 140 | for p_id in range(num_points): 141 | binary_point_line_properties = read_next_bytes( 142 | fid, num_bytes=43, format_char_sequence="QdddBBBd") 143 | xyz = np.array(binary_point_line_properties[1:4]) 144 | rgb = np.array(binary_point_line_properties[4:7]) 145 | error = np.array(binary_point_line_properties[7]) 146 | track_length = read_next_bytes( 147 | fid, num_bytes=8, format_char_sequence="Q")[0] 148 | track_elems = read_next_bytes( 149 | fid, num_bytes=8*track_length, 150 | format_char_sequence="ii"*track_length) 151 | xyzs[p_id] = xyz 152 | rgbs[p_id] = rgb 153 | errors[p_id] = error 154 | return xyzs, rgbs, errors 155 | 156 | def read_intrinsics_text(path): 157 | """ 158 | Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py 159 | """ 160 | cameras = {} 161 | with open(path, "r") as fid: 162 | while True: 163 | line = fid.readline() 164 | if not line: 165 | break 166 | line = line.strip() 167 | if len(line) > 0 and line[0] != "#": 168 | elems = line.split() 169 | camera_id = int(elems[0]) 170 | model = elems[1] 171 | assert model == "PINHOLE", "While the loader support other types, the rest of the code assumes PINHOLE" 172 | width = int(elems[2]) 173 | height = int(elems[3]) 174 | params = np.array(tuple(map(float, elems[4:]))) 175 | cameras[camera_id] = Camera(id=camera_id, model=model, 176 | width=width, height=height, 177 | params=params) 178 | return cameras 179 | 180 | def read_extrinsics_binary(path_to_model_file): 181 | """ 182 | see: src/base/reconstruction.cc 183 | void Reconstruction::ReadImagesBinary(const std::string& path) 184 | void Reconstruction::WriteImagesBinary(const std::string& path) 185 | """ 186 | images = {} 187 | with open(path_to_model_file, "rb") as fid: 188 | num_reg_images = read_next_bytes(fid, 8, 
"Q")[0] 189 | for _ in range(num_reg_images): 190 | binary_image_properties = read_next_bytes( 191 | fid, num_bytes=64, format_char_sequence="idddddddi") 192 | image_id = binary_image_properties[0] 193 | qvec = np.array(binary_image_properties[1:5]) 194 | tvec = np.array(binary_image_properties[5:8]) 195 | camera_id = binary_image_properties[8] 196 | image_name = "" 197 | current_char = read_next_bytes(fid, 1, "c")[0] 198 | while current_char != b"\x00": # look for the ASCII 0 entry 199 | image_name += current_char.decode("utf-8") 200 | current_char = read_next_bytes(fid, 1, "c")[0] 201 | num_points2D = read_next_bytes(fid, num_bytes=8, 202 | format_char_sequence="Q")[0] 203 | x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D, 204 | format_char_sequence="ddq"*num_points2D) 205 | xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])), 206 | tuple(map(float, x_y_id_s[1::3]))]) 207 | point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) 208 | images[image_id] = Image( 209 | id=image_id, qvec=qvec, tvec=tvec, 210 | camera_id=camera_id, name=image_name, 211 | xys=xys, point3D_ids=point3D_ids) 212 | return images 213 | 214 | 215 | def read_intrinsics_binary(path_to_model_file): 216 | """ 217 | see: src/base/reconstruction.cc 218 | void Reconstruction::WriteCamerasBinary(const std::string& path) 219 | void Reconstruction::ReadCamerasBinary(const std::string& path) 220 | """ 221 | cameras = {} 222 | with open(path_to_model_file, "rb") as fid: 223 | num_cameras = read_next_bytes(fid, 8, "Q")[0] 224 | for _ in range(num_cameras): 225 | camera_properties = read_next_bytes( 226 | fid, num_bytes=24, format_char_sequence="iiQQ") 227 | camera_id = camera_properties[0] 228 | model_id = camera_properties[1] 229 | model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name 230 | width = camera_properties[2] 231 | height = camera_properties[3] 232 | num_params = CAMERA_MODEL_IDS[model_id].num_params 233 | params = read_next_bytes(fid, num_bytes=8*num_params, 234 | format_char_sequence="d"*num_params) 235 | cameras[camera_id] = Camera(id=camera_id, 236 | model=model_name, 237 | width=width, 238 | height=height, 239 | params=np.array(params)) 240 | assert len(cameras) == num_cameras 241 | return cameras 242 | 243 | 244 | def read_extrinsics_text(path): 245 | """ 246 | Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py 247 | """ 248 | images = {} 249 | with open(path, "r") as fid: 250 | while True: 251 | line = fid.readline() 252 | if not line: 253 | break 254 | line = line.strip() 255 | if len(line) > 0 and line[0] != "#": 256 | elems = line.split() 257 | image_id = int(elems[0]) 258 | qvec = np.array(tuple(map(float, elems[1:5]))) 259 | tvec = np.array(tuple(map(float, elems[5:8]))) 260 | camera_id = int(elems[8]) 261 | image_name = elems[9] 262 | elems = fid.readline().split() 263 | xys = np.column_stack([tuple(map(float, elems[0::3])), 264 | tuple(map(float, elems[1::3]))]) 265 | point3D_ids = np.array(tuple(map(int, elems[2::3]))) 266 | images[image_id] = Image( 267 | id=image_id, qvec=qvec, tvec=tvec, 268 | camera_id=camera_id, name=image_name, 269 | xys=xys, point3D_ids=point3D_ids) 270 | return images 271 | 272 | 273 | def read_colmap_bin_array(path): 274 | """ 275 | Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_dense.py 276 | 277 | :param path: path to the colmap binary file. 
278 | :return: nd array with the floating point values in the value 279 | """ 280 | with open(path, "rb") as fid: 281 | width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1, 282 | usecols=(0, 1, 2), dtype=int) 283 | fid.seek(0) 284 | num_delimiter = 0 285 | byte = fid.read(1) 286 | while True: 287 | if byte == b"&": 288 | num_delimiter += 1 289 | if num_delimiter >= 3: 290 | break 291 | byte = fid.read(1) 292 | array = np.fromfile(fid, np.float32) 293 | array = array.reshape((width, height, channels), order="F") 294 | return np.transpose(array, (1, 0, 2)).squeeze() 295 | -------------------------------------------------------------------------------- /scene/gaussian_model.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import torch 13 | import numpy as np 14 | from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation 15 | from torch import nn 16 | import os 17 | from utils.system_utils import mkdir_p 18 | from plyfile import PlyData, PlyElement 19 | from utils.sh_utils import RGB2SH 20 | from simple_knn._C import distCUDA2 21 | from utils.graphics_utils import BasicPointCloud 22 | from utils.general_utils import strip_symmetric, build_scaling_rotation 23 | 24 | class GaussianModel: 25 | 26 | def setup_functions(self): 27 | def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation): 28 | L = build_scaling_rotation(scaling_modifier * scaling, rotation) 29 | actual_covariance = L @ L.transpose(1, 2) 30 | symm = strip_symmetric(actual_covariance) 31 | return symm 32 | 33 | self.scaling_activation = torch.exp 34 | self.scaling_inverse_activation = torch.log 35 | 36 | self.covariance_activation = build_covariance_from_scaling_rotation 37 | 38 | self.opacity_activation = torch.sigmoid 39 | self.inverse_opacity_activation = inverse_sigmoid 40 | 41 | self.rotation_activation = torch.nn.functional.normalize 42 | 43 | 44 | def __init__(self, sh_degree : int): 45 | self.active_sh_degree = 0 46 | self.max_sh_degree = sh_degree 47 | self._xyz = torch.empty(0) 48 | self._features_dc = torch.empty(0) 49 | self._features_rest = torch.empty(0) 50 | self._scaling = torch.empty(0) 51 | self._rotation = torch.empty(0) 52 | self._opacity = torch.empty(0) 53 | self.max_radii2D = torch.empty(0) 54 | self.xyz_gradient_accum = torch.empty(0) 55 | self.denom = torch.empty(0) 56 | self.optimizer = None 57 | self.percent_dense = 0 58 | self.spatial_lr_scale = 0 59 | self.setup_functions() 60 | 61 | def capture(self): 62 | return ( 63 | self.active_sh_degree, 64 | self._xyz, 65 | self._features_dc, 66 | self._features_rest, 67 | self._scaling, 68 | self._rotation, 69 | self._opacity, 70 | self.max_radii2D, 71 | self.xyz_gradient_accum, 72 | self.denom, 73 | self.optimizer.state_dict(), 74 | self.spatial_lr_scale, 75 | ) 76 | 77 | def restore(self, model_args, training_args): 78 | (self.active_sh_degree, 79 | self._xyz, 80 | self._features_dc, 81 | self._features_rest, 82 | self._scaling, 83 | self._rotation, 84 | self._opacity, 85 | self.max_radii2D, 86 | xyz_gradient_accum, 87 | denom, 88 | opt_dict, 89 | self.spatial_lr_scale) = model_args 90 | self.training_setup(training_args) 91 | 
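# Note added for clarity: training_setup() first rebuilds the optimizer and zeroed
# densification buffers; the saved accumulators and optimizer state_dict are then
# restored below so training resumes exactly where the checkpoint left off.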
self.xyz_gradient_accum = xyz_gradient_accum 92 | self.denom = denom 93 | self.optimizer.load_state_dict(opt_dict) 94 | 95 | @property 96 | def get_scaling(self): 97 | return self.scaling_activation(self._scaling) 98 | 99 | @property 100 | def get_rotation(self): 101 | return self.rotation_activation(self._rotation) 102 | 103 | @property 104 | def get_xyz(self): 105 | return self._xyz 106 | 107 | @property 108 | def get_features(self): 109 | features_dc = self._features_dc 110 | features_rest = self._features_rest 111 | return torch.cat((features_dc, features_rest), dim=1) 112 | 113 | @property 114 | def get_opacity(self): 115 | return self.opacity_activation(self._opacity) 116 | 117 | def get_covariance(self, scaling_modifier = 1): 118 | return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation) 119 | 120 | def oneupSHdegree(self): 121 | if self.active_sh_degree < self.max_sh_degree: 122 | self.active_sh_degree += 1 123 | 124 | def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float): 125 | self.spatial_lr_scale = spatial_lr_scale 126 | fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda() 127 | fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors)).float().cuda()) 128 | features = torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2)).float().cuda() 129 | features[:, :3, 0 ] = fused_color 130 | features[:, 3:, 1:] = 0.0 131 | 132 | print("Number of points at initialisation : ", fused_point_cloud.shape[0]) 133 | 134 | dist2 = torch.clamp_min(distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()), 0.0000001) 135 | scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3) 136 | rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda") 137 | rots[:, 0] = 1 138 | 139 | opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda")) 140 | 141 | self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True)) 142 | self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(True)) 143 | self._features_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(True)) 144 | self._scaling = nn.Parameter(scales.requires_grad_(True)) 145 | self._rotation = nn.Parameter(rots.requires_grad_(True)) 146 | self._opacity = nn.Parameter(opacities.requires_grad_(True)) 147 | self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda") 148 | 149 | def training_setup(self, training_args): 150 | self.percent_dense = training_args.percent_dense 151 | self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") 152 | self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") 153 | 154 | l = [ 155 | {'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"}, 156 | {'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"}, 157 | {'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"}, 158 | {'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"}, 159 | {'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"}, 160 | {'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"} 161 | ] 162 | 163 | self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15) 164 | self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale, 165 | 
lr_final=training_args.position_lr_final*self.spatial_lr_scale, 166 | lr_delay_mult=training_args.position_lr_delay_mult, 167 | max_steps=training_args.position_lr_max_steps) 168 | 169 | def update_learning_rate(self, iteration): 170 | ''' Learning rate scheduling per step ''' 171 | for param_group in self.optimizer.param_groups: 172 | if param_group["name"] == "xyz": 173 | lr = self.xyz_scheduler_args(iteration) 174 | param_group['lr'] = lr 175 | return lr 176 | 177 | def construct_list_of_attributes(self): 178 | l = ['x', 'y', 'z', 'nx', 'ny', 'nz'] 179 | # All channels except the 3 DC 180 | for i in range(self._features_dc.shape[1]*self._features_dc.shape[2]): 181 | l.append('f_dc_{}'.format(i)) 182 | for i in range(self._features_rest.shape[1]*self._features_rest.shape[2]): 183 | l.append('f_rest_{}'.format(i)) 184 | l.append('opacity') 185 | for i in range(self._scaling.shape[1]): 186 | l.append('scale_{}'.format(i)) 187 | for i in range(self._rotation.shape[1]): 188 | l.append('rot_{}'.format(i)) 189 | return l 190 | 191 | def save_ply(self, path): 192 | mkdir_p(os.path.dirname(path)) 193 | 194 | xyz = self._xyz.detach().cpu().numpy() 195 | normals = np.zeros_like(xyz) 196 | f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy() 197 | f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy() 198 | opacities = self._opacity.detach().cpu().numpy() 199 | scale = self._scaling.detach().cpu().numpy() 200 | rotation = self._rotation.detach().cpu().numpy() 201 | 202 | dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()] 203 | 204 | elements = np.empty(xyz.shape[0], dtype=dtype_full) 205 | attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1) 206 | elements[:] = list(map(tuple, attributes)) 207 | el = PlyElement.describe(elements, 'vertex') 208 | PlyData([el]).write(path) 209 | 210 | def reset_opacity(self): 211 | opacities_new = inverse_sigmoid(torch.min(self.get_opacity, torch.ones_like(self.get_opacity)*0.01)) 212 | optimizable_tensors = self.replace_tensor_to_optimizer(opacities_new, "opacity") 213 | self._opacity = optimizable_tensors["opacity"] 214 | 215 | def load_ply(self, path): 216 | plydata = PlyData.read(path) 217 | 218 | xyz = np.stack((np.asarray(plydata.elements[0]["x"]), 219 | np.asarray(plydata.elements[0]["y"]), 220 | np.asarray(plydata.elements[0]["z"])), axis=1) 221 | opacities = np.asarray(plydata.elements[0]["opacity"])[..., np.newaxis] 222 | 223 | features_dc = np.zeros((xyz.shape[0], 3, 1)) 224 | features_dc[:, 0, 0] = np.asarray(plydata.elements[0]["f_dc_0"]) 225 | features_dc[:, 1, 0] = np.asarray(plydata.elements[0]["f_dc_1"]) 226 | features_dc[:, 2, 0] = np.asarray(plydata.elements[0]["f_dc_2"]) 227 | 228 | extra_f_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("f_rest_")] 229 | extra_f_names = sorted(extra_f_names, key = lambda x: int(x.split('_')[-1])) 230 | assert len(extra_f_names)==3*(self.max_sh_degree + 1) ** 2 - 3 231 | features_extra = np.zeros((xyz.shape[0], len(extra_f_names))) 232 | for idx, attr_name in enumerate(extra_f_names): 233 | features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name]) 234 | # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC) 235 | features_extra = features_extra.reshape((features_extra.shape[0], 3, (self.max_sh_degree + 1) ** 2 - 1)) 236 | 237 | scale_names = [p.name for p in plydata.elements[0].properties if 
p.name.startswith("scale_")] 238 | scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1])) 239 | scales = np.zeros((xyz.shape[0], len(scale_names))) 240 | for idx, attr_name in enumerate(scale_names): 241 | scales[:, idx] = np.asarray(plydata.elements[0][attr_name]) 242 | 243 | rot_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("rot")] 244 | rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1])) 245 | rots = np.zeros((xyz.shape[0], len(rot_names))) 246 | for idx, attr_name in enumerate(rot_names): 247 | rots[:, idx] = np.asarray(plydata.elements[0][attr_name]) 248 | 249 | self._xyz = nn.Parameter(torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True)) 250 | self._features_dc = nn.Parameter(torch.tensor(features_dc, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True)) 251 | self._features_rest = nn.Parameter(torch.tensor(features_extra, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True)) 252 | self._opacity = nn.Parameter(torch.tensor(opacities, dtype=torch.float, device="cuda").requires_grad_(True)) 253 | self._scaling = nn.Parameter(torch.tensor(scales, dtype=torch.float, device="cuda").requires_grad_(True)) 254 | self._rotation = nn.Parameter(torch.tensor(rots, dtype=torch.float, device="cuda").requires_grad_(True)) 255 | 256 | self.active_sh_degree = self.max_sh_degree 257 | 258 | def replace_tensor_to_optimizer(self, tensor, name): 259 | optimizable_tensors = {} 260 | for group in self.optimizer.param_groups: 261 | if group["name"] == name: 262 | stored_state = self.optimizer.state.get(group['params'][0], None) 263 | stored_state["exp_avg"] = torch.zeros_like(tensor) 264 | stored_state["exp_avg_sq"] = torch.zeros_like(tensor) 265 | 266 | del self.optimizer.state[group['params'][0]] 267 | group["params"][0] = nn.Parameter(tensor.requires_grad_(True)) 268 | self.optimizer.state[group['params'][0]] = stored_state 269 | 270 | optimizable_tensors[group["name"]] = group["params"][0] 271 | return optimizable_tensors 272 | 273 | def _prune_optimizer(self, mask): 274 | optimizable_tensors = {} 275 | for group in self.optimizer.param_groups: 276 | stored_state = self.optimizer.state.get(group['params'][0], None) 277 | if stored_state is not None: 278 | stored_state["exp_avg"] = stored_state["exp_avg"][mask] 279 | stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask] 280 | 281 | del self.optimizer.state[group['params'][0]] 282 | group["params"][0] = nn.Parameter((group["params"][0][mask].requires_grad_(True))) 283 | self.optimizer.state[group['params'][0]] = stored_state 284 | 285 | optimizable_tensors[group["name"]] = group["params"][0] 286 | else: 287 | group["params"][0] = nn.Parameter(group["params"][0][mask].requires_grad_(True)) 288 | optimizable_tensors[group["name"]] = group["params"][0] 289 | return optimizable_tensors 290 | 291 | def prune_points(self, mask): 292 | valid_points_mask = ~mask 293 | optimizable_tensors = self._prune_optimizer(valid_points_mask) 294 | 295 | self._xyz = optimizable_tensors["xyz"] 296 | self._features_dc = optimizable_tensors["f_dc"] 297 | self._features_rest = optimizable_tensors["f_rest"] 298 | self._opacity = optimizable_tensors["opacity"] 299 | self._scaling = optimizable_tensors["scaling"] 300 | self._rotation = optimizable_tensors["rotation"] 301 | 302 | self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask] 303 | 304 | self.denom = self.denom[valid_points_mask] 305 | self.max_radii2D = 
self.max_radii2D[valid_points_mask] 306 | 307 | def cat_tensors_to_optimizer(self, tensors_dict): 308 | optimizable_tensors = {} 309 | for group in self.optimizer.param_groups: 310 | assert len(group["params"]) == 1 311 | extension_tensor = tensors_dict[group["name"]] 312 | stored_state = self.optimizer.state.get(group['params'][0], None) 313 | if stored_state is not None: 314 | 315 | stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0) 316 | stored_state["exp_avg_sq"] = torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0) 317 | 318 | del self.optimizer.state[group['params'][0]] 319 | group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True)) 320 | self.optimizer.state[group['params'][0]] = stored_state 321 | 322 | optimizable_tensors[group["name"]] = group["params"][0] 323 | else: 324 | group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True)) 325 | optimizable_tensors[group["name"]] = group["params"][0] 326 | 327 | return optimizable_tensors 328 | 329 | def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation): 330 | d = {"xyz": new_xyz, 331 | "f_dc": new_features_dc, 332 | "f_rest": new_features_rest, 333 | "opacity": new_opacities, 334 | "scaling" : new_scaling, 335 | "rotation" : new_rotation} 336 | 337 | optimizable_tensors = self.cat_tensors_to_optimizer(d) 338 | self._xyz = optimizable_tensors["xyz"] 339 | self._features_dc = optimizable_tensors["f_dc"] 340 | self._features_rest = optimizable_tensors["f_rest"] 341 | self._opacity = optimizable_tensors["opacity"] 342 | self._scaling = optimizable_tensors["scaling"] 343 | self._rotation = optimizable_tensors["rotation"] 344 | 345 | self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") 346 | self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") 347 | self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda") 348 | 349 | def densify_and_split(self, grads, grad_threshold, scene_extent, N=2): 350 | n_init_points = self.get_xyz.shape[0] 351 | # Extract points that satisfy the gradient condition 352 | padded_grad = torch.zeros((n_init_points), device="cuda") 353 | padded_grad[:grads.shape[0]] = grads.squeeze() 354 | selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False) 355 | selected_pts_mask = torch.logical_and(selected_pts_mask, 356 | torch.max(self.get_scaling, dim=1).values > self.percent_dense*scene_extent) 357 | 358 | stds = self.get_scaling[selected_pts_mask].repeat(N,1) 359 | means =torch.zeros((stds.size(0), 3),device="cuda") 360 | samples = torch.normal(mean=means, std=stds) 361 | rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N,1,1) 362 | new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1) 363 | new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N,1) / (0.8*N)) 364 | new_rotation = self._rotation[selected_pts_mask].repeat(N,1) 365 | new_features_dc = self._features_dc[selected_pts_mask].repeat(N,1,1) 366 | new_features_rest = self._features_rest[selected_pts_mask].repeat(N,1,1) 367 | new_opacity = self._opacity[selected_pts_mask].repeat(N,1) 368 | 369 | self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacity, new_scaling, new_rotation) 370 | 371 | prune_filter = 
torch.cat((selected_pts_mask, torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool))) 372 | self.prune_points(prune_filter) 373 | 374 | def densify_and_clone(self, grads, grad_threshold, scene_extent): 375 | # Extract points that satisfy the gradient condition 376 | selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= grad_threshold, True, False) 377 | selected_pts_mask = torch.logical_and(selected_pts_mask, 378 | torch.max(self.get_scaling, dim=1).values <= self.percent_dense*scene_extent) 379 | 380 | new_xyz = self._xyz[selected_pts_mask] 381 | new_features_dc = self._features_dc[selected_pts_mask] 382 | new_features_rest = self._features_rest[selected_pts_mask] 383 | new_opacities = self._opacity[selected_pts_mask] 384 | new_scaling = self._scaling[selected_pts_mask] 385 | new_rotation = self._rotation[selected_pts_mask] 386 | 387 | self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation) 388 | 389 | def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size): 390 | grads = self.xyz_gradient_accum / self.denom 391 | grads[grads.isnan()] = 0.0 392 | 393 | self.densify_and_clone(grads, max_grad, extent) 394 | self.densify_and_split(grads, max_grad, extent) 395 | 396 | prune_mask = (self.get_opacity < min_opacity).squeeze() 397 | if max_screen_size: 398 | big_points_vs = self.max_radii2D > max_screen_size 399 | big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * extent 400 | prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws) 401 | self.prune_points(prune_mask) 402 | 403 | torch.cuda.empty_cache() 404 | 405 | def add_densification_stats(self, viewspace_point_tensor, update_filter): 406 | self.xyz_gradient_accum[update_filter] += torch.norm(viewspace_point_tensor.grad[update_filter,:2], dim=-1, keepdim=True) 407 | self.denom[update_filter] += 1 -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 3D Gaussian Splatting for Real-Time Radiance Field Rendering 2 | Bernhard Kerbl*, Georgios Kopanas*, Thomas Leimkühler, George Drettakis (* indicates equal contribution)
3 | | [Webpage](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/) | [Full Paper](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/3d_gaussian_splatting_high.pdf) | [Video](https://youtu.be/T_kXY43VZnk) | [Other GRAPHDECO Publications](http://www-sop.inria.fr/reves/publis/gdindex.php) | [FUNGRAPH project page](https://fungraph.inria.fr) |
4 | | [T&T+DB COLMAP (650MB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/datasets/input/tandt_db.zip) | [Pre-trained Models (14 GB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/datasets/pretrained/models.zip) | [Viewers for Windows (60MB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/binaries/viewers.zip) | [Evaluation Images (7 GB)](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/evaluation/images.zip) |
5 | ![Teaser image](assets/teaser.png) 6 | 7 | This repository contains the official authors implementation associated with the paper "3D Gaussian Splatting for Real-Time Radiance Field Rendering", which can be found [here](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/). We further provide the reference images used to create the error metrics reported in the paper, as well as recently created, pre-trained models. 8 | 9 | 10 | 11 | 12 | 13 | 14 | Abstract: *Radiance Field methods have recently revolutionized novel-view synthesis of scenes captured with multiple photos or videos. However, achieving high visual quality still requires neural networks that are costly to train and render, while recent faster methods inevitably trade off speed for quality. For unbounded and complete scenes (rather than isolated objects) and 1080p resolution rendering, no current method can achieve real-time display rates. We introduce three key elements that allow us to achieve state-of-the-art visual quality while maintaining competitive training times and importantly allow high-quality real-time (≥ 30 fps) novel-view synthesis at 1080p resolution. First, starting from sparse points produced during camera calibration, we represent the scene with 3D Gaussians that preserve desirable properties of continuous volumetric radiance fields for scene optimization while avoiding unnecessary computation in empty space; Second, we perform interleaved optimization/density control of the 3D Gaussians, notably optimizing anisotropic covariance to achieve an accurate representation of the scene; Third, we develop a fast visibility-aware rendering algorithm that supports anisotropic splatting and both accelerates training and allows realtime rendering. We demonstrate state-of-the-art visual quality and real-time rendering on several established datasets.* 15 | 16 |
17 | 
18 | BibTeX
19 | @Article{kerbl3Dgaussians,
20 |       author       = {Kerbl, Bernhard and Kopanas, Georgios and Leimk{\"u}hler, Thomas and Drettakis, George},
21 |       title        = {3D Gaussian Splatting for Real-Time Radiance Field Rendering},
22 |       journal      = {ACM Transactions on Graphics},
23 |       number       = {4},
24 |       volume       = {42},
25 |       month        = {July},
26 |       year         = {2023},
27 |       url          = {https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/}
28 | }
29 | 
30 | 
31 | 32 | 33 | ## Funding and Acknowledgments 34 | 35 | This research was funded by the ERC Advanced grant FUNGRAPH No 788065. The authors are grateful to Adobe for generous donations, the OPAL infrastructure from Université Côte d’Azur and for the HPC resources from GENCI–IDRIS (Grant 2022-AD011013409). The authors thank the anonymous reviewers for their valuable feedback, P. Hedman and A. Tewari for proofreading earlier drafts also T. Müller, A. Yu and S. Fridovich-Keil for helping with the comparisons. 36 | 37 | ## Step-by-step Tutorial 38 | 39 | Jonathan Stephens made a fantastic step-by-step tutorial for setting up Gaussian Splatting on your machine, along with instructions for creating usable datasets from videos. If the instructions below are too dry for you, go ahead and check it out [here](https://www.youtube.com/watch?v=UXtuigy_wYc). 40 | 41 | ## Colab 42 | 43 | User [camenduru](https://github.com/camenduru) was kind enough to provide a Colab template that uses this repo's source (status: August 2023!) for quick and easy access to the method. Please check it out [here](https://github.com/camenduru/gaussian-splatting-colab). 44 | 45 | ## Cloning the Repository 46 | 47 | The repository contains submodules, thus please check it out with 48 | ```shell 49 | # SSH 50 | git clone git@github.com:graphdeco-inria/gaussian-splatting.git --recursive 51 | ``` 52 | or 53 | ```shell 54 | # HTTPS 55 | git clone https://github.com/graphdeco-inria/gaussian-splatting --recursive 56 | ``` 57 | 58 | ## Overview 59 | 60 | The codebase has 4 main components: 61 | - A PyTorch-based optimizer to produce a 3D Gaussian model from SfM inputs 62 | - A network viewer that allows to connect to and visualize the optimization process 63 | - An OpenGL-based real-time viewer to render trained models in real-time. 64 | - A script to help you turn your own images into optimization-ready SfM data sets 65 | 66 | The components have different requirements w.r.t. both hardware and software. They have been tested on Windows 10 and Ubuntu Linux 22.04. Instructions for setting up and running each of them are found in the sections below. 67 | 68 | ## New features [Please check regularly!] 69 | 70 | We will be adding several new features soon. In the meantime Orange has kindly added [OpenXR support](#openXR-support) for VR viewing. Please come back soon, we will be adding other features, building among others on recent 3DGS followup papers. 71 | 72 | ## Optimizer 73 | 74 | The optimizer uses PyTorch and CUDA extensions in a Python environment to produce trained models. 75 | 76 | ### Hardware Requirements 77 | 78 | - CUDA-ready GPU with Compute Capability 7.0+ 79 | - 24 GB VRAM (to train to paper evaluation quality) 80 | - Please see FAQ for smaller VRAM configurations 81 | 82 | ### Software Requirements 83 | - Conda (recommended for easy setup) 84 | - C++ Compiler for PyTorch extensions (we used Visual Studio 2019 for Windows) 85 | - CUDA SDK 11 for PyTorch extensions, install *after* Visual Studio (we used 11.8, **known issues with 11.6**) 86 | - C++ Compiler and CUDA SDK must be compatible 87 | 88 | ### Setup 89 | 90 | #### Local Setup 91 | 92 | Our default, provided install method is based on Conda package and environment management: 93 | ```shell 94 | SET DISTUTILS_USE_SDK=1 # Windows only 95 | conda env create --file environment.yml 96 | conda activate gaussian_splatting 97 | ``` 98 | Please note that this process assumes that you have CUDA SDK **11** installed, not **12**. For modifications, see below. 
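Before launching a long optimization, it can help to confirm that the freshly created environment's PyTorch build actually sees your CUDA device. The following is a minimal, optional sanity check (not part of the repository's scripts), run inside the activated ```gaussian_splatting``` environment:

```python
# Optional sanity check for the training environment (illustrative, not part of the repo).
import torch

print("PyTorch:", torch.__version__)
print("CUDA runtime:", torch.version.cuda)         # should not differ from the installed CUDA SDK by a major version
print("GPU available:", torch.cuda.is_available())
if torch.cuda.is_available():
    # Training requires a GPU with Compute Capability 7.0+ (see Hardware Requirements above).
    print("Device:", torch.cuda.get_device_name(0), "capability:", torch.cuda.get_device_capability(0))
```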
99 | 100 | Tip: Downloading packages and creating a new environment with Conda can require a significant amount of disk space. By default, Conda will use the main system hard drive. You can avoid this by specifying a different package download location and an environment on a different drive: 101 | 102 | ```shell 103 | conda config --add pkgs_dirs <Drive>/<pkg_path> 104 | conda env create --file environment.yml --prefix <Drive>/<env_path>/gaussian_splatting 105 | conda activate <Drive>/<env_path>/gaussian_splatting 106 | ``` 107 | 108 | #### Modifications 109 | 110 | If you can afford the disk space, we recommend using our environment files for setting up a training environment identical to ours. If you want to make modifications, please note that major version changes might affect the results of our method. However, our (limited) experiments suggest that the codebase works just fine inside a more up-to-date environment (Python 3.8, PyTorch 2.0.0, CUDA 12). Make sure to create an environment where PyTorch and its CUDA runtime version match and the installed CUDA SDK has no major version difference with PyTorch's CUDA version. 111 | 112 | #### Known Issues 113 | 114 | Some users experience problems building the submodules on Windows (```cl.exe: File not found``` or similar). Please consider the workaround for this problem from the FAQ. 115 | 116 | ### Running 117 | 118 | To run the optimizer, simply use 119 | 120 | ```shell 121 | python train.py -s <path to COLMAP or NeRF Synthetic dataset> 122 | ``` 123 | 124 |
125 | Command Line Arguments for train.py 126 | 127 | #### --source_path / -s 128 | Path to the source directory containing a COLMAP or Synthetic NeRF data set. 129 | #### --model_path / -m 130 | Path where the trained model should be stored (```output/``` by default). 131 | #### --images / -i 132 | Alternative subdirectory for COLMAP images (```images``` by default). 133 | #### --eval 134 | Add this flag to use a MipNeRF360-style training/test split for evaluation. 135 | #### --resolution / -r 136 | Specifies resolution of the loaded images before training. If provided ```1, 2, 4``` or ```8```, uses original, 1/2, 1/4 or 1/8 resolution, respectively. For all other values, rescales the width to the given number while maintaining image aspect. **If not set and input image width exceeds 1.6K pixels, inputs are automatically rescaled to this target.** 137 | #### --data_device 138 | Specifies where to put the source image data, ```cuda``` by default, recommended to use ```cpu``` if training on large/high-resolution dataset, will reduce VRAM consumption, but slightly slow down training. Thanks to [HrsPythonix](https://github.com/HrsPythonix). 139 | #### --white_background / -w 140 | Add this flag to use white background instead of black (default), e.g., for evaluation of NeRF Synthetic dataset. 141 | #### --sh_degree 142 | Order of spherical harmonics to be used (no larger than 3). ```3``` by default. 143 | #### --convert_SHs_python 144 | Flag to make pipeline compute forward and backward of SHs with PyTorch instead of ours. 145 | #### --convert_cov3D_python 146 | Flag to make pipeline compute forward and backward of the 3D covariance with PyTorch instead of ours. 147 | #### --debug 148 | Enables debug mode if you experience erros. If the rasterizer fails, a ```dump``` file is created that you may forward to us in an issue so we can take a look. 149 | #### --debug_from 150 | Debugging is **slow**. You may specify an iteration (starting from 0) after which the above debugging becomes active. 151 | #### --iterations 152 | Number of total iterations to train for, ```30_000``` by default. 153 | #### --ip 154 | IP to start GUI server on, ```127.0.0.1``` by default. 155 | #### --port 156 | Port to use for GUI server, ```6009``` by default. 157 | #### --test_iterations 158 | Space-separated iterations at which the training script computes L1 and PSNR over test set, ```7000 30000``` by default. 159 | #### --save_iterations 160 | Space-separated iterations at which the training script saves the Gaussian model, ```7000 30000 ``` by default. 161 | #### --checkpoint_iterations 162 | Space-separated iterations at which to store a checkpoint for continuing later, saved in the model directory. 163 | #### --start_checkpoint 164 | Path to a saved checkpoint to continue training from. 165 | #### --quiet 166 | Flag to omit any text written to standard out pipe. 167 | #### --feature_lr 168 | Spherical harmonics features learning rate, ```0.0025``` by default. 169 | #### --opacity_lr 170 | Opacity learning rate, ```0.05``` by default. 171 | #### --scaling_lr 172 | Scaling learning rate, ```0.005``` by default. 173 | #### --rotation_lr 174 | Rotation learning rate, ```0.001``` by default. 175 | #### --position_lr_max_steps 176 | Number of steps (from 0) where position learning rate goes from ```initial``` to ```final```. ```30_000``` by default. 177 | #### --position_lr_init 178 | Initial 3D position learning rate, ```0.00016``` by default. 
179 | #### --position_lr_final 180 | Final 3D position learning rate, ```0.0000016``` by default. 181 | #### --position_lr_delay_mult 182 | Position learning rate multiplier (cf. Plenoxels), ```0.01``` by default. 183 | #### --densify_from_iter 184 | Iteration where densification starts, ```500``` by default. 185 | #### --densify_until_iter 186 | Iteration where densification stops, ```15_000``` by default. 187 | #### --densify_grad_threshold 188 | Limit that decides if points should be densified based on 2D position gradient, ```0.0002``` by default. 189 | #### --densification_interval 190 | How frequently to densify, ```100``` (every 100 iterations) by default. 191 | #### --opacity_reset_interval 192 | How frequently to reset opacity, ```3_000``` by default. 193 | #### --lambda_dssim 194 | Influence of SSIM on total loss from 0 to 1, ```0.2``` by default. 195 | #### --percent_dense 196 | Percentage of scene extent (0--1) a point must exceed to be forcibly densified, ```0.01``` by default. 197 | 198 |
199 |
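The position learning-rate flags above work together: the rate starts at ```--position_lr_init```, decays towards ```--position_lr_final``` over ```--position_lr_max_steps``` iterations, and ```--position_lr_delay_mult``` scales the rate during an optional warm-up (cf. Plenoxels). The snippet below only illustrates that kind of schedule with the documented defaults; it is a sketch, not the optimizer's actual implementation:

```python
import numpy as np

def position_lr(step,
                lr_init=0.00016, lr_final=0.0000016,   # documented defaults
                max_steps=30_000,
                delay_mult=0.01, delay_steps=0):       # delay_steps is an illustrative extra knob
    """Illustrative log-linear decay with an optional delayed warm-up (Plenoxels-style).
    This is a sketch of the behaviour the flags describe, not the repository's exact code."""
    t = np.clip(step / max_steps, 0.0, 1.0)
    lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)  # log-linear interpolation
    if delay_steps > 0:
        # Smoothly ramp from delay_mult * lr up to lr over the first delay_steps iterations.
        ramp = np.clip(step / delay_steps, 0.0, 1.0)
        lr *= delay_mult + (1 - delay_mult) * np.sin(0.5 * np.pi * ramp)
    return lr

for s in (0, 7_000, 30_000):
    print(s, position_lr(s))
```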
200 | 201 | Note that similar to MipNeRF360, we target images at resolutions in the 1-1.6K pixel range. For convenience, arbitrary-size inputs can be passed and will be automatically resized if their width exceeds 1600 pixels. We recommend keeping this behavior, but you may force training to use your higher-resolution images by setting ```-r 1```. 202 | 203 | The MipNeRF360 scenes are hosted by the paper authors [here](https://jonbarron.info/mipnerf360/). You can find our SfM data sets for Tanks&Temples and Deep Blending [here](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/datasets/input/tandt_db.zip). If you do not provide an output model directory (```-m```), trained models are written to folders with randomized unique names inside the ```output``` directory. At this point, the trained models may be viewed with the real-time viewer (see further below). 204 | 205 | ### Evaluation 206 | By default, the trained models use all available images in the dataset. To train them while withholding a test set for evaluation, use the ```--eval``` flag. This way, you can render training/test sets and produce error metrics as follows: 207 | ```shell 208 | python train.py -s <path to COLMAP or NeRF Synthetic dataset> --eval # Train with train/test split 209 | python render.py -m <path to trained model> # Generate renderings 210 | python metrics.py -m <path to trained model> # Compute error metrics on renderings 211 | ``` 212 | 213 | If you want to evaluate our [pre-trained models](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/datasets/pretrained/models.zip), you will have to download the corresponding source data sets and indicate their location to ```render.py``` with an additional ```--source_path/-s``` flag. Note: The pre-trained models were created with the release codebase. This code base has been cleaned up and includes bugfixes, hence the metrics you get from evaluating them will differ from those in the paper. 214 | ```shell 215 | python render.py -m <path to pre-trained model> -s <path to COLMAP dataset> 216 | python metrics.py -m <path to pre-trained model> 217 | ``` 218 | 219 |
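When evaluating many scenes, the three steps above can be chained from a small driver script. The sketch below simply shells out to the documented commands; the scene list and output directory are placeholders to adapt to your setup:

```python
# Hypothetical helper that chains the documented train / render / metrics steps for several scenes.
import subprocess
from pathlib import Path

SCENES = [Path("/data/tandt/truck"), Path("/data/tandt/train")]  # placeholder dataset paths
OUTPUT = Path("output")

for scene in SCENES:
    model_dir = OUTPUT / scene.name
    subprocess.run(["python", "train.py", "-s", str(scene), "-m", str(model_dir), "--eval"], check=True)
    subprocess.run(["python", "render.py", "-m", str(model_dir)], check=True)
    subprocess.run(["python", "metrics.py", "-m", str(model_dir)], check=True)
```

For the full benchmark routine used in the paper, prefer the provided ```full_eval.py``` described below.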
220 | Command Line Arguments for render.py 221 | 222 | #### --model_path / -m 223 | Path to the trained model directory you want to create renderings for. 224 | #### --skip_train 225 | Flag to skip rendering the training set. 226 | #### --skip_test 227 | Flag to skip rendering the test set. 228 | #### --quiet 229 | Flag to omit any text written to standard out pipe. 230 | 231 | **The below parameters will be read automatically from the model path, based on what was used for training. However, you may override them by providing them explicitly on the command line.** 232 | 233 | #### --source_path / -s 234 | Path to the source directory containing a COLMAP or Synthetic NeRF data set. 235 | #### --images / -i 236 | Alternative subdirectory for COLMAP images (```images``` by default). 237 | #### --eval 238 | Add this flag to use a MipNeRF360-style training/test split for evaluation. 239 | #### --resolution / -r 240 | Changes the resolution of the loaded images before training. If provided ```1, 2, 4``` or ```8```, uses original, 1/2, 1/4 or 1/8 resolution, respectively. For all other values, rescales the width to the given number while maintaining image aspect. ```1``` by default. 241 | #### --white_background / -w 242 | Add this flag to use white background instead of black (default), e.g., for evaluation of NeRF Synthetic dataset. 243 | #### --convert_SHs_python 244 | Flag to make pipeline render with computed SHs from PyTorch instead of ours. 245 | #### --convert_cov3D_python 246 | Flag to make pipeline render with computed 3D covariance from PyTorch instead of ours. 247 | 248 |
249 | 250 |
251 | Command Line Arguments for metrics.py 252 | 253 | #### --model_paths / -m 254 | Space-separated list of model paths for which metrics should be computed. 255 |
256 |
257 | 258 | We further provide the ```full_eval.py``` script. This script specifies the routine used in our evaluation and demonstrates the use of some additional parameters, e.g., ```--images (-i)``` to define alternative image directories within COLMAP data sets. If you have downloaded and extracted all the training data, you can run it like this: 259 | ```shell 260 | python full_eval.py -m360 <mipnerf360 folder> -tat <tanks and temples folder> -db <deep blending folder> 261 | ``` 262 | In the current version, this process takes about 7h on our reference machine containing an A6000. If you want to do the full evaluation on our pre-trained models, you can specify their download location and skip training. 263 | ```shell 264 | python full_eval.py -o <directory with pretrained models> --skip_training -m360 <mipnerf360 folder> -tat <tanks and temples folder> -db <deep blending folder> 265 | ``` 266 | 267 | If you want to compute the metrics on our paper's [evaluation images](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/evaluation/images.zip), you can also skip rendering. In this case it is not necessary to provide the source datasets. You can compute metrics for multiple image sets at a time. 268 | ```shell 269 | python full_eval.py -m <directory with evaluation images>/garden ... --skip_training --skip_rendering 270 | ``` 271 | 272 |
273 | Command Line Arguments for full_eval.py 274 | 275 | #### --skip_training 276 | Flag to skip training stage. 277 | #### --skip_rendering 278 | Flag to skip rendering stage. 279 | #### --skip_metrics 280 | Flag to skip metrics calculation stage. 281 | #### --output_path 282 | Directory to put renderings and results in, ```./eval``` by default, set to pre-trained model location if evaluating them. 283 | #### --mipnerf360 / -m360 284 | Path to MipNeRF360 source datasets, required if training or rendering. 285 | #### --tanksandtemples / -tat 286 | Path to Tanks&Temples source datasets, required if training or rendering. 287 | #### --deepblending / -db 288 | Path to Deep Blending source datasets, required if training or rendering. 289 |
290 |
291 | 292 | ## Interactive Viewers 293 | We provide two interactive viewers for our method: remote and real-time. Our viewing solutions are based on the [SIBR](https://sibr.gitlabpages.inria.fr/) framework, developed by the GRAPHDECO group for several novel-view synthesis projects. 294 | 295 | ### Hardware Requirements 296 | - OpenGL 4.5-ready GPU and drivers (or latest MESA software) 297 | - 4 GB VRAM recommended 298 | - CUDA-ready GPU with Compute Capability 7.0+ (only for Real-Time Viewer) 299 | 300 | ### Software Requirements 301 | - Visual Studio or g++, **not Clang** (we used Visual Studio 2019 for Windows) 302 | - CUDA SDK 11, install *after* Visual Studio (we used 11.8) 303 | - CMake (recent version, we used 3.24) 304 | - 7zip (only on Windows) 305 | 306 | ### Pre-built Windows Binaries 307 | We provide pre-built binaries for Windows [here](https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/binaries/viewers.zip). We recommend using them on Windows for an efficient setup, since the building of SIBR involves several external dependencies that must be downloaded and compiled on-the-fly. 308 | 309 | ### Installation from Source 310 | If you cloned with submodules (e.g., using ```--recursive```), the source code for the viewers is found in ```SIBR_viewers```. The network viewer runs within the SIBR framework for Image-based Rendering applications. 311 | 312 | #### Windows 313 | CMake should take care of your dependencies. 314 | ```shell 315 | cd SIBR_viewers 316 | cmake -Bbuild . 317 | cmake --build build --target install --config RelWithDebInfo 318 | ``` 319 | You may specify a different configuration, e.g. ```Debug``` if you need more control during development. 320 | 321 | #### Ubuntu 22.04 322 | You will need to install a few dependencies before running the project setup. 323 | ```shell 324 | # Dependencies 325 | sudo apt install -y libglew-dev libassimp-dev libboost-all-dev libgtk-3-dev libopencv-dev libglfw3-dev libavdevice-dev libavcodec-dev libeigen3-dev libxxf86vm-dev libembree-dev 326 | # Project setup 327 | cd SIBR_viewers 328 | cmake -Bbuild . -DCMAKE_BUILD_TYPE=Release # add -G Ninja to build faster 329 | cmake --build build -j24 --target install 330 | ``` 331 | 332 | #### Ubuntu 20.04 333 | Backwards compatibility with Focal Fossa is not fully tested, but building SIBR with CMake should still work after invoking 334 | ```shell 335 | git checkout fossa_compatibility 336 | ``` 337 | 338 | ### Navigation in SIBR Viewers 339 | The SIBR interface provides several methods of navigating the scene. By default, you will be started with an FPS navigator, which you can control with ```W, A, S, D, Q, E``` for camera translation and ```I, K, J, L, U, O``` for rotation. Alternatively, you may want to use a Trackball-style navigator (select from the floating menu). You can also snap to a camera from the data set with the ```Snap to``` button or find the closest camera with ```Snap to closest```. The floating menues also allow you to change the navigation speed. You can use the ```Scaling Modifier``` to control the size of the displayed Gaussians, or show the initial point cloud. 
340 | 341 | ### Running the Network Viewer 342 | 343 | 344 | 345 | https://github.com/graphdeco-inria/gaussian-splatting/assets/40643808/90a2e4d3-cf2e-4633-b35f-bfe284e28ff7 346 | 347 | 348 | 349 | After extracting or installing the viewers, you may run the compiled ```SIBR_remoteGaussian_app[_config]``` app in ```/bin```, e.g.: 350 | ```shell 351 | .//bin/SIBR_remoteGaussian_app 352 | ``` 353 | The network viewer allows you to connect to a running training process on the same or a different machine. If you are training on the same machine and OS, no command line parameters should be required: the optimizer communicates the location of the training data to the network viewer. By default, optimizer and network viewer will try to establish a connection on **localhost** on port **6009**. You can change this behavior by providing matching ```--ip``` and ```--port``` parameters to both the optimizer and the network viewer. If for some reason the path used by the optimizer to find the training data is not reachable by the network viewer (e.g., due to them running on different (virtual) machines), you may specify an override location to the viewer by using ```-s ```. 354 | 355 |
356 | Primary Command Line Arguments for Network Viewer 357 | 358 | #### --path / -s 359 | Argument to override model's path to source dataset. 360 | #### --ip 361 | IP to use for connection to a running training script. 362 | #### --port 363 | Port to use for connection to a running training script. 364 | #### --rendering-size 365 | Takes two space separated numbers to define the resolution at which network rendering occurs, ```1200``` width by default. 366 | Note that to enforce an aspect that differs from the input images, you need ```--force-aspect-ratio``` too. 367 | #### --load_images 368 | Flag to load source dataset images to be displayed in the top view for each camera. 369 |
370 |
371 | 372 | ### Running the Real-Time Viewer 373 | 374 | 375 | 376 | 377 | https://github.com/graphdeco-inria/gaussian-splatting/assets/40643808/0940547f-1d82-4c2f-a616-44eabbf0f816 378 | 379 | 380 | 381 | 382 | After extracting or installing the viewers, you may run the compiled ```SIBR_gaussianViewer_app[_config]``` app in ```/bin```, e.g.: 383 | ```shell 384 | .//bin/SIBR_gaussianViewer_app -m 385 | ``` 386 | 387 | It should suffice to provide the ```-m``` parameter pointing to a trained model directory. Alternatively, you can specify an override location for training input data using ```-s```. To use a specific resolution other than the auto-chosen one, specify ```--rendering-size ```. Combine it with ```--force-aspect-ratio``` if you want the exact resolution and don't mind image distortion. 388 | 389 | **To unlock the full frame rate, please disable V-Sync on your machine and also in the application (Menu → Display). In a multi-GPU system (e.g., laptop) your OpenGL/Display GPU should be the same as your CUDA GPU (e.g., by setting the application's GPU preference on Windows, see below) for maximum performance.** 390 | 391 | ![Teaser image](assets/select.png) 392 | 393 | In addition to the initial point cloud and the splats, you also have the option to visualize the Gaussians by rendering them as ellipsoids from the floating menu. 394 | SIBR has many other functionalities, please see the [documentation](https://sibr.gitlabpages.inria.fr/) for more details on the viewer, navigation options etc. There is also a Top View (available from the menu) that shows the placement of the input cameras and the original SfM point cloud; please note that Top View slows rendering when enabled. The real-time viewer also uses slightly more aggressive, fast culling, which can be toggled in the floating menu. If you ever encounter an issue that can be solved by turning fast culling off, please let us know. 395 | 396 |
397 | Primary Command Line Arguments for Real-Time Viewer 398 | 399 | #### --model-path / -m 400 | Path to trained model. 401 | #### --iteration 402 | Specifies which saved state to load if multiple are available. Defaults to latest available iteration. 403 | #### --path / -s 404 | Argument to override model's path to source dataset. 405 | #### --rendering-size 406 | Takes two space-separated numbers to define the resolution at which real-time rendering occurs, ```1200``` width by default. Note that to enforce an aspect that differs from the input images, you need ```--force-aspect-ratio``` too. 407 | #### --load_images 408 | Flag to load source dataset images to be displayed in the top view for each camera. 409 | #### --device 410 | Index of CUDA device to use for rasterization if multiple are available, ```0``` by default. 411 | #### --no_interop 412 | Disables CUDA/GL interop forcibly. Use on systems that may not behave according to spec (e.g., WSL2 with MESA GL 4.5 software rendering). 413 |
414 |
415 | 416 | ## Processing your own Scenes 417 | 418 | Our COLMAP loaders expect the following dataset structure in the source path location: 419 | 420 | ``` 421 | 422 | |---images 423 | | |--- 424 | | |--- 425 | | |---... 426 | |---sparse 427 | |---0 428 | |---cameras.bin 429 | |---images.bin 430 | |---points3D.bin 431 | ``` 432 | 433 | For rasterization, the camera models must be either a SIMPLE_PINHOLE or PINHOLE camera. We provide a converter script ```convert.py```, to extract undistorted images and SfM information from input images. Optionally, you can use ImageMagick to resize the undistorted images. This rescaling is similar to MipNeRF360, i.e., it creates images with 1/2, 1/4 and 1/8 the original resolution in corresponding folders. To use them, please first install a recent version of COLMAP (ideally CUDA-powered) and ImageMagick. Put the images you want to use in a directory ```/input```. 434 | ``` 435 | 436 | |---input 437 | |--- 438 | |--- 439 | |---... 440 | ``` 441 | If you have COLMAP and ImageMagick on your system path, you can simply run 442 | ```shell 443 | python convert.py -s [--resize] #If not resizing, ImageMagick is not needed 444 | ``` 445 | Alternatively, you can use the optional parameters ```--colmap_executable``` and ```--magick_executable``` to point to the respective paths. Please note that on Windows, the executable should point to the COLMAP ```.bat``` file that takes care of setting the execution environment. Once done, `````` will contain the expected COLMAP data set structure with undistorted, resized input images, in addition to your original images and some temporary (distorted) data in the directory ```distorted```. 446 | 447 | If you have your own COLMAP dataset without undistortion (e.g., using ```OPENCV``` camera), you can try to just run the last part of the script: Put the images in ```input``` and the COLMAP info in a subdirectory ```distorted```: 448 | ``` 449 | 450 | |---input 451 | | |--- 452 | | |--- 453 | | |---... 454 | |---distorted 455 | |---database.db 456 | |---sparse 457 | |---0 458 | |---... 459 | ``` 460 | Then run 461 | ```shell 462 | python convert.py -s --skip_matching [--resize] #If not resizing, ImageMagick is not needed 463 | ``` 464 | 465 |
466 | Command Line Arguments for convert.py 467 | 468 | #### --no_gpu 469 | Flag to avoid using GPU in COLMAP. 470 | #### --skip_matching 471 | Flag to indicate that COLMAP info is available for images. 472 | #### --source_path / -s 473 | Location of the inputs. 474 | #### --camera 475 | Which camera model to use for the early matching steps, ```OPENCV``` by default. 476 | #### --resize 477 | Flag for creating resized versions of input images. 478 | #### --colmap_executable 479 | Path to the COLMAP executable (```.bat``` on Windows). 480 | #### --magick_executable 481 | Path to the ImageMagick executable. 482 |
483 |
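Once ```convert.py``` has finished, it can be worth checking that the result matches the layout the COLMAP loaders expect before starting a long optimization. A minimal, optional sketch (the scene path is a placeholder for the directory you would pass to ```train.py``` via ```-s```):

```python
# Optional check that a converted scene has the structure described above.
from pathlib import Path

scene = Path("/data/my_scene")  # placeholder: the location you would pass to train.py via -s
required = [
    scene / "images",
    scene / "sparse" / "0" / "cameras.bin",
    scene / "sparse" / "0" / "images.bin",
    scene / "sparse" / "0" / "points3D.bin",
]
missing = [p for p in required if not p.exists()]
if missing:
    print("Scene is missing:", *missing, sep="\n  ")
else:
    print("Scene layout looks OK:", len(list((scene / "images").iterdir())), "images found")
```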
484 | 485 | ### OpenXR support 486 | 487 | OpenXR is supported in the branch gaussian_code_release_openxr 488 | Within that branch, you can find documentation for VR support [here](https://gitlab.inria.fr/sibr/sibr_core/-/tree/gaussian_code_release_openxr?ref_type=heads). 489 | 490 | ## FAQ 491 | - *Where do I get data sets, e.g., those referenced in ```full_eval.py```?* The MipNeRF360 data set is provided by the authors of the original paper on the project site. Note that two of the data sets cannot be openly shared and require you to consult the authors directly. For Tanks&Temples and Deep Blending, please use the download links provided at the top of the page. Alternatively, you may access the cloned data (status: August 2023!) from [HuggingFace](https://huggingface.co/camenduru/gaussian-splatting) 492 | 493 | 494 | - *How can I use this for a much larger dataset, like a city district?* The current method was not designed for these, but given enough memory, it should work out. However, the approach can struggle in multi-scale detail scenes (extreme close-ups, mixed with far-away shots). This is usually the case in, e.g., driving data sets (cars close up, buildings far away). For such scenes, you can lower the ```--position_lr_init```, ```--position_lr_final``` and ```--scaling_lr``` (x0.3, x0.1, ...). The more extensive the scene, the lower these values should be. Below, we use default learning rates (left) and ```--position_lr_init 0.000016 --scaling_lr 0.001"``` (right). 495 | 496 | | ![Default learning rate result](assets/worse.png "title-1") | ![Reduced learning rate result](assets/better.png "title-2") | 497 | | --- | --- | 498 | 499 | - *I'm on Windows and I can't manage to build the submodules, what do I do?* Consider following the steps in the excellent video tutorial [here](https://www.youtube.com/watch?v=UXtuigy_wYc), hopefully they should help. The order in which the steps are done is important! Alternatively, consider using the linked Colab template. 500 | 501 | - *It still doesn't work. It says something about ```cl.exe```. What do I do?* User Henry Pearce found a workaround. You can you try adding the visual studio path to your environment variables (your version number might differ); 502 | ```C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64``` 503 | Then make sure you start a new conda prompt and cd to your repo location and try this; 504 | ``` 505 | conda activate gaussian_splatting 506 | cd /gaussian-splatting 507 | pip install submodules\diff-gaussian-rasterization 508 | pip install submodules\simple-knn 509 | ``` 510 | 511 | - *I'm on macOS/Puppy Linux/Greenhat and I can't manage to build, what do I do?* Sorry, we can't provide support for platforms outside of the ones we list in this README. Consider using the linked Colab template. 512 | 513 | - *I don't have 24 GB of VRAM for training, what do I do?* The VRAM consumption is determined by the number of points that are being optimized, which increases over time. If you only want to train to 7k iterations, you will need significantly less. To do the full training routine and avoid running out of memory, you can increase the ```--densify_grad_threshold```, ```--densification_interval``` or reduce the value of ```--densify_until_iter```. Note however that this will affect the quality of the result. Also try setting ```--test_iterations``` to ```-1``` to avoid memory spikes during testing. 
If ```--densify_grad_threshold``` is very high, no densification should occur and training should complete if the scene itself loads successfully. 514 | 515 | - *24 GB of VRAM for reference quality training is still a lot! Can't we do it with less?* Yes, most likely. By our calculations it should be possible with **way** less memory (~8GB). If we can find the time we will try to achieve this. If some PyTorch veteran out there wants to tackle this, we look forward to your pull request! 516 | 517 | 518 | - *How can I use the differentiable Gaussian rasterizer for my own project?* Easy, it is included in this repo as a submodule ```diff-gaussian-rasterization```. Feel free to check out and install the package. It's not really documented, but using it from the Python side is very straightforward (cf. ```gaussian_renderer/__init__.py```). 519 | 520 | - *Wait, but ```<insert feature here>``` isn't optimized and could be much better?* There are several parts we didn't even have time to think about improving (yet). The performance you get with this prototype is probably a rather slow baseline for what is physically possible. 521 | 522 | - *Something is broken, how did this happen?* We tried hard to provide a solid and comprehensible basis to make use of the paper's method. We have refactored the code quite a bit, but we have limited capacity to test all possible usage scenarios. Thus, if part of the website, the code or the performance is lacking, please create an issue. If we find the time, we will do our best to address it. 523 | -------------------------------------------------------------------------------- /assets/logo_mpi.svg: --------------------------------------------------------------------------------
--------------------------------------------------------------------------------