├── assets
│   ├── teaser.png
│   └── tuwien.jpg
├── .gitignore
├── environment.yml
├── .gitmodules
├── utils
│   ├── image_utils.py
│   ├── system_utils.py
│   ├── graphics_utils.py
│   ├── loss_utils.py
│   ├── camera_utils.py
│   ├── general_utils.py
│   └── sh_utils.py
├── lpipsPyTorch
│   ├── __init__.py
│   └── modules
│       ├── utils.py
│       ├── lpips.py
│       └── networks.py
├── scene
│   ├── cameras.py
│   ├── __init__.py
│   ├── dataset_readers.py
│   ├── colmap_loader.py
│   ├── gaussian_model_old.py
│   └── gaussian_model.py
├── gaussian_renderer
│   ├── network_gui.py
│   └── __init__.py
├── README.md
├── arguments
│   └── __init__.py
├── LICENSE.md
├── nnfm_utils.py
├── train_original.py
└── train.py
/assets/teaser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AronKovacs/g-style/HEAD/assets/teaser.png
--------------------------------------------------------------------------------
/assets/tuwien.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AronKovacs/g-style/HEAD/assets/tuwien.jpg
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | .vscode
3 | output
4 | build
5 | diff_rasterization/diff_rast.egg-info
6 | diff_rasterization/dist
7 | tensorboard_3d
8 | screenshots
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: gaussian_splatting
2 | channels:
3 | - pytorch
4 | - conda-forge
5 | - defaults
6 | dependencies:
7 | - cudatoolkit=11.6
8 | - plyfile=0.8.1
9 | - python=3.7.13
10 | - pip=22.3.1
11 | - pytorch=1.12.1
12 | - torchaudio=0.12.1
13 | - torchvision=0.13.1
14 | - tqdm
15 | - pip:
16 | - submodules/diff-gaussian-rasterization
17 | - submodules/simple-knn
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "submodules/simple-knn"]
2 | path = submodules/simple-knn
3 | url = https://gitlab.inria.fr/bkerbl/simple-knn.git
4 | [submodule "submodules/diff-gaussian-rasterization"]
5 | path = submodules/diff-gaussian-rasterization
6 | url = https://github.com/graphdeco-inria/diff-gaussian-rasterization
7 | [submodule "SIBR_viewers"]
8 | path = SIBR_viewers
9 | url = https://gitlab.inria.fr/sibr/sibr_core.git
10 |
--------------------------------------------------------------------------------
/utils/image_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 |
14 | def mse(img1, img2):
15 | return (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)
16 |
17 | def psnr(img1, img2):
18 | mse = (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)
19 | return 20 * torch.log10(1.0 / torch.sqrt(mse))
20 |
--------------------------------------------------------------------------------
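
Usage sketch (not part of the repository): calling mse and psnr above on a batch of images; the [N, 3, H, W] shape is an assumption based on the .view(img1.shape[0], -1) reshape.

    import torch
    from utils.image_utils import mse, psnr

    # Two batches of RGB images with values in [0, 1], shape [N, 3, H, W].
    img_a = torch.rand(4, 3, 64, 64)
    img_b = (img_a + 0.05 * torch.randn_like(img_a)).clamp(0.0, 1.0)

    print(mse(img_a, img_b).shape)    # torch.Size([4, 1]): one MSE value per image
    print(psnr(img_a, img_b).mean())  # average PSNR (dB) over the batch
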
/lpipsPyTorch/__init__.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from .modules.lpips import LPIPS
4 |
5 |
6 | def lpips(x: torch.Tensor,
7 | y: torch.Tensor,
8 | net_type: str = 'alex',
9 | version: str = '0.1'):
10 | r"""Function that measures
11 | Learned Perceptual Image Patch Similarity (LPIPS).
12 |
13 | Arguments:
14 | x, y (torch.Tensor): the input tensors to compare.
15 | net_type (str): the network type to compare the features:
16 | 'alex' | 'squeeze' | 'vgg'. Default: 'alex'.
17 | version (str): the version of LPIPS. Default: 0.1.
18 | """
19 | device = x.device
20 | criterion = LPIPS(net_type, version).to(device)
21 | return criterion(x, y)
22 |
--------------------------------------------------------------------------------
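
Usage sketch (not part of the repository): calling the lpips helper above on two image tensors; the linear-layer weights for the chosen backbone are downloaded on first use by modules/utils.py.

    import torch
    from lpipsPyTorch import lpips

    # Images on the same device, shape [N, 3, H, W], values in [0, 1] as used elsewhere in this codebase.
    x = torch.rand(1, 3, 256, 256)
    y = torch.rand(1, 3, 256, 256)

    # Lower scores mean the images are perceptually more similar.
    score = lpips(x, y, net_type='vgg')
    print(score.item())
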
/utils/system_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | from errno import EEXIST
13 | from os import makedirs, path
14 | import os
15 |
16 | def mkdir_p(folder_path):
17 |     # Creates a directory; equivalent to using mkdir -p on the command line
18 | try:
19 | makedirs(folder_path)
20 | except OSError as exc: # Python >2.5
21 | if exc.errno == EEXIST and path.isdir(folder_path):
22 | pass
23 | else:
24 | raise
25 |
26 | def searchForMaxIteration(folder):
27 | saved_iters = [int(fname.split("_")[-1]) for fname in os.listdir(folder)]
28 | return max(saved_iters)
29 |
--------------------------------------------------------------------------------
/lpipsPyTorch/modules/utils.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 |
3 | import torch
4 |
5 |
6 | def normalize_activation(x, eps=1e-10):
7 | norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True))
8 | return x / (norm_factor + eps)
9 |
10 |
11 | def get_state_dict(net_type: str = 'alex', version: str = '0.1'):
12 | # build url
13 | url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \
14 | + f'master/lpips/weights/v{version}/{net_type}.pth'
15 |
16 | # download
17 | old_state_dict = torch.hub.load_state_dict_from_url(
18 | url, progress=True,
19 | map_location=None if torch.cuda.is_available() else torch.device('cpu')
20 | )
21 |
22 | # rename keys
23 | new_state_dict = OrderedDict()
24 | for key, val in old_state_dict.items():
25 | new_key = key
26 | new_key = new_key.replace('lin', '')
27 | new_key = new_key.replace('model.', '')
28 | new_state_dict[new_key] = val
29 |
30 | return new_state_dict
31 |
--------------------------------------------------------------------------------
/lpipsPyTorch/modules/lpips.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from .networks import get_network, LinLayers
5 | from .utils import get_state_dict
6 |
7 |
8 | class LPIPS(nn.Module):
9 | r"""Creates a criterion that measures
10 | Learned Perceptual Image Patch Similarity (LPIPS).
11 |
12 | Arguments:
13 | net_type (str): the network type to compare the features:
14 | 'alex' | 'squeeze' | 'vgg'. Default: 'alex'.
15 | version (str): the version of LPIPS. Default: 0.1.
16 | """
17 | def __init__(self, net_type: str = 'alex', version: str = '0.1'):
18 |
19 | assert version in ['0.1'], 'v0.1 is only supported now'
20 |
21 | super(LPIPS, self).__init__()
22 |
23 | # pretrained network
24 | self.net = get_network(net_type)
25 |
26 | # linear layers
27 | self.lin = LinLayers(self.net.n_channels_list)
28 | self.lin.load_state_dict(get_state_dict(net_type, version))
29 |
30 | def forward(self, x: torch.Tensor, y: torch.Tensor):
31 | feat_x, feat_y = self.net(x), self.net(y)
32 |
33 | diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)]
34 | res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)]
35 |
36 | return torch.sum(torch.cat(res, 0), 0, True)
37 |
--------------------------------------------------------------------------------
/utils/graphics_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | import math
14 | import numpy as np
15 | from typing import NamedTuple
16 |
17 | class BasicPointCloud(NamedTuple):
18 | points : np.array
19 | colors : np.array
20 | normals : np.array
21 |
22 | def geom_transform_points(points, transf_matrix):
23 | P, _ = points.shape
24 | ones = torch.ones(P, 1, dtype=points.dtype, device=points.device)
25 | points_hom = torch.cat([points, ones], dim=1)
26 | points_out = torch.matmul(points_hom, transf_matrix.unsqueeze(0))
27 |
28 | denom = points_out[..., 3:] + 0.0000001
29 | return (points_out[..., :3] / denom).squeeze(dim=0)
30 |
31 | def getWorld2View(R, t):
32 | Rt = np.zeros((4, 4))
33 | Rt[:3, :3] = R.transpose()
34 | Rt[:3, 3] = t
35 | Rt[3, 3] = 1.0
36 | return np.float32(Rt)
37 |
38 | def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
39 | Rt = np.zeros((4, 4))
40 | Rt[:3, :3] = R.transpose()
41 | Rt[:3, 3] = t
42 | Rt[3, 3] = 1.0
43 |
44 | C2W = np.linalg.inv(Rt)
45 | cam_center = C2W[:3, 3]
46 | cam_center = (cam_center + translate) * scale
47 | C2W[:3, 3] = cam_center
48 | Rt = np.linalg.inv(C2W)
49 | return np.float32(Rt)
50 |
51 | def getProjectionMatrix(znear, zfar, fovX, fovY):
52 | tanHalfFovY = math.tan((fovY / 2))
53 | tanHalfFovX = math.tan((fovX / 2))
54 |
55 | top = tanHalfFovY * znear
56 | bottom = -top
57 | right = tanHalfFovX * znear
58 | left = -right
59 |
60 | P = torch.zeros(4, 4)
61 |
62 | z_sign = 1.0
63 |
64 | P[0, 0] = 2.0 * znear / (right - left)
65 | P[1, 1] = 2.0 * znear / (top - bottom)
66 | P[0, 2] = (right + left) / (right - left)
67 | P[1, 2] = (top + bottom) / (top - bottom)
68 | P[3, 2] = z_sign
69 | P[2, 2] = z_sign * zfar / (zfar - znear)
70 | P[2, 3] = -(zfar * znear) / (zfar - znear)
71 | return P
72 |
73 | def fov2focal(fov, pixels):
74 | return pixels / (2 * math.tan(fov / 2))
75 |
76 | def focal2fov(focal, pixels):
77 | return 2*math.atan(pixels/(2*focal))
--------------------------------------------------------------------------------
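
Worked example (not part of the repository): fov2focal and focal2fov above are inverses for a fixed image size, and getProjectionMatrix builds the 4x4 perspective matrix consumed by the Camera class in scene/cameras.py.

    import math
    from utils.graphics_utils import fov2focal, focal2fov, getProjectionMatrix

    width = 1600
    fov_x = math.radians(60.0)
    focal_x = fov2focal(fov_x, width)                      # ~1385.6 pixels
    assert abs(focal2fov(focal_x, width) - fov_x) < 1e-9   # round-trips back to the FoV

    P = getProjectionMatrix(znear=0.01, zfar=100.0, fovX=fov_x, fovY=fov_x)
    print(P.shape)  # torch.Size([4, 4])
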
/utils/loss_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | import torch.nn.functional as F
14 | from torch.autograd import Variable
15 | from math import exp
16 |
17 | def l1_loss(network_output, gt):
18 | return torch.abs((network_output - gt)).mean()
19 |
20 | def l2_loss(network_output, gt):
21 | return ((network_output - gt) ** 2).mean()
22 |
23 | def gaussian(window_size, sigma):
24 | gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
25 | return gauss / gauss.sum()
26 |
27 | def create_window(window_size, channel):
28 | _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
29 | _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
30 | window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
31 | return window
32 |
33 | def ssim(img1, img2, window_size=11, size_average=True):
34 | channel = img1.size(-3)
35 | window = create_window(window_size, channel)
36 |
37 | if img1.is_cuda:
38 | window = window.cuda(img1.get_device())
39 | window = window.type_as(img1)
40 |
41 | return _ssim(img1, img2, window, window_size, channel, size_average)
42 |
43 | def _ssim(img1, img2, window, window_size, channel, size_average=True):
44 | mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
45 | mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
46 |
47 | mu1_sq = mu1.pow(2)
48 | mu2_sq = mu2.pow(2)
49 | mu1_mu2 = mu1 * mu2
50 |
51 | sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
52 | sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
53 | sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
54 |
55 | C1 = 0.01 ** 2
56 | C2 = 0.03 ** 2
57 |
58 | ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
59 |
60 | if size_average:
61 | return ssim_map.mean()
62 | else:
63 | return ssim_map.mean(1).mean(1).mean(1)
64 |
65 |
--------------------------------------------------------------------------------
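
Usage sketch (not part of the repository): combining l1_loss and ssim above into the usual L1 + D-SSIM photometric term, with lambda_dssim = 0.2 taken from OptimizationParams in arguments/__init__.py.

    import torch
    from utils.loss_utils import l1_loss, ssim

    # Rendered image and ground truth, shape [1, 3, H, W], values in [0, 1].
    render = torch.rand(1, 3, 128, 128)
    gt = torch.rand(1, 3, 128, 128)

    lambda_dssim = 0.2
    loss = (1.0 - lambda_dssim) * l1_loss(render, gt) + lambda_dssim * (1.0 - ssim(render, gt))
    print(loss.item())
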
/scene/cameras.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | from torch import nn
14 | import numpy as np
15 | from utils.graphics_utils import getWorld2View2, getProjectionMatrix
16 |
17 | class Camera(nn.Module):
18 | def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,
19 | image_name, uid,
20 | trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda"
21 | ):
22 | super(Camera, self).__init__()
23 |
24 | self.uid = uid
25 | self.colmap_id = colmap_id
26 | self.R = R
27 | self.T = T
28 | self.FoVx = FoVx
29 | self.FoVy = FoVy
30 | self.image_name = image_name
31 |
32 | try:
33 | self.data_device = torch.device(data_device)
34 | except Exception as e:
35 | print(e)
36 | print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" )
37 | self.data_device = torch.device("cuda")
38 |
39 | self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
40 | self.image_width = self.original_image.shape[2]
41 | self.image_height = self.original_image.shape[1]
42 |
43 | if gt_alpha_mask is not None:
44 | self.original_image *= gt_alpha_mask.to(self.data_device)
45 | else:
46 | self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)
47 |
48 | self.zfar = 100.0
49 | self.znear = 0.01
50 |
51 | self.trans = trans
52 | self.scale = scale
53 |
54 | self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
55 | self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda()
56 | self.full_proj_transform = (self.world_view_transform.unsqueeze(0).bmm(self.projection_matrix.unsqueeze(0))).squeeze(0)
57 | self.camera_center = self.world_view_transform.inverse()[3, :3]
58 |
59 | class MiniCam:
60 | def __init__(self, width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform):
61 | self.image_width = width
62 | self.image_height = height
63 | self.FoVy = fovy
64 | self.FoVx = fovx
65 | self.znear = znear
66 | self.zfar = zfar
67 | self.world_view_transform = world_view_transform
68 | self.full_proj_transform = full_proj_transform
69 | view_inv = torch.inverse(self.world_view_transform)
70 | self.camera_center = view_inv[3][:3]
71 |
72 |
--------------------------------------------------------------------------------
/gaussian_renderer/network_gui.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | import traceback
14 | import socket
15 | import json
16 | from scene.cameras import MiniCam
17 |
18 | host = "127.0.0.1"
19 | port = 6009
20 |
21 | conn = None
22 | addr = None
23 |
24 | listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
25 |
26 | def init(wish_host, wish_port):
27 | global host, port, listener
28 | host = wish_host
29 | port = wish_port
30 | listener.bind((host, port))
31 | listener.listen()
32 | listener.settimeout(0)
33 |
34 | def try_connect():
35 | global conn, addr, listener
36 | try:
37 | conn, addr = listener.accept()
38 | print(f"\nConnected by {addr}")
39 | conn.settimeout(None)
40 | except Exception as inst:
41 | pass
42 |
43 | def read():
44 | global conn
45 | messageLength = conn.recv(4)
46 | messageLength = int.from_bytes(messageLength, 'little')
47 | message = conn.recv(messageLength)
48 | return json.loads(message.decode("utf-8"))
49 |
50 | def send(message_bytes, verify):
51 | global conn
52 | if message_bytes != None:
53 | conn.sendall(message_bytes)
54 | conn.sendall(len(verify).to_bytes(4, 'little'))
55 | conn.sendall(bytes(verify, 'ascii'))
56 |
57 | def receive():
58 | message = read()
59 |
60 | width = message["resolution_x"]
61 | height = message["resolution_y"]
62 |
63 | if width != 0 and height != 0:
64 | try:
65 | do_training = bool(message["train"])
66 | fovy = message["fov_y"]
67 | fovx = message["fov_x"]
68 | znear = message["z_near"]
69 | zfar = message["z_far"]
70 | do_shs_python = bool(message["shs_python"])
71 | do_rot_scale_python = bool(message["rot_scale_python"])
72 | keep_alive = bool(message["keep_alive"])
73 | scaling_modifier = message["scaling_modifier"]
74 | world_view_transform = torch.reshape(torch.tensor(message["view_matrix"]), (4, 4)).cuda()
75 | world_view_transform[:,1] = -world_view_transform[:,1]
76 | world_view_transform[:,2] = -world_view_transform[:,2]
77 | full_proj_transform = torch.reshape(torch.tensor(message["view_projection_matrix"]), (4, 4)).cuda()
78 | full_proj_transform[:,1] = -full_proj_transform[:,1]
79 | custom_cam = MiniCam(width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform)
80 | except Exception as e:
81 | print("")
82 | traceback.print_exc()
83 | raise e
84 | return custom_cam, do_training, do_shs_python, do_rot_scale_python, keep_alive, scaling_modifier
85 | else:
86 | return None, None, None, None, None, None
--------------------------------------------------------------------------------
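
Protocol sketch (not part of the repository): inferred from read() and receive() above, each message from a viewer is a 4-byte little-endian length prefix followed by UTF-8 JSON with the keys that receive() unpacks; the flattened identity matrices here are placeholders for illustration only.

    import json
    import socket

    view_matrix = [1.0 if i % 5 == 0 else 0.0 for i in range(16)]  # flattened 4x4 identity
    payload = json.dumps({
        "resolution_x": 800, "resolution_y": 600,
        "train": True, "fov_y": 0.8, "fov_x": 1.0,
        "z_near": 0.01, "z_far": 100.0,
        "shs_python": False, "rot_scale_python": False,
        "keep_alive": True, "scaling_modifier": 1.0,
        "view_matrix": view_matrix,
        "view_projection_matrix": view_matrix,
    }).encode("utf-8")

    with socket.create_connection(("127.0.0.1", 6009)) as sock:
        sock.sendall(len(payload).to_bytes(4, "little"))
        sock.sendall(payload)
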
/utils/camera_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | from scene.cameras import Camera
13 | import numpy as np
14 | from utils.general_utils import PILtoTorch
15 | from utils.graphics_utils import fov2focal
16 |
17 | WARNED = False
18 |
19 | def loadCam(args, id, cam_info, resolution_scale):
20 | orig_w, orig_h = cam_info.image.size
21 |
22 | if args.resolution in [1, 2, 4, 8]:
23 | resolution = round(orig_w/(resolution_scale * args.resolution)), round(orig_h/(resolution_scale * args.resolution))
24 | else: # should be a type that converts to float
25 | if args.resolution == -1:
26 | if orig_w > 1600:
27 | global WARNED
28 | if not WARNED:
29 | print("[ INFO ] Encountered quite large input images (>1.6K pixels width), rescaling to 1.6K.\n "
30 | "If this is not desired, please explicitly specify '--resolution/-r' as 1")
31 | WARNED = True
32 | global_down = orig_w / 1600
33 | else:
34 | global_down = 1
35 | else:
36 | global_down = orig_w / args.resolution
37 |
38 | scale = float(global_down) * float(resolution_scale)
39 | resolution = (int(orig_w / scale), int(orig_h / scale))
40 |
41 | resized_image_rgb = PILtoTorch(cam_info.image, resolution)
42 |
43 | gt_image = resized_image_rgb[:3, ...]
44 | loaded_mask = None
45 |
46 | if resized_image_rgb.shape[1] == 4:
47 | loaded_mask = resized_image_rgb[3:4, ...]
48 |
49 | return Camera(colmap_id=cam_info.uid, R=cam_info.R, T=cam_info.T,
50 | FoVx=cam_info.FovX, FoVy=cam_info.FovY,
51 | image=gt_image, gt_alpha_mask=loaded_mask,
52 | image_name=cam_info.image_name, uid=id, data_device=args.data_device)
53 |
54 | def cameraList_from_camInfos(cam_infos, resolution_scale, args):
55 | camera_list = []
56 |
57 | for id, c in enumerate(cam_infos):
58 | camera_list.append(loadCam(args, id, c, resolution_scale))
59 |
60 | return camera_list
61 |
62 | def camera_to_JSON(id, camera : Camera):
63 | Rt = np.zeros((4, 4))
64 | Rt[:3, :3] = camera.R.transpose()
65 | Rt[:3, 3] = camera.T
66 | Rt[3, 3] = 1.0
67 |
68 | W2C = np.linalg.inv(Rt)
69 | pos = W2C[:3, 3]
70 | rot = W2C[:3, :3]
71 | serializable_array_2d = [x.tolist() for x in rot]
72 | camera_entry = {
73 | 'id' : id,
74 | 'img_name' : camera.image_name,
75 | 'width' : camera.width,
76 | 'height' : camera.height,
77 | 'position': pos.tolist(),
78 | 'rotation': serializable_array_2d,
79 | 'fy' : fov2focal(camera.FovY, camera.height),
80 | 'fx' : fov2focal(camera.FovX, camera.width)
81 | }
82 | return camera_entry
83 |
--------------------------------------------------------------------------------
/lpipsPyTorch/modules/networks.py:
--------------------------------------------------------------------------------
1 | from typing import Sequence
2 |
3 | from itertools import chain
4 |
5 | import torch
6 | import torch.nn as nn
7 | from torchvision import models
8 |
9 | from .utils import normalize_activation
10 |
11 |
12 | def get_network(net_type: str):
13 | if net_type == 'alex':
14 | return AlexNet()
15 | elif net_type == 'squeeze':
16 | return SqueezeNet()
17 | elif net_type == 'vgg':
18 | return VGG16()
19 | else:
20 | raise NotImplementedError('choose net_type from [alex, squeeze, vgg].')
21 |
22 |
23 | class LinLayers(nn.ModuleList):
24 | def __init__(self, n_channels_list: Sequence[int]):
25 | super(LinLayers, self).__init__([
26 | nn.Sequential(
27 | nn.Identity(),
28 | nn.Conv2d(nc, 1, 1, 1, 0, bias=False)
29 | ) for nc in n_channels_list
30 | ])
31 |
32 | for param in self.parameters():
33 | param.requires_grad = False
34 |
35 |
36 | class BaseNet(nn.Module):
37 | def __init__(self):
38 | super(BaseNet, self).__init__()
39 |
40 | # register buffer
41 | self.register_buffer(
42 | 'mean', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
43 | self.register_buffer(
44 | 'std', torch.Tensor([.458, .448, .450])[None, :, None, None])
45 |
46 | def set_requires_grad(self, state: bool):
47 | for param in chain(self.parameters(), self.buffers()):
48 | param.requires_grad = state
49 |
50 | def z_score(self, x: torch.Tensor):
51 | return (x - self.mean) / self.std
52 |
53 | def forward(self, x: torch.Tensor):
54 | x = self.z_score(x)
55 |
56 | output = []
57 | for i, (_, layer) in enumerate(self.layers._modules.items(), 1):
58 | x = layer(x)
59 | if i in self.target_layers:
60 | output.append(normalize_activation(x))
61 | if len(output) == len(self.target_layers):
62 | break
63 | return output
64 |
65 |
66 | class SqueezeNet(BaseNet):
67 | def __init__(self):
68 | super(SqueezeNet, self).__init__()
69 |
70 | self.layers = models.squeezenet1_1(True).features
71 | self.target_layers = [2, 5, 8, 10, 11, 12, 13]
72 | self.n_channels_list = [64, 128, 256, 384, 384, 512, 512]
73 |
74 | self.set_requires_grad(False)
75 |
76 |
77 | class AlexNet(BaseNet):
78 | def __init__(self):
79 | super(AlexNet, self).__init__()
80 |
81 | self.layers = models.alexnet(True).features
82 | self.target_layers = [2, 5, 8, 10, 12]
83 | self.n_channels_list = [64, 192, 384, 256, 256]
84 |
85 | self.set_requires_grad(False)
86 |
87 |
88 | class VGG16(BaseNet):
89 | def __init__(self):
90 | super(VGG16, self).__init__()
91 |
92 | self.layers = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1).features
93 | self.target_layers = [4, 9, 16, 23, 30]
94 | self.n_channels_list = [64, 128, 256, 512, 512]
95 |
96 | self.set_requires_grad(False)
97 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # G-Style: Stylized Gaussian Splatting
2 | [Áron Samuel Kovács](https://www.cg.tuwien.ac.at/staff/AronKovacs), [Pedro Hermosilla](https://phermosilla.github.io/), [Renata G. Raidou](https://www.cg.tuwien.ac.at/staff/RenataRaidou)
3 |
 4 | ![G-Style teaser](assets/teaser.png)
5 |
6 |
7 |
8 | This repository contains the official implementation of the paper "G-Style: Stylized Gaussian Splatting."
9 |
10 | Abstract: *We introduce G-Style, a novel algorithm designed to transfer the style of an image onto a 3D scene represented using Gaussian Splatting. Gaussian Splatting is a powerful 3D representation for novel view synthesis, as—compared to other approaches based on Neural Radiance Fields—it provides fast scene renderings and user control over the scene. Recent pre-prints have demonstrated that the style of Gaussian Splatting scenes can be modified using an image exemplar. However, since the scene geometry remains fixed during the stylization process, current solutions fall short of producing satisfactory results. Our algorithm aims to address these limitations by following a three-step process: In a pre-processing step, we remove undesirable Gaussians with large projection areas or highly elongated shapes. Subsequently, we combine several losses carefully designed to preserve different scales of the style in the image, while maintaining as much as possible the integrity of the original scene content. During the stylization process and following the original design of Gaussian Splatting, we split Gaussians where additional detail is necessary within our scene by tracking the gradient of the stylized color. Our experiments demonstrate that G-Style generates high-quality stylizations within just a few minutes, outperforming existing methods both qualitatively and quantitatively.*
11 |
12 | ## Running the code
13 |
14 | You need to have Python 3. This implementation also requires CUDA.
15 |
16 | Python 3 dependencies:
17 |
18 | * numpy>=1.21.6
19 | * torch>=1.12.1
20 |
21 | Run the `train.py` script with the following parameters:
22 | ```
23 | python train.py -s <path to COLMAP scene> -m <path to pretrained model> --resolution <resolution> --starting_iter 30000 --path_style <path to style image>
24 | ```
25 |
26 | Optionally you can add `--forward_facing` for forward-facing scenes.
27 |
28 | This script assumes that the `-m` folder contains a pretrained scene, stored by default in an `iteration_30000` subfolder. The result is saved in the same folder. To view it, you can use `SIBR_viewers`, as in the original work by Kerbl et al.
29 |
30 | ## BibTeX
31 | Paper:
32 | ```
33 | @inproceedings{kovacs2024gstyle,
34 | title={𝒢-Style: Stylized Gaussian Splatting},
35 | author={Kov{\'a}cs, {\'A}ron Samuel and Hermosilla, Pedro and Raidou, Renata G},
36 | booktitle={Computer Graphics Forum},
37 | volume={43},
38 | number={7},
39 | pages={e15259},
40 | year={2024},
41 | organization={Wiley Online Library}
42 | }
43 | ```
44 |
45 | Code:
46 | ```
47 | @inproceedings{kovacs2024gstyle_code,
48 | title={G-Style: Stylized Gaussian Splatting: Code Implementation},
49 | author={Kov{\'a}cs, {\'A}ron Samuel and Hermosilla, Pedro and Raidou, Renata G},
50 | year={2024},
51 |   url={https://github.com/AronKovacs/g-style/}
52 | }
53 | ```
54 |
--------------------------------------------------------------------------------
/gaussian_renderer/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | import math
14 | from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
15 | from scene.gaussian_model import GaussianModel
16 | from utils.sh_utils import eval_sh
17 |
18 | def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None, primary_features=True):
19 | """
20 | Render the scene.
21 |
22 | Background tensor (bg_color) must be on GPU!
23 | """
24 |
25 | # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
26 | screenspace_points = torch.zeros_like(pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda") + 0
27 | try:
28 | screenspace_points.retain_grad()
29 | except:
30 | pass
31 |
32 | # Set up rasterization configuration
33 | tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
34 | tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
35 |
36 | raster_settings = GaussianRasterizationSettings(
37 | image_height=int(viewpoint_camera.image_height),
38 | image_width=int(viewpoint_camera.image_width),
39 | tanfovx=tanfovx,
40 | tanfovy=tanfovy,
41 | bg=bg_color,
42 | scale_modifier=scaling_modifier,
43 | viewmatrix=viewpoint_camera.world_view_transform,
44 | projmatrix=viewpoint_camera.full_proj_transform,
45 | sh_degree=pc.active_sh_degree,
46 | campos=viewpoint_camera.camera_center,
47 | prefiltered=False,
48 | debug=pipe.debug
49 | )
50 |
51 | rasterizer = GaussianRasterizer(raster_settings=raster_settings)
52 |
53 | means3D = pc.get_xyz
54 | means2D = screenspace_points
55 | opacity = pc.get_opacity
56 |
57 | # If precomputed 3d covariance is provided, use it. If not, then it will be computed from
58 | # scaling / rotation by the rasterizer.
59 | scales = None
60 | rotations = None
61 | cov3D_precomp = None
62 | if pipe.compute_cov3D_python:
63 | cov3D_precomp = pc.get_covariance(scaling_modifier)
64 | else:
65 | scales = pc.get_scaling
66 | rotations = pc.get_rotation
67 |
68 | # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
69 | # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
70 | shs = None
71 | colors_precomp = None
72 | if override_color is None:
73 | if pipe.convert_SHs_python:
74 | shs_view = pc.get_features(primary_features).transpose(1, 2).view(-1, 3, (pc.max_sh_degree+1)**2)
75 | dir_pp = (pc.get_xyz - viewpoint_camera.camera_center.repeat(pc.get_features(primary_features).shape[0], 1))
76 | dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True)
77 | sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized)
78 | colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)
79 | else:
80 | shs = pc.get_features(primary_features)
81 | else:
82 | colors_precomp = override_color
83 |
84 | # Rasterize visible Gaussians to image, obtain their radii (on screen).
85 | rendered_image, radii = rasterizer(
86 | means3D = means3D,
87 | means2D = means2D,
88 | shs = shs,
89 | colors_precomp = colors_precomp,
90 | opacities = opacity,
91 | scales = scales,
92 | rotations = rotations,
93 | cov3D_precomp = cov3D_precomp)
94 |
95 | # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
96 | # They will be excluded from value updates used in the splitting criteria.
97 | return {"render": rendered_image,
98 | "viewspace_points": screenspace_points,
99 | "visibility_filter" : radii > 0,
100 | "radii": radii}
101 |
--------------------------------------------------------------------------------
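
Usage sketch (not part of the repository): how the dictionary returned by render() is typically consumed in a training loop; `viewpoint_cam`, `gaussians` (a GaussianModel) and `pipe` are assumed to have been set up beforehand as in train.py.

    import torch
    from gaussian_renderer import render

    background = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float32, device="cuda")

    render_pkg = render(viewpoint_cam, gaussians, pipe, background)
    image = render_pkg["render"]                         # [3, H, W] rendered image
    viewspace_points = render_pkg["viewspace_points"]    # screen-space means, carry densification gradients
    visibility_filter = render_pkg["visibility_filter"]  # radii > 0, i.e. Gaussians actually rasterized
    radii = render_pkg["radii"]
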
/arguments/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | from argparse import ArgumentParser, Namespace
13 | import sys
14 | import os
15 |
16 | class GroupParams:
17 | pass
18 |
19 | class ParamGroup:
20 | def __init__(self, parser: ArgumentParser, name : str, fill_none = False):
21 | group = parser.add_argument_group(name)
22 | for key, value in vars(self).items():
23 | shorthand = False
24 | if key.startswith("_"):
25 | shorthand = True
26 | key = key[1:]
27 | t = type(value)
28 | value = value if not fill_none else None
29 | if shorthand:
30 | if t == bool:
31 | group.add_argument("--" + key, ("-" + key[0:1]), default=value, action="store_true")
32 | else:
33 | group.add_argument("--" + key, ("-" + key[0:1]), default=value, type=t)
34 | else:
35 | if t == bool:
36 | group.add_argument("--" + key, default=value, action="store_true")
37 | else:
38 | group.add_argument("--" + key, default=value, type=t)
39 |
40 | def extract(self, args):
41 | group = GroupParams()
42 | for arg in vars(args).items():
43 | if arg[0] in vars(self) or ("_" + arg[0]) in vars(self):
44 | setattr(group, arg[0], arg[1])
45 | return group
46 |
47 | class ModelParams(ParamGroup):
48 | def __init__(self, parser, sentinel=False):
49 | self.sh_degree = 0 #1 #3
50 | self._source_path = ""
51 | self._model_path = ""
52 | self._path_style = ""
53 | self._images = "images"
54 | self._resolution = -1
55 | self._forward_facing = False
56 | self._white_background = False
57 | self.data_device = "cuda"
58 | self.eval = False
59 |
60 | self.starting_iter = ""
61 | super().__init__(parser, "Loading Parameters", sentinel)
62 |
63 | def extract(self, args):
64 | g = super().extract(args)
65 | g.source_path = os.path.abspath(g.source_path)
66 | return g
67 |
68 | class PipelineParams(ParamGroup):
69 | def __init__(self, parser):
70 | self.convert_SHs_python = False
71 | self.compute_cov3D_python = False
72 | self.debug = False
73 | super().__init__(parser, "Pipeline Parameters")
74 |
75 | class OptimizationParams(ParamGroup):
76 | def __init__(self, parser):
77 | self.iterations = 30_000
78 | self.position_lr_init = 0.00016
79 | self.position_lr_final = 0.0000016
80 | self.position_lr_delay_mult = 0.01
81 | self.position_lr_max_steps = 30_000
82 | self.feature_lr = 0.0025
83 | self.opacity_lr = 0.05
84 | self.scaling_lr = 0.005
85 | self.rotation_lr = 0.001
86 | self.percent_dense = 0.01
87 | self.lambda_dssim = 0.2
88 | self.densification_interval = 100
89 | self.opacity_reset_interval = 3000
90 | self.densify_from_iter = 500
91 | self.densify_until_iter = 15_000
92 | self.densify_grad_threshold = 0.0002
93 | self.random_background = False
94 | super().__init__(parser, "Optimization Parameters")
95 |
96 | def get_combined_args(parser : ArgumentParser):
97 | cmdlne_string = sys.argv[1:]
98 | cfgfile_string = "Namespace()"
99 | args_cmdline = parser.parse_args(cmdlne_string)
100 |
101 | try:
102 | cfgfilepath = os.path.join(args_cmdline.model_path, "cfg_args")
103 | print("Looking for config file in", cfgfilepath)
104 | with open(cfgfilepath) as cfg_file:
105 | print("Config file found: {}".format(cfgfilepath))
106 | cfgfile_string = cfg_file.read()
107 | except TypeError:
108 |         print("Config file not found")
109 | pass
110 | args_cfgfile = eval(cfgfile_string)
111 |
112 | merged_dict = vars(args_cfgfile).copy()
113 | for k,v in vars(args_cmdline).items():
114 | if v != None:
115 | merged_dict[k] = v
116 | return Namespace(**merged_dict)
117 |
--------------------------------------------------------------------------------
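
Usage sketch (not part of the repository): how the parameter groups above are attached to a single ArgumentParser and read back, mirroring their use in train.py; the example paths are placeholders.

    from argparse import ArgumentParser
    from arguments import ModelParams, OptimizationParams, PipelineParams

    parser = ArgumentParser(description="Training script parameters")
    lp = ModelParams(parser)
    op = OptimizationParams(parser)
    pp = PipelineParams(parser)
    args = parser.parse_args(["-s", "/data/scene", "-m", "/data/output"])

    dataset = lp.extract(args)  # source_path, model_path, path_style, resolution, ...
    opt = op.extract(args)      # learning rates, densification settings, lambda_dssim, ...
    pipe = pp.extract(args)     # convert_SHs_python, compute_cov3D_python, debug
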
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Gaussian-Splatting License
2 | ===========================
3 |
4 | **Inria** and **the Max Planck Institut for Informatik (MPII)** hold all the ownership rights on the *Software* named **gaussian-splatting**.
5 | The *Software* is in the process of being registered with the Agence pour la Protection des
6 | Programmes (APP).
7 |
8 | The *Software* is still being developed by the *Licensor*.
9 |
10 | *Licensor*'s goal is to allow the research community to use, test and evaluate
11 | the *Software*.
12 |
13 | ## 1. Definitions
14 |
15 | *Licensee* means any person or entity that uses the *Software* and distributes
16 | its *Work*.
17 |
18 | *Licensor* means the owners of the *Software*, i.e Inria and MPII
19 |
20 | *Software* means the original work of authorship made available under this
21 | License ie gaussian-splatting.
22 |
23 | *Work* means the *Software* and any additions to or derivative works of the
24 | *Software* that are made available under this License.
25 |
26 |
27 | ## 2. Purpose
28 | This license is intended to define the rights granted to the *Licensee* by
29 | Licensors under the *Software*.
30 |
31 | ## 3. Rights granted
32 |
33 | For the above reasons Licensors have decided to distribute the *Software*.
34 | Licensors grant non-exclusive rights to use the *Software* for research purposes
35 | to research users (both academic and industrial), free of charge, without right
36 | to sublicense.. The *Software* may be used "non-commercially", i.e., for research
37 | and/or evaluation purposes only.
38 |
39 | Subject to the terms and conditions of this License, you are granted a
40 | non-exclusive, royalty-free, license to reproduce, prepare derivative works of,
41 | publicly display, publicly perform and distribute its *Work* and any resulting
42 | derivative works in any form.
43 |
44 | ## 4. Limitations
45 |
46 | **4.1 Redistribution.** You may reproduce or distribute the *Work* only if (a) you do
47 | so under this License, (b) you include a complete copy of this License with
48 | your distribution, and (c) you retain without modification any copyright,
49 | patent, trademark, or attribution notices that are present in the *Work*.
50 |
51 | **4.2 Derivative Works.** You may specify that additional or different terms apply
52 | to the use, reproduction, and distribution of your derivative works of the *Work*
53 | ("Your Terms") only if (a) Your Terms provide that the use limitation in
54 | Section 2 applies to your derivative works, and (b) you identify the specific
55 | derivative works that are subject to Your Terms. Notwithstanding Your Terms,
56 | this License (including the redistribution requirements in Section 3.1) will
57 | continue to apply to the *Work* itself.
58 |
59 | **4.3** Any other use without of prior consent of Licensors is prohibited. Research
60 | users explicitly acknowledge having received from Licensors all information
61 | allowing to appreciate the adequacy between of the *Software* and their needs and
62 | to undertake all necessary precautions for its execution and use.
63 |
64 | **4.4** The *Software* is provided both as a compiled library file and as source
65 | code. In case of using the *Software* for a publication or other results obtained
66 | through the use of the *Software*, users are strongly encouraged to cite the
67 | corresponding publications as explained in the documentation of the *Software*.
68 |
69 | ## 5. Disclaimer
70 |
71 | THE USER CANNOT USE, EXPLOIT OR DISTRIBUTE THE *SOFTWARE* FOR COMMERCIAL PURPOSES
72 | WITHOUT PRIOR AND EXPLICIT CONSENT OF LICENSORS. YOU MUST CONTACT INRIA FOR ANY
73 | UNAUTHORIZED USE: stip-sophia.transfert@inria.fr . ANY SUCH ACTION WILL
74 | CONSTITUTE A FORGERY. THIS *SOFTWARE* IS PROVIDED "AS IS" WITHOUT ANY WARRANTIES
75 | OF ANY NATURE AND ANY EXPRESS OR IMPLIED WARRANTIES, WITH REGARDS TO COMMERCIAL
76 | USE, PROFESSIONNAL USE, LEGAL OR NOT, OR OTHER, OR COMMERCIALISATION OR
77 | ADAPTATION. UNLESS EXPLICITLY PROVIDED BY LAW, IN NO EVENT, SHALL INRIA OR THE
78 | AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
79 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
80 | GOODS OR SERVICES, LOSS OF USE, DATA, OR PROFITS OR BUSINESS INTERRUPTION)
81 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
82 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING FROM, OUT OF OR
83 | IN CONNECTION WITH THE *SOFTWARE* OR THE USE OR OTHER DEALINGS IN THE *SOFTWARE*.
84 |
--------------------------------------------------------------------------------
/utils/general_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | import sys
14 | from datetime import datetime
15 | import numpy as np
16 | import random
17 |
18 | def inverse_sigmoid(x):
19 | return torch.log(x/(1-x))
20 |
21 | def PILtoTorch(pil_image, resolution):
22 | resized_image_PIL = pil_image.resize(resolution)
23 | resized_image = torch.from_numpy(np.array(resized_image_PIL)) / 255.0
24 | if len(resized_image.shape) == 3:
25 | return resized_image.permute(2, 0, 1)
26 | else:
27 | return resized_image.unsqueeze(dim=-1).permute(2, 0, 1)
28 |
29 | def get_expon_lr_func(
30 | lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000
31 | ):
32 | """
33 | Copied from Plenoxels
34 |
35 | Continuous learning rate decay function. Adapted from JaxNeRF
36 | The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
37 | is log-linearly interpolated elsewhere (equivalent to exponential decay).
38 | If lr_delay_steps>0 then the learning rate will be scaled by some smooth
39 | function of lr_delay_mult, such that the initial learning rate is
40 | lr_init*lr_delay_mult at the beginning of optimization but will be eased back
41 | to the normal learning rate when steps>lr_delay_steps.
42 | :param conf: config subtree 'lr' or similar
43 | :param max_steps: int, the number of steps during optimization.
44 | :return HoF which takes step as input
45 | """
46 |
47 | def helper(step):
48 | if step < 0 or (lr_init == 0.0 and lr_final == 0.0):
49 | # Disable this parameter
50 | return 0.0
51 | if lr_delay_steps > 0:
52 | # A kind of reverse cosine decay.
53 | delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
54 | 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)
55 | )
56 | else:
57 | delay_rate = 1.0
58 | t = np.clip(step / max_steps, 0, 1)
59 | log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
60 | return delay_rate * log_lerp
61 |
62 | return helper
63 |
64 | def strip_lowerdiag(L):
65 | uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device="cuda")
66 |
67 | uncertainty[:, 0] = L[:, 0, 0]
68 | uncertainty[:, 1] = L[:, 0, 1]
69 | uncertainty[:, 2] = L[:, 0, 2]
70 | uncertainty[:, 3] = L[:, 1, 1]
71 | uncertainty[:, 4] = L[:, 1, 2]
72 | uncertainty[:, 5] = L[:, 2, 2]
73 | return uncertainty
74 |
75 | def strip_symmetric(sym):
76 | return strip_lowerdiag(sym)
77 |
78 | def build_rotation(r):
79 | norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])
80 |
81 | q = r / norm[:, None]
82 |
83 | R = torch.zeros((q.size(0), 3, 3), device='cuda')
84 |
85 | r = q[:, 0]
86 | x = q[:, 1]
87 | y = q[:, 2]
88 | z = q[:, 3]
89 |
90 | R[:, 0, 0] = 1 - 2 * (y*y + z*z)
91 | R[:, 0, 1] = 2 * (x*y - r*z)
92 | R[:, 0, 2] = 2 * (x*z + r*y)
93 | R[:, 1, 0] = 2 * (x*y + r*z)
94 | R[:, 1, 1] = 1 - 2 * (x*x + z*z)
95 | R[:, 1, 2] = 2 * (y*z - r*x)
96 | R[:, 2, 0] = 2 * (x*z - r*y)
97 | R[:, 2, 1] = 2 * (y*z + r*x)
98 | R[:, 2, 2] = 1 - 2 * (x*x + y*y)
99 | return R
100 |
101 | def build_scaling_rotation(s, r):
102 | L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device="cuda")
103 | R = build_rotation(r)
104 |
105 | L[:,0,0] = s[:,0]
106 | L[:,1,1] = s[:,1]
107 | L[:,2,2] = s[:,2]
108 |
109 | L = R @ L
110 | return L
111 |
112 | def safe_state(silent):
113 | old_f = sys.stdout
114 | class F:
115 | def __init__(self, silent):
116 | self.silent = silent
117 |
118 | def write(self, x):
119 | if not self.silent:
120 | if x.endswith("\n"):
121 | old_f.write(x.replace("\n", " [{}]\n".format(str(datetime.now().strftime("%d/%m %H:%M:%S")))))
122 | else:
123 | old_f.write(x)
124 |
125 | def flush(self):
126 | old_f.flush()
127 |
128 | sys.stdout = F(silent)
129 |
130 | random.seed(0)
131 | np.random.seed(0)
132 | torch.manual_seed(0)
133 | torch.cuda.set_device(torch.device("cuda:0"))
134 |
--------------------------------------------------------------------------------
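
Usage sketch (not part of the repository): the learning-rate schedule produced by get_expon_lr_func above, using the position_lr_* defaults from OptimizationParams in arguments/__init__.py.

    from utils.general_utils import get_expon_lr_func

    xyz_scheduler = get_expon_lr_func(
        lr_init=0.00016,
        lr_final=0.0000016,
        lr_delay_mult=0.01,
        max_steps=30_000,
    )

    print(xyz_scheduler(0))       # 0.00016 at the first step
    print(xyz_scheduler(15_000))  # ~1.6e-5, log-linear interpolation between init and final
    print(xyz_scheduler(30_000))  # 0.0000016 at the last step
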
/scene/__init__.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import os
13 | import random
14 | import json
15 | from utils.system_utils import searchForMaxIteration
16 | from scene.dataset_readers import sceneLoadTypeCallbacks
17 | from scene.gaussian_model import GaussianModel
18 | from arguments import ModelParams
19 | from utils.camera_utils import cameraList_from_camInfos, camera_to_JSON
20 |
21 | class Scene:
22 |
23 | gaussians : GaussianModel
24 |
25 | def __init__(self, args : ModelParams, gaussians : GaussianModel, load_iteration=None, shuffle=True, resolution_scales=[1.0]):
26 |         """
27 | :param path: Path to colmap scene main folder.
28 | """
29 | self.model_path = args.model_path
30 | self.loaded_iter = None
31 | self.gaussians = gaussians
32 |
33 | if load_iteration:
34 | if load_iteration == -1:
35 | self.loaded_iter = searchForMaxIteration(os.path.join(self.model_path, "point_cloud"))
36 | else:
37 | self.loaded_iter = load_iteration
38 | print("Loading trained model at iteration {}".format(self.loaded_iter))
39 |
40 | self.train_cameras = {}
41 | self.test_cameras = {}
42 |
43 | if os.path.exists(os.path.join(args.source_path, "sparse")):
44 | scene_info = sceneLoadTypeCallbacks["Colmap"](args.source_path, args.images, args.eval)
45 | elif os.path.exists(os.path.join(args.source_path, "transforms_train.json")):
46 | print("Found transforms_train.json file, assuming Blender data set!")
47 | scene_info = sceneLoadTypeCallbacks["Blender"](args.source_path, args.white_background, args.eval)
48 | else:
49 | assert False, "Could not recognize scene type!"
50 |
51 | if not self.loaded_iter:
52 | with open(scene_info.ply_path, 'rb') as src_file, open(os.path.join(self.model_path, "input.ply") , 'wb') as dest_file:
53 | dest_file.write(src_file.read())
54 | json_cams = []
55 | camlist = []
56 | if scene_info.test_cameras:
57 | camlist.extend(scene_info.test_cameras)
58 | if scene_info.train_cameras:
59 | camlist.extend(scene_info.train_cameras)
60 | for id, cam in enumerate(camlist):
61 | json_cams.append(camera_to_JSON(id, cam))
62 | with open(os.path.join(self.model_path, "cameras.json"), 'w') as file:
63 | json.dump(json_cams, file)
64 |
65 | if shuffle:
66 | random.shuffle(scene_info.train_cameras) # Multi-res consistent random shuffling
67 | random.shuffle(scene_info.test_cameras) # Multi-res consistent random shuffling
68 |
69 | self.cameras_extent = scene_info.nerf_normalization["radius"]
70 |
71 | for resolution_scale in resolution_scales:
72 | print("Loading Training Cameras")
73 | self.train_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.train_cameras, resolution_scale, args)
74 | print("Loading Test Cameras")
75 | self.test_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.test_cameras, resolution_scale, args)
76 |
77 | if self.loaded_iter:
78 | self.gaussians.load_ply(os.path.join(self.model_path,
79 | "point_cloud",
80 | "iteration_" + str(self.loaded_iter),
81 | # PATCH "point_cloud.ply"))
82 | "point_cloud.ply"), self.cameras_extent)
83 | else:
84 | self.gaussians.create_from_pcd(scene_info.point_cloud, self.cameras_extent)
85 |
86 | def save(self, iteration, primary_features, iteration_prefix=""):
87 | prefix_separator = "" if iteration_prefix == "" else "_"
88 | point_cloud_path = os.path.join(self.model_path, f"point_cloud/{iteration_prefix}{prefix_separator}iteration_{iteration}")
89 | self.gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply"), primary_features)
90 |
91 | def getTrainCameras(self, scale=1.0):
92 | return self.train_cameras[scale]
93 |
94 | def getTestCameras(self, scale=1.0):
95 | return self.test_cameras[scale]
--------------------------------------------------------------------------------
/utils/sh_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2021 The PlenOctree Authors.
2 | # Redistribution and use in source and binary forms, with or without
3 | # modification, are permitted provided that the following conditions are met:
4 | #
5 | # 1. Redistributions of source code must retain the above copyright notice,
6 | # this list of conditions and the following disclaimer.
7 | #
8 | # 2. Redistributions in binary form must reproduce the above copyright notice,
9 | # this list of conditions and the following disclaimer in the documentation
10 | # and/or other materials provided with the distribution.
11 | #
12 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
14 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15 | # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
16 | # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17 | # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18 | # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19 | # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20 | # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22 | # POSSIBILITY OF SUCH DAMAGE.
23 |
24 | import torch
25 |
26 | C0 = 0.28209479177387814
27 | C1 = 0.4886025119029199
28 | C2 = [
29 | 1.0925484305920792,
30 | -1.0925484305920792,
31 | 0.31539156525252005,
32 | -1.0925484305920792,
33 | 0.5462742152960396
34 | ]
35 | C3 = [
36 | -0.5900435899266435,
37 | 2.890611442640554,
38 | -0.4570457994644658,
39 | 0.3731763325901154,
40 | -0.4570457994644658,
41 | 1.445305721320277,
42 | -0.5900435899266435
43 | ]
44 | C4 = [
45 | 2.5033429417967046,
46 | -1.7701307697799304,
47 | 0.9461746957575601,
48 | -0.6690465435572892,
49 | 0.10578554691520431,
50 | -0.6690465435572892,
51 | 0.47308734787878004,
52 | -1.7701307697799304,
53 | 0.6258357354491761,
54 | ]
55 |
56 |
57 | def eval_sh(deg, sh, dirs):
58 | """
59 | Evaluate spherical harmonics at unit directions
60 | using hardcoded SH polynomials.
61 | Works with torch/np/jnp.
62 | ... Can be 0 or more batch dimensions.
63 | Args:
64 | deg: int SH deg. Currently, 0-3 supported
65 | sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2]
66 | dirs: jnp.ndarray unit directions [..., 3]
67 | Returns:
68 | [..., C]
69 | """
70 | assert deg <= 4 and deg >= 0
71 | coeff = (deg + 1) ** 2
72 | assert sh.shape[-1] >= coeff
73 |
74 | result = C0 * sh[..., 0]
75 | if deg > 0:
76 | x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]
77 | result = (result -
78 | C1 * y * sh[..., 1] +
79 | C1 * z * sh[..., 2] -
80 | C1 * x * sh[..., 3])
81 |
82 | if deg > 1:
83 | xx, yy, zz = x * x, y * y, z * z
84 | xy, yz, xz = x * y, y * z, x * z
85 | result = (result +
86 | C2[0] * xy * sh[..., 4] +
87 | C2[1] * yz * sh[..., 5] +
88 | C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] +
89 | C2[3] * xz * sh[..., 7] +
90 | C2[4] * (xx - yy) * sh[..., 8])
91 |
92 | if deg > 2:
93 | result = (result +
94 | C3[0] * y * (3 * xx - yy) * sh[..., 9] +
95 | C3[1] * xy * z * sh[..., 10] +
96 | C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] +
97 | C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] +
98 | C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] +
99 | C3[5] * z * (xx - yy) * sh[..., 14] +
100 | C3[6] * x * (xx - 3 * yy) * sh[..., 15])
101 |
102 | if deg > 3:
103 | result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] +
104 | C4[1] * yz * (3 * xx - yy) * sh[..., 17] +
105 | C4[2] * xy * (7 * zz - 1) * sh[..., 18] +
106 | C4[3] * yz * (7 * zz - 3) * sh[..., 19] +
107 | C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] +
108 | C4[5] * xz * (7 * zz - 3) * sh[..., 21] +
109 | C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] +
110 | C4[7] * xz * (xx - 3 * yy) * sh[..., 23] +
111 | C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24])
112 | return result
113 |
114 | def RGB2SH(rgb):
115 | return (rgb - 0.5) / C0
116 |
117 | def SH2RGB(sh):
118 | return sh * C0 + 0.5
--------------------------------------------------------------------------------
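
Worked example (not part of the repository): for degree 0 the evaluation reduces to the constant band, so RGB2SH followed by eval_sh (plus the 0.5 offset added in gaussian_renderer/__init__.py) recovers the input color exactly.

    import torch
    from utils.sh_utils import eval_sh, RGB2SH

    rgb = torch.tensor([[0.2, 0.5, 0.8]])       # [N, 3] colors
    sh0 = RGB2SH(rgb)                           # DC coefficients
    dirs = torch.tensor([[0.0, 0.0, 1.0]])      # unit directions, unused at degree 0

    # eval_sh expects coefficients shaped [..., C, (deg + 1) ** 2]; for deg 0 that is [N, 3, 1].
    recovered = eval_sh(0, sh0.unsqueeze(-1), dirs) + 0.5
    print(torch.allclose(recovered, rgb))       # True
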
/nnfm_utils.py:
--------------------------------------------------------------------------------
1 | # Adapted from https://github.com/Kai-46/ARF-svox2
2 |
3 | """
4 | BSD 2-Clause License
5 |
6 | Copyright (c) 2021, the ARF and Plenoxels authors
7 | All rights reserved.
8 |
9 | Redistribution and use in source and binary forms, with or without
10 | modification, are permitted provided that the following conditions are met:
11 |
12 | 1. Redistributions of source code must retain the above copyright notice, this
13 | list of conditions and the following disclaimer.
14 |
15 | 2. Redistributions in binary form must reproduce the above copyright notice,
16 | this list of conditions and the following disclaimer in the documentation
17 | and/or other materials provided with the distribution.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 | """
30 |
31 | import torch
32 |
33 | def match_colors_for_image_set(image_set, style_img):
34 | """
35 | image_set: [N, H, W, 3]
36 | style_img: [H, W, 3]
37 | """
38 | sh = image_set.shape
39 | image_set = image_set.view(-1, 3)
40 | style_img = style_img.view(-1, 3).to(image_set.device)
41 |
42 | mu_c = image_set.mean(0, keepdim=True)
43 | mu_s = style_img.mean(0, keepdim=True)
44 |
45 | cov_c = torch.matmul((image_set - mu_c).transpose(1, 0), image_set - mu_c) / float(image_set.size(0))
46 | cov_s = torch.matmul((style_img - mu_s).transpose(1, 0), style_img - mu_s) / float(style_img.size(0))
47 |
48 | u_c, sig_c, _ = torch.svd(cov_c)
49 | u_s, sig_s, _ = torch.svd(cov_s)
50 |
51 | u_c_i = u_c.transpose(1, 0)
52 | u_s_i = u_s.transpose(1, 0)
53 |
54 | scl_c = torch.diag(1.0 / torch.sqrt(torch.clamp(sig_c, 1e-8, 1e8)))
55 | scl_s = torch.diag(torch.sqrt(torch.clamp(sig_s, 1e-8, 1e8)))
56 |
57 | tmp_mat = u_s @ scl_s @ u_s_i @ u_c @ scl_c @ u_c_i
58 | tmp_vec = mu_s.view(1, 3) - mu_c.view(1, 3) @ tmp_mat.T
59 |
60 | image_set = image_set @ tmp_mat.T + tmp_vec.view(1, 3)
61 | image_set = image_set.contiguous().clamp_(0.0, 1.0).view(sh)
62 |
63 | color_tf = torch.eye(4).float().to(tmp_mat.device)
64 | color_tf[:3, :3] = tmp_mat
65 | color_tf[:3, 3:4] = tmp_vec.T
66 | return image_set, color_tf
67 |
68 | def argmin_cos_distance(a, b, center=False, neg_s_flat=None):
69 | """
70 | a: [b, c, hw],
71 | b: [b, c, h2w2]
72 |
73 | neg_s_flat: [b,c,h3w3] for neg NNFM
74 | """
75 | with torch.no_grad():
76 | if center:
77 | a = a - a.mean(2, keepdims=True)
78 | b = b - b.mean(2, keepdims=True)
79 | if neg_s_flat is not None:
80 | neg_s_flat = neg_s_flat - neg_s_flat.mean(2, keepdims=True)
81 |
82 | a_norm = ((a * a).sum(1, keepdims=True) + 1e-8).sqrt() # global normalize across channel
83 | a = a / (a_norm + 1e-8)
84 | b_norm = ((b * b).sum(1, keepdims=True) + 1e-8).sqrt() # global normalize across channel
85 | b = b / (b_norm + 1e-8)
86 | if neg_s_flat is not None:
87 | neg_s_norm = ((neg_s_flat * neg_s_flat).sum(1, keepdims=True) + 1e-8).sqrt() # global normalize across channel
88 | neg_s_flat = neg_s_flat / (neg_s_norm + 1e-8)
89 | b = torch.cat([b, neg_s_flat], dim=-1) # [b, c, h2w2 + h3w3]
90 |
91 | z_best = []
92 | loop_batch_size = int(1e8 / b.shape[-1])
93 | for i in range(0, a.shape[-1], loop_batch_size): # over some dimension of generated image spatial dim
94 | a_batch = a[..., i : i + loop_batch_size]
95 |
96 | d_mat = 1.0 - torch.matmul(a_batch.transpose(2, 1), b) # [1, loop_batch_size, h2w2]
97 |
98 | z_best_batch = torch.argmin(d_mat, 2)
99 | z_best.append(z_best_batch)
100 | z_best = torch.cat(z_best, dim=-1) # [1, hw]
101 |
102 | return z_best
103 |
104 | def nn_feat_replace(a, b, neg_s_feats=None):
105 |     """
106 |     Return the features of generated image a with each spatial location's feature
107 |     replaced by its nearest-neighbour feature from b (the style features).
108 |     """
109 | n, c, h, w = a.size()
110 | n2, c, h2, w2 = b.size()
111 |
112 | assert (n == 1) and (n2 == 1)
113 |
114 | a_flat = a.view(n, c, -1)
115 | b_flat = b.view(n2, c, -1)
116 | b_ref = b_flat.clone() # [n2, c, h2w2]
117 | if neg_s_feats is not None:
118 | n3, c, h3, w3 = neg_s_feats.size()
119 | neg_s_flat = neg_s_feats.view(n3, c, -1)
120 | neg_s_ref = neg_s_flat
121 | merged_ref = torch.cat([b_ref, neg_s_ref], dim=-1) # [1, c, h2w2 + h3w3]
122 |
123 | z_new = []
124 | for i in range(n):
125 | if neg_s_feats is None:
126 | z_best = argmin_cos_distance(a_flat[i : i + 1], b_flat[i : i + 1]) # [1, hw]
127 | z_best = z_best.unsqueeze(1).repeat(1, c, 1) # [1, C, hw]
128 | feat = torch.gather(b_ref, 2, z_best) # [1, C, hw]
129 | else:
130 | z_best = argmin_cos_distance(a_flat[i : i + 1], b_flat[i : i + 1], neg_s_flat=neg_s_flat[i:i+1]) # [1, hw]
131 | z_best = z_best.unsqueeze(1).repeat(1, c, 1) # [1, C, hw]
132 | feat = torch.gather(merged_ref, 2, z_best) # [1, C, hw]
133 | z_new.append(feat)
134 |
135 | z_new = torch.cat(z_new, 0)
136 | z_new = z_new.view(n, c, h, w)
137 | return z_new
138 |
139 | def cos_loss(a, b):
140 | a_norm = (a * a).sum(1, keepdims=True).sqrt()
141 | b_norm = (b * b).sum(1, keepdims=True).sqrt()
142 | a_tmp = a / (a_norm + 1e-8)
143 | b_tmp = b / (b_norm + 1e-8)
144 | cossim = (a_tmp * b_tmp).sum(1)
145 | cos_d = 1.0 - cossim
146 | return cos_d.mean()
147 |
148 | _nnfm_block_indices = [[1, 3], [6, 8], [11, 13, 15], [18, 20, 22], [25, 27, 29]]
149 | _nnfm_blocks = [2,]
150 | _nnfm_blocks.sort()
151 | _nnfm_all_layers = []
152 | for block in _nnfm_blocks:
153 | _nnfm_all_layers += _nnfm_block_indices[block]
154 |
155 | def nnfm_block_indices():
156 | return _nnfm_block_indices
157 |
158 | def nnfm_blocks():
159 | return _nnfm_blocks
160 |
161 | def nnfm_all_layers():
162 | return _nnfm_all_layers
163 |
164 | def calculate_nnfm_loss(
165 |     gen_features,   # list of feature maps from the generated image, ordered as nnfm_all_layers()
166 |     style_features, # list of feature maps from the style image, each [1, C, H, W]
167 | ):
168 | ix_map = {}
169 | for a, b in enumerate(_nnfm_all_layers):
170 | ix_map[b] = a
171 |
172 | loss = 0.0
173 | for block in _nnfm_blocks:
174 | layers = _nnfm_block_indices[block]
175 | x_feats = torch.cat([gen_features[ix_map[ix]] for ix in layers], 1)
176 | s_feats = torch.cat([style_features[ix_map[ix]] for ix in layers], 1)
177 |
178 | target_feats = nn_feat_replace(x_feats, s_feats)
179 | loss += cos_loss(x_feats, target_feats)
180 |
181 | return loss
--------------------------------------------------------------------------------
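
The layer indices in _nnfm_block_indices (1, 3, 6, 8, 11, 13, 15, ...) line up with the ReLU outputs of torchvision's VGG16 features module, so calculate_nnfm_loss presumably consumes per-layer VGG16 activations produced elsewhere in the project. A minimal sketch of such an extractor, assuming torchvision's VGG16 and ImageNet normalization (extract_vgg_features is illustrative, not a function of this repository):

    import torch
    import torchvision

    def extract_vgg_features(image, layer_ids):
        # image: [1, 3, H, W] in [0, 1]; returns one feature map per requested layer index,
        # in ascending order (the order calculate_nnfm_loss expects for nnfm_all_layers()).
        vgg = torchvision.models.vgg16(weights=torchvision.models.VGG16_Weights.DEFAULT).features.eval()
        mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
        std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
        x = (image - mean) / std
        feats = []
        for i, layer in enumerate(vgg):
            x = layer(x)
            if i in layer_ids:
                feats.append(x)
        return feats

    # Hypothetical wiring with the helpers above:
    #   layer_ids = nnfm_all_layers()                      # [11, 13, 15] for _nnfm_blocks = [2]
    #   loss = calculate_nnfm_loss(extract_vgg_features(render, layer_ids),
    #                              extract_vgg_features(style, layer_ids))
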
/scene/dataset_readers.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import os
13 | import sys
14 | from PIL import Image
15 | from typing import NamedTuple
16 | from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
17 | read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
18 | from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
19 | import numpy as np
20 | import json
21 | from pathlib import Path
22 | from plyfile import PlyData, PlyElement
23 | from utils.sh_utils import SH2RGB
24 | from scene.gaussian_model import BasicPointCloud
25 |
26 | class CameraInfo(NamedTuple):
27 | uid: int
28 | R: np.array
29 | T: np.array
30 | FovY: np.array
31 | FovX: np.array
32 | image: np.array
33 | image_path: str
34 | image_name: str
35 | width: int
36 | height: int
37 |
38 | class SceneInfo(NamedTuple):
39 | point_cloud: BasicPointCloud
40 | train_cameras: list
41 | test_cameras: list
42 | nerf_normalization: dict
43 | ply_path: str
44 |
45 | def getNerfppNorm(cam_info):
46 | def get_center_and_diag(cam_centers):
47 | cam_centers = np.hstack(cam_centers)
48 | avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
49 | center = avg_cam_center
50 | dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
51 | diagonal = np.max(dist)
52 | return center.flatten(), diagonal
53 |
54 | cam_centers = []
55 |
56 | for cam in cam_info:
57 | W2C = getWorld2View2(cam.R, cam.T)
58 | C2W = np.linalg.inv(W2C)
59 | cam_centers.append(C2W[:3, 3:4])
60 |
61 | center, diagonal = get_center_and_diag(cam_centers)
62 | radius = diagonal * 1.1
63 |
64 | translate = -center
65 |
66 | return {"translate": translate, "radius": radius}
67 |
68 | def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
69 | cam_infos = []
70 | for idx, key in enumerate(cam_extrinsics):
71 | sys.stdout.write('\r')
72 |         # overwrite the progress line in place:
73 | sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
74 | sys.stdout.flush()
75 |
76 | extr = cam_extrinsics[key]
77 | intr = cam_intrinsics[extr.camera_id]
78 | height = intr.height
79 | width = intr.width
80 |
81 | uid = intr.id
82 | R = np.transpose(qvec2rotmat(extr.qvec))
83 | T = np.array(extr.tvec)
84 |
85 | if intr.model=="SIMPLE_PINHOLE":
86 | focal_length_x = intr.params[0]
87 | FovY = focal2fov(focal_length_x, height)
88 | FovX = focal2fov(focal_length_x, width)
89 | elif intr.model=="PINHOLE":
90 | focal_length_x = intr.params[0]
91 | focal_length_y = intr.params[1]
92 | FovY = focal2fov(focal_length_y, height)
93 | FovX = focal2fov(focal_length_x, width)
94 | else:
95 | assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
96 |
97 | image_path = os.path.join(images_folder, os.path.basename(extr.name))
98 | image_name = os.path.basename(image_path).split(".")[0]
99 | image = Image.open(image_path)
100 |
101 | cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
102 | image_path=image_path, image_name=image_name, width=width, height=height)
103 | cam_infos.append(cam_info)
104 | sys.stdout.write('\n')
105 | return cam_infos
106 |
107 | def fetchPly(path):
108 | plydata = PlyData.read(path)
109 | vertices = plydata['vertex']
110 | positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
111 | colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
112 | normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
113 | return BasicPointCloud(points=positions, colors=colors, normals=normals)
114 |
115 | def storePly(path, xyz, rgb):
116 | # Define the dtype for the structured array
117 | dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
118 | ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
119 | ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
120 |
121 | normals = np.zeros_like(xyz)
122 |
123 | elements = np.empty(xyz.shape[0], dtype=dtype)
124 | attributes = np.concatenate((xyz, normals, rgb), axis=1)
125 | elements[:] = list(map(tuple, attributes))
126 |
127 | # Create the PlyData object and write to file
128 | vertex_element = PlyElement.describe(elements, 'vertex')
129 | ply_data = PlyData([vertex_element])
130 | ply_data.write(path)
131 |
132 | def readColmapSceneInfo(path, images, eval, llffhold=8):
133 | try:
134 | cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
135 | cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
136 | cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
137 | cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
138 | except:
139 | cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
140 | cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
141 | cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
142 | cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
143 |
144 | reading_dir = "images" if images == None else images
145 | cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
146 | cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name)
147 |
148 | if eval:
149 | train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
150 | test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
151 | else:
152 | train_cam_infos = cam_infos
153 | test_cam_infos = []
154 |
155 | nerf_normalization = getNerfppNorm(train_cam_infos)
156 |
157 | ply_path = os.path.join(path, "sparse/0/points3D.ply")
158 | bin_path = os.path.join(path, "sparse/0/points3D.bin")
159 | txt_path = os.path.join(path, "sparse/0/points3D.txt")
160 | if not os.path.exists(ply_path):
161 |         print("Converting points3D.bin to .ply; this only happens the first time you open the scene.")
162 | try:
163 | xyz, rgb, _ = read_points3D_binary(bin_path)
164 | except:
165 | xyz, rgb, _ = read_points3D_text(txt_path)
166 | storePly(ply_path, xyz, rgb)
167 | try:
168 | pcd = fetchPly(ply_path)
169 | except:
170 | pcd = None
171 |
172 | scene_info = SceneInfo(point_cloud=pcd,
173 | train_cameras=train_cam_infos,
174 | test_cameras=test_cam_infos,
175 | nerf_normalization=nerf_normalization,
176 | ply_path=ply_path)
177 | return scene_info
178 |
179 | def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
180 | cam_infos = []
181 |
182 | with open(os.path.join(path, transformsfile)) as json_file:
183 | contents = json.load(json_file)
184 | fovx = contents["camera_angle_x"]
185 |
186 | frames = contents["frames"]
187 | for idx, frame in enumerate(frames):
188 | cam_name = os.path.join(path, frame["file_path"] + extension)
189 |
190 | # NeRF 'transform_matrix' is a camera-to-world transform
191 | c2w = np.array(frame["transform_matrix"])
192 | # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
193 | c2w[:3, 1:3] *= -1
194 |
195 | # get the world-to-camera transform and set R, T
196 | w2c = np.linalg.inv(c2w)
197 | R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
198 | T = w2c[:3, 3]
199 |
200 | image_path = os.path.join(path, cam_name)
201 | image_name = Path(cam_name).stem
202 | image = Image.open(image_path)
203 |
204 | im_data = np.array(image.convert("RGBA"))
205 |
206 | bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
207 |
208 | norm_data = im_data / 255.0
209 | arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
210 |             image = Image.fromarray(np.array(arr*255.0, dtype=np.uint8), "RGB")  # uint8 avoids signed-byte overflow
211 |
212 | fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1])
213 | FovY = fovy
214 | FovX = fovx
215 |
216 | cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
217 | image_path=image_path, image_name=image_name, width=image.size[0], height=image.size[1]))
218 |
219 | return cam_infos
220 |
221 | def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
222 | print("Reading Training Transforms")
223 | train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension)
224 | print("Reading Test Transforms")
225 | test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension)
226 |
227 | if not eval:
228 | train_cam_infos.extend(test_cam_infos)
229 | test_cam_infos = []
230 |
231 | nerf_normalization = getNerfppNorm(train_cam_infos)
232 |
233 | ply_path = os.path.join(path, "points3d.ply")
234 | if not os.path.exists(ply_path):
235 | # Since this data set has no colmap data, we start with random points
236 | num_pts = 100_000
237 | print(f"Generating random point cloud ({num_pts})...")
238 |
239 | # We create random points inside the bounds of the synthetic Blender scenes
240 | xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
241 | shs = np.random.random((num_pts, 3)) / 255.0
242 | pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
243 |
244 | storePly(ply_path, xyz, SH2RGB(shs) * 255)
245 | try:
246 | pcd = fetchPly(ply_path)
247 | except:
248 | pcd = None
249 |
250 | scene_info = SceneInfo(point_cloud=pcd,
251 | train_cameras=train_cam_infos,
252 | test_cameras=test_cam_infos,
253 | nerf_normalization=nerf_normalization,
254 | ply_path=ply_path)
255 | return scene_info
256 |
257 | sceneLoadTypeCallbacks = {
258 | "Colmap": readColmapSceneInfo,
259 | "Blender" : readNerfSyntheticInfo
260 | }
--------------------------------------------------------------------------------
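
A short usage sketch (not repository code; the directory names are hypothetical) of what the two loaders registered in sceneLoadTypeCallbacks expect on disk: a COLMAP scene needs sparse/0/ with cameras, images and points3D files plus an image folder, while a Blender/NeRF-synthetic scene needs transforms_train.json and transforms_test.json.

    # COLMAP layout: <path>/sparse/0/{cameras,images,points3D}.bin (or .txt) and <path>/images/
    colmap_info = sceneLoadTypeCallbacks["Colmap"]("data/my_colmap_scene", images=None, eval=True, llffhold=8)

    # Blender layout: <path>/transforms_train.json, <path>/transforms_test.json and the referenced frames
    blender_info = sceneLoadTypeCallbacks["Blender"]("data/my_blender_scene", white_background=True, eval=True)

    print(len(colmap_info.train_cameras), len(colmap_info.test_cameras), colmap_info.nerf_normalization["radius"])
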
/train_original.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import os
13 | import torch
14 | from random import randint
15 | from utils.loss_utils import l1_loss, ssim
16 | from gaussian_renderer import render, network_gui
17 | import sys
18 | from scene import Scene, GaussianModel
19 | from utils.general_utils import safe_state
20 | import uuid
21 | from tqdm import tqdm
22 | from utils.image_utils import psnr
23 | from argparse import ArgumentParser, Namespace
24 | from arguments import ModelParams, PipelineParams, OptimizationParams
25 | try:
26 | from torch.utils.tensorboard import SummaryWriter
27 | TENSORBOARD_FOUND = True
28 | except ImportError:
29 | TENSORBOARD_FOUND = False
30 |
31 | import numpy as np
32 |
33 | def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from):
34 | first_iter = 0
35 | tb_writer = prepare_output_and_logger(dataset)
36 | gaussians = GaussianModel(dataset.sh_degree)
37 | scene = Scene(dataset, gaussians)
38 | gaussians.training_setup(opt)
39 | if checkpoint:
40 | (model_params, first_iter) = torch.load(checkpoint)
41 | gaussians.restore(model_params, opt)
42 |
43 | bg_color = [1, 1, 1] if dataset.white_background else [0, 0, 0]
44 | background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
45 |
46 | iter_start = torch.cuda.Event(enable_timing = True)
47 | iter_end = torch.cuda.Event(enable_timing = True)
48 |
49 | viewpoint_stack = None
50 | ema_loss_for_log = 0.0
51 | progress_bar = tqdm(range(first_iter, opt.iterations), desc="Training progress")
52 | first_iter += 1
53 | for iteration in range(first_iter, opt.iterations + 1):
54 | if network_gui.conn == None:
55 | network_gui.try_connect()
56 | while network_gui.conn != None:
57 | try:
58 | net_image_bytes = None
59 |                 custom_cam, do_training, pipe.convert_SHs_python, pipe.compute_cov3D_python, keep_alive, scaling_modifier = network_gui.receive()
60 |                 if custom_cam != None:
61 |                     net_image = render(custom_cam, gaussians, pipe, background, scaling_modifier)["render"]
62 | net_image_bytes = memoryview((torch.clamp(net_image, min=0, max=1.0) * 255).byte().permute(1, 2, 0).contiguous().cpu().numpy())
63 | network_gui.send(net_image_bytes, dataset.source_path)
64 | if do_training and ((iteration < int(opt.iterations)) or not keep_alive):
65 | break
66 | except Exception as e:
67 | network_gui.conn = None
68 |
69 | iter_start.record()
70 |
71 | gaussians.update_learning_rate(iteration)
72 |
73 |         # Every 1000 iterations we increase the SH degree, up to the maximum
74 | if iteration % 1000 == 0:
75 | gaussians.oneupSHdegree()
76 |
77 | # Pick a random Camera
78 | if not viewpoint_stack:
79 | viewpoint_stack = scene.getTrainCameras().copy()
80 | viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack)-1))
81 |
82 | # Render
83 | if (iteration - 1) == debug_from:
84 | pipe.debug = True
85 |
86 | bg = torch.rand((3), device="cuda") if opt.random_background else background
87 |
88 | render_pkg = render(viewpoint_cam, gaussians, pipe, bg)
89 | image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"], render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
90 |
91 | # Loss
92 | gt_image = viewpoint_cam.original_image.cuda()
93 | Ll1 = l1_loss(image, gt_image)
94 | loss = (1.0 - opt.lambda_dssim) * Ll1 + opt.lambda_dssim * (1.0 - ssim(image, gt_image))
95 | loss.backward()
96 |
97 | iter_end.record()
98 |
99 | with torch.no_grad():
100 | # Progress bar
101 | ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log
102 | if iteration % 10 == 0:
103 | progress_bar.set_postfix({"Loss": f"{ema_loss_for_log:.{7}f}"})
104 | progress_bar.update(10)
105 | if iteration == opt.iterations:
106 | progress_bar.close()
107 |
108 | # Log and save
109 | training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, scene, render, (pipe, background))
110 | if (iteration in saving_iterations):
111 | print("\n[ITER {}] Saving Gaussians".format(iteration))
112 | scene.save(iteration, primary_features=True)
113 |
114 | # Densification
115 | if iteration < opt.densify_until_iter:
116 | # Keep track of max radii in image-space for pruning
117 | gaussians.max_radii2D[visibility_filter] = torch.max(gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
118 | gaussians.add_densification_stats(viewspace_point_tensor, visibility_filter)
119 |
120 | if iteration > opt.densify_from_iter and iteration % opt.densification_interval == 0:
121 | size_threshold = 20 if iteration > opt.opacity_reset_interval else None
122 | gaussians.densify_and_prune(opt.densify_grad_threshold, 0.005, scene.cameras_extent, size_threshold)
123 |
124 | if iteration % opt.opacity_reset_interval == 0 or (dataset.white_background and iteration == opt.densify_from_iter):
125 | gaussians.reset_opacity()
126 |
127 | # Optimizer step
128 | if iteration < opt.iterations:
129 | gaussians.optimizer.step()
130 | gaussians.optimizer.zero_grad(set_to_none = True)
131 |
132 | if (iteration in checkpoint_iterations):
133 | print("\n[ITER {}] Saving Checkpoint".format(iteration))
134 | torch.save((gaussians.capture(), iteration), scene.model_path + "/chkpnt" + str(iteration) + ".pth")
135 |
136 | def prepare_output_and_logger(args):
137 | if not args.model_path:
138 | if os.getenv('OAR_JOB_ID'):
139 | unique_str=os.getenv('OAR_JOB_ID')
140 | else:
141 | unique_str = str(uuid.uuid4())
142 | args.model_path = os.path.join("./output/", unique_str[0:10])
143 |
144 | # Set up output folder
145 | print("Output folder: {}".format(args.model_path))
146 | os.makedirs(args.model_path, exist_ok = True)
147 | with open(os.path.join(args.model_path, "cfg_args"), 'w') as cfg_log_f:
148 | cfg_log_f.write(str(Namespace(**vars(args))))
149 |
150 | # Create Tensorboard writer
151 | tb_writer = None
152 | if TENSORBOARD_FOUND:
153 | tb_writer = SummaryWriter(args.model_path)
154 | else:
155 | print("Tensorboard not available: not logging progress")
156 | return tb_writer
157 |
158 | def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, scene : Scene, renderFunc, renderArgs):
159 | if tb_writer:
160 | tb_writer.add_scalar('train_loss_patches/l1_loss', Ll1.item(), iteration)
161 | tb_writer.add_scalar('train_loss_patches/total_loss', loss.item(), iteration)
162 | tb_writer.add_scalar('iter_time', elapsed, iteration)
163 |
164 | # Report test and samples of training set
165 | if iteration in testing_iterations:
166 | torch.cuda.empty_cache()
167 | validation_configs = ({'name': 'test', 'cameras' : scene.getTestCameras()},
168 | {'name': 'train', 'cameras' : [scene.getTrainCameras()[idx % len(scene.getTrainCameras())] for idx in range(5, 30, 5)]})
169 |
170 | for config in validation_configs:
171 | if config['cameras'] and len(config['cameras']) > 0:
172 | l1_test = 0.0
173 | psnr_test = 0.0
174 | for idx, viewpoint in enumerate(config['cameras']):
175 | image = torch.clamp(renderFunc(viewpoint, scene.gaussians, *renderArgs)["render"], 0.0, 1.0)
176 | gt_image = torch.clamp(viewpoint.original_image.to("cuda"), 0.0, 1.0)
177 | if tb_writer and (idx < 5):
178 | tb_writer.add_images(config['name'] + "_view_{}/render".format(viewpoint.image_name), image[None], global_step=iteration)
179 | if iteration == testing_iterations[0]:
180 | tb_writer.add_images(config['name'] + "_view_{}/ground_truth".format(viewpoint.image_name), gt_image[None], global_step=iteration)
181 | l1_test += l1_loss(image, gt_image).mean().double()
182 | psnr_test += psnr(image, gt_image).mean().double()
183 | psnr_test /= len(config['cameras'])
184 | l1_test /= len(config['cameras'])
185 | print("\n[ITER {}] Evaluating {}: L1 {} PSNR {}".format(iteration, config['name'], l1_test, psnr_test))
186 | if tb_writer:
187 | tb_writer.add_scalar(config['name'] + '/loss_viewpoint - l1_loss', l1_test, iteration)
188 | tb_writer.add_scalar(config['name'] + '/loss_viewpoint - psnr', psnr_test, iteration)
189 |
190 | if tb_writer:
191 | tb_writer.add_histogram("scene/opacity_histogram", scene.gaussians.get_opacity, iteration)
192 | tb_writer.add_scalar('total_points', scene.gaussians.get_xyz.shape[0], iteration)
193 | torch.cuda.empty_cache()
194 |
195 | if __name__ == "__main__":
196 | # Set up command line argument parser
197 | parser = ArgumentParser(description="Training script parameters")
198 | lp = ModelParams(parser)
199 | op = OptimizationParams(parser)
200 | pp = PipelineParams(parser)
201 | parser.add_argument('--ip', type=str, default="127.0.0.1")
202 | parser.add_argument('--port', type=int, default=6009)
203 | parser.add_argument('--debug_from', type=int, default=-1)
204 | parser.add_argument('--detect_anomaly', action='store_true', default=False)
205 | parser.add_argument("--test_iterations", nargs="+", type=int, default=[7_000, 30_000])
206 | parser.add_argument("--save_iterations", nargs="+", type=int, default=[7_000, 30_000])
207 | parser.add_argument("--quiet", action="store_true")
208 | parser.add_argument("--checkpoint_iterations", nargs="+", type=int, default=[])
209 | parser.add_argument("--start_checkpoint", type=str, default = None)
210 | args = parser.parse_args(sys.argv[1:])
211 | args.save_iterations.append(args.iterations)
212 |
213 | print("Optimizing " + args.model_path)
214 |
215 | # Initialize system state (RNG)
216 | safe_state(args.quiet)
217 |
218 | # Start GUI server, configure and run training
219 | network_gui.init(args.ip, args.port)
220 | torch.autograd.set_detect_anomaly(args.detect_anomaly)
221 | training(lp.extract(args), op.extract(args), pp.extract(args), args.test_iterations, args.save_iterations, args.checkpoint_iterations, args.start_checkpoint, args.debug_from)
222 |
223 | # All done
224 | print("\nTraining complete.")
225 |
--------------------------------------------------------------------------------
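
The checkpoint files written above store the tuple (gaussians.capture(), iteration), and --start_checkpoint feeds such a file back through gaussians.restore before the loop starts. A condensed sketch of that round trip (the path is made up; the calls mirror the script above):

    # Saving, as done at each --checkpoint_iterations step:
    torch.save((gaussians.capture(), iteration), scene.model_path + "/chkpnt" + str(iteration) + ".pth")

    # Resuming, as done when --start_checkpoint is given:
    model_params, first_iter = torch.load("output/xxxx/chkpnt7000.pth")  # hypothetical path
    gaussians.restore(model_params, opt)  # re-runs training_setup and reloads the Adam state
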
/scene/colmap_loader.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import numpy as np
13 | import collections
14 | import struct
15 |
16 | CameraModel = collections.namedtuple(
17 | "CameraModel", ["model_id", "model_name", "num_params"])
18 | Camera = collections.namedtuple(
19 | "Camera", ["id", "model", "width", "height", "params"])
20 | BaseImage = collections.namedtuple(
21 | "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
22 | Point3D = collections.namedtuple(
23 | "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])
24 | CAMERA_MODELS = {
25 | CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3),
26 | CameraModel(model_id=1, model_name="PINHOLE", num_params=4),
27 | CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4),
28 | CameraModel(model_id=3, model_name="RADIAL", num_params=5),
29 | CameraModel(model_id=4, model_name="OPENCV", num_params=8),
30 | CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8),
31 | CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12),
32 | CameraModel(model_id=7, model_name="FOV", num_params=5),
33 | CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4),
34 | CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5),
35 | CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12)
36 | }
37 | CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model)
38 | for camera_model in CAMERA_MODELS])
39 | CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model)
40 | for camera_model in CAMERA_MODELS])
41 |
42 |
43 | def qvec2rotmat(qvec):
44 | return np.array([
45 | [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
46 | 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
47 | 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
48 | [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
49 | 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
50 | 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
51 | [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
52 | 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
53 | 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
54 |
55 | def rotmat2qvec(R):
56 | Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
57 | K = np.array([
58 | [Rxx - Ryy - Rzz, 0, 0, 0],
59 | [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
60 | [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
61 | [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
62 | eigvals, eigvecs = np.linalg.eigh(K)
63 | qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
64 | if qvec[0] < 0:
65 | qvec *= -1
66 | return qvec
67 |
68 | class Image(BaseImage):
69 | def qvec2rotmat(self):
70 | return qvec2rotmat(self.qvec)
71 |
72 | def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
73 | """Read and unpack the next bytes from a binary file.
74 |     :param fid: open binary file handle.
75 | :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
76 | :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
77 | :param endian_character: Any of {@, =, <, >, !}
78 | :return: Tuple of read and unpacked values.
79 | """
80 | data = fid.read(num_bytes)
81 | return struct.unpack(endian_character + format_char_sequence, data)
82 |
83 | def read_points3D_text(path):
84 | """
85 | see: src/base/reconstruction.cc
86 | void Reconstruction::ReadPoints3DText(const std::string& path)
87 | void Reconstruction::WritePoints3DText(const std::string& path)
88 | """
89 | xyzs = None
90 | rgbs = None
91 | errors = None
92 | num_points = 0
93 | with open(path, "r") as fid:
94 | while True:
95 | line = fid.readline()
96 | if not line:
97 | break
98 | line = line.strip()
99 | if len(line) > 0 and line[0] != "#":
100 | num_points += 1
101 |
102 |
103 | xyzs = np.empty((num_points, 3))
104 | rgbs = np.empty((num_points, 3))
105 | errors = np.empty((num_points, 1))
106 | count = 0
107 | with open(path, "r") as fid:
108 | while True:
109 | line = fid.readline()
110 | if not line:
111 | break
112 | line = line.strip()
113 | if len(line) > 0 and line[0] != "#":
114 | elems = line.split()
115 | xyz = np.array(tuple(map(float, elems[1:4])))
116 | rgb = np.array(tuple(map(int, elems[4:7])))
117 | error = np.array(float(elems[7]))
118 | xyzs[count] = xyz
119 | rgbs[count] = rgb
120 | errors[count] = error
121 | count += 1
122 |
123 | return xyzs, rgbs, errors
124 |
125 | def read_points3D_binary(path_to_model_file):
126 | """
127 | see: src/base/reconstruction.cc
128 | void Reconstruction::ReadPoints3DBinary(const std::string& path)
129 | void Reconstruction::WritePoints3DBinary(const std::string& path)
130 | """
131 |
132 |
133 | with open(path_to_model_file, "rb") as fid:
134 | num_points = read_next_bytes(fid, 8, "Q")[0]
135 |
136 | xyzs = np.empty((num_points, 3))
137 | rgbs = np.empty((num_points, 3))
138 | errors = np.empty((num_points, 1))
139 |
140 | for p_id in range(num_points):
141 | binary_point_line_properties = read_next_bytes(
142 | fid, num_bytes=43, format_char_sequence="QdddBBBd")
143 | xyz = np.array(binary_point_line_properties[1:4])
144 | rgb = np.array(binary_point_line_properties[4:7])
145 | error = np.array(binary_point_line_properties[7])
146 | track_length = read_next_bytes(
147 | fid, num_bytes=8, format_char_sequence="Q")[0]
148 | track_elems = read_next_bytes(
149 | fid, num_bytes=8*track_length,
150 | format_char_sequence="ii"*track_length)
151 | xyzs[p_id] = xyz
152 | rgbs[p_id] = rgb
153 | errors[p_id] = error
154 | return xyzs, rgbs, errors
155 |
156 | def read_intrinsics_text(path):
157 | """
158 | Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
159 | """
160 | cameras = {}
161 | with open(path, "r") as fid:
162 | while True:
163 | line = fid.readline()
164 | if not line:
165 | break
166 | line = line.strip()
167 | if len(line) > 0 and line[0] != "#":
168 | elems = line.split()
169 | camera_id = int(elems[0])
170 | model = elems[1]
171 |                 assert model == "PINHOLE", "While the loader supports other types, the rest of the code assumes PINHOLE"
172 | width = int(elems[2])
173 | height = int(elems[3])
174 | params = np.array(tuple(map(float, elems[4:])))
175 | cameras[camera_id] = Camera(id=camera_id, model=model,
176 | width=width, height=height,
177 | params=params)
178 | return cameras
179 |
180 | def read_extrinsics_binary(path_to_model_file):
181 | """
182 | see: src/base/reconstruction.cc
183 | void Reconstruction::ReadImagesBinary(const std::string& path)
184 | void Reconstruction::WriteImagesBinary(const std::string& path)
185 | """
186 | images = {}
187 | with open(path_to_model_file, "rb") as fid:
188 | num_reg_images = read_next_bytes(fid, 8, "Q")[0]
189 | for _ in range(num_reg_images):
190 | binary_image_properties = read_next_bytes(
191 | fid, num_bytes=64, format_char_sequence="idddddddi")
192 | image_id = binary_image_properties[0]
193 | qvec = np.array(binary_image_properties[1:5])
194 | tvec = np.array(binary_image_properties[5:8])
195 | camera_id = binary_image_properties[8]
196 | image_name = ""
197 | current_char = read_next_bytes(fid, 1, "c")[0]
198 | while current_char != b"\x00": # look for the ASCII 0 entry
199 | image_name += current_char.decode("utf-8")
200 | current_char = read_next_bytes(fid, 1, "c")[0]
201 | num_points2D = read_next_bytes(fid, num_bytes=8,
202 | format_char_sequence="Q")[0]
203 | x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
204 | format_char_sequence="ddq"*num_points2D)
205 | xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
206 | tuple(map(float, x_y_id_s[1::3]))])
207 | point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
208 | images[image_id] = Image(
209 | id=image_id, qvec=qvec, tvec=tvec,
210 | camera_id=camera_id, name=image_name,
211 | xys=xys, point3D_ids=point3D_ids)
212 | return images
213 |
214 |
215 | def read_intrinsics_binary(path_to_model_file):
216 | """
217 | see: src/base/reconstruction.cc
218 | void Reconstruction::WriteCamerasBinary(const std::string& path)
219 | void Reconstruction::ReadCamerasBinary(const std::string& path)
220 | """
221 | cameras = {}
222 | with open(path_to_model_file, "rb") as fid:
223 | num_cameras = read_next_bytes(fid, 8, "Q")[0]
224 | for _ in range(num_cameras):
225 | camera_properties = read_next_bytes(
226 | fid, num_bytes=24, format_char_sequence="iiQQ")
227 | camera_id = camera_properties[0]
228 | model_id = camera_properties[1]
229 | model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
230 | width = camera_properties[2]
231 | height = camera_properties[3]
232 | num_params = CAMERA_MODEL_IDS[model_id].num_params
233 | params = read_next_bytes(fid, num_bytes=8*num_params,
234 | format_char_sequence="d"*num_params)
235 | cameras[camera_id] = Camera(id=camera_id,
236 | model=model_name,
237 | width=width,
238 | height=height,
239 | params=np.array(params))
240 | assert len(cameras) == num_cameras
241 | return cameras
242 |
243 |
244 | def read_extrinsics_text(path):
245 | """
246 | Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
247 | """
248 | images = {}
249 | with open(path, "r") as fid:
250 | while True:
251 | line = fid.readline()
252 | if not line:
253 | break
254 | line = line.strip()
255 | if len(line) > 0 and line[0] != "#":
256 | elems = line.split()
257 | image_id = int(elems[0])
258 | qvec = np.array(tuple(map(float, elems[1:5])))
259 | tvec = np.array(tuple(map(float, elems[5:8])))
260 | camera_id = int(elems[8])
261 | image_name = elems[9]
262 | elems = fid.readline().split()
263 | xys = np.column_stack([tuple(map(float, elems[0::3])),
264 | tuple(map(float, elems[1::3]))])
265 | point3D_ids = np.array(tuple(map(int, elems[2::3])))
266 | images[image_id] = Image(
267 | id=image_id, qvec=qvec, tvec=tvec,
268 | camera_id=camera_id, name=image_name,
269 | xys=xys, point3D_ids=point3D_ids)
270 | return images
271 |
272 |
273 | def read_colmap_bin_array(path):
274 | """
275 | Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_dense.py
276 |
277 | :param path: path to the colmap binary file.
278 |     :return: nd array with the floating point values in the file
279 | """
280 | with open(path, "rb") as fid:
281 | width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1,
282 | usecols=(0, 1, 2), dtype=int)
283 | fid.seek(0)
284 | num_delimiter = 0
285 | byte = fid.read(1)
286 | while True:
287 | if byte == b"&":
288 | num_delimiter += 1
289 | if num_delimiter >= 3:
290 | break
291 | byte = fid.read(1)
292 | array = np.fromfile(fid, np.float32)
293 | array = array.reshape((width, height, channels), order="F")
294 | return np.transpose(array, (1, 0, 2)).squeeze()
295 |
--------------------------------------------------------------------------------
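
The binary readers above pin down the record layout of COLMAP's files. As a sanity check, the points3D.bin layout implied by read_points3D_binary (uint64 point count, then per point: id, xyz, rgb, reprojection error, track length, and (image_id, point2D_idx) pairs) can be reproduced with struct.pack. A small round-trip sketch with made-up values (not repository code):

    import struct

    with open("/tmp/points3D.bin", "wb") as f:
        f.write(struct.pack("<Q", 1))                                          # number of points (uint64)
        f.write(struct.pack("<QdddBBBd", 7, 0.1, 0.2, 0.3, 255, 128, 0, 0.5))  # id, xyz, rgb, error (43 bytes)
        f.write(struct.pack("<Q", 2))                                          # track length (uint64)
        f.write(struct.pack("<" + "ii" * 2, 1, 0, 2, 3))                       # (image_id, point2D_idx) pairs

    xyzs, rgbs, errors = read_points3D_binary("/tmp/points3D.bin")
    # xyzs -> [[0.1 0.2 0.3]], rgbs -> [[255. 128. 0.]], errors -> [[0.5]]
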
/scene/gaussian_model_old.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | import numpy as np
14 | from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation
15 | from torch import nn
16 | import os
17 | from utils.system_utils import mkdir_p
18 | from plyfile import PlyData, PlyElement
19 | from utils.sh_utils import RGB2SH
20 | from simple_knn._C import distCUDA2
21 | from utils.graphics_utils import BasicPointCloud
22 | from utils.general_utils import strip_symmetric, build_scaling_rotation
23 |
24 | class GaussianModel:
25 |
26 | def setup_functions(self):
27 | def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
28 | L = build_scaling_rotation(scaling_modifier * scaling, rotation)
29 | actual_covariance = L @ L.transpose(1, 2)
30 | symm = strip_symmetric(actual_covariance)
31 | return symm
32 |
33 | self.scaling_activation = torch.exp
34 | self.scaling_inverse_activation = torch.log
35 |
36 | self.covariance_activation = build_covariance_from_scaling_rotation
37 |
38 | self.opacity_activation = torch.sigmoid
39 | self.inverse_opacity_activation = inverse_sigmoid
40 |
41 | self.rotation_activation = torch.nn.functional.normalize
42 |
43 |
44 | def __init__(self, sh_degree : int):
45 | self.active_sh_degree = 0
46 | self.max_sh_degree = sh_degree
47 | self._xyz = torch.empty(0)
48 | self._features_dc = torch.empty(0)
49 | self._features_rest = torch.empty(0)
50 | self._scaling = torch.empty(0)
51 | self._rotation = torch.empty(0)
52 | self._opacity = torch.empty(0)
53 | self.max_radii2D = torch.empty(0)
54 | self.xyz_gradient_accum = torch.empty(0)
55 | self.denom = torch.empty(0)
56 | self.optimizer = None
57 | self.percent_dense = 0
58 | self.spatial_lr_scale = 0
59 | self.setup_functions()
60 |
61 | def capture(self):
62 | return (
63 | self.active_sh_degree,
64 | self._xyz,
65 | self._features_dc,
66 | self._features_rest,
67 | self._scaling,
68 | self._rotation,
69 | self._opacity,
70 | self.max_radii2D,
71 | self.xyz_gradient_accum,
72 | self.denom,
73 | self.optimizer.state_dict(),
74 | self.spatial_lr_scale,
75 | )
76 |
77 | def restore(self, model_args, training_args):
78 | (self.active_sh_degree,
79 | self._xyz,
80 | self._features_dc,
81 | self._features_rest,
82 | self._scaling,
83 | self._rotation,
84 | self._opacity,
85 | self.max_radii2D,
86 | xyz_gradient_accum,
87 | denom,
88 | opt_dict,
89 | self.spatial_lr_scale) = model_args
90 | self.training_setup(training_args)
91 | self.xyz_gradient_accum = xyz_gradient_accum
92 | self.denom = denom
93 | self.optimizer.load_state_dict(opt_dict)
94 |
95 | @property
96 | def get_scaling(self):
97 | return self.scaling_activation(self._scaling)
98 |
99 | @property
100 | def get_rotation(self):
101 | return self.rotation_activation(self._rotation)
102 |
103 | @property
104 | def get_xyz(self):
105 | return self._xyz
106 |
107 | @property
108 | def get_features(self):
109 | features_dc = self._features_dc
110 | features_rest = self._features_rest
111 | return torch.cat((features_dc, features_rest), dim=1)
112 |
113 | @property
114 | def get_opacity(self):
115 | return self.opacity_activation(self._opacity)
116 |
117 | def get_covariance(self, scaling_modifier = 1):
118 | return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
119 |
120 | def oneupSHdegree(self):
121 | if self.active_sh_degree < self.max_sh_degree:
122 | self.active_sh_degree += 1
123 |
124 | def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):
125 | self.spatial_lr_scale = spatial_lr_scale
126 | fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda()
127 | fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors)).float().cuda())
128 | features = torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2)).float().cuda()
129 | features[:, :3, 0 ] = fused_color
130 | features[:, 3:, 1:] = 0.0
131 |
132 | print("Number of points at initialisation : ", fused_point_cloud.shape[0])
133 |
134 | dist2 = torch.clamp_min(distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()), 0.0000001)
135 | scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3)
136 | rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda")
137 | rots[:, 0] = 1
138 |
139 | opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda"))
140 |
141 | self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True))
142 | self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(True))
143 | self._features_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(True))
144 | self._scaling = nn.Parameter(scales.requires_grad_(True))
145 | self._rotation = nn.Parameter(rots.requires_grad_(True))
146 | self._opacity = nn.Parameter(opacities.requires_grad_(True))
147 | self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
148 |
149 | def training_setup(self, training_args):
150 | self.percent_dense = training_args.percent_dense
151 | self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
152 | self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
153 |
154 | l = [
155 | {'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
156 | {'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"},
157 | {'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"},
158 | {'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"},
159 | {'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"},
160 | {'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"}
161 | ]
162 |
163 | self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)
164 | self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
165 | lr_final=training_args.position_lr_final*self.spatial_lr_scale,
166 | lr_delay_mult=training_args.position_lr_delay_mult,
167 | max_steps=training_args.position_lr_max_steps)
168 |
169 | def update_learning_rate(self, iteration):
170 | ''' Learning rate scheduling per step '''
171 | for param_group in self.optimizer.param_groups:
172 | if param_group["name"] == "xyz":
173 | lr = self.xyz_scheduler_args(iteration)
174 | param_group['lr'] = lr
175 | return lr
176 |
177 | def construct_list_of_attributes(self):
178 | l = ['x', 'y', 'z', 'nx', 'ny', 'nz']
179 | # All channels except the 3 DC
180 | for i in range(self._features_dc.shape[1]*self._features_dc.shape[2]):
181 | l.append('f_dc_{}'.format(i))
182 | for i in range(self._features_rest.shape[1]*self._features_rest.shape[2]):
183 | l.append('f_rest_{}'.format(i))
184 | l.append('opacity')
185 | for i in range(self._scaling.shape[1]):
186 | l.append('scale_{}'.format(i))
187 | for i in range(self._rotation.shape[1]):
188 | l.append('rot_{}'.format(i))
189 | return l
190 |
191 | def save_ply(self, path):
192 | mkdir_p(os.path.dirname(path))
193 |
194 | xyz = self._xyz.detach().cpu().numpy()
195 | normals = np.zeros_like(xyz)
196 | f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
197 | f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
198 | opacities = self._opacity.detach().cpu().numpy()
199 | scale = self._scaling.detach().cpu().numpy()
200 | rotation = self._rotation.detach().cpu().numpy()
201 |
202 | dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()]
203 |
204 | elements = np.empty(xyz.shape[0], dtype=dtype_full)
205 | attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1)
206 | elements[:] = list(map(tuple, attributes))
207 | el = PlyElement.describe(elements, 'vertex')
208 | PlyData([el]).write(path)
209 |
210 | def reset_opacity(self):
211 | opacities_new = inverse_sigmoid(torch.min(self.get_opacity, torch.ones_like(self.get_opacity)*0.01))
212 | optimizable_tensors = self.replace_tensor_to_optimizer(opacities_new, "opacity")
213 | self._opacity = optimizable_tensors["opacity"]
214 |
215 | #PATCH def load_ply(self, path):
216 | def load_ply(self, path, spatial_lr_scale : float = 0):
217 | plydata = PlyData.read(path)
218 |
219 | xyz = np.stack((np.asarray(plydata.elements[0]["x"]),
220 | np.asarray(plydata.elements[0]["y"]),
221 | np.asarray(plydata.elements[0]["z"])), axis=1)
222 | opacities = np.asarray(plydata.elements[0]["opacity"])[..., np.newaxis]
223 |
224 | features_dc = np.zeros((xyz.shape[0], 3, 1))
225 | features_dc[:, 0, 0] = np.asarray(plydata.elements[0]["f_dc_0"])
226 | features_dc[:, 1, 0] = np.asarray(plydata.elements[0]["f_dc_1"])
227 | features_dc[:, 2, 0] = np.asarray(plydata.elements[0]["f_dc_2"])
228 |
229 | extra_f_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("f_rest_")]
230 | extra_f_names = sorted(extra_f_names, key = lambda x: int(x.split('_')[-1]))
231 | assert len(extra_f_names)==3*(self.max_sh_degree + 1) ** 2 - 3
232 | features_extra = np.zeros((xyz.shape[0], len(extra_f_names)))
233 | for idx, attr_name in enumerate(extra_f_names):
234 | features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name])
235 | # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC)
236 | features_extra = features_extra.reshape((features_extra.shape[0], 3, (self.max_sh_degree + 1) ** 2 - 1))
237 |
238 | scale_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("scale_")]
239 | scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1]))
240 | scales = np.zeros((xyz.shape[0], len(scale_names)))
241 | for idx, attr_name in enumerate(scale_names):
242 | scales[:, idx] = np.asarray(plydata.elements[0][attr_name])
243 |
244 | rot_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("rot")]
245 | rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1]))
246 | rots = np.zeros((xyz.shape[0], len(rot_names)))
247 | for idx, attr_name in enumerate(rot_names):
248 | rots[:, idx] = np.asarray(plydata.elements[0][attr_name])
249 |
250 | self._xyz = nn.Parameter(torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True))
251 | self._features_dc = nn.Parameter(torch.tensor(features_dc, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
252 | self._features_rest = nn.Parameter(torch.tensor(features_extra, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
253 | self._opacity = nn.Parameter(torch.tensor(opacities, dtype=torch.float, device="cuda").requires_grad_(True))
254 | self._scaling = nn.Parameter(torch.tensor(scales, dtype=torch.float, device="cuda").requires_grad_(True))
255 | self._rotation = nn.Parameter(torch.tensor(rots, dtype=torch.float, device="cuda").requires_grad_(True))
256 |
257 | self.active_sh_degree = self.max_sh_degree
258 |
259 | # PATCH
260 | self.spatial_lr_scale = spatial_lr_scale
261 | self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
262 | # /PATCH
263 |
264 | def replace_tensor_to_optimizer(self, tensor, name):
265 | optimizable_tensors = {}
266 | for group in self.optimizer.param_groups:
267 | if group["name"] == name:
268 | stored_state = self.optimizer.state.get(group['params'][0], None)
269 | stored_state["exp_avg"] = torch.zeros_like(tensor)
270 | stored_state["exp_avg_sq"] = torch.zeros_like(tensor)
271 |
272 | del self.optimizer.state[group['params'][0]]
273 | group["params"][0] = nn.Parameter(tensor.requires_grad_(True))
274 | self.optimizer.state[group['params'][0]] = stored_state
275 |
276 | optimizable_tensors[group["name"]] = group["params"][0]
277 | return optimizable_tensors
278 |
279 | def _prune_optimizer(self, mask):
280 | optimizable_tensors = {}
281 | for group in self.optimizer.param_groups:
282 | stored_state = self.optimizer.state.get(group['params'][0], None)
283 | if stored_state is not None:
284 | stored_state["exp_avg"] = stored_state["exp_avg"][mask]
285 | stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask]
286 |
287 | del self.optimizer.state[group['params'][0]]
288 | group["params"][0] = nn.Parameter((group["params"][0][mask].requires_grad_(True)))
289 | self.optimizer.state[group['params'][0]] = stored_state
290 |
291 | optimizable_tensors[group["name"]] = group["params"][0]
292 | else:
293 | group["params"][0] = nn.Parameter(group["params"][0][mask].requires_grad_(True))
294 | optimizable_tensors[group["name"]] = group["params"][0]
295 | return optimizable_tensors
296 |
297 | def prune_points(self, mask):
298 | valid_points_mask = ~mask
299 | optimizable_tensors = self._prune_optimizer(valid_points_mask)
300 |
301 | self._xyz = optimizable_tensors["xyz"]
302 | self._features_dc = optimizable_tensors["f_dc"]
303 | self._features_rest = optimizable_tensors["f_rest"]
304 | self._opacity = optimizable_tensors["opacity"]
305 | self._scaling = optimizable_tensors["scaling"]
306 | self._rotation = optimizable_tensors["rotation"]
307 |
308 | self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]
309 |
310 | self.denom = self.denom[valid_points_mask]
311 | self.max_radii2D = self.max_radii2D[valid_points_mask]
312 |
313 | def cat_tensors_to_optimizer(self, tensors_dict):
314 | optimizable_tensors = {}
315 | for group in self.optimizer.param_groups:
316 | assert len(group["params"]) == 1
317 | extension_tensor = tensors_dict[group["name"]]
318 | stored_state = self.optimizer.state.get(group['params'][0], None)
319 | if stored_state is not None:
320 |
321 | stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0)
322 | stored_state["exp_avg_sq"] = torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0)
323 |
324 | del self.optimizer.state[group['params'][0]]
325 | group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
326 | self.optimizer.state[group['params'][0]] = stored_state
327 |
328 | optimizable_tensors[group["name"]] = group["params"][0]
329 | else:
330 | group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
331 | optimizable_tensors[group["name"]] = group["params"][0]
332 |
333 | return optimizable_tensors
334 |
335 | def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):
336 | d = {"xyz": new_xyz,
337 | "f_dc": new_features_dc,
338 | "f_rest": new_features_rest,
339 | "opacity": new_opacities,
340 | "scaling" : new_scaling,
341 | "rotation" : new_rotation}
342 |
343 | optimizable_tensors = self.cat_tensors_to_optimizer(d)
344 | self._xyz = optimizable_tensors["xyz"]
345 | self._features_dc = optimizable_tensors["f_dc"]
346 | self._features_rest = optimizable_tensors["f_rest"]
347 | self._opacity = optimizable_tensors["opacity"]
348 | self._scaling = optimizable_tensors["scaling"]
349 | self._rotation = optimizable_tensors["rotation"]
350 |
351 | self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
352 | self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
353 | self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
354 |
355 | def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
356 | n_init_points = self.get_xyz.shape[0]
357 | # Extract points that satisfy the gradient condition
358 | padded_grad = torch.zeros((n_init_points), device="cuda")
359 | padded_grad[:grads.shape[0]] = grads.squeeze()
360 | selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)
361 | selected_pts_mask = torch.logical_and(selected_pts_mask,
362 | torch.max(self.get_scaling, dim=1).values > self.percent_dense*scene_extent)
363 |
364 | stds = self.get_scaling[selected_pts_mask].repeat(N,1)
365 |         means = torch.zeros((stds.size(0), 3), device="cuda")
366 | samples = torch.normal(mean=means, std=stds)
367 | rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N,1,1)
368 | new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1)
369 | new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N,1) / (0.8*N))
370 | new_rotation = self._rotation[selected_pts_mask].repeat(N,1)
371 | new_features_dc = self._features_dc[selected_pts_mask].repeat(N,1,1)
372 | new_features_rest = self._features_rest[selected_pts_mask].repeat(N,1,1)
373 | new_opacity = self._opacity[selected_pts_mask].repeat(N,1)
374 |
375 | self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacity, new_scaling, new_rotation)
376 |
377 | prune_filter = torch.cat((selected_pts_mask, torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool)))
378 | self.prune_points(prune_filter)
379 |
380 | def densify_and_clone(self, grads, grad_threshold, scene_extent):
381 | # Extract points that satisfy the gradient condition
382 | selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= grad_threshold, True, False)
383 | selected_pts_mask = torch.logical_and(selected_pts_mask,
384 | torch.max(self.get_scaling, dim=1).values <= self.percent_dense*scene_extent)
385 |
386 | new_xyz = self._xyz[selected_pts_mask]
387 | new_features_dc = self._features_dc[selected_pts_mask]
388 | new_features_rest = self._features_rest[selected_pts_mask]
389 | new_opacities = self._opacity[selected_pts_mask]
390 | new_scaling = self._scaling[selected_pts_mask]
391 | new_rotation = self._rotation[selected_pts_mask]
392 |
393 | self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation)
394 |
395 | def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
396 | grads = self.xyz_gradient_accum / self.denom
397 | grads[grads.isnan()] = 0.0
398 |
399 | self.densify_and_clone(grads, max_grad, extent)
400 | self.densify_and_split(grads, max_grad, extent)
401 |
402 | prune_mask = (self.get_opacity < min_opacity).squeeze()
403 | if max_screen_size:
404 | big_points_vs = self.max_radii2D > max_screen_size
405 | big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * extent
406 | prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws)
407 | self.prune_points(prune_mask)
408 |
409 | torch.cuda.empty_cache()
410 |
411 | def add_densification_stats(self, viewspace_point_tensor, update_filter):
412 | self.xyz_gradient_accum[update_filter] += torch.norm(viewspace_point_tensor.grad[update_filter,:2], dim=-1, keepdim=True)
413 | self.denom[update_filter] += 1
--------------------------------------------------------------------------------
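
setup_functions above builds each Gaussian's covariance as L @ L^T with L = build_scaling_rotation(scale, rotation), and strip_symmetric then compacts the symmetric result. Assuming, as in the reference 3D Gaussian Splatting code, that build_scaling_rotation returns R @ diag(s) for a (w, x, y, z) quaternion, this is the Sigma = R S S^T R^T factorization; a few lines of torch reproduce it for a single Gaussian (illustrative sketch, not repository code):

    import torch

    def quat_to_rotmat(q):
        # q = (w, x, y, z), same convention as qvec2rotmat in scene/colmap_loader.py
        w, x, y, z = (q / q.norm()).tolist()
        return torch.tensor([
            [1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y)],
            [2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
            [2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y)]])

    s = torch.tensor([0.5, 1.0, 2.0])                # per-axis scales, i.e. exp(_scaling) for one Gaussian
    R = quat_to_rotmat(torch.tensor([1.0, 0.2, 0.1, 0.0]))
    L = R @ torch.diag(s)                            # assumed output of build_scaling_rotation
    sigma = L @ L.T                                  # full 3x3 covariance
    print(torch.allclose(sigma, R @ torch.diag(s ** 2) @ R.T))  # True: Sigma = R S S^T R^T
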
/scene/gaussian_model.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import torch
13 | import numpy as np
14 | from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation
15 | from torch import nn
16 | import os
17 | from utils.system_utils import mkdir_p
18 | from plyfile import PlyData, PlyElement
19 | from utils.sh_utils import RGB2SH
20 | from simple_knn._C import distCUDA2
21 | from utils.graphics_utils import BasicPointCloud
22 | from utils.general_utils import strip_symmetric, build_scaling_rotation
23 |
24 | class GaussianModel:
25 |
26 | def setup_functions(self):
27 | def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
28 | L = build_scaling_rotation(scaling_modifier * scaling, rotation)
29 | actual_covariance = L @ L.transpose(1, 2)
30 | symm = strip_symmetric(actual_covariance)
31 | return symm
32 |
33 | self.scaling_activation = torch.exp
34 | self.scaling_inverse_activation = torch.log
35 |
36 | self.covariance_activation = build_covariance_from_scaling_rotation
37 |
38 | self.opacity_activation = torch.sigmoid
39 | self.inverse_opacity_activation = inverse_sigmoid
40 |
41 | self.rotation_activation = torch.nn.functional.normalize
42 |
43 |
44 | def __init__(self, sh_degree : int):
45 | self.active_sh_degree = 0
46 | self.max_sh_degree = sh_degree
47 | self._xyz = torch.empty(0)
48 | self._features_dc = torch.empty(0)
49 | self._features_rest = torch.empty(0)
50 | self._features_secondary_dc = torch.empty(0)
51 | #self._features_secondary_rest = torch.empty(0)
52 | self._scaling = torch.empty(0)
53 | self._rotation = torch.empty(0)
54 | self._opacity = torch.empty(0)
55 | self.max_radii2D = torch.empty(0)
56 | self.xyz_gradient_accum = torch.empty(0)
57 | self.denom = torch.empty(0)
58 | self.optimizer = None
59 | self.percent_dense = 0
60 | self.spatial_lr_scale = 0
61 | self.setup_functions()
62 |
63 | def capture(self):
64 | return (
65 | self.active_sh_degree,
66 | self._xyz,
67 | self._features_dc,
68 | self._features_rest,
69 | self._scaling,
70 | self._rotation,
71 | self._opacity,
72 | self.max_radii2D,
73 | self.xyz_gradient_accum,
74 | self.denom,
75 | self.optimizer.state_dict(),
76 | self.spatial_lr_scale,
77 | )
78 |
79 | def restore(self, model_args, training_args):
80 | (self.active_sh_degree,
81 | self._xyz,
82 | self._features_dc,
83 | self._features_rest,
84 | self._scaling,
85 | self._rotation,
86 | self._opacity,
87 | self.max_radii2D,
88 | xyz_gradient_accum,
89 | denom,
90 | opt_dict,
91 | self.spatial_lr_scale) = model_args
92 | self.training_setup(training_args)
93 | self.xyz_gradient_accum = xyz_gradient_accum
94 | self.denom = denom
95 | self.optimizer.load_state_dict(opt_dict)
96 |
97 | @property
98 | def get_scaling(self):
99 | return self.scaling_activation(self._scaling)
100 |
101 | @property
102 | def get_rotation(self):
103 | return self.rotation_activation(self._rotation)
104 |
105 | @property
106 | def get_xyz(self):
107 | return self._xyz
108 |
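    |     # The model keeps two sets of DC colour features: the primary ones
    |     # (_features_dc), fitted to the ground-truth photographs, and the secondary
    |     # ones (_features_secondary_dc), which hold the stylised colours. The
    |     # higher-order SH coefficients (_features_rest) are shared by both sets.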
109 | def copy_features_primary_to_secondary(self):
110 | dc_tensor = self._features_dc.data.clone().detach().requires_grad_(True)
111 | #rest_tensor = self._features_rest.clone().detach().requires_grad_(True)
112 |
113 | self._features_secondary_dc = nn.Parameter(dc_tensor)
114 | #self._features_secondary_rest = nn.Parameter(rest_tensor)
115 |
116 | def copy_features_secondary_to_primary(self):
117 | dc_tensor = self._features_secondary_dc.data.clone().detach().requires_grad_(True)
118 | #rest_tensor = self._features_secondary_rest.clone().detach().requires_grad_(True)
119 |
120 | self._features_dc = nn.Parameter(dc_tensor)
121 | #self._features_rest = nn.Parameter(rest_tensor)
122 |
123 | #@property
124 | def get_features(self, primary_features):
125 | if primary_features:
126 | features_dc = self._features_dc
127 | features_rest = self._features_rest
128 | else:
129 | features_dc = self._features_secondary_dc
130 | features_rest = self._features_rest #self._features_secondary_rest
131 | return torch.cat((features_dc, features_rest), dim=1)
132 |
133 | @property
134 | def get_opacity(self):
135 | return self.opacity_activation(self._opacity)
136 |
137 | def get_covariance(self, scaling_modifier = 1):
138 | return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
139 |
140 | def oneupSHdegree(self):
141 | if self.active_sh_degree < self.max_sh_degree:
142 | self.active_sh_degree += 1
143 |
144 | def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):
145 | self.spatial_lr_scale = spatial_lr_scale
146 | fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda()
147 | fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors)).float().cuda())
148 | features = torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2)).float().cuda()
149 | features[:, :3, 0 ] = fused_color
150 | features[:, 3:, 1:] = 0.0
151 |
152 | print("Number of points at initialisation : ", fused_point_cloud.shape[0])
153 |
154 | dist2 = torch.clamp_min(distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()), 0.0000001)
155 | scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3)
156 | rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda")
157 | rots[:, 0] = 1
158 |
159 | opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda"))
160 |
161 | self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True))
162 | self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(True))
163 | self._features_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(True))
164 | self._features_secondary_dc = nn.Parameter(torch.clone(features[:,:,0:1]).detach().transpose(1, 2).contiguous().requires_grad_(True))
165 | #self._features_secondary_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(True))
166 | self._scaling = nn.Parameter(scales.requires_grad_(True))
167 | self._rotation = nn.Parameter(rots.requires_grad_(True))
168 | self._opacity = nn.Parameter(opacities.requires_grad_(True))
169 | self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
170 |
171 | def training_setup(self, training_args):
172 | self.training_args = training_args
173 | self.percent_dense = training_args.percent_dense
174 | self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
175 | self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
176 |
177 | l = [
178 | {'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
179 | {'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"},
180 | {'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"},
181 | #{'params': [self._features_secondary_dc], 'lr': training_args.feature_lr, "name": "f_secondary_dc"},
182 | #{'params': [self._features_secondary_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_secondary_rest"},
183 | {'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"},
184 | {'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"},
185 | {'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"}
186 | ]
187 |
188 | self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)
189 | self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
190 | lr_final=training_args.position_lr_final*self.spatial_lr_scale,
191 | lr_delay_mult=training_args.position_lr_delay_mult,
192 | max_steps=training_args.position_lr_max_steps)
193 |
194 | def update_learning_rate(self, iteration):
195 | ''' Learning rate scheduling per step '''
196 | for param_group in self.optimizer.param_groups:
197 | if param_group["name"] == "xyz":
198 | lr = self.xyz_scheduler_args(iteration)
199 | param_group['lr'] = lr
200 | return lr
201 |
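    |     # Sets the geometry learning rates (xyz via its scheduler, plus opacity,
    |     # scaling and rotation) to their configured values; disable_geometry_learning()
    |     # below zeroes them so that only the colour features keep training.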
202 | def enable_geometry_learning(self, iteration):
203 | for param_group in self.optimizer.param_groups:
204 | if param_group["name"] == "xyz":
205 | lr = self.xyz_scheduler_args(iteration)
206 | param_group['lr'] = lr
207 | elif param_group["name"] == "opacity":
208 | param_group['lr'] = self.training_args.opacity_lr
209 | elif param_group["name"] == "scaling":
210 | param_group['lr'] = self.training_args.scaling_lr
211 | elif param_group["name"] == "rotation":
212 | param_group['lr'] = self.training_args.rotation_lr
213 |
214 | def disable_geometry_learning(self):
215 | geometry_params = ["xyz", "opacity", "scaling", "rotation"]
216 | for param_group in self.optimizer.param_groups:
217 | if param_group["name"] in geometry_params:
218 | param_group['lr'] = 0.0
219 | return 0.0
220 |
221 | def construct_list_of_attributes(self):
222 | l = ['x', 'y', 'z', 'nx', 'ny', 'nz']
223 | # All channels except the 3 DC
224 | for i in range(self._features_dc.shape[1]*self._features_dc.shape[2]):
225 | l.append('f_dc_{}'.format(i))
226 | for i in range(self._features_rest.shape[1]*self._features_rest.shape[2]):
227 | l.append('f_rest_{}'.format(i))
228 | l.append('opacity')
229 | for i in range(self._scaling.shape[1]):
230 | l.append('scale_{}'.format(i))
231 | for i in range(self._rotation.shape[1]):
232 | l.append('rot_{}'.format(i))
233 | return l
234 |
235 | def save_ply(self, path, primary_features):
236 | mkdir_p(os.path.dirname(path))
237 |
238 | xyz = self._xyz.detach().cpu().numpy()
239 | normals = np.zeros_like(xyz)
240 | if primary_features:
241 | f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
242 | f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
243 | else:
244 | f_dc = self._features_secondary_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
245 | f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy() #self._features_secondary_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
246 | opacities = self._opacity.detach().cpu().numpy()
247 | scale = self._scaling.detach().cpu().numpy()
248 | rotation = self._rotation.detach().cpu().numpy()
249 |
250 | dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()]
251 |
252 | elements = np.empty(xyz.shape[0], dtype=dtype_full)
253 | attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1)
254 | elements[:] = list(map(tuple, attributes))
255 | el = PlyElement.describe(elements, 'vertex')
256 | PlyData([el]).write(path)
257 |
258 | def reset_opacity(self):
259 | opacities_new = inverse_sigmoid(torch.min(self.get_opacity, torch.ones_like(self.get_opacity)*0.01))
260 | optimizable_tensors = self.replace_tensor_to_optimizer(opacities_new, "opacity")
261 | self._opacity = optimizable_tensors["opacity"]
262 |
263 | #PATCH def load_ply(self, path):
264 | def load_ply(self, path, spatial_lr_scale : float = 0):
265 | plydata = PlyData.read(path)
266 |
267 | xyz = np.stack((np.asarray(plydata.elements[0]["x"]),
268 | np.asarray(plydata.elements[0]["y"]),
269 | np.asarray(plydata.elements[0]["z"])), axis=1)
270 | opacities = np.asarray(plydata.elements[0]["opacity"])[..., np.newaxis]
271 |
272 | features_dc = np.zeros((xyz.shape[0], 3, 1))
273 | features_dc[:, 0, 0] = np.asarray(plydata.elements[0]["f_dc_0"])
274 | features_dc[:, 1, 0] = np.asarray(plydata.elements[0]["f_dc_1"])
275 | features_dc[:, 2, 0] = np.asarray(plydata.elements[0]["f_dc_2"])
276 |
277 | #extra_f_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("f_rest_")]
278 | #extra_f_names = sorted(extra_f_names, key = lambda x: int(x.split('_')[-1]))
279 | #assert len(extra_f_names)==3*(self.max_sh_degree + 1) ** 2 - 3
280 | #features_extra = np.zeros((xyz.shape[0], len(extra_f_names)))
281 | features_extra = np.zeros((xyz.shape[0], (3*(self.max_sh_degree + 1) ** 2 - 3)))
282 | #for idx, attr_name in enumerate(extra_f_names):
283 | # features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name])
284 | # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC)
285 | features_extra = features_extra.reshape((features_extra.shape[0], 3, (self.max_sh_degree + 1) ** 2 - 1))
286 |
287 | scale_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("scale_")]
288 | scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1]))
289 | scales = np.zeros((xyz.shape[0], len(scale_names)))
290 | for idx, attr_name in enumerate(scale_names):
291 | scales[:, idx] = np.asarray(plydata.elements[0][attr_name])
292 |
293 | rot_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("rot")]
294 | rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1]))
295 | rots = np.zeros((xyz.shape[0], len(rot_names)))
296 | for idx, attr_name in enumerate(rot_names):
297 | rots[:, idx] = np.asarray(plydata.elements[0][attr_name])
298 |
299 | self._xyz = nn.Parameter(torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True))
300 | self._features_dc = nn.Parameter(torch.tensor(features_dc, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
301 | self._features_rest = nn.Parameter(torch.tensor(features_extra, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
302 | self._opacity = nn.Parameter(torch.tensor(opacities, dtype=torch.float, device="cuda").requires_grad_(True))
303 | self._scaling = nn.Parameter(torch.tensor(scales, dtype=torch.float, device="cuda").requires_grad_(True))
304 | self._rotation = nn.Parameter(torch.tensor(rots, dtype=torch.float, device="cuda").requires_grad_(True))
305 |
306 | self.active_sh_degree = self.max_sh_degree
307 |
308 | # PATCH
309 | self.spatial_lr_scale = spatial_lr_scale
310 | self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
311 | # /PATCH
312 |
313 | def replace_tensor_to_optimizer(self, tensor, name):
314 | optimizable_tensors = {}
315 | for group in self.optimizer.param_groups:
316 | if group["name"] == name:
317 | stored_state = self.optimizer.state.get(group['params'][0], None)
318 | stored_state["exp_avg"] = torch.zeros_like(tensor)
319 | stored_state["exp_avg_sq"] = torch.zeros_like(tensor)
320 |
321 | del self.optimizer.state[group['params'][0]]
322 | group["params"][0] = nn.Parameter(tensor.requires_grad_(True))
323 | self.optimizer.state[group['params'][0]] = stored_state
324 |
325 | optimizable_tensors[group["name"]] = group["params"][0]
326 | return optimizable_tensors
327 |
328 | def _prune_optimizer(self, mask):
329 | optimizable_tensors = {}
330 | for group in self.optimizer.param_groups:
331 | stored_state = self.optimizer.state.get(group['params'][0], None)
332 | if stored_state is not None:
333 | stored_state["exp_avg"] = stored_state["exp_avg"][mask]
334 | stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask]
335 |
336 | del self.optimizer.state[group['params'][0]]
337 | group["params"][0] = nn.Parameter((group["params"][0][mask].requires_grad_(True)))
338 | self.optimizer.state[group['params'][0]] = stored_state
339 |
340 | optimizable_tensors[group["name"]] = group["params"][0]
341 | else:
342 | group["params"][0] = nn.Parameter(group["params"][0][mask].requires_grad_(True))
343 | optimizable_tensors[group["name"]] = group["params"][0]
344 | return optimizable_tensors
345 |
346 | def prune_points(self, mask):
347 | valid_points_mask = ~mask
348 | optimizable_tensors = self._prune_optimizer(valid_points_mask)
349 |
350 | self._xyz = optimizable_tensors["xyz"]
351 | self._features_dc = optimizable_tensors["f_dc"]
352 | self._features_rest = optimizable_tensors["f_rest"]
353 | #self._features_secondary_dc = optimizable_tensors["f_secondary_dc"]
354 | #self._features_secondary_rest = optimizable_tensors["f_secondary_rest"]
355 | self._opacity = optimizable_tensors["opacity"]
356 | self._scaling = optimizable_tensors["scaling"]
357 | self._rotation = optimizable_tensors["rotation"]
358 |
359 | self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]
360 |
361 | self.denom = self.denom[valid_points_mask]
362 | self.max_radii2D = self.max_radii2D[valid_points_mask]
363 |
364 | self._features_secondary_dc = nn.Parameter(self._features_secondary_dc[valid_points_mask].requires_grad_(True))
365 |
366 | def cat_tensors_to_optimizer(self, tensors_dict):
367 | optimizable_tensors = {}
368 | for group in self.optimizer.param_groups:
369 | assert len(group["params"]) == 1
370 | extension_tensor = tensors_dict[group["name"]]
371 | stored_state = self.optimizer.state.get(group['params'][0], None)
372 | if stored_state is not None:
373 |
374 | stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0)
375 | stored_state["exp_avg_sq"] = torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0)
376 |
377 | del self.optimizer.state[group['params'][0]]
378 | group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
379 | self.optimizer.state[group['params'][0]] = stored_state
380 |
381 | optimizable_tensors[group["name"]] = group["params"][0]
382 | else:
383 | group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
384 | optimizable_tensors[group["name"]] = group["params"][0]
385 |
386 | return optimizable_tensors
387 |
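    |     # Appends newly created Gaussians to every optimizer group (with zero-initialised
    |     # Adam moments) and resets the densification statistics. The secondary colour
    |     # features are not registered in this optimizer (train.py optimises them with a
    |     # separate Adam), so they are concatenated manually at the end.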
388 | #def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_features_secondary_dc, new_features_secondary_rest, new_opacities, new_scaling, new_rotation):
389 | def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_features_secondary_dc, new_opacities, new_scaling, new_rotation):
390 | d = {"xyz": new_xyz,
391 | "f_dc": new_features_dc,
392 | "f_rest": new_features_rest,
393 | #"f_secondary_dc": new_features_secondary_dc,
394 | #"f_secondary_rest": new_features_secondary_rest,
395 | "opacity": new_opacities,
396 | "scaling" : new_scaling,
397 | "rotation" : new_rotation}
398 |
399 | optimizable_tensors = self.cat_tensors_to_optimizer(d)
400 | self._xyz = optimizable_tensors["xyz"]
401 | self._features_dc = optimizable_tensors["f_dc"]
402 | self._features_rest = optimizable_tensors["f_rest"]
403 | #self._features_secondary_dc = optimizable_tensors["f_secondary_dc"]
404 | #self._features_secondary_rest = optimizable_tensors["f_secondary_rest"]
405 | self._opacity = optimizable_tensors["opacity"]
406 | self._scaling = optimizable_tensors["scaling"]
407 | self._rotation = optimizable_tensors["rotation"]
408 |
409 | self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
410 | self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
411 | self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
412 |
413 | self._features_secondary_dc = nn.Parameter(torch.cat((self._features_secondary_dc, new_features_secondary_dc), dim=0).requires_grad_(True))
414 |
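    |     # Splits every selected Gaussian into N children whose positions are sampled from
    |     # the parent's covariance, shrinks their scale by scaling_factor * N, and prunes
    |     # the parents. train.py uses this both for the projected-area splits during
    |     # re-training and for the gradient-driven splits during stylization.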
415 | def densify_and_split_with_mask(self, selected_pts_mask, N=2, scaling_factor=0.8):
416 | stds = self.get_scaling[selected_pts_mask].repeat(N,1)
417 |         means = torch.zeros((stds.size(0), 3), device="cuda")
418 | samples = torch.normal(mean=means, std=stds)
419 | rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N,1,1)
420 | new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1)
421 | new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N,1) / (scaling_factor*N))
422 | new_rotation = self._rotation[selected_pts_mask].repeat(N,1)
423 | new_features_dc = self._features_dc[selected_pts_mask].repeat(N,1,1)
424 | new_features_rest = self._features_rest[selected_pts_mask].repeat(N,1,1)
425 | new_features_secondary_dc = self._features_secondary_dc[selected_pts_mask].repeat(N,1,1)
426 | #new_features_secondary_rest = self._features_secondary_rest[selected_pts_mask].repeat(N,1,1)
427 | new_opacity = self._opacity[selected_pts_mask].repeat(N,1)
428 |
429 | #self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_features_secondary_dc, new_features_secondary_rest, new_opacity, new_scaling, new_rotation)
430 | self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_features_secondary_dc, new_opacity, new_scaling, new_rotation)
431 |
432 | prune_filter = torch.cat((selected_pts_mask, torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool)))
433 | self.prune_points(prune_filter)
434 |
435 | def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
436 | n_init_points = self.get_xyz.shape[0]
437 | # Extract points that satisfy the gradient condition
438 | padded_grad = torch.zeros((n_init_points), device="cuda")
439 | padded_grad[:grads.shape[0]] = grads.squeeze()
440 | selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)
441 | selected_pts_mask = torch.logical_and(selected_pts_mask,
442 | torch.max(self.get_scaling, dim=1).values > self.percent_dense*scene_extent)
443 |
444 | stds = self.get_scaling[selected_pts_mask].repeat(N,1)
445 |         means = torch.zeros((stds.size(0), 3), device="cuda")
446 | samples = torch.normal(mean=means, std=stds)
447 | rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N,1,1)
448 | new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1)
449 | new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N,1) / (0.8*N))
450 | new_rotation = self._rotation[selected_pts_mask].repeat(N,1)
451 | new_features_dc = self._features_dc[selected_pts_mask].repeat(N,1,1)
452 | new_features_rest = self._features_rest[selected_pts_mask].repeat(N,1,1)
453 | new_features_secondary_dc = self._features_secondary_dc[selected_pts_mask].repeat(N,1,1)
454 | #new_features_secondary_rest = self._features_secondary_rest[selected_pts_mask].repeat(N,1,1)
455 | new_opacity = self._opacity[selected_pts_mask].repeat(N,1)
456 |
457 | self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_features_secondary_dc, new_opacity, new_scaling, new_rotation)
458 |
459 | prune_filter = torch.cat((selected_pts_mask, torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool)))
460 | self.prune_points(prune_filter)
461 |
462 | def densify_and_clone(self, grads, grad_threshold, scene_extent):
463 | # Extract points that satisfy the gradient condition
464 | selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= grad_threshold, True, False)
465 | selected_pts_mask = torch.logical_and(selected_pts_mask,
466 | torch.max(self.get_scaling, dim=1).values <= self.percent_dense*scene_extent)
467 |
468 | new_xyz = self._xyz[selected_pts_mask]
469 | new_features_dc = self._features_dc[selected_pts_mask]
470 | new_features_rest = self._features_rest[selected_pts_mask]
471 | new_features_secondary_dc = self._features_secondary_dc[selected_pts_mask]
472 | #new_features_secondary_rest = self._features_secondary_rest[selected_pts_mask]
473 | new_opacities = self._opacity[selected_pts_mask]
474 | new_scaling = self._scaling[selected_pts_mask]
475 | new_rotation = self._rotation[selected_pts_mask]
476 |
477 | self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_features_secondary_dc, new_opacities, new_scaling, new_rotation)
478 |
479 | def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
480 | grads = self.xyz_gradient_accum / self.denom
481 | grads[grads.isnan()] = 0.0
482 |
483 | self.densify_and_clone(grads, max_grad, extent)
484 | self.densify_and_split(grads, max_grad, extent)
485 |
486 | prune_mask = (self.get_opacity < min_opacity).squeeze()
487 | if max_screen_size:
488 | big_points_vs = self.max_radii2D > max_screen_size
489 | big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * extent
490 | prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws)
491 | self.prune_points(prune_mask)
492 |
493 | torch.cuda.empty_cache()
494 |
495 | def add_densification_stats(self, viewspace_point_tensor, update_filter):
496 | self.xyz_gradient_accum[update_filter] += torch.norm(viewspace_point_tensor.grad[update_filter,:2], dim=-1, keepdim=True)
497 | self.denom[update_filter] += 1
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 |
12 | import os
13 | import torch
14 | from random import randint
15 | from utils.loss_utils import l1_loss, ssim
16 | from gaussian_renderer import render, network_gui
17 | import sys
18 | from scene import Scene, GaussianModel
19 | from utils.general_utils import safe_state
20 | import uuid
21 | from tqdm import tqdm
22 | from utils.image_utils import psnr
23 | from argparse import ArgumentParser, Namespace
24 | from arguments import ModelParams, PipelineParams, OptimizationParams
25 | try:
26 | from torch.utils.tensorboard import SummaryWriter
27 | TENSORBOARD_FOUND = True
28 | except ImportError:
29 | TENSORBOARD_FOUND = False
30 |
31 | from PIL import Image
32 | import torchvision.transforms as transforms
33 | import torchvision.models as models
34 |
35 | import open_clip
36 |
37 | import numpy as np
38 |
39 | from pathlib import Path
40 |
41 | import nnfm_utils
42 |
43 | def image_loader(path):
44 | image=Image.open(path)
45 | loader=transforms.Compose([transforms.ToTensor()])
46 |
47 | image=loader(image).unsqueeze(0)
48 | return image.to('cuda', torch.float)
49 |
50 | def bcwh_to_bwhc(data):
51 | return torch.permute(data, (0, 2, 3, 1))
52 |
53 | def bwhc_to_bcwh(data):
54 | return torch.permute(data, (0, 3, 1, 2))
55 |
56 | class VGG(torch.nn.Module):
57 | def __init__(self):
58 | super(VGG,self).__init__()
59 |
60 | cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to('cuda')
61 | cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to('cuda')
62 |
63 |         self.mean = cnn_normalization_mean.view(-1, 1, 1)
64 |         self.std = cnn_normalization_std.view(-1, 1, 1)
65 |
66 | self.req_features= ['0','5','10','19','28']
67 | self.model=models.vgg19(pretrained=True).features[:29]
68 |
69 | def forward(self, x):
70 | x = (x - self.mean) / self.std
71 | features=[]
72 | for layer_num,layer in enumerate(self.model):
73 | x=layer(x)
74 | if str(layer_num) in self.req_features:
75 | features.append(x)
76 |
77 | return features
78 |
79 | def calc_content_loss(gen_feat,orig_feat):
80 | content_l = torch.mean((gen_feat - orig_feat) ** 2)
81 | return content_l
82 |
83 | def calculate_content_loss(generated_features, content_features):
84 | content_loss = 0.0
85 | for g, c in zip(generated_features, content_features):
86 | content_loss += torch.nn.functional.mse_loss(g, c)
87 | return content_loss
88 |
89 | def calculate_total_variation_loss(image):
90 | width_variance = torch.sum(torch.pow(image[:,:,:,:-1] - image[:,:,:,1:], 2))
91 | height_variance = torch.sum(torch.pow(image[:,:,:-1,:] - image[:,:,1:,:], 2))
92 | loss = width_variance + height_variance
93 | return loss
94 |
95 | def get_feats(x, vgg16, vgg16_normalize, layers=[]):
96 | x = vgg16_normalize(x)
97 | final_ix = max(layers)
98 | outputs = []
99 |
100 | for ix, layer in enumerate(vgg16.features):
101 | x = layer(x)
102 | if ix in layers:
103 | outputs.append(x)
104 |
105 | if ix == final_ix:
106 | break
107 | return outputs
108 |
109 | class CLIPLoss(torch.nn.Module):
110 | def __init__(self, text_prompts=[], image_prompts=[], n_cuts=16):
111 | super(CLIPLoss, self).__init__()
112 |
113 | clip_model, _, _ = open_clip.create_model_and_transforms('ViT-B-32-quickgelu', pretrained='laion400m_e32')
114 | self.clip_model = clip_model
115 | self.clip_model_input_size = 224
116 | self.preprocess = transforms.Compose([
117 | transforms.Resize(size=self.clip_model_input_size, max_size=None, antialias=None),
118 | transforms.CenterCrop(size=(self.clip_model_input_size, self.clip_model_input_size)),
119 | transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
120 | ])
121 | self.clip_model.to('cuda')
122 | self.clip_model.eval()
123 |
124 | self.target_embeds = []
125 | with torch.no_grad():
126 | for text_prompt in text_prompts:
127 | tokenized_text = open_clip.tokenize([text_prompt]).to('cuda')
128 | self.target_embeds.append(clip_model.encode_text(tokenized_text))
129 | for image_prompt in image_prompts:
130 | image_embed = clip_model.encode_image(self.preprocess(image_prompt))
131 | self.target_embeds.append(image_embed)
132 |
133 | self.target_embeds = torch.cat(self.target_embeds)
134 |
135 | self.n_cuts = n_cuts
136 |
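    |     # Samples n_cuts random square crops of the input, pools each to the CLIP input
    |     # resolution, embeds them, and returns the mean squared spherical distance to the
    |     # target (style image / text) embeddings.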
137 | def forward(self, input):
138 | if self.n_cuts > 1:
139 | sideY, sideX = input.shape[2:4]
140 | max_size = min(sideX, sideY)
141 | min_size = min(sideX, sideY, self.clip_model_input_size)
142 | cutouts = []
143 | for _ in range(self.n_cuts):
144 | size = int(torch.rand([]) * (max_size - min_size) + min_size)
145 | offsetx = torch.randint(0, sideX - size + 1, ())
146 | offsety = torch.randint(0, sideY - size + 1, ())
147 | cutout = input[:, :, offsety : offsety + size, offsetx : offsetx + size]
148 | cutouts.append(torch.nn.functional.adaptive_avg_pool2d(cutout, self.clip_model_input_size))
149 | input = torch.cat(cutouts)
150 |
151 | input_embed = self.clip_model.encode_image(self.preprocess(input))
152 | input_normed = torch.nn.functional.normalize(input_embed.unsqueeze(1), dim=-1)
153 | embed_normed = torch.nn.functional.normalize(self.target_embeds.unsqueeze(0), dim=-1)
154 | dists = input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2)
155 |
156 | return dists.mean()
157 |
158 | vgg16 = models.vgg16(pretrained=True).eval().to('cuda')
159 | vgg16_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
160 |
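    | # Product of the two largest scaling components, i.e. the area (up to a factor of pi)
    | # of the ellipse spanned by the Gaussian's two longest axes; used as a rough proxy for
    | # how large the Gaussian appears on screen.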
161 | def projected_area(scaling):
162 | s0 = scaling[0]
163 | s1 = scaling[1]
164 | s2 = scaling[2]
165 |
166 | a = 0
167 | b = 0
168 | if s0 < s1 and s0 < s2:
169 | a = s1
170 | b = s2
171 | elif s1 < s0 and s1 < s2:
172 | a = s0
173 | b = s2
174 | else:
175 | a = s0
176 | b = s1
177 | return a * b
178 |
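    | # Limits anisotropy: if the ratio between the longest and second-longest axis exceeds
    | # `threshold`, the longest axis is pulled halfway towards the second-longest.
    | # Modifies `scaling` (one row of the scaling array) in place.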
179 | def shorten_scaling(scaling, threshold):
180 | s0 = scaling[0]
181 | s1 = scaling[1]
182 | s2 = scaling[2]
183 |
184 | a = 0
185 | b = 0
186 | if s0 < s1 and s0 < s2:
187 | a = s1
188 | b = s2
189 | elif s1 < s0 and s1 < s2:
190 | a = s0
191 | b = s2
192 | else:
193 | a = s0
194 | b = s1
195 |
196 | a, b = max(a, b), min(a, b)
197 | ratio = a / b
198 | if ratio > threshold:
199 | new_a = b + (a - b) * 0.5
200 |
201 | if scaling[0] == a:
202 | scaling[0] = new_a
203 | elif scaling[1] == a:
204 | scaling[1] = new_a
205 | else:
206 | scaling[2] = new_a
207 |
208 |
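    | # Stylization pipeline: (1) optionally re-train the scene for several rounds, splitting
    | # the largest Gaussians by projected area before each round; (2) colour-match the
    | # secondary colours to the style image's palette; (3) stylize the secondary colours with
    | # a CLIP + NNFM + content + total-variation objective, splitting high-gradient Gaussians
    | # along the way; (4) recolour once more at the end and save the result.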
209 | def training(dataset, opt, pipe, checkpoint):
210 | first_iter = 0
211 | gaussians = GaussianModel(dataset.sh_degree)
212 | scene = Scene(dataset, gaussians, int(dataset.starting_iter))
213 | # copy everything, but we mostly care about the shape
214 | gaussians.copy_features_primary_to_secondary()
215 | with torch.no_grad():
216 | gaussians._features_rest[:] = 0.0
217 |
218 | gaussians.training_setup(opt)
219 | if checkpoint:
220 | (model_params, first_iter) = torch.load(checkpoint)
221 | gaussians.restore(model_params, opt)
222 |
223 | style_name = Path(dataset.path_style).stem
224 | style_image = image_loader(dataset.path_style)[:, :3]
225 |
226 | clip_loss_fn = CLIPLoss(
227 | image_prompts=[style_image],
228 | )
229 | vgg16_style_feats = [x.detach() for x in get_feats(style_image, vgg16, vgg16_normalize, nnfm_utils.nnfm_all_layers())]
230 |
231 | bg_color = [1, 1, 1] if dataset.white_background else [0, 0, 0]
232 | background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
233 |
234 | iter_start = torch.cuda.Event(enable_timing = True)
235 | iter_end = torch.cuda.Event(enable_timing = True)
236 |
237 | viewpoint_stack = None
238 | ema_loss_for_log = 0.0
239 | first_iter += 1
240 |
241 | scaling = gaussians._scaling.cpu().detach().numpy()
242 | print(scaling.shape)
243 |
244 | n_views = len(scene.getTrainCameras())
245 | pretrain_until_at_least = None
246 | dont_split_when_above = 3250000
247 |
248 | if dataset.forward_facing:
249 | print('Setting params for a forward facing scene')
250 | clip_weight = 10
251 | nnfm_weight = 100
252 | content_weight = 0.05
253 | tv_weight = 0.0001
254 |
255 | n_retraining = 5
256 |
257 | split_std_threshold = 1.0
258 | split_std_multiplier = 1.0
259 | split_max_n = 0.10
260 | split_by = 8
261 | split_scaling_factor = 0.8
262 | retrain_iterations = max(n_views * 5, 2000)
263 | shorten_scaling_iter = 250
264 | shorten_scaling_max_iter = 1000
265 | shorten_scaling_threshold = 1.5
266 | stylization_size = 1024
267 | stylization_iterations = max(n_views * 15, 450)
268 | stylization_gradient_accum_start = n_views * 2
269 | stylization_split_iterations = [n_views * 3]
270 | stylization_densify_by = 4
271 | stylization_densify_scaling_factor = 4
272 | stylization_densify_n = 0.05
273 | stylization_retrain_iterations = retrain_iterations
274 | color_matching_iterations = max(n_views * 4, 1000)
275 |
276 | stylization_lr_start = 0.1
277 | stylization_lr_end = 0.01
278 | stylization_decay = -np.log(stylization_lr_end/stylization_lr_start)/stylization_iterations
279 | color_matching_start_iteration = int(1 * stylization_iterations)
280 | else:
281 | print('Setting params for a 360 scene')
282 |
283 | clip_weight = 10
284 | nnfm_weight = 10
285 | content_weight = 0.05
286 | tv_weight = 0.0001
287 |
288 | n_retraining = 10
289 |
290 | split_std_threshold = 1.1
291 | split_std_multiplier = 1.125
292 | split_max_n = 0.05
293 | split_by = 4
294 | split_scaling_factor = 2
295 | retrain_iterations = max(n_views * 5, 2000)
296 | shorten_scaling_iter = 250
297 | shorten_scaling_max_iter = 1000
298 | shorten_scaling_threshold = 1.5
299 | stylization_size = 1024
300 | stylization_iterations = max(n_views * 15, 300)
301 | stylization_gradient_accum_start = n_views * 2
302 | stylization_split_iterations = [n_views * 3, n_views, n_views * 7]
303 | stylization_densify_by = 4
304 | stylization_densify_scaling_factor = 2
305 | stylization_densify_n = 0.02
306 | stylization_retrain_iterations = retrain_iterations
307 | color_matching_iterations = max(n_views * 4, 1000)
308 |
309 | stylization_lr_start = 0.01
310 | stylization_lr_end = 0.005
311 | stylization_decay = -np.log(stylization_lr_end/stylization_lr_start)/stylization_iterations
312 | color_matching_start_iteration = int(1 * stylization_iterations)
313 |
314 | starting_iter = 100000
315 | preprocess_color_matching_iter = 100001
316 | if int(dataset.starting_iter) == starting_iter:
317 | n_retraining = 0
318 | print('Loaded a retrained dataset')
319 | else:
320 | print('Retraining a dataset')
321 |
322 | gaussians.active_sh_degree = 0
323 |
324 | print('Number of Gaussians:', gaussians._features_dc.shape[0])
325 |
326 | if pretrain_until_at_least is not None:
327 | n_retraining = 10000000 # a large number
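    |     # Each re-training round first splits the Gaussians whose projected area exceeds a
    |     # mean + k*std threshold (scaled by split_std_multiplier each round, and capped so
    |     # that at most split_max_n of all Gaussians are split), then re-optimises the primary
    |     # colours and geometry against the ground-truth images for retrain_iterations steps.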
328 | for retraining in range(n_retraining):
329 | if gaussians._features_dc.shape[0] > dont_split_when_above:
330 | break
331 | if pretrain_until_at_least is not None and gaussians._features_dc.shape[0] > pretrain_until_at_least:
332 | break
333 |
334 | progress_bar = tqdm(range(0, retrain_iterations), desc=f"Retraining progress {retraining + 1}")
335 |
336 | with torch.no_grad():
337 | if split_max_n > 0.0:
338 | scaling = gaussians.get_scaling.cpu().detach().numpy()
339 | n_gaussians = gaussians._features_dc.shape[0]
340 |
341 | projected_areas = np.zeros(scaling.shape[0], np.float32)
342 | for i in range(scaling.shape[0]):
343 | projected_areas[i] = projected_area(scaling[i])
344 |
345 | areas_mean = np.mean(projected_areas)
346 | areas_std = np.std(projected_areas)
347 |
348 | std_split_threshold = areas_mean + areas_std * split_std_threshold * (split_std_multiplier ** retraining)
349 | split_counter = np.count_nonzero(projected_areas > std_split_threshold)
350 | print('split_counter', split_counter)
351 | if split_counter > split_max_n * n_gaussians:
352 | sorted_projected_areas = np.sort(projected_areas)
353 | percentage_split_threshold = sorted_projected_areas[n_gaussians - int(split_max_n * n_gaussians)]
354 | split_threshold = percentage_split_threshold
355 | else:
356 | split_threshold = std_split_threshold
357 |
358 | gaussians_to_split = torch.empty(n_gaussians, dtype=torch.bool, device='cpu')
359 | for i in range(n_gaussians):
360 | gaussians_to_split[i] = bool(projected_areas[i] > split_threshold)
361 | gaussians_to_split = gaussians_to_split.cuda()
362 | gaussians.densify_and_split_with_mask(gaussians_to_split, N=split_by, scaling_factor=split_scaling_factor)
363 |
364 | for iteration in range(1, retrain_iterations + 1):
365 | iter_start.record()
366 |
367 | if iteration % shorten_scaling_iter == 0 and iteration <= shorten_scaling_max_iter:
368 | with torch.no_grad():
369 | scaling = gaussians.get_scaling.cpu().detach().numpy()
370 | for i in range(scaling.shape[0]):
371 | shorten_scaling(scaling[i], shorten_scaling_threshold)
372 | gaussians._scaling[:, :] = gaussians.scaling_inverse_activation(torch.from_numpy(scaling))
373 |
374 | gaussians.optimizer.zero_grad()
375 | gaussians.update_learning_rate(30000 - retrain_iterations + iteration)
376 |
377 | # Pick a random Camera
378 | if not viewpoint_stack:
379 | viewpoint_stack = scene.getTrainCameras().copy()
380 | viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack)-1))
381 |
382 | bg = torch.rand((3), device="cuda") if opt.random_background else background
383 |
384 | render_pkg = render(viewpoint_cam, gaussians, pipe, bg, primary_features=True)
385 | image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"].unsqueeze(0), render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
386 |
387 | gt_image = viewpoint_cam.original_image.cuda().unsqueeze(0)
388 | Ll1 = l1_loss(image, gt_image)
389 | content_loss = (1.0 - opt.lambda_dssim) * Ll1 + opt.lambda_dssim * (1.0 - ssim(image, gt_image))
390 |
391 | loss = content_loss
392 |
393 | loss.backward()
394 | gaussians.optimizer.step()
395 |
396 | iter_end.record()
397 |
398 | with torch.no_grad():
399 | # Progress bar
400 | ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log
401 | if iteration % 10 == 0:
402 | progress_bar.set_postfix({"Loss": f"{ema_loss_for_log:.{7}f}"})
403 | progress_bar.update(10)
404 | if iteration == retrain_iterations:
405 | progress_bar.close()
406 |
407 | print('Number of Gaussians after pretraining:', gaussians._features_dc.shape[0])
408 |
409 | if n_retraining > 0:
410 | print('Saving retrained Gaussians')
411 | scene.save(100000, primary_features=True)
412 |
413 | gaussians.copy_features_primary_to_secondary()
414 |
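    |     # Colour-matching stage: unless a recoloured checkpoint was loaded, the secondary
    |     # colours start from a grayscale copy of the scene and are optimised so that renders
    |     # reproduce the ground-truth views after their colour statistics have been matched to
    |     # the style image (nnfm_utils.match_colors_for_image_set).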
415 | if int(dataset.starting_iter) >= preprocess_color_matching_iter:
416 | color_matching_iterations = 0
417 | else:
418 |         # Initialise the secondary colours to each Gaussian's channel mean (grayscale start)
419 |         mean_dc = gaussians._features_secondary_dc.data.mean(dim=2, keepdim=True)
420 |         gaussians._features_secondary_dc.data[:] = mean_dc
421 |
422 | progress_bar = tqdm(range(0, color_matching_iterations), desc=f"Color matching progress")
423 | color_matching_optimizer = torch.optim.Adam([gaussians._features_secondary_dc], lr=0.01)
424 |
425 | for iteration in range(1, color_matching_iterations + 1):
426 | iter_start.record()
427 |
428 | color_matching_optimizer.zero_grad()
429 |
430 | # Pick a random Camera
431 | if not viewpoint_stack:
432 | viewpoint_stack = scene.getTrainCameras().copy()
433 | viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack)-1))
434 |
435 | bg = torch.rand((3), device="cuda") if opt.random_background else background
436 |
437 | render_pkg = render(viewpoint_cam, gaussians, pipe, bg, primary_features=False)
438 | image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"].unsqueeze(0), render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
439 |
440 | gt_image = viewpoint_cam.original_image.cuda().unsqueeze(0)
441 | gt_image = bwhc_to_bcwh(nnfm_utils.match_colors_for_image_set(bcwh_to_bwhc(gt_image), bcwh_to_bwhc(style_image)[0])[0])
442 |
443 | Ll1 = l1_loss(image, gt_image)
444 | content_loss = (1.0 - opt.lambda_dssim) * Ll1 + opt.lambda_dssim * (1.0 - ssim(image, gt_image))
445 |
446 | loss = content_loss
447 |
448 | loss.backward()
449 | color_matching_optimizer.step()
450 |
451 | iter_end.record()
452 |
453 | with torch.no_grad():
454 | # Progress bar
455 | ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log
456 | if iteration % 10 == 0:
457 | progress_bar.set_postfix({"Loss": f"{ema_loss_for_log:.{7}f}"})
458 | progress_bar.update(10)
459 | if iteration == color_matching_iterations:
460 | progress_bar.close()
461 |
462 | if color_matching_iterations > 0:
463 | print('Saving recolored Gaussians')
464 | scene.save(100001, primary_features=False)
465 |
466 | progress_bar = tqdm(range(0, stylization_iterations), desc=f"Stylization progress")
467 |
468 | gaussian_optimizer = torch.optim.Adam([gaussians._features_secondary_dc], lr=stylization_lr_start)
469 | gaussians_grad = torch.zeros((gaussians._features_secondary_dc.shape[0]), device='cuda')
470 | gaussians_denom = torch.zeros((gaussians._features_secondary_dc.shape[0]), device='cuda')
471 |
472 | for iteration in range(1, stylization_iterations + 1):
473 | iter_start.record()
474 |
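    |         # Exponential learning-rate decay: lr(t) = stylization_lr_start * exp(-decay * t),
    |         # which reaches stylization_lr_end exactly at the last iteration.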
475 | gaussian_optimizer.param_groups[0]['lr'] = stylization_lr_start * np.exp(-stylization_decay * iteration)
476 |
477 | gaussian_optimizer.zero_grad()
478 |
479 | # Pick a random Camera
480 | if not viewpoint_stack:
481 | viewpoint_stack = scene.getTrainCameras().copy()
482 | viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack)-1))
483 | gt_image = viewpoint_cam.original_image.cuda().unsqueeze(0)
484 | gt_image = bwhc_to_bcwh(nnfm_utils.match_colors_for_image_set(bcwh_to_bwhc(gt_image), bcwh_to_bwhc(style_image)[0])[0])
485 |
486 | bg = torch.rand((3), device="cuda") if opt.random_background else background
487 |
488 | render_pkg = render(viewpoint_cam, gaussians, pipe, bg, primary_features=False)
489 | image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"].unsqueeze(0), render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
490 |
491 | if stylization_size is not None:
492 | with torch.no_grad():
493 | old_size = (image.shape[2], image.shape[3])
494 | size_ratio = stylization_size / old_size[1]
495 | new_size = (int(old_size[0] * size_ratio), stylization_size)
496 | else:
497 | new_size = (image.shape[2], image.shape[3])
498 |
499 | with torch.no_grad():
500 | downsampled_gt_image = torch.nn.functional.interpolate(gt_image, size=new_size, mode='bilinear', antialias=True)
501 | downsampled_gt_image_feats = [x.detach() for x in get_feats(downsampled_gt_image, vgg16, vgg16_normalize, nnfm_utils.nnfm_all_layers())]
502 | downsampled_image = torch.nn.functional.interpolate(image, size=new_size, mode='bilinear', antialias=True)
503 | downsampled_image_feats = get_feats(downsampled_image, vgg16, vgg16_normalize, nnfm_utils.nnfm_all_layers())
504 |
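    |         # Stylization objective: CLIP similarity to the style image (on random cutouts),
    |         # nearest-neighbour feature matching (NNFM) against the VGG16 style features,
    |         # a VGG16 content loss against the colour-matched ground truth, and a
    |         # total-variation regulariser; terms with a zero weight are skipped.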
505 | clip_loss = clip_loss_fn(downsampled_image) * clip_weight if clip_weight != 0 else 0
506 | nnfm_loss = nnfm_utils.calculate_nnfm_loss(downsampled_image_feats, vgg16_style_feats) * nnfm_weight if nnfm_weight != 0 else 0
507 | content_loss = calculate_content_loss(downsampled_image_feats, downsampled_gt_image_feats) * content_weight if content_weight != 0 else 0
508 | total_variation_loss = calculate_total_variation_loss(image) * tv_weight if tv_weight != 0 else 0
509 |
510 | loss = clip_loss + nnfm_loss + content_loss + total_variation_loss
511 |
512 | loss.backward()
513 | gaussian_optimizer.step()
514 |
515 | with torch.no_grad():
516 | if iteration % 5000 == 0 and iteration != stylization_iterations:
517 | print('Saving scene', 100000 + iteration)
518 | scene.save(100000 + iteration, primary_features=False)
519 |
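    |         # Accumulate the per-Gaussian gradient magnitude of the secondary colours over the
    |         # visible Gaussians; at the scheduled split iterations the top stylization_densify_n
    |         # fraction is split into smaller Gaussians, the optimizer and accumulators are
    |         # rebuilt, and the geometry is briefly re-trained against the ground truth.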
520 | with torch.no_grad():
521 | if iteration >= stylization_gradient_accum_start:
522 | visible_gaussians = torch.count_nonzero(visibility_filter)
523 | gaussians_grad[visibility_filter] += torch.norm(torch.reshape(gaussians._features_secondary_dc.grad[visibility_filter], (visible_gaussians, 3)), dim=-1)
524 | gaussians_denom[visibility_filter] += 1
525 |
526 | if iteration in stylization_split_iterations and gaussians._features_dc.shape[0] <= dont_split_when_above:
527 | gaussians_grad /= gaussians_denom + 1
528 |
529 | gaussians_grad_threshold = np.sort(gaussians_grad.cpu().detach().numpy())[int((1.0 - stylization_densify_n) * gaussians_grad.shape[0])]
530 | split_mask = gaussians_grad > gaussians_grad_threshold
531 | print('splitting from', gaussians._features_secondary_dc.shape[0])
532 | gaussians.densify_and_split_with_mask(split_mask, N=stylization_densify_by, scaling_factor=stylization_densify_scaling_factor)
533 | print('splitting to', gaussians._features_secondary_dc.shape[0])
534 |
535 | gaussian_optimizer = torch.optim.Adam([gaussians._features_secondary_dc], lr=gaussian_optimizer.param_groups[0]['lr'])
536 | gaussians_grad = torch.zeros((gaussians._features_secondary_dc.shape[0]), device='cuda')
537 | gaussians_denom = torch.zeros((gaussians._features_secondary_dc.shape[0]), device='cuda')
538 |
539 | if iteration in stylization_split_iterations and gaussians._features_dc.shape[0] <= dont_split_when_above:
540 | for retrain_iteration in range(1, stylization_retrain_iterations + 1):
541 | if retrain_iteration % shorten_scaling_iter == 0 and retrain_iteration <= shorten_scaling_max_iter:
542 | with torch.no_grad():
543 | scaling = gaussians.get_scaling.cpu().detach().numpy()
544 | for i in range(scaling.shape[0]):
545 | shorten_scaling(scaling[i], shorten_scaling_threshold)
546 | gaussians._scaling[:, :] = gaussians.scaling_inverse_activation(torch.from_numpy(scaling))
547 |
548 | gaussians.enable_geometry_learning(30000 - stylization_retrain_iterations + retrain_iteration)
549 | gaussians.optimizer.zero_grad()
550 | gaussians.update_learning_rate(30000 - stylization_retrain_iterations + retrain_iteration)
551 |
552 | # Pick a random Camera
553 | if not viewpoint_stack:
554 | viewpoint_stack = scene.getTrainCameras().copy()
555 | viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack)-1))
556 |
557 | bg = torch.rand((3), device="cuda") if opt.random_background else background
558 |
559 | render_pkg = render(viewpoint_cam, gaussians, pipe, bg, primary_features=True)
560 | image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"].unsqueeze(0), render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
561 |
562 | gt_image = viewpoint_cam.original_image.cuda().unsqueeze(0)
563 | Ll1 = l1_loss(image, gt_image)
564 | content_loss = (1.0 - opt.lambda_dssim) * Ll1 + opt.lambda_dssim * (1.0 - ssim(image, gt_image))
565 |
566 | loss = content_loss
567 |
568 | loss.backward()
569 | gaussians.optimizer.step()
570 |
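    |         # Final recolouring pass (runs at the last stylization iteration): render every
    |         # training view, match its colours to the style image, reset the secondary colours
    |         # to grayscale, and re-fit them to the recoloured renderings before saving.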
571 | if iteration == color_matching_start_iteration:
572 | viewpoint_stack_for_recoloring = scene.getTrainCameras().copy()
573 | recolored_viewpoint_renderings = []
574 |
575 |             # Render the recolouring targets without tracking gradients
576 |             with torch.no_grad():
577 |                 for i in range(len(viewpoint_stack_for_recoloring)):
578 |                     viewpoint_cam = viewpoint_stack_for_recoloring[i]
579 |                     bg = torch.rand((3), device="cuda") if opt.random_background else background
580 |
581 |                     render_pkg = render(viewpoint_cam, gaussians, pipe, bg, primary_features=False)
582 |                     image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"].unsqueeze(0), render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
583 |
584 |                     recolored_image = bwhc_to_bcwh(nnfm_utils.match_colors_for_image_set(bcwh_to_bwhc(image), bcwh_to_bwhc(style_image)[0])[0])
585 |                     recolored_viewpoint_renderings.append(recolored_image.cpu().detach())
586 |
587 |             # Reset the secondary colours to each Gaussian's channel mean (grayscale)
588 |             mean_dc = gaussians._features_secondary_dc.data.mean(dim=2, keepdim=True)
589 |             gaussians._features_secondary_dc.data[:] = mean_dc
590 |
591 | color_matching_optimizer = torch.optim.Adam([gaussians._features_secondary_dc], lr=0.01)
592 |
593 | viewpoint_stack_for_recoloring = []
594 | for recoloring_iteration in range(1, color_matching_iterations + 1):
595 | iter_start.record()
596 |
597 | color_matching_optimizer.zero_grad()
598 |
599 | # Pick a random Camera
600 | if not viewpoint_stack_for_recoloring:
601 | viewpoint_stack_for_recoloring = scene.getTrainCameras().copy()
602 | recolored_renderings = recolored_viewpoint_renderings.copy()
603 | random_cam_id = randint(0, len(viewpoint_stack_for_recoloring)-1)
604 | viewpoint_cam = viewpoint_stack_for_recoloring.pop(random_cam_id)
605 | gt_image = recolored_renderings.pop(random_cam_id).cuda()
606 |
607 | bg = torch.rand((3), device="cuda") if opt.random_background else background
608 |
609 | render_pkg = render(viewpoint_cam, gaussians, pipe, bg, primary_features=False)
610 | image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"].unsqueeze(0), render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
611 |
612 | Ll1 = l1_loss(image, gt_image)
613 | content_loss = (1.0 - opt.lambda_dssim) * Ll1 + opt.lambda_dssim * (1.0 - ssim(image, gt_image))
614 |
615 | loss = content_loss
616 |
617 | loss.backward()
618 | color_matching_optimizer.step()
619 |
620 | iter_end.record()
621 |
622 | #print("\n[ITER {}] Saving Gaussians".format(iteration))
623 | print('Saving recolored stylized Gaussians')
624 | scene.save(199999, primary_features=False)
625 |
626 | iter_end.record()
627 |
628 | with torch.no_grad():
629 | # Progress bar
630 | ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log
631 | if iteration % 10 == 0:
632 | progress_bar.set_postfix({"Loss": f"{ema_loss_for_log:.{7}f}"})
633 | progress_bar.update(10)
634 | if iteration == stylization_iterations:
635 | progress_bar.close()
636 |
637 | print("\n[ITER {}] Saving stylized Gaussians".format(iteration))
638 | scene.save(200000, primary_features=False)
639 | scene.save(200000, primary_features=False, iteration_prefix=style_name)
640 |
641 | def prepare_output_and_logger(args):
642 | if not args.model_path:
643 | if os.getenv('OAR_JOB_ID'):
644 | unique_str=os.getenv('OAR_JOB_ID')
645 | else:
646 | unique_str = str(uuid.uuid4())
647 | args.model_path = os.path.join("./output/", unique_str[0:10])
648 |
649 | # Set up output folder
650 | print("Output folder: {}".format(args.model_path))
651 | os.makedirs(args.model_path, exist_ok = True)
652 | with open(os.path.join(args.model_path, "cfg_args"), 'w') as cfg_log_f:
653 | cfg_log_f.write(str(Namespace(**vars(args))))
654 |
655 | # Create Tensorboard writer
656 | tb_writer = None
657 | if TENSORBOARD_FOUND:
658 | tb_writer = SummaryWriter(args.model_path)
659 | else:
660 | print("Tensorboard not available: not logging progress")
661 | return tb_writer
662 |
663 | def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, scene : Scene, renderFunc, renderArgs):
664 | if tb_writer:
665 | tb_writer.add_scalar('train_loss_patches/l1_loss', Ll1.item(), iteration)
666 | tb_writer.add_scalar('train_loss_patches/total_loss', loss.item(), iteration)
667 | tb_writer.add_scalar('iter_time', elapsed, iteration)
668 |
669 | # Report test and samples of training set
670 | if iteration in testing_iterations:
671 | torch.cuda.empty_cache()
672 | validation_configs = ({'name': 'test', 'cameras' : scene.getTestCameras()},
673 | {'name': 'train', 'cameras' : [scene.getTrainCameras()[idx % len(scene.getTrainCameras())] for idx in range(5, 30, 5)]})
674 |
675 | for config in validation_configs:
676 | if config['cameras'] and len(config['cameras']) > 0:
677 | l1_test = 0.0
678 | psnr_test = 0.0
679 | for idx, viewpoint in enumerate(config['cameras']):
680 | image = torch.clamp(renderFunc(viewpoint, scene.gaussians, *renderArgs)["render"], 0.0, 1.0)
681 | gt_image = torch.clamp(viewpoint.original_image.to("cuda"), 0.0, 1.0)
682 | if tb_writer and (idx < 5):
683 | tb_writer.add_images(config['name'] + "_view_{}/render".format(viewpoint.image_name), image[None], global_step=iteration)
684 | if iteration == testing_iterations[0]:
685 | tb_writer.add_images(config['name'] + "_view_{}/ground_truth".format(viewpoint.image_name), gt_image[None], global_step=iteration)
686 | l1_test += l1_loss(image, gt_image).mean().double()
687 | psnr_test += psnr(image, gt_image).mean().double()
688 | psnr_test /= len(config['cameras'])
689 | l1_test /= len(config['cameras'])
690 | print("\n[ITER {}] Evaluating {}: L1 {} PSNR {}".format(iteration, config['name'], l1_test, psnr_test))
691 | if tb_writer:
692 | tb_writer.add_scalar(config['name'] + '/loss_viewpoint - l1_loss', l1_test, iteration)
693 | tb_writer.add_scalar(config['name'] + '/loss_viewpoint - psnr', psnr_test, iteration)
694 |
695 | if tb_writer:
696 | tb_writer.add_histogram("scene/opacity_histogram", scene.gaussians.get_opacity, iteration)
697 | tb_writer.add_scalar('total_points', scene.gaussians.get_xyz.shape[0], iteration)
698 | torch.cuda.empty_cache()
699 |
700 | # Set up command line argument parser
701 | parser = ArgumentParser(description="Training script parameters")
702 | lp = ModelParams(parser)
703 | op = OptimizationParams(parser)
704 | pp = PipelineParams(parser)
705 | parser.add_argument('--ip', type=str, default="127.0.0.1")
706 | parser.add_argument('--port', type=int, default=6009)
707 | parser.add_argument('--debug_from', type=int, default=-1)
708 | parser.add_argument('--detect_anomaly', action='store_true', default=False)
709 | parser.add_argument("--test_iterations", nargs="+", type=int, default=[7_000, 30_000])
710 | parser.add_argument("--save_iterations", nargs="+", type=int, default=[7_000, 30_000])
711 | parser.add_argument("--quiet", action="store_true")
712 | parser.add_argument("--checkpoint_iterations", nargs="+", type=int, default=[])
713 | parser.add_argument("--start_checkpoint", type=str, default = None)
714 | args = parser.parse_args(sys.argv[1:])
715 | args.save_iterations.append(args.iterations)
716 |
717 | print("Optimizing " + args.model_path)
718 |
719 | # Initialize system state (RNG)
720 | safe_state(args.quiet)
721 |
722 | # Start GUI server, configure and run trainings
723 | network_gui.init(args.ip, args.port)
724 | torch.autograd.set_detect_anomaly(args.detect_anomaly)
725 | training(lp.extract(args), op.extract(args), pp.extract(args), args.start_checkpoint)
726 |
727 | # All done
728 | print("\nTraining complete.")
729 |
--------------------------------------------------------------------------------