├── LICENSE ├── MLP.py ├── README.md ├── confs ├── 14deg_planeFull.conf ├── 14deg_planeMissing.conf ├── 14deg_submarine.conf ├── 28deg_planeFull.conf ├── 28deg_planeMissing.conf └── 28deg_submarine.conf ├── data └── .keepme.txt ├── helpers.py ├── load_data.py ├── models ├── __pycache__ │ ├── embedder.cpython-37.pyc │ ├── fields.cpython-37.pyc │ └── renderer.cpython-37.pyc ├── embedder.py ├── fields.py └── renderer.py ├── plot_mesh.py ├── requirements.txt ├── run_sdf.py └── static ├── BPSubmarine.gif ├── BPplaneFull.gif ├── VASubmarine.gif ├── VAplaneFull.gif ├── planeFull.gif ├── planeFull.png ├── submarine.gif └── submarine.png /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Mohamad Qadri 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MLP.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import time 3 | import sys 4 | 5 | class Network_S_Relu(torch.nn.Module): 6 | def __init__(self, D=8, H=256, input_ch=3, input_ch_views=3, output_ch=4, skips=[4], no_rho=False): 7 | super(Network_S_Relu, self).__init__() 8 | self.input_ch = input_ch 9 | self.input_ch_views = input_ch_views 10 | self.skips = skips 11 | self.no_rho = no_rho 12 | self.pts_linears = torch.nn.ModuleList( 13 | [torch.nn.Linear(input_ch, H)] + [torch.nn.Linear(H, H) if i not in self.skips else torch.nn.Linear(H + input_ch, H) for i in range(D-1)]) 14 | self.views_linears = torch.nn.ModuleList([torch.nn.Linear(input_ch_views + H, H//2)]) 15 | if self.no_rho: 16 | self.output_linear = torch.nn.Linear(H, output_ch) 17 | else: 18 | self.feature_linear = torch.nn.Linear(H, H) 19 | self.alpha_linear = torch.nn.Linear(H, 1) 20 | self.rho_linear = torch.nn.Linear(H//2, 1) 21 | 22 | def forward(self, x): 23 | # split the input into 3D points (and view directions when rho is also predicted) 24 | if self.no_rho: 25 | input_pts = x 26 | h = x 27 | else: 28 | input_pts, input_views = torch.split(x, [self.input_ch, self.input_ch_views], dim=-1) 29 | h = input_pts 30 | 31 | for i, l in enumerate(self.pts_linears): 32 | h = self.pts_linears[i](h) 33 | h = torch.nn.functional.relu(h) 34 | if i in self.skips: 35 | h = torch.cat([input_pts, h], -1) 36 | 37 | if self.no_rho: 38 | outputs = self.output_linear(h) 39 | else: 40 | alpha = self.alpha_linear(h) 41 | alpha = torch.abs(alpha) # keep the density-like output non-negative 42 | feature = self.feature_linear(h) 43 | h = torch.cat([feature, input_views], -1) 44 | for i, l in enumerate(self.views_linears): 45 | h = self.views_linears[i](h) 46 | h = torch.nn.functional.relu(h) 47 | rho = self.rho_linear(h) 48 | rho = torch.abs(rho) # keep the reflectivity output non-negative 49 | outputs = torch.cat([rho, alpha], -1) 50 | return outputs 51 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Neural Implicit Surface Reconstruction using Imaging Sonar 2 | 3 | This repo contains the source code for the ICRA 2023 paper [Neural Implicit Surface Reconstruction using Imaging Sonar](https://arxiv.org/abs/2209.08221). 4 | 5 | Ground Truth Mesh | Our Reconstruction | Volumetric Albedo | Back-projection 6 | :-------------------------:|:-------------------------:|:-------------------------:|:-------------------------: 7 | ![](./static/submarine.png) | ![](./static/submarine.gif) | ![](./static/VASubmarine.gif) | ![](./static/BPSubmarine.gif) 8 | ![](./static/planeFull.png) | ![](./static/planeFull.gif) | ![](./static/VAplaneFull.gif) | ![](./static/BPplaneFull.gif) 9 | 10 | 11 | # Usage 12 | 13 | ## Anaconda Environment 14 | Create a virtual Python environment using [Anaconda](https://www.anaconda.com/products/individual): 15 | ``` 16 | conda create -n neusis python=3.7 17 | conda activate neusis 18 | ``` 19 | 20 | ## Setup 21 | 22 | Install [PyTorch](https://pytorch.org/get-started/locally/), then: 23 | ``` shell 24 | git clone https://github.com/rpl-cmu/neusis.git 25 | cd neusis 26 | pip install -r requirements.txt 27 | ``` 28 | 29 | ## Data 30 | Sample datasets are available for download [here](https://drive.google.com/drive/folders/161PNPuIfsIwAsRjOc2PXQUkjO4F0P5Nr?usp=sharing). Unzip the files inside the ```data``` directory.
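For example, a minimal sketch (the archive name below is illustrative; use the names of the zip files you actually downloaded):
``` shell
cd data
unzip 14deg_submarine.zip
cd ..
```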
31 | The data is organized as follows: 32 | 33 | ``` 34 | data/ 35 | |-- Data 36 | |-- <view 1>.pkl # data for each view (includes the sonar image and pose) 37 | |-- <view 2>.pkl 38 | ... 39 | |-- Config.json # Sonar configuration 40 | ``` 41 | 42 | ## Running 43 | Training: 44 | 45 | ``` python run_sdf.py --conf confs/<dataset_name>.conf ``` 46 | 47 | Example: 48 | ``` python run_sdf.py --conf confs/14deg_submarine.conf ``` 49 | 50 | The resulting meshes are saved in the directory ```experiments/<expID>/meshes```. The parameter ```val_mesh_freq``` in ```confs/<dataset_name>.conf``` controls the frequency of mesh generation. 51 | 52 | 53 | # Notes on training 54 | 1) We used an NVIDIA 3090 GPU for training. Depending on available compute, consider adjusting the following parameters in ```confs/<dataset_name>.conf```: 55 | 56 | Parameter | Description 57 | ------------- | ------------- 58 | arc_n_samples | number of samples along each arc 59 | num_select_pixels | number of sampled pixels (px) 60 | percent_select_true | percentage of px with intensity > ```ε``` that are selected at every training iteration 61 | n_samples | number of samples along each acoustic ray 62 | 63 | 64 | 2) Depending on weight initialization, we noticed that the network might converge to "bad" local minima. A quick way to make sure that the network is training correctly is to check that the ```intensity Loss``` is decreasing after a handful of epochs. Otherwise, please restart the training. 65 | 66 | This issue can be mitigated by normalizing the data and enabling geometric initialization of the network weights: see [Issue #1](https://github.com/rpl-cmu/neusis/issues/1) 67 | 68 | 69 | # Citation 70 | If you find our work helpful to your project, please consider citing: 71 | 72 | ``` 73 | @inproceedings{qadri2023neural, 74 | title={Neural implicit surface reconstruction using imaging sonar}, 75 | author={Qadri, Mohamad and Kaess, Michael and Gkioulekas, Ioannis}, 76 | booktitle={2023 IEEE International Conference on Robotics and Automation (ICRA)}, 77 | pages={1040--1047}, 78 | year={2023}, 79 | organization={IEEE} 80 | } 81 | ``` 82 | 83 | # Acknowledgement 84 | Some code snippets are borrowed from [IDR](https://github.com/lioryariv/idr) and [NeuS](https://github.com/Totoro97/NeuS). Thanks to the authors of these projects!
85 | 86 | -------------------------------------------------------------------------------- /confs/14deg_planeFull.conf: -------------------------------------------------------------------------------- 1 | conf { 2 | dataset = 14deg_planeFull 3 | image_setkeyname = "images" 4 | expID = 14deg_planeFull 5 | timef = False 6 | filter_th = 0 7 | use_manual_bound = True 8 | } 9 | 10 | 11 | train { 12 | learning_rate = 5e-4 13 | learning_rate_alpha = 0.01 14 | end_iter = 300000 15 | start_iter = 0 16 | 17 | warm_up_end = 5000 18 | anneal_end = 50000 19 | select_valid_px = False 20 | 21 | save_freq = 10 22 | val_mesh_freq = 10 23 | report_freq = 1 24 | 25 | igr_weight = 0.1 26 | variation_reg_weight = 0 27 | 28 | arc_n_samples = 10 29 | select_px_method = "bypercent" 30 | num_select_pixels = 100 31 | px_sample_min_weight = 0.001 32 | randomize_points = True 33 | percent_select_true = 0.4 34 | r_div = False 35 | } 36 | 37 | 38 | mesh { 39 | object_bbox_min = [-7.25, -11.5, -2.25] 40 | object_bbox_max = [7.5, 7.0, 2.75] 41 | x_max = -8, 42 | x_min = -24, 43 | y_max = -2.5, 44 | y_min = -17.5, 45 | z_max = -13, 46 | z_min = -19, 47 | level_set = 0 48 | } 49 | 50 | model { 51 | sdf_network { 52 | d_out = 65 53 | d_in = 3 54 | d_hidden = 64 55 | n_layers = 4 56 | skip_in = [2] 57 | multires = 6 58 | bias = 1 59 | scale = 1.0 60 | geometric_init = False 61 | weight_norm = True 62 | } 63 | 64 | variance_network { 65 | init_val = 0.3 66 | } 67 | 68 | rendering_network { 69 | d_feature = 64 70 | mode = idr 71 | d_in = 9 72 | d_out = 1 73 | d_hidden = 64 74 | n_layers = 4 75 | weight_norm = True 76 | multires_view = 4 77 | squeeze_out = True 78 | } 79 | 80 | neus_renderer { 81 | n_samples = 64 82 | n_importance = 0 83 | n_outside = 0 84 | up_sample_steps = 4 85 | perturb = 0 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /confs/14deg_planeMissing.conf: -------------------------------------------------------------------------------- 1 | conf { 2 | dataset = 14deg_planeMissing 3 | image_setkeyname = "images" 4 | expID = 14deg_planeMissing 5 | timef = False 6 | filter_th = 0 7 | plot_freq = 50 8 | use_manual_bound = True 9 | } 10 | 11 | 12 | train { 13 | learning_rate = 5e-4 14 | learning_rate_alpha = 0.01 15 | end_iter = 300000 16 | start_iter = 0 17 | 18 | warm_up_end = 5000 19 | anneal_end = 50000 20 | select_valid_px = False 21 | 22 | save_freq = 10 23 | val_mesh_freq = 10 24 | report_freq = 1 25 | 26 | igr_weight = 0.1 27 | variation_reg_weight = 0 28 | 29 | arc_n_samples = 10 30 | select_px_method = "bypercent" 31 | num_select_pixels = 100 32 | px_sample_min_weight = 0.001 33 | randomize_points = True 34 | percent_select_true = 0.4 35 | r_div = False 36 | } 37 | 38 | 39 | mesh { 40 | object_bbox_min = [-4.25, -6.5, -2.25] 41 | object_bbox_max = [6.0, 7.0, 2.75] 42 | x_max = 16, 43 | x_min = 2, 44 | y_max = 10, 45 | y_min = -4, 46 | z_max = -12, 47 | z_min = -18, 48 | level_set = 0 49 | } 50 | 51 | model { 52 | sdf_network { 53 | d_out = 65 54 | d_in = 3 55 | d_hidden = 64 56 | n_layers = 4 57 | skip_in = [2] 58 | multires = 6 59 | bias = 1 60 | scale = 1.0 61 | geometric_init = False 62 | weight_norm = True 63 | } 64 | 65 | variance_network { 66 | init_val = 0.3 67 | } 68 | 69 | rendering_network { 70 | d_feature = 64 71 | mode = idr 72 | d_in = 9 73 | d_out = 1 74 | d_hidden = 64 75 | n_layers = 4 76 | weight_norm = True 77 | multires_view = 4 78 | squeeze_out = True 79 | } 80 | 81 | neus_renderer { 82 | n_samples = 64 83 | n_importance = 0 84 | 
n_outside = 0 85 | up_sample_steps = 4 86 | perturb = 0 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /confs/14deg_submarine.conf: -------------------------------------------------------------------------------- 1 | conf { 2 | dataset = 14deg_submarine 3 | image_setkeyname = "images" 4 | expID = 14deg_submarine 5 | timef = False 6 | filter_th = 0 7 | use_manual_bound = True 8 | } 9 | 10 | 11 | train { 12 | learning_rate = 5e-4 13 | learning_rate_alpha = 0.01 14 | end_iter = 300000 15 | start_iter = 0 16 | 17 | warm_up_end = 5000 18 | anneal_end = 50000 19 | select_valid_px = False 20 | 21 | save_freq = 10 22 | val_mesh_freq = 10 23 | report_freq = 1 24 | 25 | igr_weight = 0.1 26 | variation_reg_weight = 0 27 | 28 | arc_n_samples = 10 29 | select_px_method = "bypercent" 30 | num_select_pixels = 100 31 | px_sample_min_weight = 0.001 32 | randomize_points = True 33 | percent_select_true = 0.25 34 | r_div = False 35 | } 36 | 37 | 38 | mesh { 39 | object_bbox_min = [-3.25, -11, -2.25] 40 | object_bbox_max = [4, 8, 3] 41 | x_max = 19, 42 | x_min = 12.5, 43 | y_max = 12, 44 | y_min = -2, 45 | z_max = -13, 46 | z_min = -17.5, 47 | level_set = 0 48 | } 49 | 50 | model { 51 | sdf_network { 52 | d_out = 65 53 | d_in = 3 54 | d_hidden = 64 55 | n_layers = 4 56 | skip_in = [2] 57 | multires = 6 58 | bias = 1 59 | scale = 1.0 60 | geometric_init = False 61 | weight_norm = True 62 | } 63 | 64 | variance_network { 65 | init_val = 0.3 66 | } 67 | 68 | rendering_network { 69 | d_feature = 64 70 | mode = idr 71 | d_in = 9 72 | d_out = 1 73 | d_hidden = 64 74 | n_layers = 4 75 | weight_norm = True 76 | multires_view = 4 77 | squeeze_out = True 78 | } 79 | 80 | neus_renderer { 81 | n_samples = 64 82 | n_importance = 0 83 | n_outside = 0 84 | up_sample_steps = 4 85 | perturb = 0 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /confs/28deg_planeFull.conf: -------------------------------------------------------------------------------- 1 | conf { 2 | dataset = 28deg_planeFull 3 | image_setkeyname = "images" 4 | expID = 28deg_planeFull 5 | timef = False 6 | filter_th = 0 7 | use_manual_bound = True 8 | } 9 | 10 | 11 | train { 12 | learning_rate = 5e-4 13 | learning_rate_alpha = 0.01 14 | end_iter = 300000 15 | start_iter = 0 16 | 17 | warm_up_end = 5000 18 | anneal_end = 50000 19 | select_valid_px = False 20 | 21 | save_freq = 10 22 | val_mesh_freq = 10 23 | report_freq = 1 24 | 25 | igr_weight = 0.1 26 | variation_reg_weight = 0 27 | 28 | arc_n_samples = 15 29 | select_px_method = "bypercent" 30 | num_select_pixels = 100 31 | px_sample_min_weight = 0.01 32 | randomize_points = True 33 | percent_select_true = 0.2 34 | r_div = False 35 | } 36 | 37 | 38 | mesh { 39 | object_bbox_min = [-7.25, -11.5, -2.25] 40 | object_bbox_max = [7.5, 7.0, 2.75] 41 | x_max = -8, 42 | x_min = -24, 43 | y_max = -2.5, 44 | y_min = -17.5, 45 | z_max = -13, 46 | z_min = -19, 47 | level_set = 0 48 | } 49 | 50 | model { 51 | sdf_network { 52 | d_out = 65 53 | d_in = 3 54 | d_hidden = 64 55 | n_layers = 4 56 | skip_in = [2] 57 | multires = 6 58 | bias = 1 59 | scale = 1.0 60 | geometric_init = False 61 | weight_norm = True 62 | } 63 | 64 | variance_network { 65 | init_val = 0.3 66 | } 67 | 68 | rendering_network { 69 | d_feature = 64 70 | mode = idr 71 | d_in = 9 72 | d_out = 1 73 | d_hidden = 64 74 | n_layers = 4 75 | weight_norm = True 76 | multires_view = 4 77 | squeeze_out = True 78 | } 79 | 80 | neus_renderer { 81 | n_samples = 64 
82 | n_importance = 0 83 | n_outside = 0 84 | up_sample_steps = 4 85 | perturb = 0 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /confs/28deg_planeMissing.conf: -------------------------------------------------------------------------------- 1 | conf { 2 | dataset = 28deg_planeMissing 3 | image_setkeyname = "images" 4 | expID = 28deg_planeMissing 5 | timef = False 6 | filter_th = 0 7 | use_manual_bound = True 8 | } 9 | 10 | 11 | train { 12 | learning_rate = 5e-4 13 | learning_rate_alpha = 0.01 14 | end_iter = 300000 15 | start_iter = 0 16 | 17 | warm_up_end = 5000 18 | anneal_end = 50000 19 | select_valid_px = False 20 | 21 | save_freq = 10 22 | val_mesh_freq = 10 23 | report_freq = 1 24 | 25 | igr_weight = 0.1 26 | variation_reg_weight = 0 27 | 28 | arc_n_samples = 15 29 | select_px_method = "bypercent" 30 | num_select_pixels = 100 31 | px_sample_min_weight = 0.001 32 | randomize_points = True 33 | percent_select_true = 0.25 34 | r_div = False 35 | } 36 | 37 | 38 | mesh { 39 | object_bbox_min = [-4.25, -6.5, -2.25] 40 | object_bbox_max = [6.0, 7.0, 2.75] 41 | x_max = 16, 42 | x_min = 2, 43 | y_max = 10, 44 | y_min = -4, 45 | z_max = -12, 46 | z_min = -18, 47 | level_set = 0 48 | } 49 | 50 | model { 51 | sdf_network { 52 | d_out = 65 53 | d_in = 3 54 | d_hidden = 64 55 | n_layers = 4 56 | skip_in = [2] 57 | multires = 6 58 | bias = 1 59 | scale = 1.0 60 | geometric_init = False 61 | weight_norm = True 62 | } 63 | 64 | variance_network { 65 | init_val = 0.3 66 | } 67 | 68 | rendering_network { 69 | d_feature = 64 70 | mode = idr 71 | d_in = 9 72 | d_out = 1 73 | d_hidden = 64 74 | n_layers = 4 75 | weight_norm = True 76 | multires_view = 4 77 | squeeze_out = True 78 | } 79 | 80 | neus_renderer { 81 | n_samples = 64 82 | n_importance = 0 83 | n_outside = 0 84 | up_sample_steps = 4 85 | perturb = 0 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /confs/28deg_submarine.conf: -------------------------------------------------------------------------------- 1 | conf { 2 | dataset = 28deg_submarine 3 | image_setkeyname = "images" 4 | expID = 28deg_submarine 5 | timef = False 6 | filter_th = 0.3 7 | use_manual_bound = True 8 | } 9 | 10 | 11 | train { 12 | learning_rate = 5e-4 13 | learning_rate_alpha = 0.01 14 | end_iter = 300000 15 | start_iter = 0 16 | 17 | warm_up_end = 5000 18 | anneal_end = 50000 19 | select_valid_px = False 20 | 21 | save_freq = 10 22 | val_mesh_freq = 10 23 | report_freq = 1 24 | 25 | igr_weight = 0.1 26 | variation_reg_weight = 0 27 | 28 | arc_n_samples = 20 29 | select_px_method = "bypercent" 30 | num_select_pixels = 100 31 | px_sample_min_weight = 0.001 32 | randomize_points = True 33 | percent_select_true = 0.25 34 | r_div = False 35 | } 36 | 37 | 38 | mesh { 39 | object_bbox_min = [-4.25, -6.5, -2.25] 40 | object_bbox_max = [6.0, 7.0, 2.75] 41 | x_max = 16, 42 | x_min = 2, 43 | y_max = 10, 44 | y_min = -4, 45 | z_max = -12, 46 | z_min = -18, 47 | level_set = 0.07 48 | } 49 | 50 | model { 51 | sdf_network { 52 | d_out = 65 53 | d_in = 3 54 | d_hidden = 64 55 | n_layers = 4 56 | skip_in = [2] 57 | multires = 6 58 | bias = 1 59 | scale = 1.0 60 | geometric_init = False 61 | weight_norm = True 62 | } 63 | 64 | variance_network { 65 | init_val = 0.3 66 | } 67 | 68 | rendering_network { 69 | d_feature = 64 70 | mode = idr 71 | d_in = 9 72 | d_out = 1 73 | d_hidden = 64 74 | n_layers = 4 75 | weight_norm = True 76 | multires_view = 4 77 | squeeze_out = True 78 | } 79
| 80 | neus_renderer { 81 | n_samples = 64 82 | n_importance = 0 83 | n_outside = 0 84 | up_sample_steps = 4 85 | perturb = 0 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /data/.keepme.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rpl-cmu/neusis/8f34722d955f38117c9462523b809addeb9db05e/data/.keepme.txt -------------------------------------------------------------------------------- /helpers.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import matplotlib 3 | matplotlib.use('Agg') 4 | from MLP import * 5 | 6 | 7 | torch.autograd.set_detect_anomaly(True) 8 | 9 | 10 | def update_lr(optimizer,lr_decay): 11 | for param_group in optimizer.param_groups: 12 | if param_group['lr'] > 0.0000001: 13 | param_group['lr'] = param_group['lr'] * lr_decay 14 | learning_rate = param_group['lr'] 15 | print('learning rate is updated to ',learning_rate) 16 | return 0 17 | 18 | def save_model(expID, model, i): 19 | # save a snapshot of the model at epoch i 20 | model_name = './experiments/{}/model/epoch_{}.pt'.format(expID, i) 21 | torch.save(model, model_name) 22 | return 0 23 | 24 | 25 | def get_arcs(H, W, phi_min, phi_max, r_min, r_max, c2w, n_selected_px, arc_n_samples, ray_n_samples, 26 | hfov, px, r_increments, randomize_points, device, cube_center): 27 | 28 | i = px[:, 0] 29 | j = px[:, 1] 30 | 31 | # sample angle phi 32 | phi = torch.linspace(phi_min, phi_max, arc_n_samples).float().repeat(n_selected_px).reshape(n_selected_px, -1) 33 | 34 | dphi = (phi_max - phi_min) / arc_n_samples 35 | rnd = -dphi + torch.rand(n_selected_px, arc_n_samples)*2*dphi 36 | 37 | sonar_resolution = (r_max-r_min)/H 38 | if randomize_points: 39 | phi = torch.clip(phi + rnd, min=phi_min, max=phi_max) 40 | 41 | # compute radius at each pixel 42 | r = i*sonar_resolution + r_min 43 | # compute bearing angle at each pixel 44 | theta = -hfov/2 + j*hfov/W 45 | 46 | 47 | # Need to calculate coords to figure out the ray direction 48 | # the following operations mimic the Cartesian product between the two lists [r, theta] and phi 49 | # coords is of size: n_selected_px x arc_n_samples x 3 50 | coords = torch.stack((r.repeat_interleave(arc_n_samples).reshape(n_selected_px, -1), 51 | theta.repeat_interleave(arc_n_samples).reshape(n_selected_px, -1), 52 | phi), dim = -1) 53 | coords = coords.reshape(-1, 3) 54 | 55 | holder = torch.empty(n_selected_px, arc_n_samples*ray_n_samples, dtype=torch.long).to(device) 56 | bitmask = torch.zeros(ray_n_samples, dtype=torch.bool) 57 | bitmask[ray_n_samples - 1] = True 58 | bitmask = bitmask.repeat(arc_n_samples) 59 | 60 | 61 | for n_px in range(n_selected_px): 62 | holder[n_px, :] = torch.randint(0, i[n_px]-1, (arc_n_samples*ray_n_samples,)) # random range bins strictly below this pixel's bin 63 | holder[n_px, bitmask] = i[n_px] # the last sample on each ray is the pixel's own bin 64 | 65 | holder = holder.reshape(n_selected_px, arc_n_samples, ray_n_samples) 66 | 67 | holder, _ = torch.sort(holder, dim=-1) 68 | 69 | holder = holder.reshape(-1) 70 | 71 | 72 | r_samples = torch.index_select(r_increments, 0, holder).reshape(n_selected_px, 73 | arc_n_samples, 74 | ray_n_samples) 75 | 76 | rnd = torch.rand((n_selected_px, arc_n_samples, ray_n_samples))*sonar_resolution 77 | 78 | if randomize_points: 79 | r_samples = r_samples + rnd 80 | 81 | rs = r_samples[:, :, -1] 82 | r_samples = r_samples.reshape(n_selected_px*arc_n_samples, ray_n_samples) 83 | 84 | theta_samples = coords[:, 1].repeat_interleave(ray_n_samples).reshape(-1, ray_n_samples) 85 | phi_samples
= coords[:, 2].repeat_interleave(ray_n_samples).reshape(-1, ray_n_samples) 86 | 87 | # Note: r_samples is of size n_selected_px*arc_n_samples x ray_n_samples 88 | # so each row of r_samples contains r values for points picked from the same ray (should have the same theta and phi values) 89 | # theta_samples is also of size n_selected_px*arc_n_samples x ray_n_samples 90 | # since every point on a pixel's arc shares that pixel's theta, each consecutive block of arc_n_samples rows holds a single theta value 91 | # Finally phi_samples is also of size n_selected_px*arc_n_samples x ray_n_samples 92 | # but here each ray (arc sample) has its own phi value 93 | 94 | # pts contains all points and is of size n_selected_px*arc_n_samples*ray_n_samples, 3 95 | # the first ray_n_samples rows correspond to points along the same ray 96 | # the first ray_n_samples*arc_n_samples rows correspond to points along rays on the same arc 97 | pts = torch.stack((r_samples, theta_samples, phi_samples), dim=-1).reshape(-1, 3) 98 | 99 | dists = torch.diff(r_samples, dim=1) 100 | dists = torch.cat([dists, torch.Tensor([sonar_resolution]).expand(dists[..., :1].shape)], -1) 101 | 102 | #r_samples_mid = r_samples + dists/2 103 | 104 | X_r_rand = pts[:,0]*torch.cos(pts[:,1])*torch.cos(pts[:,2]) 105 | Y_r_rand = pts[:,0]*torch.sin(pts[:,1])*torch.cos(pts[:,2]) 106 | Z_r_rand = pts[:,0]*torch.sin(pts[:,2]) 107 | pts_r_rand = torch.stack((X_r_rand, Y_r_rand, Z_r_rand, torch.ones_like(X_r_rand))) 108 | 109 | 110 | pts_r_rand = torch.matmul(c2w, pts_r_rand) 111 | 112 | pts_r_rand = torch.stack((pts_r_rand[0,:], pts_r_rand[1,:], pts_r_rand[2,:])) 113 | 114 | # Centering step 115 | pts_r_rand = pts_r_rand.T - cube_center 116 | 117 | # Transform to Cartesian to apply pose transformation and get the direction 118 | # transformation as described in https://www.ri.cmu.edu/pub_files/2016/5/thuang_mastersthesis.pdf 119 | X = coords[:,0]*torch.cos(coords[:,1])*torch.cos(coords[:,2]) 120 | Y = coords[:,0]*torch.sin(coords[:,1])*torch.cos(coords[:,2]) 121 | Z = coords[:,0]*torch.sin(coords[:,2]) 122 | 123 | dirs = torch.stack((X,Y,Z, torch.ones_like(X))).T 124 | dirs = dirs.repeat_interleave(ray_n_samples, 0) 125 | dirs = torch.matmul(c2w, dirs.T).T 126 | origin = torch.matmul(c2w, torch.tensor([0., 0., 0., 1.])).unsqueeze(dim=0) 127 | dirs = dirs - origin 128 | dirs = dirs[:, 0:3] 129 | dirs = torch.nn.functional.normalize(dirs, dim=1) 130 | 131 | return dirs, dphi, r, rs, pts_r_rand, dists 132 | 133 | 134 | def select_coordinates(coords_all, target, N_rand, select_valid_px): 135 | if select_valid_px: 136 | coords = torch.nonzero(target) 137 | else: 138 | select_inds = torch.randperm(coords_all.shape[0])[:N_rand] 139 | coords = coords_all[select_inds] 140 | return coords 141 | -------------------------------------------------------------------------------- /load_data.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import pickle 4 | import json 5 | import math 6 | from scipy.io import savemat 7 | 8 | def load_data(target): 9 | dirpath = "./data/{}".format(target) 10 | pickle_loc = "{}/Data".format(dirpath) 11 | output_loc = "{}/UnzipData".format(dirpath) 12 | cfg_path = "{}/Config.json".format(dirpath) 13 | 14 | 15 | with open(cfg_path, 'r') as f: 16 | cfg = json.load(f) 17 | 18 | for agents in cfg["agents"][0]["sensors"]: 19 | if agents["sensor_type"] != "ImagingSonar": continue 20 | hfov = agents["configuration"]["Azimuth"] 21 | vfov = agents["configuration"]["Elevation"] 22 |
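# range limits (RangeMin/RangeMax) bound the sonar image; azimuth/elevation come in degrees and are converted to radians below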
min_range = agents["configuration"]["RangeMin"] 23 | max_range = agents["configuration"]["RangeMax"] 24 | hfov = math.radians(hfov) 25 | vfov = math.radians(vfov) 26 | 27 | if not os.path.exists(output_loc): 28 | os.makedirs(output_loc) 29 | images = [] 30 | sensor_poses = [] 31 | 32 | 33 | for pkls in os.listdir(pickle_loc): 34 | filename = "{}/{}".format(pickle_loc, pkls) 35 | with open(filename, 'rb') as f: 36 | state = pickle.load(f) 37 | image = state["ImagingSonar"] 38 | s = image.shape 39 | image[image < 0.2] = 0 40 | image[s[0]- 200:, :] = 0 41 | pose = state["PoseSensor"] 42 | images.append(image) 43 | sensor_poses.append(pose) 44 | 45 | data = { 46 | "images": images, 47 | "images_no_noise": [], 48 | "sensor_poses": sensor_poses, 49 | "min_range": min_range, 50 | "max_range": max_range, 51 | "hfov": hfov, 52 | "vfov": vfov 53 | } 54 | 55 | savemat('{}/{}.mat'.format(dirpath,target), data, oned_as='row') 56 | return data 57 | -------------------------------------------------------------------------------- /models/__pycache__/embedder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rpl-cmu/neusis/8f34722d955f38117c9462523b809addeb9db05e/models/__pycache__/embedder.cpython-37.pyc -------------------------------------------------------------------------------- /models/__pycache__/fields.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rpl-cmu/neusis/8f34722d955f38117c9462523b809addeb9db05e/models/__pycache__/fields.cpython-37.pyc -------------------------------------------------------------------------------- /models/__pycache__/renderer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rpl-cmu/neusis/8f34722d955f38117c9462523b809addeb9db05e/models/__pycache__/renderer.cpython-37.pyc -------------------------------------------------------------------------------- /models/embedder.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | # Positional encoding embedding. Code was taken from https://github.com/bmild/nerf. 6 | class Embedder: 7 | def __init__(self, **kwargs): 8 | self.kwargs = kwargs 9 | self.create_embedding_fn() 10 | 11 | def create_embedding_fn(self): 12 | embed_fns = [] 13 | d = self.kwargs['input_dims'] 14 | out_dim = 0 15 | if self.kwargs['include_input']: 16 | embed_fns.append(lambda x: x) 17 | out_dim += d 18 | 19 | max_freq = self.kwargs['max_freq_log2'] 20 | N_freqs = self.kwargs['num_freqs'] 21 | 22 | if self.kwargs['log_sampling']: 23 | freq_bands = 2. 
** torch.linspace(0., max_freq, N_freqs) 24 | else: 25 | freq_bands = torch.linspace(2.**0., 2.**max_freq, N_freqs) 26 | 27 | for freq in freq_bands: 28 | for p_fn in self.kwargs['periodic_fns']: 29 | embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq)) 30 | out_dim += d 31 | 32 | self.embed_fns = embed_fns 33 | self.out_dim = out_dim 34 | 35 | def embed(self, inputs): 36 | return torch.cat([fn(inputs) for fn in self.embed_fns], -1) 37 | 38 | 39 | def get_embedder(multires, input_dims=3): 40 | embed_kwargs = { 41 | 'include_input': True, 42 | 'input_dims': input_dims, 43 | 'max_freq_log2': multires-1, 44 | 'num_freqs': multires, 45 | 'log_sampling': True, 46 | 'periodic_fns': [torch.sin, torch.cos], 47 | } 48 | 49 | embedder_obj = Embedder(**embed_kwargs) 50 | def embed(x, eo=embedder_obj): return eo.embed(x) 51 | return embed, embedder_obj.out_dim 52 | -------------------------------------------------------------------------------- /models/fields.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import numpy as np 5 | from models.embedder import get_embedder 6 | import sys 7 | 8 | # This implementation is borrowed from IDR: https://github.com/lioryariv/idr 9 | class SDFNetwork(nn.Module): 10 | def __init__(self, 11 | d_in, 12 | d_out, 13 | d_hidden, 14 | n_layers, 15 | skip_in=(4,), 16 | multires=0, 17 | bias=0.5, 18 | scale=1, 19 | geometric_init=True, 20 | weight_norm=True, 21 | inside_outside=False): 22 | super(SDFNetwork, self).__init__() 23 | 24 | dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out] 25 | 26 | self.embed_fn_fine = None 27 | 28 | if multires > 0: 29 | embed_fn, input_ch = get_embedder(multires, input_dims=d_in) 30 | self.embed_fn_fine = embed_fn 31 | dims[0] = input_ch 32 | 33 | self.num_layers = len(dims) 34 | self.skip_in = skip_in 35 | self.scale = scale 36 | 37 | for l in range(0, self.num_layers - 1): 38 | if l + 1 in self.skip_in: 39 | out_dim = dims[l + 1] - dims[0] 40 | else: 41 | out_dim = dims[l + 1] 42 | 43 | lin = nn.Linear(dims[l], out_dim) 44 | 45 | if geometric_init: 46 | if l == self.num_layers - 2: 47 | if not inside_outside: 48 | torch.nn.init.normal_(lin.weight, mean= np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001) 49 | torch.nn.init.constant_(lin.bias, -bias) 50 | else: 51 | torch.nn.init.normal_(lin.weight, mean= -np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001) 52 | torch.nn.init.constant_(lin.bias, bias) 53 | elif multires > 0 and l == 0: 54 | torch.nn.init.constant_(lin.bias, 0.0) 55 | torch.nn.init.constant_(lin.weight[:, 3:], 0.0) 56 | torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim)) 57 | elif multires > 0 and l in self.skip_in: 58 | torch.nn.init.constant_(lin.bias, 0.0) 59 | torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim)) 60 | torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3):], 0.0) 61 | else: 62 | torch.nn.init.constant_(lin.bias, 0.0) 63 | torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim)) 64 | 65 | if weight_norm: 66 | lin = nn.utils.weight_norm(lin) 67 | 68 | setattr(self, "lin" + str(l), lin) 69 | 70 | self.activation = nn.Softplus(beta=100) 71 | 72 | def forward(self, inputs): 73 | inputs = inputs * self.scale 74 | if self.embed_fn_fine is not None: 75 | inputs = self.embed_fn_fine(inputs) 76 | 77 | x = inputs 78 | for l in range(0, self.num_layers - 1): 79 | lin = getattr(self, "lin" + str(l)) 80 | 81 | if l in self.skip_in: 82 | 
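# skip connection (IDR-style): re-inject the embedded input and rescale by 1/sqrt(2) to keep the concatenated activations at a similar magnitude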
x = torch.cat([x, inputs], 1) / np.sqrt(2) 83 | 84 | x = lin(x) 85 | 86 | if l < self.num_layers - 2: 87 | x = self.activation(x) 88 | return torch.cat([x[:, :1] / self.scale, x[:, 1:]], dim=-1) 89 | 90 | def sdf(self, x): 91 | return self.forward(x)[:, :1] 92 | 93 | def sdf_hidden_appearance(self, x): 94 | return self.forward(x) 95 | 96 | def gradient(self, x): 97 | x.requires_grad_(True) 98 | y = self.sdf(x) 99 | d_output = torch.ones_like(y, requires_grad=False, device=y.device) 100 | gradients = torch.autograd.grad( 101 | outputs=y, 102 | inputs=x, 103 | grad_outputs=d_output, 104 | create_graph=True, 105 | retain_graph=True, 106 | only_inputs=True)[0] 107 | return gradients.unsqueeze(1) 108 | 109 | 110 | # This implementation is borrowed from IDR: https://github.com/lioryariv/idr 111 | class RenderingNetwork(nn.Module): 112 | def __init__(self, 113 | d_feature, 114 | mode, 115 | d_in, 116 | d_out, 117 | d_hidden, 118 | n_layers, 119 | weight_norm=True, 120 | multires_view=0, 121 | squeeze_out=True): 122 | super().__init__() 123 | 124 | self.mode = mode 125 | self.squeeze_out = squeeze_out 126 | dims = [d_in + d_feature] + [d_hidden for _ in range(n_layers)] + [d_out] 127 | 128 | self.embedview_fn = None 129 | if multires_view > 0: 130 | embedview_fn, input_ch = get_embedder(multires_view) 131 | self.embedview_fn = embedview_fn 132 | dims[0] += (input_ch - 3) 133 | 134 | self.num_layers = len(dims) 135 | 136 | for l in range(0, self.num_layers - 1): 137 | out_dim = dims[l + 1] 138 | lin = nn.Linear(dims[l], out_dim) 139 | 140 | if weight_norm: 141 | lin = nn.utils.weight_norm(lin) 142 | 143 | setattr(self, "lin" + str(l), lin) 144 | 145 | self.relu = nn.ReLU() 146 | 147 | def forward(self, points, normals, view_dirs, feature_vectors): 148 | if self.embedview_fn is not None: 149 | view_dirs = self.embedview_fn(view_dirs) 150 | 151 | rendering_input = None 152 | 153 | if self.mode == 'idr': 154 | rendering_input = torch.cat([points, view_dirs, normals, feature_vectors], dim=-1) 155 | elif self.mode == 'no_view_dir': 156 | rendering_input = torch.cat([points, normals, feature_vectors], dim=-1) 157 | elif self.mode == 'no_normal': 158 | rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1) 159 | 160 | x = rendering_input 161 | 162 | for l in range(0, self.num_layers - 1): 163 | lin = getattr(self, "lin" + str(l)) 164 | 165 | x = lin(x) 166 | 167 | if l < self.num_layers - 2: 168 | x = self.relu(x) 169 | 170 | if self.squeeze_out: 171 | x = torch.sigmoid(x) 172 | return x 173 | 174 | 175 | # This implementation is borrowed from nerf-pytorch: https://github.com/yenchenlin/nerf-pytorch 176 | class NeRF(nn.Module): 177 | def __init__(self, 178 | D=8, 179 | W=256, 180 | d_in=3, 181 | d_in_view=3, 182 | multires=0, 183 | multires_view=0, 184 | output_ch=4, 185 | skips=[4], 186 | use_viewdirs=False): 187 | super(NeRF, self).__init__() 188 | self.D = D 189 | self.W = W 190 | self.d_in = d_in 191 | self.d_in_view = d_in_view 192 | self.input_ch = 3 193 | self.input_ch_view = 3 194 | self.embed_fn = None 195 | self.embed_fn_view = None 196 | 197 | if multires > 0: 198 | embed_fn, input_ch = get_embedder(multires, input_dims=d_in) 199 | self.embed_fn = embed_fn 200 | self.input_ch = input_ch 201 | 202 | if multires_view > 0: 203 | embed_fn_view, input_ch_view = get_embedder(multires_view, input_dims=d_in_view) 204 | self.embed_fn_view = embed_fn_view 205 | self.input_ch_view = input_ch_view 206 | 207 | self.skips = skips 208 | self.use_viewdirs = use_viewdirs 209 | 210 | 
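# trunk MLP: D layers of width W; the embedded input is concatenated back in at every layer listed in self.skips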
self.pts_linears = nn.ModuleList( 211 | [nn.Linear(self.input_ch, W)] + 212 | [nn.Linear(W, W) if i not in self.skips else nn.Linear(W + self.input_ch, W) for i in range(D - 1)]) 213 | 214 | ### Implementation according to the official code release 215 | ### (https://github.com/bmild/nerf/blob/master/run_nerf_helpers.py#L104-L105) 216 | self.views_linears = nn.ModuleList([nn.Linear(self.input_ch_view + W, W // 2)]) 217 | 218 | ### Implementation according to the paper 219 | # self.views_linears = nn.ModuleList( 220 | # [nn.Linear(input_ch_views + W, W//2)] + [nn.Linear(W//2, W//2) for i in range(D//2)]) 221 | 222 | if use_viewdirs: 223 | self.feature_linear = nn.Linear(W, W) 224 | self.alpha_linear = nn.Linear(W, 1) 225 | self.rgb_linear = nn.Linear(W // 2, 3) 226 | else: 227 | self.output_linear = nn.Linear(W, output_ch) 228 | 229 | def forward(self, input_pts, input_views): 230 | if self.embed_fn is not None: 231 | input_pts = self.embed_fn(input_pts) 232 | if self.embed_fn_view is not None: 233 | input_views = self.embed_fn_view(input_views) 234 | 235 | h = input_pts 236 | for i, l in enumerate(self.pts_linears): 237 | h = self.pts_linears[i](h) 238 | h = F.relu(h) 239 | if i in self.skips: 240 | h = torch.cat([input_pts, h], -1) 241 | 242 | if self.use_viewdirs: 243 | alpha = self.alpha_linear(h) 244 | feature = self.feature_linear(h) 245 | h = torch.cat([feature, input_views], -1) 246 | 247 | for i, l in enumerate(self.views_linears): 248 | h = self.views_linears[i](h) 249 | h = F.relu(h) 250 | 251 | rgb = self.rgb_linear(h) 252 | return alpha, rgb 253 | else: 254 | assert False 255 | 256 | 257 | class SingleVarianceNetwork(nn.Module): 258 | def __init__(self, init_val): 259 | super(SingleVarianceNetwork, self).__init__() 260 | self.register_parameter('variance', nn.Parameter(torch.tensor(init_val))) 261 | 262 | def forward(self, x): 263 | return torch.ones([len(x), 1]) * torch.exp(self.variance * 10.0) 264 | -------------------------------------------------------------------------------- /models/renderer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import numpy as np 5 | import logging 6 | import mcubes 7 | import sys, os 8 | import pickle 9 | import matplotlib.pyplot as plt 10 | import time 11 | 12 | def extract_fields(bound_min, bound_max, resolution, query_func): 13 | N = 64 14 | X = torch.linspace(bound_min[0], bound_max[0], resolution).split(N) 15 | Y = torch.linspace(bound_min[1], bound_max[1], resolution).split(N) 16 | Z = torch.linspace(bound_min[2], bound_max[2], resolution).split(N) 17 | 18 | u = np.zeros([resolution, resolution, resolution], dtype=np.float32) 19 | with torch.no_grad(): 20 | for xi, xs in enumerate(X): 21 | for yi, ys in enumerate(Y): 22 | for zi, zs in enumerate(Z): 23 | xx, yy, zz = torch.meshgrid(xs, ys, zs) 24 | pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) 25 | val = query_func(pts).reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() 26 | u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val 27 | 28 | return u 29 | 30 | 31 | def extract_geometry(bound_min, bound_max, resolution, threshold, query_func): 32 | u = extract_fields(bound_min, bound_max, resolution, query_func) 33 | vertices, triangles = mcubes.marching_cubes(u, threshold) 34 | 35 | b_max_np = bound_max.detach().cpu().numpy() 36 | b_min_np = bound_min.detach().cpu().numpy() 37 | 38 | vertices = 
vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] 39 | 40 | return vertices, triangles 41 | 42 | class NeuSRenderer: 43 | def __init__(self, 44 | sdf_network, 45 | deviation_network, 46 | color_network, 47 | base_exp_dir, 48 | expID, 49 | n_samples, 50 | n_importance, 51 | n_outside, 52 | up_sample_steps, 53 | perturb): 54 | self.sdf_network = sdf_network 55 | self.deviation_network = deviation_network 56 | self.color_network = color_network 57 | self.n_samples = n_samples 58 | self.n_importance = n_importance 59 | self.n_outside = n_outside 60 | self.up_sample_steps = up_sample_steps 61 | self.perturb = perturb 62 | self.base_exp_dir = base_exp_dir 63 | self.expID = expID 64 | 65 | def render_core_sonar(self, 66 | dirs, 67 | pts, 68 | dists, 69 | sdf_network, 70 | deviation_network, 71 | color_network, 72 | n_pixels, 73 | arc_n_samples, 74 | ray_n_samples, 75 | cos_anneal_ratio=0.0): 76 | 77 | dirs_reshaped = dirs.reshape(n_pixels, arc_n_samples, ray_n_samples, 3) 78 | pts_reshaped = pts.reshape(n_pixels, arc_n_samples, ray_n_samples, 3) 79 | dists_reshaped = dists.reshape(n_pixels, arc_n_samples, ray_n_samples, 1) 80 | 81 | pts_mid = pts_reshaped + dirs_reshaped * dists_reshaped/2 82 | 83 | pts_mid = pts_mid.reshape(-1, 3) 84 | 85 | sdf_nn_output = sdf_network(pts_mid) 86 | sdf = sdf_nn_output[:, :1] 87 | 88 | feature_vector = sdf_nn_output[:, 1:] 89 | 90 | gradients = sdf_network.gradient(pts_mid).squeeze() 91 | 92 | 93 | 94 | sampled_color = color_network(pts_mid, gradients, dirs, feature_vector).reshape(n_pixels, arc_n_samples, ray_n_samples) 95 | 96 | inv_s = deviation_network(torch.zeros([1, 3]))[:, :1].clip(1e-6, 1e6) 97 | 98 | inv_s = inv_s.expand(n_pixels*arc_n_samples*ray_n_samples, 1) 99 | true_cos = (dirs * gradients).sum(-1, keepdim=True) 100 | 101 | # "cos_anneal_ratio" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes 102 | # the cos value "not dead" at the beginning training iterations, for better convergence. 103 | iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) + 104 | F.relu(-true_cos) * cos_anneal_ratio) # always non-positive 105 | 106 | # Estimate signed distances at section points 107 | 108 | estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5 109 | estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5 110 | 111 | 112 | prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s) 113 | next_cdf = torch.sigmoid(estimated_next_sdf * inv_s) 114 | 115 | p = prev_cdf - next_cdf 116 | c = prev_cdf 117 | 118 | alpha = ((p + 1e-5) / (c + 1e-5)).reshape(n_pixels, arc_n_samples, ray_n_samples).clip(0.0, 1.0) 119 | 120 | cumuProdAllPointsOnEachRay = torch.cat([torch.ones([n_pixels, arc_n_samples, 1]), 1. 
- alpha + 1e-7], -1) 121 | 122 | cumuProdAllPointsOnEachRay = torch.cumprod(cumuProdAllPointsOnEachRay, -1) 123 | 124 | TransmittancePointsOnArc = cumuProdAllPointsOnEachRay[:, :, ray_n_samples-2] 125 | 126 | alphaPointsOnArc = alpha[:, :, ray_n_samples-1] 127 | 128 | weights = alphaPointsOnArc * TransmittancePointsOnArc 129 | 130 | intensityPointsOnArc = sampled_color[:, :, ray_n_samples-1] 131 | 132 | summedIntensities = (intensityPointsOnArc*weights).sum(dim=1) 133 | 134 | # Eikonal loss 135 | gradients = gradients.reshape(n_pixels, arc_n_samples, ray_n_samples, 3) 136 | 137 | gradient_error = (torch.linalg.norm(gradients, ord=2, 138 | dim=-1) - 1.0) ** 2 139 | 140 | variation_error = torch.linalg.norm(alpha, ord=1, dim=-1).sum() 141 | 142 | return { 143 | 'color': summedIntensities, 144 | 'intensityPointsOnArc': intensityPointsOnArc, 145 | 'sdf': sdf, 146 | 'dists': dists, 147 | 'gradients': gradients, 148 | 's_val': 1.0 / inv_s, 149 | 'weights': weights, 150 | 'cdf': c.reshape(n_pixels, arc_n_samples, ray_n_samples), 151 | 'gradient_error': gradient_error, 152 | 'variation_error': variation_error 153 | } 154 | 155 | def render_sonar(self, rays_d, pts, dists, n_pixels, 156 | arc_n_samples, ray_n_samples, cos_anneal_ratio=0.0): 157 | # Render core 158 | 159 | ret_fine = self.render_core_sonar(rays_d, 160 | pts, 161 | dists, 162 | self.sdf_network, 163 | self.deviation_network, 164 | self.color_network, 165 | n_pixels, 166 | arc_n_samples, 167 | ray_n_samples, 168 | cos_anneal_ratio=cos_anneal_ratio) 169 | 170 | color_fine = ret_fine['color'] 171 | weights = ret_fine['weights'] 172 | weights_sum = weights.sum(dim=-1, keepdim=True) 173 | gradients = ret_fine['gradients'] 174 | #s_val = ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdim=True) 175 | 176 | return { 177 | 'color_fine': color_fine, 178 | 'weight_sum': weights_sum, 179 | 'weight_max': torch.max(weights, dim=-1, keepdim=True)[0], 180 | 'gradients': gradients, 181 | 'weights': weights, 182 | 'intensityPointsOnArc': ret_fine["intensityPointsOnArc"], 183 | 'gradient_error': ret_fine['gradient_error'], 184 | 'variation_error': ret_fine['variation_error'] 185 | } 186 | 187 | 188 | 189 | def extract_geometry(self, bound_min, bound_max, resolution, threshold=0.0): 190 | return extract_geometry(bound_min, 191 | bound_max, 192 | resolution=resolution, 193 | threshold=threshold, 194 | query_func=lambda pts: -self.sdf_network.sdf(pts)) 195 | -------------------------------------------------------------------------------- /plot_mesh.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | import numpy as np 3 | 4 | import time 5 | import torch 6 | import scipy.io 7 | import matplotlib.pyplot as plt 8 | from helpers import * 9 | from MLP import * 10 | 11 | import time 12 | 13 | from load_data import * 14 | from run_sdf import Runner 15 | import logging, argparse 16 | import scipy.io 17 | 18 | 19 | if __name__=='__main__': 20 | torch.set_default_tensor_type('torch.cuda.FloatTensor') 21 | FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s" 22 | logging.getLogger('matplotlib.font_manager').disabled = True 23 | logging.basicConfig(level=logging.DEBUG, format=FORMAT) 24 | 25 | parser = argparse.ArgumentParser() 26 | parser.add_argument('--conf', type=str, default="./confs/conf.conf") 27 | parser.add_argument('--is_continue', default=False, action="store_true") 28 | parser.add_argument('--gpu', type=int, default=0) 29 | 30 | args = parser.parse_args() 31 | 32 | 
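# select the GPU, then rebuild the runner from the experiment config (write_config=False leaves the saved config untouched) before extracting the mesh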
torch.cuda.set_device(args.gpu) 33 | runner = Runner(args.conf, args.is_continue, write_config=False) 34 | 35 | runner.set_params() 36 | runner.validate_mesh(threshold=0.1) 37 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pyparsing==2.4.7 2 | pyhocon==0.3.57 3 | opencv_python==4.5.2.52 4 | scipy==1.7.0 5 | PyMCubes==0.1.2 6 | trimesh==3.9.8 7 | numpy==1.19.2 8 | tqdm==4.65.0 9 | matplotlib==3.5.3 -------------------------------------------------------------------------------- /run_sdf.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | import numpy as np 3 | import json 4 | import random 5 | import time 6 | import torch 7 | import torch.nn as nn 8 | import torch.nn.functional as F 9 | from tqdm import tqdm, trange 10 | import scipy.io 11 | import matplotlib.pyplot as plt 12 | from helpers import * 13 | from MLP import * 14 | #from PIL import Image 15 | import cv2 as cv 16 | import time 17 | import random 18 | import string 19 | from pyhocon import ConfigFactory 20 | from models.fields import RenderingNetwork, SDFNetwork, SingleVarianceNetwork, NeRF 21 | from models.renderer import NeuSRenderer 22 | import trimesh 23 | from itertools import groupby 24 | from operator import itemgetter 25 | from load_data import * 26 | import logging 27 | import argparse 28 | 29 | from math import ceil 30 | 31 | 32 | def config_parser(): 33 | import configargparse 34 | parser = configargparse.ArgumentParser() 35 | 36 | 37 | 38 | class Runner: 39 | def __init__(self, conf, is_continue=False, write_config=True): 40 | conf_path = conf 41 | f = open(conf_path) 42 | conf_text = f.read() 43 | self.is_continue = is_continue 44 | self.conf = ConfigFactory.parse_string(conf_text) 45 | self.write_config = write_config 46 | 47 | def set_params(self): 48 | self.expID = self.conf.get_string('conf.expID') 49 | 50 | dataset = self.conf.get_string('conf.dataset') 51 | self.image_setkeyname = self.conf.get_string('conf.image_setkeyname') 52 | 53 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 54 | self.dataset = dataset 55 | 56 | # Training parameters 57 | self.end_iter = self.conf.get_int('train.end_iter') 58 | self.N_rand = self.conf.get_int('train.num_select_pixels') #H*W 59 | self.arc_n_samples = self.conf.get_int('train.arc_n_samples') 60 | self.save_freq = self.conf.get_int('train.save_freq') 61 | self.report_freq = self.conf.get_int('train.report_freq') 62 | self.val_mesh_freq = self.conf.get_int('train.val_mesh_freq') 63 | self.learning_rate = self.conf.get_float('train.learning_rate') 64 | self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha') 65 | self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0) 66 | self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0) 67 | self.percent_select_true = self.conf.get_float('train.percent_select_true', default=0.5) 68 | self.r_div = self.conf.get_bool('train.r_div') 69 | # Weights 70 | self.igr_weight = self.conf.get_float('train.igr_weight') 71 | self.variation_reg_weight = self.conf.get_float('train.variation_reg_weight') 72 | self.px_sample_min_weight = self.conf.get_float('train.px_sample_min_weight') 73 | 74 | self.ray_n_samples = self.conf['model.neus_renderer']['n_samples'] 75 | self.base_exp_dir = './experiments/{}'.format(self.expID) 76 | self.randomize_points = 
self.conf.get_float('train.randomize_points') 77 | self.select_px_method = self.conf.get_string('train.select_px_method') 78 | self.select_valid_px = self.conf.get_bool('train.select_valid_px') 79 | self.x_max = self.conf.get_float('mesh.x_max') 80 | self.x_min = self.conf.get_float('mesh.x_min') 81 | self.y_max = self.conf.get_float('mesh.y_max') 82 | self.y_min = self.conf.get_float('mesh.y_min') 83 | self.z_max = self.conf.get_float('mesh.z_max') 84 | self.z_min = self.conf.get_float('mesh.z_min') 85 | self.level_set = self.conf.get_float('mesh.level_set') 86 | 87 | self.data = load_data(dataset) 88 | 89 | self.H, self.W = self.data[self.image_setkeyname][0].shape 90 | 91 | self.r_min = self.data["min_range"] 92 | self.r_max = self.data["max_range"] 93 | self.phi_min = -self.data["vfov"]/2 94 | self.phi_max = self.data["vfov"]/2 95 | self.vfov = self.data["vfov"] 96 | self.hfov = self.data["hfov"] 97 | 98 | 99 | self.cube_center = torch.Tensor([(self.x_max + self.x_min)/2, (self.y_max + self.y_min)/2, (self.z_max + self.z_min)/2]) 100 | 101 | self.timef = self.conf.get_bool('conf.timef') 102 | self.end_iter = self.conf.get_int('train.end_iter') 103 | self.start_iter = self.conf.get_int('train.start_iter') 104 | 105 | self.object_bbox_min = self.conf.get_list('mesh.object_bbox_min') 106 | self.object_bbox_max = self.conf.get_list('mesh.object_bbox_max') 107 | 108 | r_increments = [] 109 | self.sonar_resolution = (self.r_max-self.r_min)/self.H 110 | for i in range(self.H): 111 | r_increments.append(i*self.sonar_resolution + self.r_min) 112 | 113 | self.r_increments = torch.FloatTensor(r_increments).to(self.device) 114 | 115 | extrapath = './experiments/{}'.format(self.expID) 116 | if not os.path.exists(extrapath): 117 | os.makedirs(extrapath) 118 | 119 | extrapath = './experiments/{}/checkpoints'.format(self.expID) 120 | if not os.path.exists(extrapath): 121 | os.makedirs(extrapath) 122 | 123 | extrapath = './experiments/{}/model'.format(self.expID) 124 | if not os.path.exists(extrapath): 125 | os.makedirs(extrapath) 126 | 127 | if self.write_config: 128 | with open('./experiments/{}/config.json'.format(self.expID), 'w') as f: 129 | json.dump(self.conf.__dict__, f, indent = 2) 130 | 131 | # Create all image tensors beforehand to speed up process 132 | 133 | self.i_train = np.arange(len(self.data[self.image_setkeyname])) 134 | 135 | self.coords_all_ls = [(x, y) for x in np.arange(self.H) for y in np.arange(self.W)] 136 | self.coords_all_set = set(self.coords_all_ls) 137 | 138 | #self.coords_all = torch.from_numpy(np.array(self.coords_all_ls)).to(self.device) 139 | 140 | self.del_coords = [] 141 | for y in np.arange(self.W): 142 | tmp = [(x, y) for x in np.arange(0, self.ray_n_samples)] 143 | self.del_coords.extend(tmp) 144 | 145 | self.coords_all = list(self.coords_all_set - set(self.del_coords)) 146 | self.coords_all = torch.LongTensor(self.coords_all).to(self.device) 147 | 148 | self.criterion = torch.nn.L1Loss(reduction='sum') 149 | 150 | self.model_list = [] 151 | self.writer = None 152 | 153 | # Networks 154 | params_to_train = [] 155 | self.sdf_network = SDFNetwork(**self.conf['model.sdf_network']).to(self.device) 156 | 157 | self.deviation_network = SingleVarianceNetwork(**self.conf['model.variance_network']).to(self.device) 158 | self.color_network = RenderingNetwork(**self.conf['model.rendering_network']).to(self.device) 159 | params_to_train += list(self.sdf_network.parameters()) 160 | params_to_train += list(self.deviation_network.parameters()) 161 | params_to_train += 
list(self.color_network.parameters()) 162 | 163 | self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate) 164 | 165 | 166 | self.iter_step = 0 167 | self.renderer = NeuSRenderer(self.sdf_network, 168 | self.deviation_network, 169 | self.color_network, 170 | self.base_exp_dir, 171 | self.expID, 172 | **self.conf['model.neus_renderer']) 173 | 174 | latest_model_name = None 175 | if self.is_continue: 176 | model_list_raw = os.listdir(os.path.join(self.base_exp_dir, 'checkpoints')) 177 | model_list = [] 178 | for model_name in model_list_raw: 179 | if model_name[-3:] == 'pth': #and int(model_name[5:-4]) <= self.end_iter: 180 | model_list.append(model_name) 181 | model_list.sort() 182 | latest_model_name = model_list[-1] 183 | 184 | if latest_model_name is not None: 185 | logging.info('Find checkpoint: {}'.format(latest_model_name)) 186 | self.load_checkpoint(latest_model_name) 187 | 188 | def getRandomImgCoordsByPercentage(self, target): 189 | true_coords = [] 190 | for y in np.arange(self.W): 191 | col = target[:, y] 192 | gt0 = col > 0 193 | indTrue = np.where(gt0)[0] 194 | if len(indTrue) > 0: 195 | true_coords.extend([(x, y) for x in indTrue]) 196 | 197 | sampling_perc = int(self.percent_select_true*len(true_coords)) 198 | true_coords = random.sample(true_coords, sampling_perc) 199 | true_coords = list(set(true_coords) - set(self.del_coords)) 200 | true_coords = torch.LongTensor(true_coords).to(self.device) 201 | target = torch.Tensor(target).to(self.device) 202 | # in addition to the high-intensity coordinates selected above, 203 | # sample N_rand coordinates uniformly at random over the image 204 | 205 | 206 | N_rand = self.N_rand 207 | coords = select_coordinates(self.coords_all, target, N_rand, self.select_valid_px) 208 | 209 | coords = torch.cat((coords, true_coords), dim=0) 210 | 211 | return coords, target 212 | 213 | def train(self): 214 | loss_arr = [] 215 | 216 | for i in trange(self.start_iter, self.end_iter, len(self.data[self.image_setkeyname])): 217 | i_train = np.arange(len(self.data[self.image_setkeyname])) 218 | np.random.shuffle(i_train) 219 | loss_total = 0 220 | sum_intensity_loss = 0 221 | sum_eikonal_loss = 0 222 | sum_total_variational = 0 223 | 224 | for j in trange(0, len(i_train)): 225 | img_i = i_train[j] 226 | target = self.data[self.image_setkeyname][img_i] 227 | 228 | 229 | pose = self.data["sensor_poses"][img_i] 230 | 231 | if self.select_px_method == "byprob": # note: getRandomImgCoordsByProbability is not defined in this file; the provided confs use "bypercent" 232 | coords, target = self.getRandomImgCoordsByProbability(target) 233 | else: 234 | coords, target = self.getRandomImgCoordsByPercentage(target) 235 | 236 | n_pixels = len(coords) 237 | rays_d, dphi, r, rs, pts, dists = get_arcs(self.H, self.W, self.phi_min, self.phi_max, self.r_min, self.r_max, torch.Tensor(pose), n_pixels, 238 | self.arc_n_samples, self.ray_n_samples, self.hfov, coords, self.r_increments, 239 | self.randomize_points, self.device, self.cube_center) 240 | 241 | 242 | target_s = target[coords[:, 0], coords[:, 1]] 243 | 244 | render_out = self.renderer.render_sonar(rays_d, pts, dists, n_pixels, 245 | self.arc_n_samples, self.ray_n_samples, 246 | cos_anneal_ratio=self.get_cos_anneal_ratio()) 247 | 248 | 249 | intensityPointsOnArc = render_out["intensityPointsOnArc"] 250 | 251 | gradient_error = render_out['gradient_error'] #.reshape(n_pixels, self.arc_n_samples, -1) 252 | 253 | eikonal_loss = gradient_error.sum()*(1/(self.arc_n_samples*self.ray_n_samples*n_pixels)) 254 | 255 | variation_regularization = render_out['variation_error']*(1/(self.arc_n_samples*self.ray_n_samples*n_pixels)) 256 |
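# r_div: optionally divide each arc sample's intensity by its range rs before weighting, e.g. to compensate range-dependent intensity falloff (disabled in the provided confs)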
if self.r_div: 258 | intensity_fine = (torch.divide(intensityPointsOnArc, rs)*render_out["weights"]).sum(dim=1) 259 | else: 260 | intensity_fine = render_out['color_fine'] 261 | 262 | intensity_error = self.criterion(intensity_fine, target_s)*(1/n_pixels) 263 | 264 | 265 | loss = intensity_error + eikonal_loss * self.igr_weight + variation_regularization*self.variation_reg_weight 266 | 267 | self.optimizer.zero_grad() 268 | loss.backward() 269 | self.optimizer.step() 270 | 271 | with torch.no_grad(): 272 | lossNG = intensity_error + eikonal_loss * self.igr_weight 273 | loss_total += lossNG.cpu().numpy().item() 274 | sum_intensity_loss += intensity_error.cpu().numpy().item() 275 | sum_eikonal_loss += eikonal_loss.cpu().numpy().item() 276 | sum_total_variational += variation_regularization.cpu().numpy().item() 277 | 278 | self.iter_step += 1 279 | self.update_learning_rate() 280 | 281 | del(target) 282 | del(target_s) 283 | del(rays_d) 284 | del(pts) 285 | del(dists) 286 | del(render_out) 287 | del(coords) 288 | 289 | with torch.no_grad(): 290 | l = loss_total/len(i_train) 291 | iL = sum_intensity_loss/len(i_train) 292 | eikL = sum_eikonal_loss/len(i_train) 293 | varL = sum_total_variational/len(i_train) 294 | loss_arr.append(l) 295 | 296 | if i == 0 or i % self.save_freq == 0: 297 | logging.info('iter:{} ********************* SAVING CHECKPOINT ****************'.format(self.iter_step)) 298 | self.save_checkpoint() 299 | 300 | if i % self.report_freq == 0: 301 | print('iter:{:>8d} Loss={} | intensity Loss={} | eikonal loss={} | total variation loss={} | lr={}'.format(self.iter_step, l, iL, eikL, varL, self.optimizer.param_groups[0]['lr'])) 302 | 303 | if i == 0 or i % self.val_mesh_freq == 0: 304 | self.validate_mesh(threshold = self.level_set) 305 | 306 | 307 | 308 | def save_checkpoint(self): 309 | checkpoint = { 310 | 'sdf_network_fine': self.sdf_network.state_dict(), 311 | 'variance_network_fine': self.deviation_network.state_dict(), 312 | 'color_network_fine': self.color_network.state_dict(), 313 | 'optimizer': self.optimizer.state_dict(), 314 | 'iter_step': self.iter_step, 315 | } 316 | 317 | os.makedirs(os.path.join(self.base_exp_dir, 'checkpoints'), exist_ok=True) 318 | torch.save(checkpoint, os.path.join(self.base_exp_dir, 'checkpoints', 'ckpt_{:0>6d}.pth'.format(self.iter_step))) 319 | 320 | def load_checkpoint(self, checkpoint_name): 321 | checkpoint = torch.load(os.path.join(self.base_exp_dir, 'checkpoints', checkpoint_name), map_location=self.device) 322 | self.sdf_network.load_state_dict(checkpoint['sdf_network_fine']) 323 | self.deviation_network.load_state_dict(checkpoint['variance_network_fine']) 324 | self.color_network.load_state_dict(checkpoint['color_network_fine']) 325 | self.optimizer.load_state_dict(checkpoint['optimizer']) 326 | self.iter_step = checkpoint['iter_step'] 327 | 328 | def update_learning_rate(self): 329 | if self.iter_step < self.warm_up_end: 330 | learning_factor = self.iter_step / self.warm_up_end 331 | else: 332 | alpha = self.learning_rate_alpha 333 | progress = (self.iter_step - self.warm_up_end) / (self.end_iter - self.warm_up_end) 334 | learning_factor = (np.cos(np.pi * progress) + 1.0) * 0.5 * (1 - alpha) + alpha 335 | 336 | for g in self.optimizer.param_groups: 337 | g['lr'] = self.learning_rate * learning_factor 338 | 339 | def get_cos_anneal_ratio(self): 340 | if self.anneal_end == 0.0: 341 | return 1.0 342 | else: 343 | return np.min([1.0, self.iter_step / self.anneal_end]) 344 | 345 | def validate_mesh(self,
world_space=False, resolution=64, threshold=0.0): 346 | bound_min = torch.tensor(self.object_bbox_min, dtype=torch.float32) 347 | bound_max = torch.tensor(self.object_bbox_max, dtype=torch.float32) 348 | 349 | vertices, triangles =\ 350 | self.renderer.extract_geometry(bound_min, bound_max, resolution=resolution, threshold=threshold) 351 | 352 | os.makedirs(os.path.join(self.base_exp_dir, 'meshes'), exist_ok=True) 353 | 354 | if world_space: 355 | vertices = vertices * self.dataset.scale_mats_np[0][0, 0] + self.dataset.scale_mats_np[0][:3, 3][None] 356 | 357 | mesh = trimesh.Trimesh(vertices, triangles) 358 | mesh.export(os.path.join(self.base_exp_dir, 'meshes', '{:0>8d}.ply'.format(self.iter_step))) 359 | 360 | 361 | if __name__=='__main__': 362 | torch.set_default_tensor_type('torch.cuda.FloatTensor') 363 | FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s" 364 | logging.getLogger('matplotlib.font_manager').disabled = True 365 | logging.basicConfig(level=logging.DEBUG, format=FORMAT) 366 | 367 | parser = argparse.ArgumentParser() 368 | parser.add_argument('--conf', type=str, default="./confs/conf.conf") 369 | parser.add_argument('--is_continue', default=False, action="store_true") 370 | parser.add_argument('--gpu', type=int, default=0) 371 | 372 | args = parser.parse_args() 373 | 374 | torch.cuda.set_device(args.gpu) 375 | runner = Runner(args.conf, args.is_continue) 376 | runner.set_params() 377 | runner.train() 378 | -------------------------------------------------------------------------------- /static/BPSubmarine.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rpl-cmu/neusis/8f34722d955f38117c9462523b809addeb9db05e/static/BPSubmarine.gif -------------------------------------------------------------------------------- /static/BPplaneFull.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rpl-cmu/neusis/8f34722d955f38117c9462523b809addeb9db05e/static/BPplaneFull.gif -------------------------------------------------------------------------------- /static/VASubmarine.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rpl-cmu/neusis/8f34722d955f38117c9462523b809addeb9db05e/static/VASubmarine.gif -------------------------------------------------------------------------------- /static/VAplaneFull.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rpl-cmu/neusis/8f34722d955f38117c9462523b809addeb9db05e/static/VAplaneFull.gif -------------------------------------------------------------------------------- /static/planeFull.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rpl-cmu/neusis/8f34722d955f38117c9462523b809addeb9db05e/static/planeFull.gif -------------------------------------------------------------------------------- /static/planeFull.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rpl-cmu/neusis/8f34722d955f38117c9462523b809addeb9db05e/static/planeFull.png -------------------------------------------------------------------------------- /static/submarine.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rpl-cmu/neusis/8f34722d955f38117c9462523b809addeb9db05e/static/submarine.gif 
-------------------------------------------------------------------------------- /static/submarine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rpl-cmu/neusis/8f34722d955f38117c9462523b809addeb9db05e/static/submarine.png --------------------------------------------------------------------------------