├── __init__.py
├── NeuS
├── __init__.py
├── models
│ ├── embedder.py
│ ├── dataset.py
│ ├── fields.py
│ └── renderer.py
└── exp_runner.py
├── sample_results
├── view.png
├── normals.png
├── teaser.png
└── mesh_vis.png
├── data
├── reduced_baseline_0.6x_rgb
│ ├── image
│ │ ├── 000.png
│ │ ├── 001.png
│ │ ├── 002.png
│ │ ├── 003.png
│ │ ├── 004.png
│ │ ├── 005.png
│ │ ├── 006.png
│ │ ├── 007.png
│ │ ├── 008.png
│ │ ├── 009.png
│ │ ├── 010.png
│ │ ├── 011.png
│ │ ├── 012.png
│ │ ├── 013.png
│ │ ├── 014.png
│ │ ├── 015.png
│ │ ├── 016.png
│ │ ├── 017.png
│ │ ├── 018.png
│ │ ├── 019.png
│ │ ├── 020.png
│ │ ├── 021.png
│ │ ├── 022.png
│ │ ├── 023.png
│ │ ├── 024.png
│ │ ├── 025.png
│ │ ├── 026.png
│ │ ├── 027.png
│ │ ├── 028.png
│ │ ├── 029.png
│ │ ├── 030.png
│ │ ├── 031.png
│ │ ├── 032.png
│ │ ├── 033.png
│ │ ├── 034.png
│ │ ├── 035.png
│ │ ├── 036.png
│ │ ├── 037.png
│ │ ├── 038.png
│ │ ├── 039.png
│ │ ├── 040.png
│ │ ├── 041.png
│ │ ├── 042.png
│ │ ├── 043.png
│ │ ├── 044.png
│ │ ├── 045.png
│ │ ├── 046.png
│ │ ├── 047.png
│ │ ├── 048.png
│ │ ├── 049.png
│ │ ├── 050.png
│ │ ├── 051.png
│ │ ├── 052.png
│ │ ├── 053.png
│ │ ├── 054.png
│ │ ├── 055.png
│ │ ├── 056.png
│ │ ├── 057.png
│ │ ├── 058.png
│ │ └── 059.png
│ ├── mask
│ │ ├── 000.png
│ │ ├── 001.png
│ │ ├── 002.png
│ │ ├── 003.png
│ │ ├── 004.png
│ │ ├── 005.png
│ │ ├── 006.png
│ │ ├── 007.png
│ │ ├── 008.png
│ │ ├── 009.png
│ │ ├── 010.png
│ │ ├── 011.png
│ │ ├── 012.png
│ │ ├── 013.png
│ │ ├── 014.png
│ │ ├── 015.png
│ │ ├── 016.png
│ │ ├── 017.png
│ │ ├── 018.png
│ │ ├── 019.png
│ │ ├── 020.png
│ │ ├── 021.png
│ │ ├── 022.png
│ │ ├── 023.png
│ │ ├── 024.png
│ │ ├── 025.png
│ │ ├── 026.png
│ │ ├── 027.png
│ │ ├── 028.png
│ │ ├── 029.png
│ │ ├── 030.png
│ │ ├── 031.png
│ │ ├── 032.png
│ │ ├── 033.png
│ │ ├── 034.png
│ │ ├── 035.png
│ │ ├── 036.png
│ │ ├── 037.png
│ │ ├── 038.png
│ │ ├── 039.png
│ │ ├── 040.png
│ │ ├── 041.png
│ │ ├── 042.png
│ │ ├── 043.png
│ │ ├── 044.png
│ │ ├── 045.png
│ │ ├── 046.png
│ │ ├── 047.png
│ │ ├── 048.png
│ │ ├── 049.png
│ │ ├── 050.png
│ │ ├── 051.png
│ │ ├── 052.png
│ │ ├── 053.png
│ │ ├── 054.png
│ │ ├── 055.png
│ │ ├── 056.png
│ │ ├── 057.png
│ │ ├── 058.png
│ │ └── 059.png
│ └── cameras_sphere.npz
└── reduced_baseline_0.6x_sonar
│ ├── Data
│ ├── 000.pkl
│ ├── 001.pkl
│ ├── 002.pkl
│ ├── 003.pkl
│ ├── 004.pkl
│ ├── 005.pkl
│ ├── 006.pkl
│ ├── 007.pkl
│ ├── 008.pkl
│ ├── 009.pkl
│ ├── 010.pkl
│ ├── 011.pkl
│ ├── 012.pkl
│ ├── 013.pkl
│ ├── 014.pkl
│ ├── 015.pkl
│ ├── 016.pkl
│ ├── 017.pkl
│ ├── 018.pkl
│ ├── 019.pkl
│ ├── 020.pkl
│ ├── 021.pkl
│ ├── 022.pkl
│ ├── 023.pkl
│ ├── 024.pkl
│ ├── 025.pkl
│ ├── 026.pkl
│ ├── 027.pkl
│ ├── 028.pkl
│ ├── 029.pkl
│ ├── 030.pkl
│ ├── 031.pkl
│ ├── 032.pkl
│ ├── 033.pkl
│ ├── 034.pkl
│ ├── 035.pkl
│ ├── 036.pkl
│ ├── 037.pkl
│ ├── 038.pkl
│ ├── 039.pkl
│ ├── 040.pkl
│ ├── 041.pkl
│ ├── 042.pkl
│ ├── 043.pkl
│ ├── 044.pkl
│ ├── 045.pkl
│ ├── 046.pkl
│ ├── 047.pkl
│ ├── 048.pkl
│ ├── 049.pkl
│ ├── 050.pkl
│ ├── 051.pkl
│ ├── 052.pkl
│ ├── 053.pkl
│ ├── 054.pkl
│ ├── 055.pkl
│ ├── 056.pkl
│ ├── 057.pkl
│ ├── 058.pkl
│ └── 059.pkl
│ ├── imgs
│ ├── 0000.png
│ ├── 0001.png
│ ├── 0002.png
│ ├── 0003.png
│ ├── 0004.png
│ ├── 0005.png
│ ├── 0006.png
│ ├── 0007.png
│ ├── 0008.png
│ ├── 0009.png
│ ├── 0010.png
│ ├── 0011.png
│ ├── 0012.png
│ ├── 0013.png
│ ├── 0014.png
│ ├── 0015.png
│ ├── 0016.png
│ ├── 0017.png
│ ├── 0018.png
│ ├── 0019.png
│ ├── 0020.png
│ ├── 0021.png
│ ├── 0022.png
│ ├── 0023.png
│ ├── 0024.png
│ ├── 0025.png
│ ├── 0026.png
│ ├── 0027.png
│ ├── 0028.png
│ ├── 0029.png
│ ├── 0030.png
│ ├── 0031.png
│ ├── 0032.png
│ ├── 0033.png
│ ├── 0034.png
│ ├── 0035.png
│ ├── 0036.png
│ ├── 0037.png
│ ├── 0038.png
│ ├── 0039.png
│ ├── 0040.png
│ ├── 0041.png
│ ├── 0042.png
│ ├── 0043.png
│ ├── 0044.png
│ ├── 0045.png
│ ├── 0046.png
│ ├── 0047.png
│ ├── 0048.png
│ ├── 0049.png
│ ├── 0050.png
│ ├── 0051.png
│ ├── 0052.png
│ ├── 0053.png
│ ├── 0054.png
│ ├── 0055.png
│ ├── 0056.png
│ ├── 0057.png
│ ├── 0058.png
│ └── 0059.png
│ └── Config.json
├── LICENSE
├── models
├── embedder.py
├── testing.py
├── fields.py
└── renderer.py
├── confs
├── turtle_reduced_baseline_0.6x_joint_rgb.conf
└── turtle_reduced_baseline_0.6x_joint_sonar.conf
├── MLP.py
├── load_data.py
├── environment.yml
├── README.md
├── helpers.py
└── run_sdf.py
/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/NeuS/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/sample_results/view.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/sample_results/view.png
--------------------------------------------------------------------------------
/sample_results/normals.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/sample_results/normals.png
--------------------------------------------------------------------------------
/sample_results/teaser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/sample_results/teaser.png
--------------------------------------------------------------------------------
/sample_results/mesh_vis.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/sample_results/mesh_vis.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/000.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/000.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/001.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/002.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/003.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/004.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/005.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/005.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/006.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/006.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/007.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/007.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/008.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/008.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/009.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/009.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/010.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/010.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/011.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/011.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/012.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/012.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/013.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/013.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/014.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/014.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/015.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/015.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/016.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/016.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/017.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/017.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/018.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/018.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/019.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/019.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/020.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/021.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/021.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/022.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/022.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/023.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/023.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/024.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/024.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/025.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/025.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/026.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/026.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/027.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/027.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/028.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/028.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/029.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/029.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/030.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/030.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/031.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/031.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/032.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/032.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/033.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/033.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/034.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/034.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/035.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/035.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/036.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/036.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/037.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/037.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/038.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/038.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/039.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/039.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/040.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/040.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/041.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/041.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/042.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/042.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/043.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/043.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/044.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/044.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/045.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/045.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/046.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/046.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/047.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/047.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/048.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/048.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/049.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/049.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/050.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/050.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/051.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/051.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/052.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/052.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/053.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/053.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/054.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/054.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/055.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/055.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/056.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/056.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/057.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/057.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/058.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/058.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/image/059.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/image/059.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/000.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/000.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/001.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/002.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/003.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/004.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/005.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/005.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/006.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/006.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/007.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/007.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/008.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/008.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/009.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/009.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/010.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/010.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/011.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/011.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/012.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/012.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/013.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/013.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/014.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/014.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/015.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/015.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/016.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/016.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/017.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/017.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/018.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/018.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/019.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/019.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/020.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/021.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/021.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/022.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/022.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/023.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/023.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/024.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/024.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/025.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/025.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/026.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/026.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/027.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/027.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/028.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/028.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/029.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/029.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/030.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/030.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/031.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/031.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/032.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/032.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/033.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/033.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/034.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/034.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/035.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/035.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/036.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/036.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/037.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/037.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/038.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/038.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/039.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/039.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/040.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/040.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/041.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/041.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/042.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/042.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/043.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/043.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/044.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/044.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/045.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/045.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/046.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/046.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/047.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/047.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/048.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/048.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/049.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/049.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/050.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/050.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/051.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/051.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/052.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/052.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/053.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/053.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/054.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/054.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/055.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/055.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/056.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/056.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/057.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/057.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/058.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/058.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/mask/059.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/mask/059.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/000.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/000.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/001.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/001.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/002.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/002.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/003.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/003.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/004.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/004.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/005.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/005.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/006.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/006.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/007.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/007.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/008.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/008.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/009.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/009.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/010.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/010.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/011.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/011.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/012.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/012.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/013.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/013.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/014.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/014.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/015.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/015.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/016.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/016.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/017.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/017.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/018.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/018.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/019.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/019.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/020.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/020.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/021.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/021.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/022.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/022.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/023.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/023.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/024.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/024.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/025.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/025.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/026.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/026.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/027.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/027.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/028.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/028.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/029.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/029.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/030.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/030.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/031.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/031.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/032.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/032.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/033.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/033.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/034.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/034.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/035.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/035.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/036.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/036.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/037.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/037.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/038.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/038.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/039.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/039.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/040.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/040.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/041.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/041.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/042.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/042.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/043.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/043.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/044.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/044.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/045.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/045.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/046.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/046.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/047.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/047.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/048.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/048.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/049.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/049.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/050.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/050.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/051.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/051.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/052.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/052.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/053.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/053.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/054.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/054.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/055.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/055.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/056.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/056.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/057.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/057.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/058.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/058.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Data/059.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/Data/059.pkl
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0000.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0000.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0001.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0002.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0003.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0004.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0005.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0005.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0006.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0006.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0007.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0007.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0008.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0008.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0009.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0009.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0010.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0010.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0011.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0011.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0012.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0012.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0013.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0013.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0014.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0014.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0015.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0015.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0016.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0016.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0017.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0017.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0018.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0018.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0019.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0019.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0020.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0021.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0021.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0022.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0022.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0023.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0023.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0024.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0024.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0025.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0025.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0026.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0026.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0027.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0027.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0028.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0028.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0029.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0029.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0030.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0030.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0031.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0031.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0032.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0032.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0033.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0033.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0034.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0034.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0035.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0035.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0036.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0036.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0037.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0037.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0038.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0038.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0039.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0039.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0040.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0040.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0041.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0041.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0042.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0042.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0043.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0043.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0044.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0044.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0045.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0045.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0046.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0046.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0047.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0047.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0048.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0048.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0049.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0049.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0050.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0050.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0051.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0051.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0052.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0052.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0053.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0053.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0054.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0054.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0055.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0055.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0056.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0056.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0057.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0057.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0058.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0058.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/imgs/0059.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_sonar/imgs/0059.png
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_rgb/cameras_sphere.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kzhang2/aoneus/HEAD/data/reduced_baseline_0.6x_rgb/cameras_sphere.npz
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Kevin Zhang
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/models/embedder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | # Positional encoding embedding. Code was taken from https://github.com/bmild/nerf.
6 | class Embedder:
7 | def __init__(self, **kwargs):
8 | self.kwargs = kwargs
9 | self.create_embedding_fn()
10 |
11 | def create_embedding_fn(self):
12 | embed_fns = []
13 | d = self.kwargs['input_dims']
14 | out_dim = 0
15 | if self.kwargs['include_input']:
16 | embed_fns.append(lambda x: x)
17 | out_dim += d
18 |
19 | max_freq = self.kwargs['max_freq_log2']
20 | N_freqs = self.kwargs['num_freqs']
21 |
22 | if self.kwargs['log_sampling']:
23 | freq_bands = 2. ** torch.linspace(0., max_freq, N_freqs)
24 | else:
25 | freq_bands = torch.linspace(2.**0., 2.**max_freq, N_freqs)
26 |
27 | for freq in freq_bands:
28 | for p_fn in self.kwargs['periodic_fns']:
29 | embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))
30 | out_dim += d
31 |
32 | self.embed_fns = embed_fns
33 | self.out_dim = out_dim
34 |
35 | def embed(self, inputs):
36 | return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
37 |
38 |
39 | def get_embedder(multires, input_dims=3):
40 | embed_kwargs = {
41 | 'include_input': True,
42 | 'input_dims': input_dims,
43 | 'max_freq_log2': multires-1,
44 | 'num_freqs': multires,
45 | 'log_sampling': True,
46 | 'periodic_fns': [torch.sin, torch.cos],
47 | }
48 |
49 | embedder_obj = Embedder(**embed_kwargs)
50 | def embed(x, eo=embedder_obj): return eo.embed(x)
51 | return embed, embedder_obj.out_dim
52 |
--------------------------------------------------------------------------------
/NeuS/models/embedder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | # Positional encoding embedding. Code was taken from https://github.com/bmild/nerf.
6 | class Embedder:
7 | def __init__(self, **kwargs):
8 | self.kwargs = kwargs
9 | self.create_embedding_fn()
10 |
11 | def create_embedding_fn(self):
12 | embed_fns = []
13 | d = self.kwargs['input_dims']
14 | out_dim = 0
15 | if self.kwargs['include_input']:
16 | embed_fns.append(lambda x: x)
17 | out_dim += d
18 |
19 | max_freq = self.kwargs['max_freq_log2']
20 | N_freqs = self.kwargs['num_freqs']
21 |
22 | if self.kwargs['log_sampling']:
23 | freq_bands = 2. ** torch.linspace(0., max_freq, N_freqs)
24 | else:
25 | freq_bands = torch.linspace(2.**0., 2.**max_freq, N_freqs)
26 |
27 | for freq in freq_bands:
28 | for p_fn in self.kwargs['periodic_fns']:
29 | embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))
30 | out_dim += d
31 |
32 | self.embed_fns = embed_fns
33 | self.out_dim = out_dim
34 |
35 | def embed(self, inputs):
36 | return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
37 |
38 |
39 | def get_embedder(multires, input_dims=3):
40 | embed_kwargs = {
41 | 'include_input': True,
42 | 'input_dims': input_dims,
43 | 'max_freq_log2': multires-1,
44 | 'num_freqs': multires,
45 | 'log_sampling': True,
46 | 'periodic_fns': [torch.sin, torch.cos],
47 | }
48 |
49 | embedder_obj = Embedder(**embed_kwargs)
50 | def embed(x, eo=embedder_obj): return eo.embed(x)
51 | return embed, embedder_obj.out_dim
52 |
--------------------------------------------------------------------------------
/confs/turtle_reduced_baseline_0.6x_joint_rgb.conf:
--------------------------------------------------------------------------------
1 | general {
2 | base_exp_dir = "experiments/reduced_baseline_0.6x_joint"
3 | recording = [
4 | "./"
5 | "./models"
6 | ]
7 | }
8 | dataset {
9 | data_dir = data/reduced_baseline_0.6x_rgb
10 | render_cameras_name = "cameras_sphere.npz"
11 | object_cameras_name = "cameras_sphere.npz"
12 | ds_factor = 1.0
13 | }
14 | train {
15 | learning_rate = 0.0005
16 | learning_rate_alpha = 0.05
17 | end_iter = 5001
18 | batch_size = 512
19 | validate_resolution_level = 4
20 | warm_up_end = 5000
21 | anneal_end = 0
22 | use_white_bkgd = false
23 | save_freq = 10000
24 | val_freq = 500
25 | val_mesh_freq = 1000
26 | report_freq = 100
27 | igr_weight = 0.1
28 | mask_weight = 0.0
29 | variation_weight = 0.0
30 | }
31 | model {
32 | nerf {
33 | D = 8
34 | d_in = 4
35 | d_in_view = 3
36 | W = 256
37 | multires = 10
38 | multires_view = 4
39 | output_ch = 4
40 | skips = [
41 | 4
42 | ]
43 | use_viewdirs = true
44 | }
45 | sdf_network {
46 | d_out = 257
47 | d_in = 3
48 | d_hidden = 256
49 | n_layers = 8
50 | skip_in = [
51 | 4
52 | ]
53 | multires = 6
54 | bias = 0.5
55 | scale = 1.0
56 | geometric_init = true
57 | weight_norm = true
58 | }
59 | variance_network {
60 | init_val = 0.3
61 | }
62 | rendering_network {
63 | d_feature = 64
64 | mode = "idr"
65 | d_in = 9
66 | d_out = 3
67 | d_hidden = 64
68 | n_layers = 4
69 | weight_norm = true
70 | multires_view = 4
71 | squeeze_out = true
72 | }
73 | neus_renderer {
74 | n_samples = 64
75 | n_importance = 64
76 | n_outside = 32
77 | up_sample_steps = 4
78 | perturb = 1.0
79 | }
80 | }
--------------------------------------------------------------------------------
/confs/turtle_reduced_baseline_0.6x_joint_sonar.conf:
--------------------------------------------------------------------------------
1 | conf {
2 | dataset = "data/reduced_baseline_0.6x_sonar"
3 | image_setkeyname = "images"
4 | expID = "experiments/reduced_baseline_0.6x_joint"
5 | timef = false
6 | filter_th = 0
7 | use_manual_bound = true
8 | }
9 | train {
10 | learning_rate = 0.0005
11 | learning_rate_alpha = 0.01
12 | end_iter = 5001
13 | start_iter = 0
14 | warm_up_end = 5000
15 | anneal_end = 50000
16 | select_valid_px = false
17 | save_freq = 10
18 | val_mesh_freq = 10
19 | report_freq = 1
20 | igr_weight = 0.1
21 | variation_reg_weight = 0
22 | arc_n_samples = 10
23 | select_px_method = "bypercent"
24 | num_select_pixels = 100
25 | px_sample_min_weight = 0.001
26 | randomize_points = true
27 | percent_select_true = 0.4
28 | r_div = false
29 | weight_sum_factor = 0.1
30 | dark_weight_sum_factor = 0.1
31 | do_weight_norm = true
32 | mode_tradeoff_schedule = "step"
33 | mode_tradeoff_step_iter = 2500
34 | rgb_weight = 0.7
35 | accel = false
36 | }
37 | mesh {
38 | object_bbox_min = [
39 | -1
40 | -1
41 | -1
42 | ]
43 | object_bbox_max = [
44 | 1
45 | 1
46 | 1
47 | ]
48 | x_max = 1.0
49 | x_min = -1.0
50 | y_max = 1.0
51 | y_min = -1.0
52 | z_max = 1.0
53 | z_min = -1.0
54 | level_set = 0
55 | }
56 | model {
57 | sdf_network {
58 | d_out = 65
59 | d_in = 3
60 | d_hidden = 64
61 | n_layers = 4
62 | skip_in = [
63 | 2
64 | ]
65 | multires = 6
66 | bias = 0.5
67 | scale = 1.0
68 | geometric_init = true
69 | weight_norm = true
70 | }
71 | variance_network {
72 | init_val = 0.3
73 | }
74 | rendering_network {
75 | d_feature = 64
76 | mode = "idr"
77 | d_in = 9
78 | d_out = 1
79 | d_hidden = 64
80 | n_layers = 4
81 | weight_norm = true
82 | multires_view = 4
83 | squeeze_out = true
84 | }
85 | neus_renderer {
86 | n_samples = 64
87 | n_importance = 0
88 | n_outside = 0
89 | up_sample_steps = 4
90 | perturb = 0
91 | }
92 | }
--------------------------------------------------------------------------------
/MLP.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import time
3 | import sys
4 |
5 | class Network_S_Relu(torch.nn.Module):
6 | def __init__(self, D=8, H=256, input_ch=3, input_ch_views=3, output_ch=4, skips=[4], no_rho=False):
7 | super(Network_S_Relu, self).__init__()
8 | self.input_ch = input_ch
9 | self.input_ch_views = input_ch_views
10 | self.skips = skips
11 | self.no_rho = no_rho
12 | self.pts_linears = torch.nn.ModuleList(
13 | [torch.nn.Linear(input_ch, H)] + [torch.nn.Linear(H, H) if i not in self.skips else torch.nn.Linear(H + input_ch, H) for i in range(D-1)])
14 | self.views_linears = torch.nn.ModuleList([torch.nn.Linear(input_ch_views + H, H//2)])
15 | if self.no_rho:
16 | self.output_linear = torch.nn.Linear(H, output_ch)
17 | else:
18 | self.feature_linear = torch.nn.Linear(H, H)
19 | self.alpha_linear = torch.nn.Linear(H, 1)
20 | self.rho_linear = torch.nn.Linear(H//2, 1)
21 |
22 | def forward(self, x):
23 | # y_pred = self.linear(x)
24 | if self.no_rho:
25 | input_pts = x
26 | h = x
27 | else:
28 | input_pts, input_views = torch.split(x, [self.input_ch, self.input_ch_views], dim=-1)
29 | h = input_pts
30 |
31 | for i, l in enumerate(self.pts_linears):
32 | h = self.pts_linears[i](h)
33 | h = torch.nn.functional.relu(h)
34 | if i in self.skips:
35 | h = torch.cat([input_pts, h], -1)
36 |
37 | if self.no_rho:
38 | outputs = self.output_linear(h)
39 | else:
40 | alpha = self.alpha_linear(h)
41 | alpha = torch.abs(alpha)
42 | feature = self.feature_linear(h)
43 | h = torch.cat([feature, input_views], -1)
44 | for i, l in enumerate(self.views_linears):
45 | h = self.views_linears[i](h)
46 | h = torch.nn.functional.relu(h)
47 | rho = self.rho_linear(h)
48 | rho = torch.abs(rho)
49 | outputs = torch.cat([rho, alpha], -1)
50 | return outputs
51 |
--------------------------------------------------------------------------------
/load_data.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import pickle
4 | import json
5 | import math
6 | from scipy.io import savemat
7 | import numpy as np
8 |
9 | def load_data(target):
10 | # dirpath = "./data/{}".format(target)
11 | dirpath = target
12 | pickle_loc = "{}/Data".format(dirpath)
13 | output_loc = "{}/UnzipData".format(dirpath)
14 | cfg_path = "{}/Config.json".format(dirpath)
15 |
16 |
17 | with open(cfg_path, 'r') as f:
18 | cfg = json.load(f)
19 |
20 | for agents in cfg["agents"][0]["sensors"]:
21 | if agents["sensor_type"] != "ImagingSonar": continue
22 | hfov = agents["configuration"]["Azimuth"]
23 | vfov = agents["configuration"]["Elevation"]
24 | min_range = agents["configuration"]["RangeMin"]
25 | max_range = agents["configuration"]["RangeMax"]
26 | hfov = math.radians(hfov)
27 | vfov = math.radians(vfov)
28 |
29 | # os.makedirs(output_loc)
30 | images = []
31 | sensor_poses = []
32 |
33 | ds_factor = cfg["ds_factor"]
34 | for pkls in os.listdir(pickle_loc):
35 | filename = "{}/{}".format(pickle_loc, pkls)
36 | with open(filename, 'rb') as f:
37 | state = pickle.load(f)
38 | image = state["ImagingSonar"]
39 | s = image.shape
40 | # get rid of preprocessing?
41 | if image.dtype == np.uint8:
42 | # print("hey")
43 | image = image / 255
44 | if cfg["simple_denoise"]:
45 | image[image < 0.2] = 0
46 | image[s[0]- 200:, :] = 0
47 | pose = state["PoseSensor"]
48 | pose[:3, 3] /= ds_factor
49 | images.append(image)
50 | sensor_poses.append(pose)
51 |
52 | data = {
53 | "images": images,
54 | "images_no_noise": [],
55 | "sensor_poses": sensor_poses,
56 | "min_range": min_range / ds_factor,
57 | "max_range": max_range / ds_factor,
58 | "hfov": hfov,
59 | "vfov": vfov
60 | }
61 |
62 | # savemat('{}/{}.mat'.format(dirpath,target), data, oned_as='row')
63 | return data
64 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: aoneus_env
2 | channels:
3 | - defaults
4 | dependencies:
5 | - _libgcc_mutex=0.1=main
6 | - _openmp_mutex=5.1=1_gnu
7 | - ca-certificates=2023.12.12=h06a4308_0
8 | - ld_impl_linux-64=2.38=h1181459_1
9 | - libffi=3.4.4=h6a678d5_0
10 | - libgcc-ng=11.2.0=h1234567_1
11 | - libgomp=11.2.0=h1234567_1
12 | - libstdcxx-ng=11.2.0=h1234567_1
13 | - ncurses=6.4=h6a678d5_0
14 | - openssl=3.0.12=h7f8727e_0
15 | - pip=23.3.1=py39h06a4308_0
16 | - python=3.9.18=h955ad1f_0
17 | - readline=8.2=h5eee18b_0
18 | - setuptools=68.2.2=py39h06a4308_0
19 | - sqlite=3.41.2=h5eee18b_0
20 | - tk=8.6.12=h1ccaba5_0
21 | - tzdata=2023d=h04d1e81_0
22 | - wheel=0.41.2=py39h06a4308_0
23 | - xz=5.4.5=h5eee18b_0
24 | - zlib=1.2.13=h5eee18b_0
25 | - pip:
26 | - asttokens==2.4.1
27 | - certifi==2023.11.17
28 | - charset-normalizer==3.3.2
29 | - colorama==0.4.6
30 | - executing==2.0.1
31 | - filelock==3.13.1
32 | - fsspec==2023.12.2
33 | - icecream==2.1.3
34 | - idna==3.6
35 | - jinja2==3.1.3
36 | - markupsafe==2.1.4
37 | - mpmath==1.3.0
38 | - networkx==3.2.1
39 | - numpy==1.26.3
40 | - nvidia-cublas-cu12==12.1.3.1
41 | - nvidia-cuda-cupti-cu12==12.1.105
42 | - nvidia-cuda-nvrtc-cu12==12.1.105
43 | - nvidia-cuda-runtime-cu12==12.1.105
44 | - nvidia-cudnn-cu12==8.9.2.26
45 | - nvidia-cufft-cu12==11.0.2.54
46 | - nvidia-curand-cu12==10.3.2.106
47 | - nvidia-cusolver-cu12==11.4.5.107
48 | - nvidia-cusparse-cu12==12.1.0.106
49 | - nvidia-nccl-cu12==2.18.1
50 | - nvidia-nvjitlink-cu12==12.3.101
51 | - nvidia-nvtx-cu12==12.1.105
52 | - opencv-python==4.9.0.80
53 | - pillow==10.2.0
54 | - pygments==2.17.2
55 | - pyhocon==0.3.57
56 | - pymcubes==0.1.4
57 | - pyparsing==3.1.1
58 | - requests==2.31.0
59 | - scipy==1.12.0
60 | - six==1.16.0
61 | - sympy==1.12
62 | - torch==2.1.2
63 | - torchaudio==2.1.2
64 | - torchvision==0.16.2
65 | - tqdm==4.66.1
66 | - trimesh==4.0.10
67 | - triton==2.1.0
68 | - typing-extensions==4.9.0
69 | - urllib3==2.1.0
70 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # AONeuS
2 | AONeuS, or Acoustic-optical Neural Surfaces, is a state-of-the-art imaging sonar and optical camera based 3d reconstruction method.
3 |
4 |
5 | # System Requirements
6 | We ran the demo successfully on a NVIDIA RTX A6000 using CUDA 12.2 with 32 Gb RAM using a single CPU core.
7 | The demo took ~16 minutes to finish.
8 | Running the demo used ~ 8.25 GB of VRAM on our system, so the demo should be run on a system with at least 9 GB of VRAM to be safe.
9 |
10 | # Install Dependencies
11 | We maintained our Python environments using conda. Run the below command in a shell to install the required dependencies.
12 |
13 | `conda env create -f environment.yml`
14 |
15 | # Dataset Info
16 | A sample synthetic dataset comes with this demo in the `data/`` folder. It is a simulated turtle, and the camera trajectory setting corresponds to the 0.6x or 0.72m setting described in the paper. Below are some sample images from the dataset.
17 |
18 | ## RGB Image
19 |
20 |
21 | ## Sonar Image
22 |
23 |
24 |
25 | # Running the demo
26 | To run our demo after setting up the dependencies, run the following command in a shell:
27 |
28 | `python run_sdf.py --conf confs/turtle_reduced_baseline_0.6x_joint_sonar.conf --neus_conf confs/turtle_reduced_baseline_0.6x_joint_rgb.conf --disable_wandb --random_seed 1706110819`
29 |
30 | Please feel free to change the `--random_seed` flag to whatever is desired.
31 |
32 | Experiments will be written to `experiments/reduced_baseline_0.6x_joint/{random_seed}` (so for this command, `1706110819`).
33 |
34 | # Results
35 | Here are the results after running the command provided on our system:
36 |
37 | ## Mesh Visualization:
38 |
39 |
40 |
41 | ## View Synthesis:
42 |
43 |
44 |
45 | ## Mesh Normals:
46 |
47 |
48 |
49 | # Citation
50 | Please cite our work as below:
51 |
52 | ```
53 | @inproceedings{10.1145/3641519.3657446,
54 | author = {Qadri, Mohamad and Zhang, Kevin and Hinduja, Akshay and Kaess, Michael and Pediredla, Adithya and Metzler, Christopher A},
55 | title = {AONeuS: A Neural Rendering Framework for Acoustic-Optical Sensor Fusion},
56 | year = {2024},
57 | isbn = {9798400705250},
58 | publisher = {Association for Computing Machinery},
59 | address = {New York, NY, USA},
60 | url = {https://doi.org/10.1145/3641519.3657446},
61 | doi = {10.1145/3641519.3657446},
62 | booktitle = {ACM SIGGRAPH 2024 Conference Papers},
63 | articleno = {127},
64 | numpages = {12},
65 | keywords = {3D reconstruction, imaging sonar, implicit neural representations, inverse rendering, multimodal sensing, neural rendering, robotics, sensor fusion, signed distance functions, underwater imaging, underwater sensing},
66 | location = {Denver, CO, USA},
67 | series = {SIGGRAPH '24}
68 | }
69 | ```
70 |
71 | # Credits
72 | This codebase is mostly adapted from https://github.com/Totoro97/NeuS. Thanks to the authors!
73 |
74 |
--------------------------------------------------------------------------------
/data/reduced_baseline_0.6x_sonar/Config.json:
--------------------------------------------------------------------------------
1 | {
2 | "simple_denoise": false,
3 | "ds_factor": 1.0,
4 | "name": "HoveringImagingSonar",
5 | "world": "SimpleUnderwater",
6 | "main_agent": "auv0",
7 | "ticks_per_sec": 200,
8 | "frames_per_sec": true,
9 | "octree_min": 0.02,
10 | "octree_max": 5.0,
11 | "agents": [
12 | {
13 | "agent_name": "auv0",
14 | "agent_type": "HoveringAUV",
15 | "sensors": [
16 | {
17 | "sensor_type": "OrientationSensor"
18 | },
19 | {
20 | "sensor_type": "LocationSensor"
21 | },
22 | {
23 | "sensor_type": "PoseSensor",
24 | "socket": "SonarSocket",
25 | "rotation": [
26 | 0,
27 | 45,
28 | 0
29 | ]
30 | },
31 | {
32 | "sensor_type": "VelocitySensor",
33 | "socket": "IMUSocket"
34 | },
35 | {
36 | "sensor_type": "IMUSensor",
37 | "socket": "IMUSocket",
38 | "Hz": 200,
39 | "configuration": {
40 | "AccelSigma": 0.00277,
41 | "AngVelSigma": 0.00123,
42 | "AccelBiasSigma": 0.00141,
43 | "AngVelBiasSigma": 0.00388,
44 | "ReturnBias": true
45 | }
46 | },
47 | {
48 | "sensor_type": "GPSSensor",
49 | "socket": "IMUSocket",
50 | "Hz": 5,
51 | "configuration": {
52 | "Sigma": 0.5,
53 | "Depth": 1,
54 | "DepthSigma": 0.25
55 | }
56 | },
57 | {
58 | "sensor_type": "DVLSensor",
59 | "socket": "DVLSocket",
60 | "Hz": 20,
61 | "configuration": {
62 | "Elevation": 22.5,
63 | "VelSigma": 0.02626,
64 | "ReturnRange": true,
65 | "MaxRange": 50,
66 | "RangeSigma": 0.1
67 | }
68 | },
69 | {
70 | "sensor_type": "DepthSensor",
71 | "socket": "DepthSocket",
72 | "Hz": 100,
73 | "configuration": {
74 | "Sigma": 0.255
75 | }
76 | },
77 | {
78 | "sensor_type": "ImagingSonar",
79 | "socket": "SonarSocket",
80 | "Hz": 10,
81 | "rotation": [
82 | 0,
83 | 45,
84 | 0
85 | ],
86 | "configuration": {
87 | "RangeBins": 256,
88 | "AzimuthBins": 96,
89 | "RangeMin": 0.01,
90 | "RangeMax": 3.3,
91 | "InitOctreeRange": 50,
92 | "Elevation": 12.000000000000002,
93 | "Azimuth": 59.99999999999999,
94 | "AzimuthStreaks": -1,
95 | "ScaleNoise": true,
96 | "AddSigma": 0.15,
97 | "MultSigma": 0.2,
98 | "RangeSigma": 0.0,
99 | "MultiPath": true,
100 | "ViewOctree": -1
101 | }
102 | }
103 | ],
104 | "control_scheme": 0,
105 | "location": [
106 | -18,
107 | 16,
108 | -22
109 | ],
110 | "rotation": [
111 | 0.0,
112 | 0.0,
113 | 130.0
114 | ]
115 | }
116 | ],
117 | "window_width": 1280,
118 | "window_height": 720
119 | }
--------------------------------------------------------------------------------
/models/testing.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import plotly.graph_objects as go
4 | import math
5 | import numpy as np
6 |
7 | def plot_sphere(r, fig):
8 | azis = torch.linspace(-np.pi, np.pi, 64)
9 | eles = torch.linspace(-np.pi / 2, np.pi / 2, 64)
10 | pixels_theta, pixels_phi = torch.meshgrid(
11 | azis, eles, indexing="ij"
12 | ) # careful with indexing here
13 |
14 | xs = r * torch.cos(pixels_theta) * torch.cos(pixels_phi)
15 | ys = r * torch.sin(pixels_theta) * torch.cos(pixels_phi)
16 | zs = r * torch.sin(pixels_phi)
17 | p = torch.stack([xs, ys, zs], dim=-1) # 64, 64, 3
18 |
19 | sphere_surface = go.Surface(
20 | z=p[:, :, 2].cpu(),
21 | x=p[:, :, 0].cpu(),
22 | y=p[:, :, 1].cpu(),
23 | opacity=0.3,
24 | showscale=False,
25 | )
26 | fig.add_trace(sphere_surface)
27 |
28 | def convert_pose(pose, direction):
29 | """
30 | Args:
31 | pose: (4, 4)
32 | Returns:
33 | converted_pose: (4, 4)
34 |
35 | neus: (right, down, in)
36 | ho: (in, left, up)
37 | ue: (in, right, up)
38 |
39 | how to visualize: if pose is in coord system A, then to convert to coord system
40 | B c_mat needs to convert coords from coord system B to coord system A. Think
41 | of coord system A vec first, where do you get the things you need from a vec
42 | written wrt coord system B?
43 | """
44 | conversion_mats = {
45 | "neus_to_ho": np.array([[0.0, -1.0, 0.0],
46 | [0.0, 0.0, -1.0],
47 | [1.0, 0.0, 0.0]]),
48 | "ue_to_neus": np.array(
49 | [[0.0, 0.0, 1.0],
50 | [1.0, 0.0, 0.0],
51 | [0.0, -1.0, 0.0]]
52 | ), # sign on last row?
53 | }
54 | c_mat = conversion_mats[direction]
55 | if isinstance(pose, np.ndarray):
56 | converted_pose = pose.copy()
57 | elif isinstance(pose, torch.Tensor):
58 | convert_pose = pose.clone()
59 | converted_pose[:3, :3] = c_mat.T @ pose[:3, :3] @ c_mat
60 | # converted_pose[:3, :3] = pose[:3, :3] @ c_mat
61 | # converted_pose[:3, :3] = c_mat.T @ pose[:3, :3]
62 | converted_pose[:3, 3:] = c_mat.T @ pose[:3, 3:]
63 | return converted_pose
64 |
65 | def plot_box(r, fig):
66 | range = torch.linspace(-r, r, 10)
67 | r1, r2 = torch.meshgrid(range, range)
68 | lower = torch.ones_like(r1) * (-r)
69 | upper = torch.ones_like(r1) * r
70 | planes = [
71 | [r1, r2, lower],
72 | [r1, r2, upper],
73 | [lower, r1, r2],
74 | [upper, r1, r2],
75 | [r1, lower, r2],
76 | [r1, upper, r2],
77 | ]
78 | for p in planes:
79 | p1 = torch.stack(p, dim=-1)
80 | p1_s = go.Surface(
81 | z=p1[:, :, 2].cpu(),
82 | x=p1[:, :, 0].cpu(),
83 | y=p1[:, :, 1].cpu(),
84 | opacity=0.3,
85 | showscale=False,
86 | )
87 | fig.add_trace(p1_s)
88 |
89 |
90 | # can refactor below slightly, curry parameter into sdf_func
91 | class ShapeSDF(nn.Module):
92 | def __init__(self, sdf_func, parameter):
93 | super().__init__()
94 | self.parameter = parameter
95 | self.sdf_func = sdf_func
96 |
97 | def forward(self, inputs):
98 | """
99 | Args:
100 | inputs: batch_size X 3
101 | """
102 | dummy_feats = torch.zeros((inputs.size()[0], 1)).cuda()
103 | sdf_vals = self.sdf_func(inputs, self.parameter)
104 | return torch.cat([sdf_vals, dummy_feats], dim=-1)
105 |
106 | def sdf(self, x):
107 | return self.forward(x)[:, :1]
108 |
109 | def gradient(self, x, mode="analytic"):
110 | if mode == "analytic":
111 | x.requires_grad_(True)
112 | y = self.sdf(x)
113 | d_output = torch.ones_like(y, requires_grad=False, device=y.device)
114 | gradients = torch.autograd.grad(
115 | outputs=y,
116 | inputs=x,
117 | grad_outputs=d_output,
118 | create_graph=True,
119 | retain_graph=True,
120 | only_inputs=True,
121 | )[0].unsqueeze(1)
122 | elif mode == "finite_difference":
123 | gradients = finite_difference_grad(self.sdf, x)
124 | return gradients
125 |
126 |
127 | def sphere_sdf_func(inputs, radius):
128 | norms = torch.linalg.vector_norm(inputs, dim=-1, keepdim=True)
129 | sdf_vals = norms - radius
130 | return sdf_vals
131 |
132 |
133 | def box_sdf_func(inputs, bounds):
134 | # print(inputs.size(), bounds.size())
135 | # from inigo quilez's site
136 | q = torch.abs(inputs) - bounds
137 | norms = torch.linalg.vector_norm(torch.clamp(q, min=0.0), dim=-1, keepdim=True)
138 | other = torch.min(
139 | torch.max(q[:, 0:1], torch.max(q[:, 1:2], q[:, 2:3])),
140 | torch.zeros_like(q[:, 0:1]),
141 | )
142 | return norms + other
143 |
144 | # color based on normals? lambertian reflectance?
145 | class RenderNetLamb(nn.Module):
146 | def __init__(self, sdf_func=None, falloff=-10, channels=1):
147 | super().__init__()
148 | self.sdf_func = sdf_func
149 | self.falloff = falloff
150 | self.channels = channels
151 |
152 | def forward(self, points, normals, view_dirs, feature_vectors):
153 | # print(points.shape)
154 | cos = (normals * view_dirs).sum(-1)[..., None].repeat(1, self.channels).abs()
155 | if self.sdf_func is not None:
156 | sdf_vals = self.sdf_func(points)
157 | weights = torch.exp(
158 | self.falloff * torch.abs(sdf_vals.expand((-1, self.channels)))
159 | )
160 | res = cos * weights
161 | else:
162 | res = cos
163 | return res
164 |
165 |
166 | class ConstVar(nn.Module):
167 | def __init__(self, inv_var):
168 | super().__init__()
169 | self.variance = inv_var
170 |
171 | def forward(self, pts):
172 | return torch.tensor(
173 | [[math.exp(10 * self.variance)]]
174 | ).cuda() # previously had sign error here, be careful
175 |
176 | def plot_mesh(mesh, fig):
177 | """
178 | Args:
179 | mesh: trimesh.Trimesh
180 | fig: graph_objects.Figure
181 | """
182 | mesh_plot = go.Mesh3d(
183 | x=mesh.vertices[:, 0],
184 | y=mesh.vertices[:, 1],
185 | z=mesh.vertices[:, 2],
186 | i=mesh.faces[:, 0],
187 | j=mesh.faces[:, 1],
188 | k=mesh.faces[:, 2],
189 | intensity = np.linspace(0, 1, len(mesh.faces), endpoint=True),
190 | intensitymode='cell',
191 | )
192 | fig.add_trace(mesh_plot)
193 |
194 | def plot_points_3d(points, fig, size=2, mode="markers"):
195 | """
196 | Args:
197 | points: (n, 3) np.ndarray
198 | fig: graph_objects.Figure
199 | """
200 | center_plot = go.Scatter3d(x=points[:, 0],
201 | y=points[:, 1],
202 | z=points[:, 2],
203 | mode=mode,
204 | marker=dict(size=size))
205 | fig.add_trace(center_plot)
206 |
207 | def sample_points_along_rays(rays_o, rays_d, near, far, num_points=16):
208 | """
209 | Args:
210 | rays_o: (n, 3), np.ndarray
211 | rays_d: (n, 3), np.ndarray
212 | near: float
213 | far: float
214 | Returns:
215 | vis_sample_pts: (n, num_points, 3)
216 | """
217 | vis_r = np.linspace(near, far, num_points) # (num_points,)
218 | vis_sample_pts = rays_o[:, None, :] + rays_d[:, None, :] * vis_r[None, :, None]
219 | # vis_sample_pts = vis_sample_pts.reshape(-1, 3)
220 | return vis_sample_pts
221 |
222 | def plot_pose_axes(pose, fig):
223 | """
224 | Args:
225 | pose: (4, 4) np.ndarray
226 | fig: graph_objects.Figure
227 | """
228 | if isinstance(pose, torch.Tensor):
229 | pose = pose.cpu().numpy()
230 | t = pose[:3, 3].T
231 | colorscale = [
232 | [0, "red"],
233 | [1.0, "green"],
234 | ] # red is origin, green is point inwards wrt ray direction
235 | for i in range(3):
236 | curr_in = pose[:3, i]# z-axis is in
237 | far = t + 0.5 * curr_in
238 | endpoints = np.stack([t, far])
239 | dir_vec_plot = go.Scatter3d(x=endpoints[:, 0], y=endpoints[:, 1], z=endpoints[:, 2],
240 | marker=dict(size=3, color=[0.0, 1.0], colorscale=colorscale),
241 | line=dict(color="darkblue", width=2))
242 | fig.add_trace(dir_vec_plot)
243 |
--------------------------------------------------------------------------------
/NeuS/models/dataset.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | import cv2 as cv
4 | import numpy as np
5 | import os
6 | from glob import glob
7 | from icecream import ic
8 | from scipy.spatial.transform import Rotation as Rot
9 | from scipy.spatial.transform import Slerp
10 |
11 |
12 | # This function is borrowed from IDR: https://github.com/lioryariv/idr
13 | def load_K_Rt_from_P(filename, P=None):
14 | if P is None:
15 | lines = open(filename).read().splitlines()
16 | if len(lines) == 4:
17 | lines = lines[1:]
18 | lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(" ") for x in lines)]
19 | P = np.asarray(lines).astype(np.float32).squeeze()
20 |
21 | out = cv.decomposeProjectionMatrix(P)
22 | K = out[0]
23 | R = out[1]
24 | t = out[2]
25 |
26 | K = K / K[2, 2]
27 | intrinsics = np.eye(4)
28 | intrinsics[:3, :3] = K
29 |
30 | pose = np.eye(4, dtype=np.float32)
31 | pose[:3, :3] = R.transpose()
32 | pose[:3, 3] = (t[:3] / t[3])[:, 0]
33 |
34 | return intrinsics, pose
35 |
36 | def cvt_neus_coords_to_ho_coords(pts):
37 | """
38 | args:
39 | pts: (..., 3)
40 | """
41 | original_shape = pts.shape
42 | pts = pts.reshape(-1, 3)
43 | cmat = torch.tensor(
44 | [[0.0, 0.0, 1.0],
45 | [-1.0, 0.0, 0.0],
46 | [0.0, -1.0, 0.0]])
47 | out = pts @ cmat.T
48 | return out.reshape(original_shape)
49 |
50 | class Dataset:
51 | def __init__(self, conf):
52 | super(Dataset, self).__init__()
53 | print('Load data: Begin')
54 | self.device = torch.device('cuda')
55 | self.conf = conf
56 |
57 | self.data_dir = conf.get_string('data_dir')
58 | self.render_cameras_name = conf.get_string('render_cameras_name')
59 | self.object_cameras_name = conf.get_string('object_cameras_name')
60 |
61 | self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True)
62 | self.scale_mat_scale = conf.get_float('scale_mat_scale', default=1.1)
63 |
64 | camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name))
65 | self.camera_dict = camera_dict
66 | self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png')))
67 | self.n_images = len(self.images_lis)
68 | self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0
69 | self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png')))
70 | self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0
71 |
72 | # world_mat is a projection matrix from world to image
73 | self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]
74 |
75 | self.scale_mats_np = []
76 |
77 | # scale_mat: used for coordinate normalization, we assume the scene to render is inside a unit sphere at origin.
78 | self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]
79 |
80 | self.intrinsics_all = []
81 | self.pose_all = []
82 |
83 | for scale_mat, world_mat in zip(self.scale_mats_np, self.world_mats_np):
84 | P = world_mat @ scale_mat
85 | P = P[:3, :4]
86 | intrinsics, pose = load_K_Rt_from_P(None, P)
87 | self.intrinsics_all.append(torch.from_numpy(intrinsics).float())
88 | self.pose_all.append(torch.from_numpy(pose).float())
89 |
90 | self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu() # [n_images, H, W, 3]
91 | self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu() # [n_images, H, W, 3]
92 | self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device) # [n_images, 4, 4]
93 | self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4]
94 | self.focal = self.intrinsics_all[0][0, 0]
95 | self.pose_all = torch.stack(self.pose_all).to(self.device) # [n_images, 4, 4]
96 | self.H, self.W = self.images.shape[1], self.images.shape[2]
97 | self.image_pixels = self.H * self.W
98 |
99 | object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])
100 | object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0])
101 | # Object scale mat: region of interest to **extract mesh**
102 | object_scale_mat = np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0']
103 | object_bbox_min = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_min[:, None]
104 | object_bbox_max = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_max[:, None]
105 | self.object_bbox_min = object_bbox_min[:3, 0]
106 | self.object_bbox_max = object_bbox_max[:3, 0]
107 |
108 | print('Load data: End')
109 |
110 | def gen_rays_at(self, img_idx, resolution_level=1):
111 | """
112 | Generate rays at world space from one camera.
113 | """
114 | l = resolution_level
115 | tx = torch.linspace(0, self.W - 1, self.W // l)
116 | ty = torch.linspace(0, self.H - 1, self.H // l)
117 | pixels_x, pixels_y = torch.meshgrid(tx, ty)
118 | p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3
119 | p = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3
120 | rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3
121 | rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3
122 | rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3
123 | rays_o = cvt_neus_coords_to_ho_coords(rays_o)
124 | rays_v = cvt_neus_coords_to_ho_coords(rays_v)
125 | return rays_o.transpose(0, 1), rays_v.transpose(0, 1)
126 |
127 | def gen_random_rays_at(self, img_idx, batch_size):
128 | """
129 | Generate random rays at world space from one camera.
130 | """
131 | pixels_x = torch.randint(low=0, high=self.W, size=[batch_size])
132 | pixels_y = torch.randint(low=0, high=self.H, size=[batch_size])
133 | color = self.images[img_idx].cuda()[(pixels_y, pixels_x)] # batch_size, 3
134 | mask = self.masks[img_idx].cuda()[(pixels_y, pixels_x)] # batch_size, 3
135 | p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float() # batch_size, 3
136 | p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3
137 | rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # batch_size, 3
138 | rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3
139 | rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3
140 | # switch to holoocean coordinates
141 | rays_v = cvt_neus_coords_to_ho_coords(rays_v)
142 | rays_o = cvt_neus_coords_to_ho_coords(rays_o)
143 | return torch.cat([rays_o, rays_v, color, mask[:, :1]], dim=-1).cuda() # batch_size, 10
144 |
145 | def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):
146 | """
147 | Interpolate pose between two cameras.
148 | """
149 | l = resolution_level
150 | tx = torch.linspace(0, self.W - 1, self.W // l)
151 | ty = torch.linspace(0, self.H - 1, self.H // l)
152 | pixels_x, pixels_y = torch.meshgrid(tx, ty)
153 | p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3
154 | p = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3
155 | rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3
156 | trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio
157 | pose_0 = self.pose_all[idx_0].detach().cpu().numpy()
158 | pose_1 = self.pose_all[idx_1].detach().cpu().numpy()
159 | pose_0 = np.linalg.inv(pose_0)
160 | pose_1 = np.linalg.inv(pose_1)
161 | rot_0 = pose_0[:3, :3]
162 | rot_1 = pose_1[:3, :3]
163 | rots = Rot.from_matrix(np.stack([rot_0, rot_1]))
164 | key_times = [0, 1]
165 | slerp = Slerp(key_times, rots)
166 | rot = slerp(ratio)
167 | pose = np.diag([1.0, 1.0, 1.0, 1.0])
168 | pose = pose.astype(np.float32)
169 | pose[:3, :3] = rot.as_matrix()
170 | pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]
171 | pose = np.linalg.inv(pose)
172 | rot = torch.from_numpy(pose[:3, :3]).cuda()
173 | trans = torch.from_numpy(pose[:3, 3]).cuda()
174 | rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3
175 | rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 3
176 | rays_o = cvt_neus_coords_to_ho_coords(rays_o)
177 | rays_v = cvt_neus_coords_to_ho_coords(rays_v)
178 | return rays_o.transpose(0, 1), rays_v.transpose(0, 1)
179 |
180 | def near_far_from_sphere(self, rays_o, rays_d):
181 | a = torch.sum(rays_d**2, dim=-1, keepdim=True)
182 | b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)
183 | mid = 0.5 * (-b) / a
184 | near = mid - 1.0
185 | far = mid + 1.0
186 | return near, far
187 |
188 | def image_at(self, idx, resolution_level):
189 | img = cv.imread(self.images_lis[idx])
190 | return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)
191 |
192 |
--------------------------------------------------------------------------------
/NeuS/models/fields.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | import numpy as np
5 | from models.embedder import get_embedder
6 |
7 |
8 | # This implementation is borrowed from IDR: https://github.com/lioryariv/idr
9 | class SDFNetwork(nn.Module):
10 | def __init__(self,
11 | d_in,
12 | d_out,
13 | d_hidden,
14 | n_layers,
15 | skip_in=(4,),
16 | multires=0,
17 | bias=0.5,
18 | scale=1,
19 | geometric_init=True,
20 | weight_norm=True,
21 | inside_outside=False):
22 | super(SDFNetwork, self).__init__()
23 |
24 | dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out]
25 |
26 | self.embed_fn_fine = None
27 |
28 | if multires > 0:
29 | embed_fn, input_ch = get_embedder(multires, input_dims=d_in)
30 | self.embed_fn_fine = embed_fn
31 | dims[0] = input_ch
32 |
33 | self.num_layers = len(dims)
34 | self.skip_in = skip_in
35 | self.scale = scale
36 |
37 | for l in range(0, self.num_layers - 1):
38 | if l + 1 in self.skip_in:
39 | out_dim = dims[l + 1] - dims[0]
40 | else:
41 | out_dim = dims[l + 1]
42 |
43 | lin = nn.Linear(dims[l], out_dim)
44 |
45 | if geometric_init:
46 | if l == self.num_layers - 2:
47 | if not inside_outside:
48 | torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)
49 | torch.nn.init.constant_(lin.bias, -bias)
50 | else:
51 | torch.nn.init.normal_(lin.weight, mean=-np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)
52 | torch.nn.init.constant_(lin.bias, bias)
53 | elif multires > 0 and l == 0:
54 | torch.nn.init.constant_(lin.bias, 0.0)
55 | torch.nn.init.constant_(lin.weight[:, 3:], 0.0)
56 | torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim))
57 | elif multires > 0 and l in self.skip_in:
58 | torch.nn.init.constant_(lin.bias, 0.0)
59 | torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))
60 | torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3):], 0.0)
61 | else:
62 | torch.nn.init.constant_(lin.bias, 0.0)
63 | torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))
64 |
65 | if weight_norm:
66 | lin = nn.utils.weight_norm(lin)
67 |
68 | setattr(self, "lin" + str(l), lin)
69 |
70 | self.activation = nn.Softplus(beta=100)
71 |
72 | def forward(self, inputs):
73 | inputs = inputs * self.scale
74 | if self.embed_fn_fine is not None:
75 | inputs = self.embed_fn_fine(inputs)
76 |
77 | x = inputs
78 | for l in range(0, self.num_layers - 1):
79 | lin = getattr(self, "lin" + str(l))
80 |
81 | if l in self.skip_in:
82 | x = torch.cat([x, inputs], 1) / np.sqrt(2)
83 |
84 | x = lin(x)
85 |
86 | if l < self.num_layers - 2:
87 | x = self.activation(x)
88 | return torch.cat([x[:, :1] / self.scale, x[:, 1:]], dim=-1)
89 |
90 | def sdf(self, x):
91 | return self.forward(x)[:, :1]
92 |
93 | def sdf_hidden_appearance(self, x):
94 | return self.forward(x)
95 |
96 | def gradient(self, x):
97 | x.requires_grad_(True)
98 | y = self.sdf(x)
99 | d_output = torch.ones_like(y, requires_grad=False, device=y.device)
100 | gradients = torch.autograd.grad(
101 | outputs=y,
102 | inputs=x,
103 | grad_outputs=d_output,
104 | create_graph=True,
105 | retain_graph=True,
106 | only_inputs=True)[0]
107 | return gradients.unsqueeze(1)
108 |
109 |
110 | # This implementation is borrowed from IDR: https://github.com/lioryariv/idr
111 | class RenderingNetwork(nn.Module):
112 | def __init__(self,
113 | d_feature,
114 | mode,
115 | d_in,
116 | d_out,
117 | d_hidden,
118 | n_layers,
119 | weight_norm=True,
120 | multires_view=0,
121 | squeeze_out=True):
122 | super().__init__()
123 |
124 | self.mode = mode
125 | self.squeeze_out = squeeze_out
126 | dims = [d_in + d_feature] + [d_hidden for _ in range(n_layers)] + [d_out]
127 |
128 | self.embedview_fn = None
129 | if multires_view > 0:
130 | embedview_fn, input_ch = get_embedder(multires_view)
131 | self.embedview_fn = embedview_fn
132 | dims[0] += (input_ch - 3)
133 |
134 | self.num_layers = len(dims)
135 |
136 | for l in range(0, self.num_layers - 1):
137 | out_dim = dims[l + 1]
138 | lin = nn.Linear(dims[l], out_dim)
139 |
140 | if weight_norm:
141 | lin = nn.utils.weight_norm(lin)
142 |
143 | setattr(self, "lin" + str(l), lin)
144 |
145 | self.relu = nn.ReLU()
146 |
147 | def forward(self, points, normals, view_dirs, feature_vectors):
148 | if self.embedview_fn is not None:
149 | view_dirs = self.embedview_fn(view_dirs)
150 |
151 | rendering_input = None
152 |
153 | if self.mode == 'idr':
154 | rendering_input = torch.cat([points, view_dirs, normals, feature_vectors], dim=-1)
155 | elif self.mode == 'no_view_dir':
156 | rendering_input = torch.cat([points, normals, feature_vectors], dim=-1)
157 | elif self.mode == 'no_normal':
158 | rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1)
159 |
160 | x = rendering_input
161 |
162 | for l in range(0, self.num_layers - 1):
163 | lin = getattr(self, "lin" + str(l))
164 |
165 | x = lin(x)
166 |
167 | if l < self.num_layers - 2:
168 | x = self.relu(x)
169 |
170 | if self.squeeze_out:
171 | x = torch.sigmoid(x)
172 | return x
173 |
174 |
175 | # This implementation is borrowed from nerf-pytorch: https://github.com/yenchenlin/nerf-pytorch
176 | class NeRF(nn.Module):
177 | def __init__(self,
178 | D=8,
179 | W=256,
180 | d_in=3,
181 | d_in_view=3,
182 | multires=0,
183 | multires_view=0,
184 | output_ch=4,
185 | skips=[4],
186 | use_viewdirs=False):
187 | super(NeRF, self).__init__()
188 | self.D = D
189 | self.W = W
190 | self.d_in = d_in
191 | self.d_in_view = d_in_view
192 | self.input_ch = 3
193 | self.input_ch_view = 3
194 | self.embed_fn = None
195 | self.embed_fn_view = None
196 |
197 | if multires > 0:
198 | embed_fn, input_ch = get_embedder(multires, input_dims=d_in)
199 | self.embed_fn = embed_fn
200 | self.input_ch = input_ch
201 |
202 | if multires_view > 0:
203 | embed_fn_view, input_ch_view = get_embedder(multires_view, input_dims=d_in_view)
204 | self.embed_fn_view = embed_fn_view
205 | self.input_ch_view = input_ch_view
206 |
207 | self.skips = skips
208 | self.use_viewdirs = use_viewdirs
209 |
210 | self.pts_linears = nn.ModuleList(
211 | [nn.Linear(self.input_ch, W)] +
212 | [nn.Linear(W, W) if i not in self.skips else nn.Linear(W + self.input_ch, W) for i in range(D - 1)])
213 |
214 | ### Implementation according to the official code release
215 | ### (https://github.com/bmild/nerf/blob/master/run_nerf_helpers.py#L104-L105)
216 | self.views_linears = nn.ModuleList([nn.Linear(self.input_ch_view + W, W // 2)])
217 |
218 | ### Implementation according to the paper
219 | # self.views_linears = nn.ModuleList(
220 | # [nn.Linear(input_ch_views + W, W//2)] + [nn.Linear(W//2, W//2) for i in range(D//2)])
221 |
222 | if use_viewdirs:
223 | self.feature_linear = nn.Linear(W, W)
224 | self.alpha_linear = nn.Linear(W, 1)
225 | self.rgb_linear = nn.Linear(W // 2, 3)
226 | else:
227 | self.output_linear = nn.Linear(W, output_ch)
228 |
229 | def forward(self, input_pts, input_views):
230 | if self.embed_fn is not None:
231 | input_pts = self.embed_fn(input_pts)
232 | if self.embed_fn_view is not None:
233 | input_views = self.embed_fn_view(input_views)
234 |
235 | h = input_pts
236 | for i, l in enumerate(self.pts_linears):
237 | h = self.pts_linears[i](h)
238 | h = F.relu(h)
239 | if i in self.skips:
240 | h = torch.cat([input_pts, h], -1)
241 |
242 | if self.use_viewdirs:
243 | alpha = self.alpha_linear(h)
244 | feature = self.feature_linear(h)
245 | h = torch.cat([feature, input_views], -1)
246 |
247 | for i, l in enumerate(self.views_linears):
248 | h = self.views_linears[i](h)
249 | h = F.relu(h)
250 |
251 | rgb = self.rgb_linear(h)
252 | return alpha, rgb
253 | else:
254 | assert False
255 |
256 |
257 | class SingleVarianceNetwork(nn.Module):
258 | def __init__(self, init_val):
259 | super(SingleVarianceNetwork, self).__init__()
260 | self.register_parameter('variance', nn.Parameter(torch.tensor(init_val)))
261 |
262 | def forward(self, x):
263 | return torch.ones([len(x), 1]) * torch.exp(self.variance * 10.0)
264 |
--------------------------------------------------------------------------------
/models/fields.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | import numpy as np
5 | from models.embedder import get_embedder
6 | import sys
7 |
8 | # This implementation is borrowed from IDR: https://github.com/lioryariv/idr
9 | class SDFNetwork(nn.Module):
10 | def __init__(self,
11 | d_in,
12 | d_out,
13 | d_hidden,
14 | n_layers,
15 | skip_in=(4,),
16 | multires=0,
17 | bias=0.5,
18 | scale=1,
19 | geometric_init=True,
20 | weight_norm=True,
21 | inside_outside=False):
22 | super(SDFNetwork, self).__init__()
23 |
24 | dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out]
25 |
26 | self.embed_fn_fine = None
27 |
28 | if multires > 0:
29 | embed_fn, input_ch = get_embedder(multires, input_dims=d_in)
30 | self.embed_fn_fine = embed_fn
31 | dims[0] = input_ch
32 |
33 | self.num_layers = len(dims)
34 | self.skip_in = skip_in
35 | self.scale = scale
36 |
37 | for l in range(0, self.num_layers - 1):
38 | if l + 1 in self.skip_in:
39 | out_dim = dims[l + 1] - dims[0]
40 | else:
41 | out_dim = dims[l + 1]
42 |
43 | lin = nn.Linear(dims[l], out_dim)
44 |
45 | if geometric_init:
46 | if l == self.num_layers - 2:
47 | if not inside_outside:
48 | torch.nn.init.normal_(lin.weight, mean= np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)
49 | torch.nn.init.constant_(lin.bias, -bias)
50 | else:
51 | torch.nn.init.normal_(lin.weight, mean= -np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)
52 | torch.nn.init.constant_(lin.bias, bias)
53 | elif multires > 0 and l == 0:
54 | torch.nn.init.constant_(lin.bias, 0.0)
55 | torch.nn.init.constant_(lin.weight[:, 3:], 0.0)
56 | torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim))
57 | elif multires > 0 and l in self.skip_in:
58 | torch.nn.init.constant_(lin.bias, 0.0)
59 | torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))
60 | torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3):], 0.0)
61 | else:
62 | torch.nn.init.constant_(lin.bias, 0.0)
63 | torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))
64 |
65 | if weight_norm:
66 | lin = nn.utils.weight_norm(lin)
67 |
68 | setattr(self, "lin" + str(l), lin)
69 |
70 | self.activation = nn.Softplus(beta=100)
71 |
72 | def forward(self, inputs):
73 | inputs = inputs * self.scale
74 | if self.embed_fn_fine is not None:
75 | inputs = self.embed_fn_fine(inputs)
76 |
77 | x = inputs
78 | for l in range(0, self.num_layers - 1):
79 | lin = getattr(self, "lin" + str(l))
80 |
81 | if l in self.skip_in:
82 | x = torch.cat([x, inputs], 1) / np.sqrt(2)
83 |
84 | x = lin(x)
85 |
86 | if l < self.num_layers - 2:
87 | x = self.activation(x)
88 | return torch.cat([x[:, :1] / self.scale, x[:, 1:]], dim=-1)
89 |
90 | def sdf(self, x):
91 | return self.forward(x)[:, :1]
92 |
93 | def sdf_hidden_appearance(self, x):
94 | return self.forward(x)
95 |
96 | def gradient(self, x):
97 | x.requires_grad_(True)
98 | y = self.sdf(x)
99 | d_output = torch.ones_like(y, requires_grad=False, device=y.device)
100 | gradients = torch.autograd.grad(
101 | outputs=y,
102 | inputs=x,
103 | grad_outputs=d_output,
104 | create_graph=True,
105 | retain_graph=True,
106 | only_inputs=True)[0]
107 | return gradients.unsqueeze(1)
108 |
109 |
110 | # This implementation is borrowed from IDR: https://github.com/lioryariv/idr
111 | class RenderingNetwork(nn.Module):
112 | def __init__(self,
113 | d_feature,
114 | mode,
115 | d_in,
116 | d_out,
117 | d_hidden,
118 | n_layers,
119 | weight_norm=True,
120 | multires_view=0,
121 | squeeze_out=True):
122 | super().__init__()
123 |
124 | self.mode = mode
125 | self.squeeze_out = squeeze_out
126 | dims = [d_in + d_feature] + [d_hidden for _ in range(n_layers)] + [d_out]
127 |
128 | self.embedview_fn = None
129 | if multires_view > 0:
130 | embedview_fn, input_ch = get_embedder(multires_view)
131 | self.embedview_fn = embedview_fn
132 | dims[0] += (input_ch - 3)
133 |
134 | self.num_layers = len(dims)
135 |
136 | for l in range(0, self.num_layers - 1):
137 | out_dim = dims[l + 1]
138 | lin = nn.Linear(dims[l], out_dim)
139 |
140 | if weight_norm:
141 | lin = nn.utils.weight_norm(lin)
142 |
143 | setattr(self, "lin" + str(l), lin)
144 |
145 | self.relu = nn.ReLU()
146 |
147 | def forward(self, points, normals, view_dirs, feature_vectors):
148 | if self.embedview_fn is not None:
149 | view_dirs = self.embedview_fn(view_dirs)
150 |
151 | rendering_input = None
152 |
153 | if self.mode == 'idr':
154 | rendering_input = torch.cat([points, view_dirs, normals, feature_vectors], dim=-1)
155 | elif self.mode == 'no_view_dir':
156 | rendering_input = torch.cat([points, normals, feature_vectors], dim=-1)
157 | elif self.mode == 'no_normal':
158 | rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1)
159 |
160 | x = rendering_input
161 |
162 | for l in range(0, self.num_layers - 1):
163 | lin = getattr(self, "lin" + str(l))
164 |
165 | x = lin(x)
166 |
167 | if l < self.num_layers - 2:
168 | x = self.relu(x)
169 |
170 | if self.squeeze_out:
171 | x = torch.sigmoid(x)
172 | return x
173 |
174 |
175 | # This implementation is borrowed from nerf-pytorch: https://github.com/yenchenlin/nerf-pytorch
176 | class NeRF(nn.Module):
177 | def __init__(self,
178 | D=8,
179 | W=256,
180 | d_in=3,
181 | d_in_view=3,
182 | multires=0,
183 | multires_view=0,
184 | output_ch=4,
185 | skips=[4],
186 | use_viewdirs=False):
187 | super(NeRF, self).__init__()
188 | self.D = D
189 | self.W = W
190 | self.d_in = d_in
191 | self.d_in_view = d_in_view
192 | self.input_ch = 3
193 | self.input_ch_view = 3
194 | self.embed_fn = None
195 | self.embed_fn_view = None
196 |
197 | if multires > 0:
198 | embed_fn, input_ch = get_embedder(multires, input_dims=d_in)
199 | self.embed_fn = embed_fn
200 | self.input_ch = input_ch
201 |
202 | if multires_view > 0:
203 | embed_fn_view, input_ch_view = get_embedder(multires_view, input_dims=d_in_view)
204 | self.embed_fn_view = embed_fn_view
205 | self.input_ch_view = input_ch_view
206 |
207 | self.skips = skips
208 | self.use_viewdirs = use_viewdirs
209 |
210 | self.pts_linears = nn.ModuleList(
211 | [nn.Linear(self.input_ch, W)] +
212 | [nn.Linear(W, W) if i not in self.skips else nn.Linear(W + self.input_ch, W) for i in range(D - 1)])
213 |
214 | ### Implementation according to the official code release
215 | ### (https://github.com/bmild/nerf/blob/master/run_nerf_helpers.py#L104-L105)
216 | self.views_linears = nn.ModuleList([nn.Linear(self.input_ch_view + W, W // 2)])
217 |
218 | ### Implementation according to the paper
219 | # self.views_linears = nn.ModuleList(
220 | # [nn.Linear(input_ch_views + W, W//2)] + [nn.Linear(W//2, W//2) for i in range(D//2)])
221 |
222 | if use_viewdirs:
223 | self.feature_linear = nn.Linear(W, W)
224 | self.alpha_linear = nn.Linear(W, 1)
225 | self.rgb_linear = nn.Linear(W // 2, 3)
226 | else:
227 | self.output_linear = nn.Linear(W, output_ch)
228 |
229 | def forward(self, input_pts, input_views):
230 | if self.embed_fn is not None:
231 | input_pts = self.embed_fn(input_pts)
232 | if self.embed_fn_view is not None:
233 | input_views = self.embed_fn_view(input_views)
234 |
235 | h = input_pts
236 | for i, l in enumerate(self.pts_linears):
237 | h = self.pts_linears[i](h)
238 | h = F.relu(h)
239 | if i in self.skips:
240 | h = torch.cat([input_pts, h], -1)
241 |
242 | if self.use_viewdirs:
243 | alpha = self.alpha_linear(h)
244 | feature = self.feature_linear(h)
245 | h = torch.cat([feature, input_views], -1)
246 |
247 | for i, l in enumerate(self.views_linears):
248 | h = self.views_linears[i](h)
249 | h = F.relu(h)
250 |
251 | rgb = self.rgb_linear(h)
252 | return alpha, rgb
253 | else:
254 | assert False
255 |
256 |
257 | class SingleVarianceNetwork(nn.Module):
258 | def __init__(self, init_val):
259 | super(SingleVarianceNetwork, self).__init__()
260 | self.register_parameter('variance', nn.Parameter(torch.tensor(init_val)))
261 |
262 | def forward(self, x):
263 | return torch.ones([len(x), 1]) * torch.exp(self.variance * 10.0)
264 |
--------------------------------------------------------------------------------
/helpers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | # import matplotlib
3 | import numpy as np
4 |
5 | # matplotlib.use("Agg")
6 | from MLP import *
7 |
8 |
9 | torch.autograd.set_detect_anomaly(True)
10 |
11 |
12 | def update_lr(optimizer, lr_decay):
13 | for param_group in optimizer.param_groups:
14 | if param_group["lr"] > 0.0000001:
15 | param_group["lr"] = param_group["lr"] * lr_decay
16 | learning_rate = param_group["lr"]
17 | print("learning r ate is updated to ", learning_rate)
18 | return 0
19 |
20 |
21 | def save_model(expID, model, i):
22 | # save model
23 | model_name = "./experiments/{}/model/epoch.pt".format(expID)
24 | torch.save(model, model_name)
25 | return 0
26 |
27 |
28 | def render_image(neusis_runner, pose_ind, estimator=None, debug=False):
29 | H = neusis_runner.H
30 | W = neusis_runner.W
31 |
32 | phi_min = neusis_runner.phi_min
33 | phi_max = neusis_runner.phi_max
34 |
35 | tx = torch.linspace(0, W - 1, W)
36 | ty = torch.linspace(0, H - 1, H)
37 | # need to use xy indexing to be consistent with render_image_from_rays
38 | pixels_x, pixels_y = torch.meshgrid(tx, ty, indexing="xy")
39 | px = torch.stack([pixels_y, pixels_x], dim=-1) # W, H, 2
40 | px = px.reshape(-1, 2).long() # int conversion needed
41 |
42 |
43 | c2w = torch.from_numpy(neusis_runner.data["sensor_poses"][pose_ind]).cuda()
44 | r_min = neusis_runner.r_min
45 | r_max = neusis_runner.r_max
46 | n_selected_px = H * W
47 | arc_n_samples = neusis_runner.arc_n_samples
48 | ray_n_samples = neusis_runner.ray_n_samples
49 | hfov = neusis_runner.hfov
50 | r_increments = []
51 | sonar_resolution = (r_max - r_min) / H
52 | # print(sonar_resolution)
53 | for i in range(H):
54 | r_increments.append(i * sonar_resolution + r_min)
55 | r_increments = torch.tensor(r_increments).cuda()
56 | randomize_points = False
57 | device = "cuda:0"
58 | cube_center = neusis_runner.cube_center.cuda()
59 |
60 | dirs, dphi, r, rs, pts_r_rand, dists = get_arcs(
61 | H,
62 | W,
63 | phi_min,
64 | phi_max,
65 | r_min,
66 | r_max,
67 | c2w,
68 | n_selected_px,
69 | arc_n_samples,
70 | ray_n_samples,
71 | hfov,
72 | px,
73 | r_increments,
74 | randomize_points,
75 | device,
76 | cube_center,
77 | estimator=estimator
78 | )
79 | if estimator is not None:
80 | ray_indices = r
81 | final_out = np.zeros((H, W))
82 | # weight_sum_out = np.zeros((H, W))
83 | # sdf_vals = []
84 |
85 | if estimator is None:
86 | # render a row at a time
87 | for i in range(H):
88 | curr_dirs = dirs[W*i*arc_n_samples*ray_n_samples:W*(i+1)*arc_n_samples*ray_n_samples]
89 | curr_pts_r_rand = pts_r_rand[W*i*arc_n_samples*ray_n_samples:W*(i+1)*arc_n_samples*ray_n_samples]
90 | curr_dists = dists[W*i*arc_n_samples:W*(i+1)*arc_n_samples]
91 | out = neusis_runner.renderer.render_sonar(curr_dirs, curr_pts_r_rand, curr_dists, W, arc_n_samples, ray_n_samples, r, neusis_runner.get_cos_anneal_ratio())
92 | curr_pixels = out["color_fine"].reshape(W).detach().cpu().numpy()
93 | # weight_sum_out[i] = out["weight_sum"].reshape(W).detach().cpu().numpy()
94 | # sdf_vals.append(out["alpha"].detach())
95 | del out
96 |
97 | final_out[i] = curr_pixels
98 | else:
99 | out = neusis_runner.renderer.render_sonar_accel(dirs, pts_r_rand, dists, ray_indices, arc_n_samples, neusis_runner.get_cos_anneal_ratio())
100 | final_out = out["color_fine"].reshape(H, W).detach().cpu().numpy()
101 |
102 | if debug:
103 | if estimator is not None:
104 | return final_out, pts_r_rand, ray_indices
105 | else:
106 | return final_out, pts_r_rand
107 | else:
108 | return final_out
109 |
110 | def get_arcs(
111 | H,
112 | W,
113 | phi_min,
114 | phi_max,
115 | r_min,
116 | r_max,
117 | c2w,
118 | n_selected_px,
119 | arc_n_samples,
120 | ray_n_samples,
121 | hfov,
122 | px,
123 | r_increments,
124 | randomize_points,
125 | device,
126 | cube_center,
127 | estimator=None,
128 | ):
129 | i = px[:, 0] # img y coords
130 | j = px[:, 1] # img x coords
131 |
132 | # sample angle phi (elevation)
133 | phi = (
134 | torch.linspace(phi_min, phi_max, arc_n_samples)
135 | .float()
136 | .repeat(n_selected_px)
137 | .reshape(n_selected_px, -1)
138 | )
139 |
140 | dphi = (phi_max - phi_min) / arc_n_samples
141 | rnd = -dphi + torch.rand(n_selected_px, arc_n_samples) * 2 * dphi
142 |
143 | sonar_resolution = (r_max - r_min) / H
144 | if randomize_points:
145 | phi = torch.clip(phi + rnd, min=phi_min, max=phi_max)
146 |
147 | # compute radius at each pixel
148 | r = i * sonar_resolution + r_min
149 | # compute bearing angle at each pixel (azimuth)
150 | theta = -hfov / 2 + j * hfov / W
151 |
152 | # Need to calculate coords to figure out the ray direction
153 | # the following operations mimick the cartesian product between the two lists [r, theta] and phi
154 | # coords is of size: n_selected_px x arc_n_samples x 3
155 | coords = torch.stack(
156 | (
157 | r.repeat_interleave(arc_n_samples).reshape(n_selected_px, -1),
158 | theta.repeat_interleave(arc_n_samples).reshape(n_selected_px, -1),
159 | phi,
160 | ),
161 | dim=-1,
162 | )
163 | coords = coords.reshape(-1, 3)
164 | # Transform to cartesian to apply pose transformation and get the direction
165 | # transformation as described in https://www.ri.cmu.edu/pub_files/2016/5/thuang_mastersthesis.pdf
166 | X = coords[:, 0] * torch.cos(coords[:, 1]) * torch.cos(coords[:, 2])
167 | Y = coords[:, 0] * torch.sin(coords[:, 1]) * torch.cos(coords[:, 2])
168 | Z = coords[:, 0] * torch.sin(coords[:, 2])
169 |
170 | dirs = torch.stack((X, Y, Z, torch.ones_like(X))).T
171 | dirs = torch.matmul(c2w, dirs.T).T
172 | origin = torch.matmul(c2w, torch.tensor([0.0, 0.0, 0.0, 1.0])).unsqueeze(dim=0)
173 | dirs = dirs - origin
174 | dirs = dirs[:, 0:3]
175 | dirs = torch.nn.functional.normalize(dirs, dim=1)
176 |
177 | if estimator is None:
178 | dirs = dirs.repeat_interleave(ray_n_samples, 0)
179 |
180 | holder = torch.empty(
181 | n_selected_px, arc_n_samples * ray_n_samples, dtype=torch.long
182 | ).to(device)
183 | bitmask = torch.zeros(ray_n_samples, dtype=torch.bool) # where end points of rays are
184 | bitmask[ray_n_samples - 1] = True
185 | bitmask = bitmask.repeat(arc_n_samples)
186 |
187 | # I think this for loop is slow in particular
188 | for n_px in range(n_selected_px):
189 | holder[n_px, :] = torch.randint(
190 | 0, i[n_px] + 1, (arc_n_samples * ray_n_samples,) # already excludes right endpoint (bug?)
191 | )
192 | holder[n_px, bitmask] = i[n_px]
193 |
194 | holder = holder.reshape(n_selected_px, arc_n_samples, ray_n_samples)
195 |
196 | holder, _ = torch.sort(holder, dim=-1)
197 |
198 | holder = holder.reshape(-1)
199 |
200 | r_samples = torch.index_select(r_increments, 0, holder).reshape(
201 | n_selected_px, arc_n_samples, ray_n_samples
202 | )
203 |
204 | rnd = torch.rand((n_selected_px, arc_n_samples, ray_n_samples)) * sonar_resolution
205 |
206 | if randomize_points:
207 | r_samples = r_samples + rnd
208 |
209 | rs = r_samples[:, :, -1]
210 | r_samples = r_samples.reshape(n_selected_px * arc_n_samples, ray_n_samples)
211 |
212 | theta_samples = (
213 | coords[:, 1].repeat_interleave(ray_n_samples).reshape(-1, ray_n_samples)
214 | )
215 | phi_samples = (
216 | coords[:, 2].repeat_interleave(ray_n_samples).reshape(-1, ray_n_samples)
217 | )
218 |
219 | # Note: r_samples is of size n_selected_px*arc_n_samples x ray_n_samples
220 | # so each row of r_samples contain r values for points picked from the same ray (should have the same theta and phi values)
221 | # theta_samples is also of size n_selected_px*arc_n_samples x ray_n_samples
222 | # since all arc_n_samples x ray_n_samples have the same value of theta, then the first n_selected_px rows have all the same value
223 | # Finally phi_samples is also of size n_selected_px*arc_n_samples x ray_n_samples
224 | # but not each ray has a different phi value
225 |
226 | # pts contain all points and is of size n_selected_px*arc_n_samples*ray_n_samples, 3
227 | # the first ray_n_samples rows correspond to points along the same ray
228 | # the first ray_n_samples*arc_n_samples row correspond to points along rays along the same arc
229 | pts = torch.stack((r_samples, theta_samples, phi_samples), dim=-1).reshape(-1, 3)
230 |
231 | dists = torch.diff(r_samples, dim=1)
232 | dists = torch.cat(
233 | [dists, torch.Tensor([sonar_resolution]).expand(dists[..., :1].shape)], -1
234 | )
235 |
236 | # r_samples_mid = r_samples + dists/2
237 |
238 | X_r_rand = pts[:, 0] * torch.cos(pts[:, 1]) * torch.cos(pts[:, 2])
239 | Y_r_rand = pts[:, 0] * torch.sin(pts[:, 1]) * torch.cos(pts[:, 2])
240 | Z_r_rand = pts[:, 0] * torch.sin(pts[:, 2])
241 | pts_r_rand = torch.stack((X_r_rand, Y_r_rand, Z_r_rand, torch.ones_like(X_r_rand)))
242 |
243 | pts_r_rand = torch.matmul(c2w, pts_r_rand)
244 |
245 | pts_r_rand = torch.stack((pts_r_rand[0, :], pts_r_rand[1, :], pts_r_rand[2, :]))
246 |
247 | # Centering step
248 | pts_r_rand = pts_r_rand.T - cube_center
249 | return dirs, dphi, None, rs, pts_r_rand, dists
250 | else:
251 | rays_o = origin[:, :3].expand(dirs.shape)
252 | render_step_size = 0.05 # TODO: make this less hacky
253 | t_max = r.repeat_interleave(arc_n_samples)
254 | ray_indices, t_starts, t_ends = estimator.sampling(rays_o, dirs, render_step_size=render_step_size, near_plane=r_min, t_max=t_max)
255 | pts_r_rand = rays_o[ray_indices] + t_starts[:, None] * dirs[ray_indices]
256 | dists = t_ends - t_starts
257 |
258 | # handle endpoints in a hacky way
259 | dists_ep = torch.tensor(sonar_resolution).expand(t_max.shape)
260 | pts_ep = rays_o + t_max[:, None] * dirs
261 | pts_r_rand = torch.cat([pts_r_rand, pts_ep], dim=0)
262 | dists = torch.cat([dists, dists_ep], dim=0)
263 | # print(pts_r_rand.shape, ray_indices.shape)
264 |
265 | return dirs, dphi, ray_indices, None, pts_r_rand, dists
266 |
267 |
268 | def select_coordinates(coords_all, target, N_rand, select_valid_px):
269 | if select_valid_px:
270 | coords = torch.nonzero(target)
271 | else:
272 | select_inds = torch.randperm(coords_all.shape[0])[:N_rand]
273 | coords = coords_all[select_inds]
274 | return coords
275 |
--------------------------------------------------------------------------------
/models/renderer.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | import numpy as np
5 | import logging
6 | import mcubes
7 | import sys, os
8 | import pickle
9 | # import matplotlib.pyplot as plt
10 | import time
11 | # from nerfacc import inclusive_prod, pack_info
12 |
13 | def extract_fields(bound_min, bound_max, resolution, query_func, return_coords=False):
14 | N = 64
15 | X_coords = torch.linspace(bound_min[0], bound_max[0], resolution)
16 | Y_coords = torch.linspace(bound_min[1], bound_max[1], resolution)
17 | Z_coords = torch.linspace(bound_min[2], bound_max[2], resolution)
18 | X = X_coords.split(N)
19 | Y = Y_coords.split(N)
20 | Z = Z_coords.split(N)
21 |
22 | u = np.zeros([resolution, resolution, resolution], dtype=np.float32)
23 | with torch.no_grad():
24 | for xi, xs in enumerate(X):
25 | for yi, ys in enumerate(Y):
26 | for zi, zs in enumerate(Z):
27 | xx, yy, zz = torch.meshgrid(xs, ys, zs)
28 | pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1)
29 | val = query_func(pts).reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy()
30 | u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val
31 |
32 | if return_coords:
33 | return u, X_coords, Y_coords, Z_coords
34 | else:
35 | return u
36 |
37 |
38 | def extract_geometry(bound_min, bound_max, resolution, threshold, query_func):
39 | u = extract_fields(bound_min, bound_max, resolution, query_func)
40 | vertices, triangles = mcubes.marching_cubes(u, threshold)
41 |
42 | b_max_np = bound_max.detach().cpu().numpy()
43 | b_min_np = bound_min.detach().cpu().numpy()
44 |
45 | vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :]
46 |
47 | return vertices, triangles
48 |
49 | class NeuSRenderer:
50 | def __init__(self,
51 | sdf_network,
52 | deviation_network,
53 | color_network,
54 | base_exp_dir,
55 | expID,
56 | n_samples,
57 | n_importance,
58 | n_outside,
59 | up_sample_steps,
60 | perturb):
61 | self.sdf_network = sdf_network
62 | self.deviation_network = deviation_network
63 | self.color_network = color_network
64 | self.n_samples = n_samples
65 | self.n_importance = n_importance
66 | self.n_outside = n_outside
67 | self.up_sample_steps = up_sample_steps
68 | self.perturb = perturb
69 | self.base_exp_dir = base_exp_dir
70 | self.expID = expID
71 |
72 | def render_sonar_accel(self, dirs, pts, dists, ray_indices, arc_n_samples, cos_anneal_ratio=0.0):
73 | num_samples = len(ray_indices)
74 | num_ep = len(pts) - num_samples
75 | dirs_all = torch.cat([dirs[ray_indices], dirs], dim=0)
76 | pts_mid = pts + dirs_all * dists.reshape(-1, 1)/2
77 | sdf_network = self.sdf_network
78 | deviation_network = self.deviation_network
79 | color_network = self.color_network
80 |
81 | sdf_nn_output = sdf_network(pts_mid)
82 | sdf = sdf_nn_output[:, :1]
83 | feature_vector = sdf_nn_output[:, 1:]
84 | gradients = sdf_network.gradient(pts_mid).squeeze()
85 |
86 | # only evaluate at endpoints
87 | sampled_color = color_network(pts_mid[num_samples:], gradients[num_samples:], dirs, feature_vector[num_samples:])
88 | inv_s = deviation_network(torch.zeros([1, 3]))[:, :1].clip(1e-6, 1e6)
89 | inv_s = inv_s.expand(sdf.shape)
90 |
91 | true_cos = (dirs_all * gradients).sum(-1, keepdim=True)
92 | iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
93 | F.relu(-true_cos) * cos_anneal_ratio) # always non-positive
94 |
95 | estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5
96 | estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5
97 |
98 | prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)
99 | next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)
100 |
101 | p = prev_cdf - next_cdf
102 | c = prev_cdf
103 |
104 | alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0)
105 |
106 | transmittance = inclusive_prod(1-alpha[:num_samples, 0], indices=ray_indices)
107 | # print(len(ray_indices))
108 | packed_info = pack_info(ray_indices=ray_indices)
109 | packed_info = packed_info[packed_info[:, 1] > 0]
110 | transmittance_inds = packed_info[:, 0] + packed_info[:, 1] - 1
111 | transmittance_ray_inds = ray_indices[transmittance_inds]
112 | transmittance_ep = torch.ones((num_ep,))
113 | transmittance_ep[transmittance_ray_inds] = transmittance[transmittance_inds]
114 | alpha_ep = alpha[num_samples:]
115 | weights = alpha_ep * transmittance_ep[:, None]
116 | intensity = weights * sampled_color
117 | intensity = intensity.reshape(-1, arc_n_samples).sum(dim=1)
118 | weight_sum = weights.reshape(-1, arc_n_samples).sum(dim=1)
119 |
120 | gradient_error = (torch.linalg.norm(gradients, ord=2,
121 | dim=-1) - 1.0) ** 2
122 |
123 | return {
124 | "color_fine": intensity,
125 | "weight_sum": weight_sum,
126 | "gradient_error": gradient_error,
127 | }
128 |
129 |
130 |
131 | def render_core_sonar(self,
132 | dirs,
133 | pts,
134 | dists,
135 | sdf_network,
136 | deviation_network,
137 | color_network,
138 | n_pixels,
139 | arc_n_samples,
140 | ray_n_samples,
141 | cos_anneal_ratio=0.0,
142 | render_mode=False):
143 |
144 | pts_mid = pts + dirs * dists.reshape(-1, 1)/2
145 |
146 | if render_mode:
147 | with torch.no_grad():
148 | sdf_nn_output = sdf_network(pts_mid)
149 | else:
150 | sdf_nn_output = sdf_network(pts_mid)
151 | sdf = sdf_nn_output[:, :1]
152 |
153 | feature_vector = sdf_nn_output[:, 1:]
154 |
155 | gradients = sdf_network.gradient(pts_mid).squeeze()
156 |
157 |
158 | # optimize memory consumption of below?
159 | # print(pts_mid.shape)
160 | if render_mode:
161 | with torch.no_grad():
162 | sampled_color = color_network(pts_mid, gradients, dirs, feature_vector).reshape(n_pixels, arc_n_samples, ray_n_samples)
163 |
164 | inv_s = deviation_network(torch.zeros([1, 3]))[:, :1].clip(1e-6, 1e6)
165 | else:
166 | sampled_color = color_network(pts_mid, gradients, dirs, feature_vector).reshape(n_pixels, arc_n_samples, ray_n_samples)
167 |
168 | inv_s = deviation_network(torch.zeros([1, 3]))[:, :1].clip(1e-6, 1e6)
169 |
170 | inv_s = inv_s.expand(n_pixels*arc_n_samples*ray_n_samples, 1)
171 | true_cos = (dirs * gradients).sum(-1, keepdim=True)
172 |
173 | # "cos_anneal_ratio" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes
174 | # the cos value "not dead" at the beginning training iterations, for better convergence.
175 | iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
176 | F.relu(-true_cos) * cos_anneal_ratio) # always non-positive
177 |
178 | # Estimate signed distances at section points
179 | # why calculate the next/prev sdfs like this? to enforce "consistency"?
180 | # this happens to be the opposite of what is done during original neus upscaling (approx cos from sdf differences)
181 | estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5
182 | estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5
183 |
184 | prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)
185 | next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)
186 |
187 | p = prev_cdf - next_cdf
188 | c = prev_cdf
189 |
190 | alpha = ((p + 1e-5) / (c + 1e-5)).reshape(n_pixels, arc_n_samples, ray_n_samples).clip(0.0, 1.0)
191 |
192 | cumuProdAllPointsOnEachRay = torch.cat([torch.ones([n_pixels, arc_n_samples, 1]), 1. - alpha + 1e-7], -1)
193 |
194 | cumuProdAllPointsOnEachRay = torch.cumprod(cumuProdAllPointsOnEachRay, -1)
195 |
196 | TransmittancePointsOnArc = cumuProdAllPointsOnEachRay[:, :, ray_n_samples-2]
197 |
198 | alphaPointsOnArc = alpha[:, :, ray_n_samples-1]
199 |
200 | weights = alphaPointsOnArc * TransmittancePointsOnArc
201 |
202 | intensityPointsOnArc = sampled_color[:, :, ray_n_samples-1]
203 |
204 | summedIntensities = (intensityPointsOnArc*weights).sum(dim=1)
205 |
206 | # Eikonal loss
207 | gradients = gradients.reshape(n_pixels, arc_n_samples, ray_n_samples, 3)
208 |
209 | gradient_error = (torch.linalg.norm(gradients, ord=2,
210 | dim=-1) - 1.0) ** 2
211 |
212 | variation_error = torch.linalg.norm(alpha, ord=1, dim=-1).sum()
213 |
214 | return {
215 | 'color': summedIntensities,
216 | 'intensityPointsOnArc': intensityPointsOnArc,
217 | 'sdf': sdf,
218 | 'prev_sdf': estimated_prev_sdf,
219 | 'next_sdf': estimated_next_sdf,
220 | 'alpha': alpha,
221 | 'dists': dists,
222 | 'gradients': gradients,
223 | 's_val': 1.0 / inv_s,
224 | 'weights': weights,
225 | 'cdf': c.reshape(n_pixels, arc_n_samples, ray_n_samples),
226 | 'gradient_error': gradient_error,
227 | 'variation_error': variation_error
228 | }
229 |
230 | def render_sonar(self, rays_d, pts, dists, n_pixels,
231 | arc_n_samples, ray_n_samples, ray_indices, cos_anneal_ratio=0.0,
232 | render_mode=False):
233 | # Render core
234 |
235 | if ray_indices is None:
236 | ret_fine = self.render_core_sonar(rays_d,
237 | pts,
238 | dists,
239 | self.sdf_network,
240 | self.deviation_network,
241 | self.color_network,
242 | n_pixels,
243 | arc_n_samples,
244 | ray_n_samples,
245 | cos_anneal_ratio=cos_anneal_ratio,
246 | render_mode=render_mode)
247 |
248 | color_fine = ret_fine['color']
249 | weights = ret_fine['weights']
250 | weights_sum = weights.sum(dim=-1, keepdim=True)
251 | gradients = ret_fine['gradients']
252 | #s_val = ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdim=True)
253 |
254 | return {
255 | 'color_fine': color_fine,
256 | 'weight_sum': weights_sum,
257 | 'weight_max': torch.max(weights, dim=-1, keepdim=True)[0],
258 | 'gradients': gradients,
259 | 'weights': weights,
260 | 'intensityPointsOnArc': ret_fine["intensityPointsOnArc"],
261 | 'gradient_error': ret_fine['gradient_error'],
262 | 'variation_error': ret_fine['variation_error'],
263 | "sdf": ret_fine["sdf"],
264 | "prev_sdf": ret_fine["prev_sdf"],
265 | "next_sdf": ret_fine["next_sdf"],
266 | "alpha": ret_fine["alpha"],
267 | }
268 | else:
269 | ret_fine = self.render_sonar_accel(rays_d, pts, dists, ray_indices, arc_n_samples)
270 | # return {
271 | # "color_fine": ret_fine["color"],
272 | # "weight_sum": ret_fine["weight_sum"],
273 | # }
274 | return ret_fine
275 |
276 |
277 |
278 | def extract_geometry(self, bound_min, bound_max, resolution, threshold=0.0):
279 | return extract_geometry(bound_min,
280 | bound_max,
281 | resolution=resolution,
282 | threshold=threshold,
283 | query_func=lambda pts: -self.sdf_network.sdf(pts))
284 |
--------------------------------------------------------------------------------
/NeuS/models/renderer.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | import numpy as np
5 | import logging
6 | import mcubes
7 | from icecream import ic
8 |
9 |
10 | def extract_fields(bound_min, bound_max, resolution, query_func):
11 | N = 64
12 | X = torch.linspace(bound_min[0], bound_max[0], resolution).split(N)
13 | Y = torch.linspace(bound_min[1], bound_max[1], resolution).split(N)
14 | Z = torch.linspace(bound_min[2], bound_max[2], resolution).split(N)
15 |
16 | u = np.zeros([resolution, resolution, resolution], dtype=np.float32)
17 | with torch.no_grad():
18 | for xi, xs in enumerate(X):
19 | for yi, ys in enumerate(Y):
20 | for zi, zs in enumerate(Z):
21 | xx, yy, zz = torch.meshgrid(xs, ys, zs)
22 | pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1)
23 | val = query_func(pts).reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy()
24 | u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val
25 | return u
26 |
27 |
28 | def extract_geometry(bound_min, bound_max, resolution, threshold, query_func):
29 | print('threshold: {}'.format(threshold))
30 | u = extract_fields(bound_min, bound_max, resolution, query_func)
31 | vertices, triangles = mcubes.marching_cubes(u, threshold)
32 | b_max_np = bound_max.detach().cpu().numpy()
33 | b_min_np = bound_min.detach().cpu().numpy()
34 |
35 | vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :]
36 | return vertices, triangles
37 |
38 |
39 | def sample_pdf(bins, weights, n_samples, det=False):
40 | # This implementation is from NeRF
41 | # Get pdf
42 | weights = weights + 1e-5 # prevent nans
43 | pdf = weights / torch.sum(weights, -1, keepdim=True)
44 | cdf = torch.cumsum(pdf, -1)
45 | cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1)
46 | # Take uniform samples
47 | if det:
48 | u = torch.linspace(0. + 0.5 / n_samples, 1. - 0.5 / n_samples, steps=n_samples)
49 | u = u.expand(list(cdf.shape[:-1]) + [n_samples])
50 | else:
51 | u = torch.rand(list(cdf.shape[:-1]) + [n_samples])
52 |
53 | # Invert CDF
54 | u = u.contiguous()
55 | inds = torch.searchsorted(cdf, u, right=True)
56 | below = torch.max(torch.zeros_like(inds - 1), inds - 1)
57 | above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds)
58 | inds_g = torch.stack([below, above], -1) # (batch, N_samples, 2)
59 |
60 | matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]]
61 | cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g)
62 | bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g)
63 |
64 | denom = (cdf_g[..., 1] - cdf_g[..., 0])
65 | denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom)
66 | t = (u - cdf_g[..., 0]) / denom
67 | samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
68 |
69 | return samples
70 |
71 |
72 | class NeuSRenderer:
73 | def __init__(self,
74 | nerf,
75 | sdf_network,
76 | deviation_network,
77 | color_network,
78 | n_samples,
79 | n_importance,
80 | n_outside,
81 | up_sample_steps,
82 | perturb):
83 | self.nerf = nerf
84 | self.sdf_network = sdf_network
85 | self.deviation_network = deviation_network
86 | self.color_network = color_network
87 | self.n_samples = n_samples
88 | self.n_importance = n_importance
89 | self.n_outside = n_outside
90 | self.up_sample_steps = up_sample_steps
91 | self.perturb = perturb
92 |
93 | def render_core_outside(self, rays_o, rays_d, z_vals, sample_dist, nerf, background_rgb=None):
94 | """
95 | Render background
96 | """
97 | batch_size, n_samples = z_vals.shape
98 |
99 | # Section length
100 | dists = z_vals[..., 1:] - z_vals[..., :-1]
101 | dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1)
102 | mid_z_vals = z_vals + dists * 0.5
103 |
104 | # Section midpoints
105 | pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # batch_size, n_samples, 3
106 |
107 | dis_to_center = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).clip(1.0, 1e10)
108 | pts = torch.cat([pts / dis_to_center, 1.0 / dis_to_center], dim=-1) # batch_size, n_samples, 4
109 |
110 | dirs = rays_d[:, None, :].expand(batch_size, n_samples, 3)
111 |
112 | pts = pts.reshape(-1, 3 + int(self.n_outside > 0))
113 | dirs = dirs.reshape(-1, 3)
114 |
115 | density, sampled_color = nerf(pts, dirs)
116 | sampled_color = torch.sigmoid(sampled_color)
117 | alpha = 1.0 - torch.exp(-F.softplus(density.reshape(batch_size, n_samples)) * dists)
118 | alpha = alpha.reshape(batch_size, n_samples)
119 | weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1]
120 | sampled_color = sampled_color.reshape(batch_size, n_samples, 3)
121 | color = (weights[:, :, None] * sampled_color).sum(dim=1)
122 | if background_rgb is not None:
123 | color = color + background_rgb * (1.0 - weights.sum(dim=-1, keepdim=True))
124 |
125 | return {
126 | 'color': color,
127 | 'sampled_color': sampled_color,
128 | 'alpha': alpha,
129 | 'weights': weights,
130 | }
131 |
132 | def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s):
133 | """
134 | Up sampling give a fixed inv_s
135 | """
136 | batch_size, n_samples = z_vals.shape
137 | pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] # n_rays, n_samples, 3
138 | radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False)
139 | inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0)
140 | sdf = sdf.reshape(batch_size, n_samples)
141 | prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:]
142 | prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:]
143 | mid_sdf = (prev_sdf + next_sdf) * 0.5
144 | cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)
145 |
146 | # ----------------------------------------------------------------------------------------------------------
147 | # Use min value of [ cos, prev_cos ]
148 | # Though it makes the sampling (not rendering) a little bit biased, this strategy can make the sampling more
149 | # robust when meeting situations like below:
150 | #
151 | # SDF
152 | # ^
153 | # |\ -----x----...
154 | # | \ /
155 | # | x x
156 | # |---\----/-------------> 0 level
157 | # | \ /
158 | # | \/
159 | # |
160 | # ----------------------------------------------------------------------------------------------------------
161 | prev_cos_val = torch.cat([torch.zeros([batch_size, 1]), cos_val[:, :-1]], dim=-1)
162 | cos_val = torch.stack([prev_cos_val, cos_val], dim=-1)
163 | cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False)
164 | cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere
165 |
166 | dist = (next_z_vals - prev_z_vals)
167 | prev_esti_sdf = mid_sdf - cos_val * dist * 0.5
168 | next_esti_sdf = mid_sdf + cos_val * dist * 0.5
169 | prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s)
170 | next_cdf = torch.sigmoid(next_esti_sdf * inv_s)
171 | alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)
172 | weights = alpha * torch.cumprod(
173 | torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1]
174 |
175 | z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach()
176 | return z_samples
177 |
178 | def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, last=False):
179 | batch_size, n_samples = z_vals.shape
180 | _, n_importance = new_z_vals.shape
181 | pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None]
182 | z_vals = torch.cat([z_vals, new_z_vals], dim=-1)
183 | z_vals, index = torch.sort(z_vals, dim=-1)
184 |
185 | if not last:
186 | new_sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance)
187 | sdf = torch.cat([sdf, new_sdf], dim=-1)
188 | xx = torch.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1)
189 | index = index.reshape(-1)
190 | sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance)
191 |
192 | return z_vals, sdf
193 |
194 | def render_core(self,
195 | rays_o,
196 | rays_d,
197 | z_vals,
198 | sample_dist,
199 | sdf_network,
200 | deviation_network,
201 | color_network,
202 | background_alpha=None,
203 | background_sampled_color=None,
204 | background_rgb=None,
205 | cos_anneal_ratio=0.0):
206 | batch_size, n_samples = z_vals.shape
207 |
208 | # Section length
209 | dists = z_vals[..., 1:] - z_vals[..., :-1]
210 | dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1)
211 | mid_z_vals = z_vals + dists * 0.5
212 |
213 | # Section midpoints
214 | pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # n_rays, n_samples, 3
215 | dirs = rays_d[:, None, :].expand(pts.shape)
216 |
217 | pts = pts.reshape(-1, 3)
218 | dirs = dirs.reshape(-1, 3)
219 |
220 | sdf_nn_output = sdf_network(pts)
221 | sdf = sdf_nn_output[:, :1]
222 | feature_vector = sdf_nn_output[:, 1:]
223 |
224 | gradients = sdf_network.gradient(pts).squeeze()
225 | sampled_color = color_network(pts, gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3)
226 |
227 | inv_s = deviation_network(torch.zeros([1, 3]))[:, :1].clip(1e-6, 1e6) # Single parameter
228 | inv_s = inv_s.expand(batch_size * n_samples, 1)
229 |
230 | true_cos = (dirs * gradients).sum(-1, keepdim=True)
231 |
232 | # "cos_anneal_ratio" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes
233 | # the cos value "not dead" at the beginning training iterations, for better convergence.
234 | iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
235 | F.relu(-true_cos) * cos_anneal_ratio) # always non-positive
236 |
237 | # Estimate signed distances at section points
238 | estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5
239 | estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5
240 |
241 | prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)
242 | next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)
243 |
244 | p = prev_cdf - next_cdf
245 | c = prev_cdf
246 |
247 | alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).clip(0.0, 1.0)
248 |
249 | pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).reshape(batch_size, n_samples)
250 | inside_sphere = (pts_norm < 1.0).float().detach()
251 | relax_inside_sphere = (pts_norm < 1.2).float().detach()
252 |
253 | # Render with background
254 | if background_alpha is not None:
255 | alpha = alpha * inside_sphere + background_alpha[:, :n_samples] * (1.0 - inside_sphere)
256 | alpha = torch.cat([alpha, background_alpha[:, n_samples:]], dim=-1)
257 | sampled_color = sampled_color * inside_sphere[:, :, None] +\
258 | background_sampled_color[:, :n_samples] * (1.0 - inside_sphere)[:, :, None]
259 | sampled_color = torch.cat([sampled_color, background_sampled_color[:, n_samples:]], dim=1)
260 |
261 | weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1]
262 | weights_sum = weights.sum(dim=-1, keepdim=True)
263 |
264 | color = (sampled_color * weights[:, :, None]).sum(dim=1)
265 | if background_rgb is not None: # Fixed background, usually black
266 | color = color + background_rgb * (1.0 - weights_sum)
267 |
268 | # Eikonal loss
269 | gradient_error = (torch.linalg.norm(gradients.reshape(batch_size, n_samples, 3), ord=2,
270 | dim=-1) - 1.0) ** 2
271 | gradient_error = (relax_inside_sphere * gradient_error).sum() / (relax_inside_sphere.sum() + 1e-5)
272 |
273 | return {
274 | 'color': color,
275 | 'sdf': sdf,
276 | 'dists': dists,
277 | 'gradients': gradients.reshape(batch_size, n_samples, 3),
278 | 's_val': 1.0 / inv_s,
279 | 'mid_z_vals': mid_z_vals,
280 | 'weights': weights,
281 | 'cdf': c.reshape(batch_size, n_samples),
282 | 'gradient_error': gradient_error,
283 | 'inside_sphere': inside_sphere,
284 | 'alpha': alpha,
285 | }
286 |
287 | def render(self, rays_o, rays_d, near, far, perturb_overwrite=-1, background_rgb=None, cos_anneal_ratio=0.0):
288 | batch_size = len(rays_o)
289 | sample_dist = 2.0 / self.n_samples # Assuming the region of interest is a unit sphere
290 | z_vals = torch.linspace(0.0, 1.0, self.n_samples)
291 | z_vals = near + (far - near) * z_vals[None, :]
292 |
293 | z_vals_outside = None
294 | if self.n_outside > 0:
295 | z_vals_outside = torch.linspace(1e-3, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside)
296 |
297 | n_samples = self.n_samples
298 | perturb = self.perturb
299 |
300 | if perturb_overwrite >= 0:
301 | perturb = perturb_overwrite
302 | if perturb > 0:
303 | t_rand = (torch.rand([batch_size, 1]) - 0.5)
304 | z_vals = z_vals + t_rand * 2.0 / self.n_samples
305 |
306 | if self.n_outside > 0:
307 | mids = .5 * (z_vals_outside[..., 1:] + z_vals_outside[..., :-1])
308 | upper = torch.cat([mids, z_vals_outside[..., -1:]], -1)
309 | lower = torch.cat([z_vals_outside[..., :1], mids], -1)
310 | t_rand = torch.rand([batch_size, z_vals_outside.shape[-1]])
311 | z_vals_outside = lower[None, :] + (upper - lower)[None, :] * t_rand
312 |
313 | if self.n_outside > 0:
314 | z_vals_outside = far / torch.flip(z_vals_outside, dims=[-1]) + 1.0 / self.n_samples
315 |
316 | background_alpha = None
317 | background_sampled_color = None
318 |
319 | # Up sample
320 | if self.n_importance > 0:
321 | with torch.no_grad():
322 | pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None]
323 | sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, self.n_samples)
324 |
325 | for i in range(self.up_sample_steps):
326 | new_z_vals = self.up_sample(rays_o,
327 | rays_d,
328 | z_vals,
329 | sdf,
330 | self.n_importance // self.up_sample_steps,
331 | 64 * 2**i)
332 | z_vals, sdf = self.cat_z_vals(rays_o,
333 | rays_d,
334 | z_vals,
335 | new_z_vals,
336 | sdf,
337 | last=(i + 1 == self.up_sample_steps))
338 |
339 | n_samples = self.n_samples + self.n_importance
340 |
341 | # Background model
342 | if self.n_outside > 0:
343 | z_vals_feed = torch.cat([z_vals, z_vals_outside], dim=-1)
344 | z_vals_feed, _ = torch.sort(z_vals_feed, dim=-1)
345 | ret_outside = self.render_core_outside(rays_o, rays_d, z_vals_feed, sample_dist, self.nerf)
346 |
347 | background_sampled_color = ret_outside['sampled_color']
348 | background_alpha = ret_outside['alpha']
349 |
350 | # Render core
351 | ret_fine = self.render_core(rays_o,
352 | rays_d,
353 | z_vals,
354 | sample_dist,
355 | self.sdf_network,
356 | self.deviation_network,
357 | self.color_network,
358 | background_rgb=background_rgb,
359 | background_alpha=background_alpha,
360 | background_sampled_color=background_sampled_color,
361 | cos_anneal_ratio=cos_anneal_ratio)
362 |
363 | color_fine = ret_fine['color']
364 | weights = ret_fine['weights']
365 | weights_sum = weights.sum(dim=-1, keepdim=True)
366 | gradients = ret_fine['gradients']
367 | s_val = ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdim=True)
368 |
369 | return {
370 | 'color_fine': color_fine,
371 | 's_val': s_val,
372 | 'cdf_fine': ret_fine['cdf'],
373 | 'weight_sum': weights_sum,
374 | 'weight_max': torch.max(weights, dim=-1, keepdim=True)[0],
375 | 'gradients': gradients,
376 | 'weights': weights,
377 | 'gradient_error': ret_fine['gradient_error'],
378 | 'inside_sphere': ret_fine['inside_sphere'],
379 | "alpha": ret_fine["alpha"]
380 | }
381 |
382 | def extract_geometry(self, bound_min, bound_max, resolution, threshold=0.0):
383 | return extract_geometry(bound_min,
384 | bound_max,
385 | resolution=resolution,
386 | threshold=threshold,
387 | query_func=lambda pts: -self.sdf_network.sdf(pts))
388 |
--------------------------------------------------------------------------------
/NeuS/exp_runner.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import logging
4 | import argparse
5 | import numpy as np
6 | import cv2 as cv
7 | import trimesh
8 | import torch
9 | import torch.nn.functional as F
10 | # from torch.utils.tensorboard import SummaryWriter
11 | from shutil import copyfile
12 | from icecream import ic
13 | from tqdm import tqdm
14 | from pyhocon import ConfigFactory
15 | from .models.dataset import Dataset
16 | from .models.fields import RenderingNetwork, SDFNetwork, SingleVarianceNetwork, NeRF
17 | from .models.renderer import NeuSRenderer
18 |
19 |
20 | class Runner:
21 | def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False, init_opt=True, sdf_network=None, random_seed=0):
22 | self.device = torch.device('cuda')
23 | self.init_opt = init_opt
24 | # Configuration
25 | self.conf_path = conf_path
26 | f = open(self.conf_path)
27 | conf_text = f.read()
28 | conf_text = conf_text.replace('CASE_NAME', case)
29 | f.close()
30 |
31 | self.conf = ConfigFactory.parse_string(conf_text)
32 | self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case)
33 | self.base_exp_dir = f"{self.conf['general.base_exp_dir']}/{random_seed}"
34 | os.makedirs(self.base_exp_dir, exist_ok=True)
35 | self.dataset = Dataset(self.conf['dataset'])
36 | self.iter_step = 0
37 |
38 | # Training parameters
39 | self.end_iter = self.conf.get_int('train.end_iter')
40 | self.save_freq = self.conf.get_int('train.save_freq')
41 | self.report_freq = self.conf.get_int('train.report_freq')
42 | self.val_freq = self.conf.get_int('train.val_freq')
43 | self.val_mesh_freq = self.conf.get_int('train.val_mesh_freq')
44 | self.batch_size = self.conf.get_int('train.batch_size')
45 | self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level')
46 | self.learning_rate = self.conf.get_float('train.learning_rate')
47 | self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha')
48 | self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd')
49 | self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0)
50 | self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0)
51 |
52 | # Weights
53 | self.igr_weight = self.conf.get_float('train.igr_weight')
54 | self.mask_weight = self.conf.get_float('train.mask_weight')
55 | self.is_continue = is_continue
56 | self.mode = mode
57 |
58 | self.model_list = []
59 | self.writer = None
60 |
61 | # Networks
62 | params_to_train = []
63 | self.nerf_outside = NeRF(**self.conf['model.nerf']).to(self.device)
64 | if sdf_network is None:
65 | self.sdf_network = SDFNetwork(**self.conf['model.sdf_network']).to(self.device)
66 | else:
67 | self.sdf_network = sdf_network
68 | self.deviation_network = SingleVarianceNetwork(**self.conf['model.variance_network']).to(self.device)
69 | self.color_network = RenderingNetwork(**self.conf['model.rendering_network']).to(self.device)
70 | if init_opt:
71 | params_to_train += list(self.nerf_outside.parameters())
72 | params_to_train += list(self.sdf_network.parameters())
73 | params_to_train += list(self.deviation_network.parameters())
74 | params_to_train += list(self.color_network.parameters())
75 |
76 | self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate)
77 |
78 | self.renderer = NeuSRenderer(self.nerf_outside,
79 | self.sdf_network,
80 | self.deviation_network,
81 | self.color_network,
82 | **self.conf['model.neus_renderer'])
83 |
84 | # Load checkpoint
85 | latest_model_name = None
86 | if is_continue:
87 | model_list_raw = os.listdir(os.path.join(self.base_exp_dir, 'checkpoints'))
88 | model_list = []
89 | for model_name in model_list_raw:
90 | if model_name[-3:] == 'pth' and int(model_name[5:-4]) <= self.end_iter:
91 | model_list.append(model_name)
92 | model_list.sort()
93 | latest_model_name = model_list[-1]
94 |
95 | if latest_model_name is not None:
96 | logging.info('Find checkpoint: {}'.format(latest_model_name))
97 | self.load_checkpoint(latest_model_name)
98 |
99 | # Backup codes and configs for debug
100 | if self.mode[:5] == 'train':
101 | self.file_backup()
102 |
103 | def do_one_iter(self, image_idx):
104 | data = self.dataset.gen_random_rays_at(image_idx, self.batch_size)
105 |
106 | rays_o, rays_d, true_rgb, mask = data[:, :3], data[:, 3: 6], data[:, 6: 9], data[:, 9: 10]
107 | near, far = self.dataset.near_far_from_sphere(rays_o, rays_d)
108 |
109 | background_rgb = None
110 | if self.use_white_bkgd:
111 | background_rgb = torch.ones([1, 3])
112 |
113 | if self.mask_weight > 0.0:
114 | mask = (mask > 0.5).float()
115 | else:
116 | mask = torch.ones_like(mask)
117 |
118 | mask_sum = mask.sum() + 1e-5
119 | render_out = self.renderer.render(rays_o, rays_d, near, far,
120 | background_rgb=background_rgb,
121 | cos_anneal_ratio=self.get_cos_anneal_ratio())
122 |
123 | color_fine = render_out['color_fine']
124 | s_val = render_out['s_val']
125 | cdf_fine = render_out['cdf_fine']
126 | gradient_error = render_out['gradient_error']
127 | weight_max = render_out['weight_max']
128 | weight_sum = render_out['weight_sum']
129 | alpha = render_out["alpha"]
130 |
131 | # Loss
132 | color_error = (color_fine - true_rgb) * mask
133 | color_fine_loss = F.l1_loss(color_error, torch.zeros_like(color_error), reduction='sum') / mask_sum
134 | variation_loss = F.l1_loss(alpha, torch.zeros_like(alpha))
135 | psnr = 20.0 * torch.log10(1.0 / (((color_fine - true_rgb)**2 * mask).sum() / (mask_sum * 3.0)).sqrt())
136 |
137 | eikonal_loss = gradient_error
138 |
139 | mask_loss = F.binary_cross_entropy(weight_sum.clip(1e-3, 1.0 - 1e-3), mask)
140 |
141 | # print(variation_loss)
142 |
143 | loss = color_fine_loss +\
144 | eikonal_loss * self.igr_weight +\
145 | mask_loss * self.mask_weight + variation_loss * self.conf["train.variation_weight"]
146 | self.iter_step += 1
147 | return loss
148 |
149 | def train(self):
150 | self.writer = SummaryWriter(log_dir=os.path.join(self.base_exp_dir, 'logs'))
151 | self.update_learning_rate()
152 | res_step = self.end_iter - self.iter_step
153 | image_perm = self.get_image_perm()
154 | self.validate_mesh()
155 | for iter_i in tqdm(range(res_step)):
156 | loss = self.do_one_iter(image_perm[self.iter_step % len(image_perm)])
157 |
158 | self.optimizer.zero_grad()
159 | loss.backward()
160 | self.optimizer.step()
161 |
162 | # self.writer.add_scalar('Loss/loss', loss, self.iter_step)
163 | # self.writer.add_scalar('Loss/color_loss', color_fine_loss, self.iter_step)
164 | # self.writer.add_scalar('Loss/eikonal_loss', eikonal_loss, self.iter_step)
165 | # self.writer.add_scalar('Statistics/s_val', s_val.mean(), self.iter_step)
166 | # self.writer.add_scalar('Statistics/cdf', (cdf_fine[:, :1] * mask).sum() / mask_sum, self.iter_step)
167 | # self.writer.add_scalar('Statistics/weight_max', (weight_max * mask).sum() / mask_sum, self.iter_step)
168 | # self.writer.add_scalar('Statistics/psnr', psnr, self.iter_step)
169 |
170 | if self.iter_step % self.report_freq == 0:
171 | print(self.base_exp_dir)
172 | print('iter:{:8>d} loss = {} lr={}'.format(self.iter_step, loss, self.optimizer.param_groups[0]['lr']))
173 |
174 | if self.iter_step % self.save_freq == 0:
175 | self.save_checkpoint()
176 |
177 | if self.iter_step % self.val_freq == 0:
178 | self.validate_image()
179 |
180 | if self.iter_step % self.val_mesh_freq == 0:
181 | self.validate_mesh()
182 |
183 | self.update_learning_rate()
184 |
185 | if self.iter_step % len(image_perm) == 0:
186 | image_perm = self.get_image_perm()
187 | self.validate_mesh()
188 |
189 | def get_image_perm(self):
190 | return torch.randperm(self.dataset.n_images)
191 |
192 | def get_cos_anneal_ratio(self):
193 | if self.anneal_end == 0.0:
194 | return 1.0
195 | else:
196 | return np.min([1.0, self.iter_step / self.anneal_end])
197 |
198 | def update_learning_rate(self):
199 | if self.iter_step < self.warm_up_end:
200 | learning_factor = self.iter_step / self.warm_up_end
201 | else:
202 | alpha = self.learning_rate_alpha
203 | progress = (self.iter_step - self.warm_up_end) / (self.end_iter - self.warm_up_end)
204 | learning_factor = (np.cos(np.pi * progress) + 1.0) * 0.5 * (1 - alpha) + alpha
205 |
206 | for g in self.optimizer.param_groups:
207 | g['lr'] = self.learning_rate * learning_factor
208 |
209 | def file_backup(self):
210 | dir_lis = self.conf['general.recording']
211 | os.makedirs(os.path.join(self.base_exp_dir, 'recording'), exist_ok=True)
212 | for dir_name in dir_lis:
213 | cur_dir = os.path.join(self.base_exp_dir, 'recording', dir_name)
214 | os.makedirs(cur_dir, exist_ok=True)
215 | files = os.listdir(dir_name)
216 | for f_name in files:
217 | if f_name[-3:] == '.py':
218 | copyfile(os.path.join(dir_name, f_name), os.path.join(cur_dir, f_name))
219 |
220 | copyfile(self.conf_path, os.path.join(self.base_exp_dir, 'recording', 'config.conf'))
221 |
222 | def load_checkpoint(self, checkpoint_name):
223 | checkpoint = torch.load(os.path.join(self.base_exp_dir, 'checkpoints', checkpoint_name), map_location=self.device)
224 | self.nerf_outside.load_state_dict(checkpoint['nerf'])
225 | self.sdf_network.load_state_dict(checkpoint['sdf_network_fine'])
226 | self.deviation_network.load_state_dict(checkpoint['variance_network_fine'])
227 | self.color_network.load_state_dict(checkpoint['color_network_fine'])
228 | self.optimizer.load_state_dict(checkpoint['optimizer'])
229 | self.iter_step = checkpoint['iter_step']
230 |
231 | logging.info('End')
232 |
233 | def save_checkpoint(self):
234 | checkpoint = {
235 | 'nerf': self.nerf_outside.state_dict(),
236 | 'sdf_network_fine': self.sdf_network.state_dict(),
237 | 'variance_network_fine': self.deviation_network.state_dict(),
238 | 'color_network_fine': self.color_network.state_dict(),
239 | 'iter_step': self.iter_step,
240 | }
241 | if self.init_opt:
242 | checkpoint['optimizer'] = self.optimizer.state_dict()
243 | os.makedirs(os.path.join(self.base_exp_dir, 'checkpoints'), exist_ok=True)
244 | torch.save(checkpoint, os.path.join(self.base_exp_dir, 'checkpoints', 'ckpt_{:0>6d}.pth'.format(self.iter_step)))
245 |
246 | def validate_image(self, idx=-1, resolution_level=-1):
247 | if idx < 0:
248 | idx = np.random.randint(self.dataset.n_images)
249 |
250 | print('Validate: iter: {}, camera: {}'.format(self.iter_step, idx))
251 |
252 | if resolution_level < 0:
253 | resolution_level = self.validate_resolution_level
254 | rays_o, rays_d = self.dataset.gen_rays_at(idx, resolution_level=resolution_level)
255 | H, W, _ = rays_o.shape
256 | rays_o = rays_o.reshape(-1, 3).split(self.batch_size)
257 | rays_d = rays_d.reshape(-1, 3).split(self.batch_size)
258 |
259 | out_rgb_fine = []
260 | out_normal_fine = []
261 |
262 | for rays_o_batch, rays_d_batch in zip(rays_o, rays_d):
263 | near, far = self.dataset.near_far_from_sphere(rays_o_batch, rays_d_batch)
264 | background_rgb = torch.ones([1, 3]) if self.use_white_bkgd else None
265 |
266 | render_out = self.renderer.render(rays_o_batch,
267 | rays_d_batch,
268 | near,
269 | far,
270 | cos_anneal_ratio=self.get_cos_anneal_ratio(),
271 | background_rgb=background_rgb)
272 |
273 | def feasible(key): return (key in render_out) and (render_out[key] is not None)
274 |
275 | if feasible('color_fine'):
276 | out_rgb_fine.append(render_out['color_fine'].detach().cpu().numpy())
277 | if feasible('gradients') and feasible('weights'):
278 | n_samples = self.renderer.n_samples + self.renderer.n_importance
279 | normals = render_out['gradients'] * render_out['weights'][:, :n_samples, None]
280 | if feasible('inside_sphere'):
281 | normals = normals * render_out['inside_sphere'][..., None]
282 | normals = normals.sum(dim=1).detach().cpu().numpy()
283 | out_normal_fine.append(normals)
284 | del render_out
285 |
286 | img_fine = None
287 | if len(out_rgb_fine) > 0:
288 | img_fine = (np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3, -1]) * 256).clip(0, 255)
289 |
290 | normal_img = None
291 | if len(out_normal_fine) > 0:
292 | normal_img = np.concatenate(out_normal_fine, axis=0)
293 | rot = np.linalg.inv(self.dataset.pose_all[idx, :3, :3].detach().cpu().numpy())
294 | normal_img = (np.matmul(rot[None, :, :], normal_img[:, :, None])
295 | .reshape([H, W, 3, -1]) * 128 + 128).clip(0, 255)
296 |
297 | os.makedirs(os.path.join(self.base_exp_dir, 'validations_fine'), exist_ok=True)
298 | os.makedirs(os.path.join(self.base_exp_dir, 'normals'), exist_ok=True)
299 |
300 | for i in range(img_fine.shape[-1]):
301 | if len(out_rgb_fine) > 0:
302 | cv.imwrite(os.path.join(self.base_exp_dir,
303 | 'validations_fine',
304 | '{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)),
305 | np.concatenate([img_fine[..., i],
306 | self.dataset.image_at(idx, resolution_level=resolution_level)]))
307 | if len(out_normal_fine) > 0:
308 | cv.imwrite(os.path.join(self.base_exp_dir,
309 | 'normals',
310 | '{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)),
311 | normal_img[..., i])
312 |
313 | def render_novel_image(self, idx_0, idx_1, ratio, resolution_level):
314 | """
315 | Interpolate view between two cameras.
316 | """
317 | rays_o, rays_d = self.dataset.gen_rays_between(idx_0, idx_1, ratio, resolution_level=resolution_level)
318 | H, W, _ = rays_o.shape
319 | rays_o = rays_o.reshape(-1, 3).split(self.batch_size)
320 | rays_d = rays_d.reshape(-1, 3).split(self.batch_size)
321 |
322 | out_rgb_fine = []
323 | for rays_o_batch, rays_d_batch in zip(rays_o, rays_d):
324 | near, far = self.dataset.near_far_from_sphere(rays_o_batch, rays_d_batch)
325 | background_rgb = torch.ones([1, 3]) if self.use_white_bkgd else None
326 |
327 | render_out = self.renderer.render(rays_o_batch,
328 | rays_d_batch,
329 | near,
330 | far,
331 | cos_anneal_ratio=self.get_cos_anneal_ratio(),
332 | background_rgb=background_rgb)
333 |
334 | out_rgb_fine.append(render_out['color_fine'].detach().cpu().numpy())
335 |
336 | del render_out
337 |
338 | img_fine = (np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3]) * 256).clip(0, 255).astype(np.uint8)
339 | return img_fine
340 |
341 | def validate_mesh(self, world_space=False, resolution=64, threshold=0.0):
342 | bound_min = torch.tensor(self.dataset.object_bbox_min, dtype=torch.float32)
343 | bound_max = torch.tensor(self.dataset.object_bbox_max, dtype=torch.float32)
344 |
345 | vertices, triangles =\
346 | self.renderer.extract_geometry(bound_min, bound_max, resolution=resolution, threshold=threshold)
347 | os.makedirs(os.path.join(self.base_exp_dir, 'meshes'), exist_ok=True)
348 |
349 | if world_space:
350 | vertices = vertices * self.dataset.scale_mats_np[0][0, 0] + self.dataset.scale_mats_np[0][:3, 3][None]
351 |
352 | mesh = trimesh.Trimesh(vertices, triangles)
353 | mesh.export(os.path.join(self.base_exp_dir, 'meshes', '{:0>8d}.ply'.format(self.iter_step)))
354 |
355 | logging.info('End')
356 |
357 | def interpolate_view(self, img_idx_0, img_idx_1):
358 | images = []
359 | n_frames = 60
360 | for i in range(n_frames):
361 | print(i)
362 | images.append(self.render_novel_image(img_idx_0,
363 | img_idx_1,
364 | np.sin(((i / n_frames) - 0.5) * np.pi) * 0.5 + 0.5,
365 | resolution_level=4))
366 | for i in range(n_frames):
367 | images.append(images[n_frames - i - 1])
368 |
369 | fourcc = cv.VideoWriter_fourcc(*'mp4v')
370 | video_dir = os.path.join(self.base_exp_dir, 'render')
371 | os.makedirs(video_dir, exist_ok=True)
372 | h, w, _ = images[0].shape
373 | writer = cv.VideoWriter(os.path.join(video_dir,
374 | '{:0>8d}_{}_{}.mp4'.format(self.iter_step, img_idx_0, img_idx_1)),
375 | fourcc, 30, (w, h))
376 |
377 | for image in images:
378 | writer.write(image)
379 |
380 | writer.release()
381 |
382 |
383 | if __name__ == '__main__':
384 | print('Hello Wooden')
385 |
386 | torch.set_default_tensor_type('torch.cuda.FloatTensor')
387 |
388 | FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
389 | logging.basicConfig(level=logging.DEBUG, format=FORMAT)
390 |
391 | parser = argparse.ArgumentParser()
392 | parser.add_argument('--conf', type=str, default='./confs/base.conf')
393 | parser.add_argument('--mode', type=str, default='train')
394 | parser.add_argument('--mcube_threshold', type=float, default=0.0)
395 | parser.add_argument('--is_continue', default=False, action="store_true")
396 | parser.add_argument('--gpu', type=int, default=0)
397 | parser.add_argument('--case', type=str, default='')
398 | parser.add_argument("--random_seed", type=int)
399 | parser.add_argument("--disable_wandb", action="store_true")
400 |
401 | args = parser.parse_args()
402 |
403 | torch.cuda.set_device(args.gpu)
404 | runner = Runner(args.conf, args.mode, args.case, args.is_continue, random_seed=args.random_seed)
405 |
406 | if args.mode == 'train':
407 | runner.train()
408 | elif args.mode == 'validate_mesh':
409 | runner.validate_mesh(world_space=True, resolution=512, threshold=args.mcube_threshold)
410 | elif args.mode.startswith('interpolate'): # Interpolate views given two image indices
411 | _, img_idx_0, img_idx_1 = args.mode.split('_')
412 | img_idx_0 = int(img_idx_0)
413 | img_idx_1 = int(img_idx_1)
414 | runner.interpolate_view(img_idx_0, img_idx_1)
415 |
--------------------------------------------------------------------------------
/run_sdf.py:
--------------------------------------------------------------------------------
1 | import os, sys
2 | import numpy as np
3 | import json
4 | import random
5 | import time
6 | import torch
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 | from tqdm import tqdm, trange
10 | import scipy.io
11 | from helpers import *
12 | from MLP import *
13 | #from PIL import Image
14 | import cv2 as cv
15 | import time
16 | import random
17 | import string
18 | from pyhocon import ConfigFactory
19 | from models.fields import RenderingNetwork, SDFNetwork, SingleVarianceNetwork, NeRF
20 | from models.renderer import NeuSRenderer
21 | import trimesh
22 | from itertools import groupby
23 | from operator import itemgetter
24 | from load_data import *
25 | import logging
26 | import argparse
27 | # import wandb
28 | # from models.testing import RenderNetLamb
29 | import shutil
30 |
31 | import NeuS.exp_runner
32 |
33 | from math import ceil
34 |
35 | # set seeds
36 | torch.random.manual_seed(0)
37 | np.random.seed(0)
38 | random.seed(0)
39 |
40 | def config_parser():
41 | import configargparse
42 | parser = configargparse.ArgumentParser()
43 |
44 | # def scatterXYZ(X,Y,Z, name, expID, elev=None, azim=None, lims=None):
45 | # fig = plt.figure()
46 | # ax = fig.add_subplot(1,1,1, projection='3d')
47 | # #surf = ax.plot_trisurf(X, Y, Z, linewidth=0, antialiased=False)
48 | # if lims is not None:
49 | # ax.set_xlim(lims['x_min'], lims['x_max'])
50 | # ax.set_ylim(lims['y_min'], lims['y_max'])
51 | # ax.set_zlim(lims['z_min'], lims['z_max'])
52 | # surf = ax.scatter3D(X.cpu().numpy(), Y.cpu().numpy(), Z.cpu().numpy(), color = "green")
53 | # if elev is not None and azim is not None:
54 | # print("Setting elevation and azimuth to {} {}".format(elev, azim))
55 | # ax.view_init(elev=elev, azim=azim)
56 | # plt.xlabel('x', fontsize=18)
57 | # plt.ylabel('y', fontsize=16)
58 | # plt.savefig("./experiments/{}/figures/scatters/{}.png".format(expID, name))
59 | # plt.clf()
60 |
61 | def make_occ_eval_fn(neusis_runner, render_step_size=0.05):
62 | def occ_eval_fn(x):
63 | with torch.no_grad():
64 | # print(x.shape)
65 | sdf = neusis_runner.sdf_network.sdf(x)
66 | inv_s = neusis_runner.deviation_network(torch.zeros([1, 3]))[:, :1].clip(1e-6, 1e6)
67 |
68 | estimated_next_sdf = sdf - render_step_size * 0.5
69 | estimated_prev_sdf = sdf + render_step_size * 0.5
70 | prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)
71 | next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)
72 | p = prev_cdf - next_cdf
73 | c = prev_cdf
74 | alpha = ((p + 1e-5) / (c + 1e-5)).view(-1, 1).clip(0.0, 1.0)
75 | return alpha
76 | return occ_eval_fn
77 |
78 | class Runner:
79 | def __init__(self, conf, is_continue=False, write_config=True, testing=False, neus_conf=None, use_wandb=True, random_seed=0):
80 | conf_path = conf
81 | f = open(conf_path)
82 | conf_text = f.read()
83 | conf_name = str(conf_path).split("/")[-1][:-5]
84 | self.is_continue = is_continue
85 | self.conf = ConfigFactory.parse_string(conf_text)
86 | if use_wandb:
87 | project_name = "testing" if testing else "testing"
88 | # breakpoint()
89 | run = wandb.init(
90 | # Set the project where this run will be logged
91 | project=project_name,
92 | # Track hyperparameters and run metadata
93 | config=self.conf.as_plain_ordered_dict(),
94 | name=f"{conf_name}-{str(random_seed)}",
95 | dir="/tmp/"
96 | )
97 | self.neus_conf = neus_conf
98 | self.write_config = write_config
99 | if random_seed > 0:
100 | torch.random.manual_seed(random_seed)
101 | np.random.seed(random_seed)
102 | random.seed(random_seed)
103 | self.random_seed = random_seed
104 | self.use_wandb = use_wandb
105 | self.conf_path = conf_path
106 |
107 | def set_params(self):
108 | self.expID = self.conf.get_string('conf.expID')
109 |
110 | dataset = self.conf.get_string('conf.dataset')
111 | self.image_setkeyname = self.conf.get_string('conf.image_setkeyname')
112 |
113 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
114 | self.dataset = dataset
115 | # Training parameters
116 | self.end_iter = self.conf.get_int('train.end_iter')
117 | self.N_rand = self.conf.get_int('train.num_select_pixels') #H*W
118 | self.arc_n_samples = self.conf.get_int('train.arc_n_samples')
119 | self.save_freq = self.conf.get_int('train.save_freq')
120 | self.report_freq = self.conf.get_int('train.report_freq')
121 | self.val_mesh_freq = self.conf.get_int('train.val_mesh_freq')
122 | self.learning_rate = self.conf.get_float('train.learning_rate')
123 | self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha')
124 | self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0)
125 | self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0)
126 | self.percent_select_true = self.conf.get_float('train.percent_select_true', default=0.5)
127 | self.r_div = self.conf.get_bool('train.r_div')
128 | self.train_frac = self.conf.get_float("train.train_frac", default=1.0)
129 | self.accel = self.conf.get_bool('train.accel', default=False)
130 | # breakpoint()
131 | self.val_img_freq = self.conf.get_int("train.val_img_freq", default=10000)
132 | self.lamb_shading = self.conf.get_bool("train.lamb_shading", default=False)
133 | self.do_weight_norm = self.conf.get_bool("train.do_weight_norm", default=False)
134 | self.mode_tradeoff_schedule = self.conf.get_string("train.mode_tradeoff_schedule", default="none")
135 | self.mode_tradeoff_step_iter = self.conf.get_int("train.mode_tradeoff_step_iter", default=-1)
136 | self.rgb_weight = self.conf.get_float("train.rgb_weight", default=0.0)
137 |
138 | # Weights
139 | self.igr_weight = self.conf.get_float('train.igr_weight')
140 | self.variation_reg_weight = self.conf.get_float('train.variation_reg_weight')
141 | self.px_sample_min_weight = self.conf.get_float('train.px_sample_min_weight')
142 | # TODO: make below more reasonable?
143 | self.weight_sum_factor = self.conf.get_float("train.weight_sum_factor", default=0.1)
144 | self.dark_weight_sum_factor = self.conf.get_float("train.dark_weight_sum_factor", default=0.0)
145 |
146 | self.ray_n_samples = self.conf['model.neus_renderer']['n_samples']
147 | # TODO: make below more flexible
148 | self.base_exp_dir = f"{self.expID}/{self.random_seed}"
149 | os.makedirs(self.base_exp_dir, exist_ok=True)
150 | shutil.copy(self.conf_path, f"{self.base_exp_dir}/config.conf")
151 | self.randomize_points = self.conf.get_float('train.randomize_points')
152 | self.select_px_method = self.conf.get_string('train.select_px_method')
153 | self.select_valid_px = self.conf.get_bool('train.select_valid_px')
154 | self.x_max = self.conf.get_float('mesh.x_max')
155 | self.x_min = self.conf.get_float('mesh.x_min')
156 | self.y_max = self.conf.get_float('mesh.y_max')
157 | self.y_min = self.conf.get_float('mesh.y_min')
158 | self.z_max = self.conf.get_float('mesh.z_max')
159 | self.z_min = self.conf.get_float('mesh.z_min')
160 | self.level_set = self.conf.get_float('mesh.level_set')
161 |
162 | self.data = load_data(dataset)
163 |
164 | self.H, self.W = self.data[self.image_setkeyname][0].shape
165 |
166 | self.r_min = self.data["min_range"]
167 | self.r_max = self.data["max_range"]
168 | self.phi_min = -self.data["vfov"]/2
169 | self.phi_max = self.data["vfov"]/2
170 | self.vfov = self.data["vfov"]
171 | self.hfov = self.data["hfov"]
172 |
173 |
174 | self.cube_center = torch.Tensor([(self.x_max + self.x_min)/2, (self.y_max + self.y_min)/2, (self.z_max + self.z_min)/2])
175 |
176 | self.timef = self.conf.get_bool('conf.timef')
177 | self.end_iter = self.conf.get_int('train.end_iter')
178 | self.start_iter = self.conf.get_int('train.start_iter')
179 |
180 | self.object_bbox_min = self.conf.get_list('mesh.object_bbox_min')
181 | self.object_bbox_max = self.conf.get_list('mesh.object_bbox_max')
182 |
183 | r_increments = []
184 | self.sonar_resolution = (self.r_max-self.r_min)/self.H
185 | for i in range(self.H):
186 | r_increments.append(i*self.sonar_resolution + self.r_min)
187 |
188 | self.r_increments = torch.FloatTensor(r_increments).to(self.device)
189 |
190 | # extrapath = './experiments/{}'.format(self.expID)
191 | # if not os.path.exists(extrapath):
192 | # os.makedirs(extrapath)
193 |
194 | # extrapath = './experiments/{}/checkpoints'.format(self.expID)
195 | # if not os.path.exists(extrapath):
196 | # os.makedirs(extrapath)
197 |
198 | # extrapath = './experiments/{}/model'.format(self.expID)
199 | # if not os.path.exists(extrapath):
200 | # os.makedirs(extrapath)
201 |
202 | # if self.write_config:
203 | # with open('./experiments/{}/config.json'.format(self.expID), 'w') as f:
204 | # json.dump(self.conf.__dict__, f, indent = 2)
205 |
206 | # Create all image tensors beforehand to speed up process
207 |
208 | self.i_train = np.arange(len(self.data[self.image_setkeyname]))
209 |
210 | self.coords_all_ls = [(x, y) for x in np.arange(self.H) for y in np.arange(self.W)]
211 | self.coords_all_set = set(self.coords_all_ls)
212 |
213 | #self.coords_all = torch.from_numpy(np.array(self.coords_all_ls)).to(self.device)
214 |
215 | self.del_coords = []
216 | for y in np.arange(self.W):
217 | tmp = [(x, y) for x in np.arange(0, self.ray_n_samples)]
218 | self.del_coords.extend(tmp)
219 |
220 | self.coords_all = list(self.coords_all_set - set(self.del_coords))
221 | self.coords_all = torch.LongTensor(self.coords_all).to(self.device)
222 |
223 | self.criterion = torch.nn.L1Loss(reduction='sum')
224 |
225 | self.model_list = []
226 | self.writer = None
227 |
228 | # Networks
229 | params_to_train = []
230 | self.sdf_network = SDFNetwork(**self.conf['model.sdf_network']).to(self.device)
231 |
232 | self.deviation_network = SingleVarianceNetwork(**self.conf['model.variance_network']).to(self.device)
233 | self.color_network = RenderingNetwork(**self.conf['model.rendering_network']).to(self.device)
234 | params_to_train += list(self.sdf_network.parameters())
235 | params_to_train += list(self.deviation_network.parameters())
236 | params_to_train += list(self.color_network.parameters())
237 | if self.neus_conf is not None:
238 | neus_runner = NeuS.exp_runner.Runner(self.neus_conf, init_opt=False, sdf_network=self.sdf_network, random_seed=self.random_seed)
239 | params_to_train += list(neus_runner.nerf_outside.parameters())
240 | params_to_train += list(neus_runner.deviation_network.parameters())
241 | params_to_train += list(neus_runner.color_network.parameters())
242 | self.neus_runner = neus_runner
243 |
244 | self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate)
245 |
246 |
247 | self.iter_step = 0
248 | self.renderer = NeuSRenderer(self.sdf_network,
249 | self.deviation_network,
250 | self.color_network if not self.lamb_shading else RenderNetLamb(),
251 | self.base_exp_dir,
252 | self.expID,
253 | **self.conf['model.neus_renderer'])
254 |
255 | latest_model_name = None
256 | if self.is_continue:
257 | model_list_raw = os.listdir(os.path.join(self.base_exp_dir, 'checkpoints'))
258 | model_list = []
259 | for model_name in model_list_raw:
260 | if model_name[-3:] == 'pth': #and int(model_name[5:-4]) <= self.end_iter:
261 | model_list.append(model_name)
262 | model_list.sort()
263 | latest_model_name = model_list[-1]
264 |
265 | if latest_model_name is not None:
266 | logging.info('Find checkpoint: {}'.format(latest_model_name))
267 | ckpt_dir = os.path.join(self.base_exp_dir, 'checkpoints')
268 | self.load_checkpoint(f"{ckpt_dir}/{latest_model_name}")
269 |
270 | if self.accel:
271 | self.occ_eval_fn = make_occ_eval_fn(self)
272 | grid_resolution = 128
273 | grid_nlvl = 1
274 | device = torch.device("cuda:0")
275 | aabb = torch.tensor([-1.0, -1.0, -1.0, 1.0, 1.0, 1.0], device=device)
276 | self.estimator = OccGridEstimator(
277 | roi_aabb=aabb, resolution=grid_resolution, levels=grid_nlvl
278 | ).cuda()
279 | else:
280 | self.estimator = None
281 |
282 |
283 | def plotAllArcs(self, use_new=True):
284 | # This function is used to plot all arc points from all images in the same reference frame
285 | # this is to verify that we get an approximate shape and that the coordinate transformation
286 | # is correct
287 | i_train = np.arange(len(self.data[self.image_setkeyname]))
288 |
289 | all_points = []
290 | for j in trange(0, len(i_train)):
291 | img_i = i_train[j]
292 | # print(img_i)
293 | target = self.data[self.image_setkeyname][img_i]
294 | target = torch.Tensor(target).to(self.device)
295 | pose = self.data["sensor_poses"][img_i]
296 | c2w = torch.tensor(pose).cuda().float()
297 | coords = torch.nonzero(target)
298 | n_pixels = len(coords)
299 | if n_pixels == 0: continue
300 | # TODO: do something faster than below
301 | # essentially, just get dirs before normalization from the get_arcs function
302 |
303 | # old
304 | if not use_new:
305 | _, _, _, _, pts, _ = get_arcs(self.H, self.W, self.phi_min, self.phi_max, self.r_min, self.r_max, torch.Tensor(pose), n_pixels,
306 | self.arc_n_samples, self.ray_n_samples, self.hfov, coords,
307 | self.r_increments, self.randomize_points,
308 | self.device, self.cube_center)
309 | pts = pts.reshape(n_pixels, self.arc_n_samples, self.ray_n_samples, 3)
310 | pts = pts[:, :, -1, :]
311 | # print(pts.shape)
312 | all_points.append(pts)
313 | else:
314 | img_y = coords[:, 0] # img y coords
315 | img_x = coords[:, 1] # img x coords
316 | phi = (
317 | torch.linspace(self.phi_min, self.phi_max, self.arc_n_samples)
318 | .float()
319 | .repeat(n_pixels)
320 | .reshape(n_pixels, -1)
321 | )
322 | sonar_resolution = (self.r_max - self.r_min) / self.H
323 | # compute radius at each pixel
324 | r = img_y * sonar_resolution + self.r_min
325 | # compute bearing angle at each pixel (azimuth)
326 | theta = -self.hfov / 2 + img_x * self.hfov / self.W
327 | coords = torch.stack(
328 | (
329 | r.repeat_interleave(self.arc_n_samples).reshape(n_pixels, -1),
330 | theta.repeat_interleave(self.arc_n_samples).reshape(n_pixels, -1),
331 | phi,
332 | ),
333 | dim=-1,
334 | )
335 | coords = coords.reshape(-1, 3)
336 | X = coords[:, 0] * torch.cos(coords[:, 1]) * torch.cos(coords[:, 2])
337 | Y = coords[:, 0] * torch.sin(coords[:, 1]) * torch.cos(coords[:, 2])
338 | Z = coords[:, 0] * torch.sin(coords[:, 2])
339 | pts = c2w @ torch.stack((X, Y, Z, torch.ones_like(X)))
340 | pts = pts[:3, ...].T
341 |
342 | all_points.append(pts)
343 |
344 | all_points = torch.cat(all_points, dim=0)
345 | return all_points, target
346 |
347 | def getRandomImgCoordsByPercentage(self, target):
348 | # 1. replace below with torch sampling + masking (double check it still works)
349 | # 2. dilate mask ot true locations, so dark areas around bright areas are also preferentially
350 | # sampled
351 | true_coords = []
352 | for y in np.arange(self.W):
353 | col = target[:, y]
354 | gt0 = col > 0
355 | indTrue = np.where(gt0)[0]
356 | if len(indTrue) > 0:
357 | true_coords.extend([(x, y) for x in indTrue])
358 | sampling_perc = int(self.percent_select_true*len(true_coords))
359 | true_coords = random.sample(true_coords, sampling_perc)
360 | true_coords = list(set(true_coords) - set(self.del_coords))
361 | true_coords = torch.LongTensor(true_coords).to(self.device)
362 | target = torch.Tensor(target).to(self.device)
363 | if self.iter_step%len(self.data[self.image_setkeyname]) !=0:
364 | N_rand = 0
365 | else:
366 | N_rand = self.N_rand
367 | N_rand = self.N_rand
368 | coords = select_coordinates(self.coords_all, target, N_rand, self.select_valid_px)
369 | coords = torch.cat((coords, true_coords), dim=0)
370 |
371 | return coords, target
372 |
373 | def train(self):
374 | loss_arr = []
375 | self.validate_mesh(threshold = self.level_set)
376 |
377 | # make train/validation sets
378 | # fix validation set for fair comparisons
379 | # i_all = np.arange(len(self.data[self.image_setkeyname]))
380 | i_train = []
381 | i_val = []
382 | if self.train_frac < 1.0:
383 | val_skip = int(1 / (1 - self.train_frac))
384 | for i in range(len(self.data[self.image_setkeyname])):
385 | if i % val_skip == 0:
386 | i_val.append(i)
387 | else:
388 | i_train.append(i)
389 | else:
390 | i_train = np.arange(len(self.data[self.image_setkeyname]))
391 | i_val = []
392 | # np.random.shuffle(i_all)
393 | # split_ind = int(self.train_frac * len(i_all))
394 | # i_train = i_all[:split_ind]
395 | # i_val = i_all[split_ind:]
396 |
397 | for i in trange(self.start_iter, self.end_iter, len(self.data[self.image_setkeyname])):
398 | # i_train = np.arange(len(self.data[self.image_setkeyname]))
399 | np.random.shuffle(i_train)
400 | loss_total = 0
401 | sum_intensity_loss = 0
402 | sum_eikonal_loss = 0
403 | sum_total_variational = 0
404 | sum_neus_loss = 0
405 |
406 | for j in trange(0, len(i_train)):
407 | if self.accel:
408 | self.estimator.update_every_n_steps(step=self.iter_step, occ_eval_fn=self.occ_eval_fn)
409 | log_dict = {}
410 | img_i = i_train[j]
411 | target = self.data[self.image_setkeyname][img_i]
412 |
413 |
414 | pose = self.data["sensor_poses"][img_i]
415 |
416 | if self.select_px_method == "byprob":
417 | coords, target = self.getRandomImgCoordsByProbability(target)
418 | else:
419 | coords, target = self.getRandomImgCoordsByPercentage(target)
420 |
421 | n_pixels = len(coords)
422 | # print(n_pixels)
423 |
424 | # r holds radius per sample if estimator is none, otherwise it is nONe
425 | rays_d, dphi, r, rs, pts, dists = get_arcs(self.H, self.W, self.phi_min, self.phi_max, self.r_min, self.r_max, torch.Tensor(pose), n_pixels,
426 | self.arc_n_samples, self.ray_n_samples, self.hfov, coords, self.r_increments,
427 | self.randomize_points, self.device, self.cube_center, self.estimator)
428 |
429 |
430 | target_s = target[coords[:, 0], coords[:, 1]]
431 |
432 | render_out = self.renderer.render_sonar(rays_d, pts, dists, n_pixels,
433 | self.arc_n_samples, self.ray_n_samples, r,
434 | cos_anneal_ratio=self.get_cos_anneal_ratio())
435 |
436 |
437 | gradient_error = render_out['gradient_error'].reshape(-1, 1) #.reshape(n_pixels, self.arc_n_samples, -1)
438 | # gradient_error = torch.tensor(0)
439 | eikonal_loss = gradient_error.sum()*(1/gradient_error.shape[0])
440 | variation_regularization = render_out['variation_error']*(1/(self.arc_n_samples*self.ray_n_samples*n_pixels))
441 | # variation_regularization = torch.tensor(0)
442 |
443 | # try bright weight sum regularization
444 | if self.weight_sum_factor > 0.0:
445 | bright_weight_sums = render_out["weight_sum"][target_s > 0.0]
446 | ones_target = torch.ones_like(bright_weight_sums)
447 | # modified with max
448 | # weight_norm_loss = self.weight_sum_factor * torch.mean((torch.maximum(ones_target-bright_weight_sums, torch.zeros_like(ones_target)))**2)
449 | weight_norm_loss = self.weight_sum_factor * torch.mean((ones_target-bright_weight_sums)**2)
450 | else:
451 | weight_norm_loss = torch.tensor(0.0)
452 |
453 | # weight sparsity regularization
454 | # bright_weights = render_out["weights"][target_s > 0.0]
455 | # weight_sparse_loss = 0.1 * torch.nn.functional.l1_loss(bright_weights, torch.zeros_like(bright_weights))
456 | weight_sparse_loss = 0.0
457 |
458 | # dark weight sum regularization
459 | # breakpoint()
460 | if self.dark_weight_sum_factor > 0.0:
461 | dark_weights = render_out["weight_sum"][target_s == 0.0]
462 | zeros_target = torch.zeros_like(dark_weights)
463 | dark_weight_norm_loss = self.dark_weight_sum_factor * torch.mean((dark_weights - zeros_target)**2)
464 | else:
465 | dark_weight_norm_loss = torch.tensor(0.0)
466 |
467 | if self.r_div:
468 | intensityPointsOnArc = render_out["intensityPointsOnArc"]
469 | intensity_fine = (torch.divide(intensityPointsOnArc, rs)*render_out["weights"]).sum(dim=1)
470 | else:
471 | intensity_fine = render_out['color_fine']
472 |
473 | if self.do_weight_norm:
474 | if len(intensity_fine.shape) == 1:
475 | intensity_fine = intensity_fine[:, None]
476 | intensity_fine[target_s > 0.0] = intensity_fine[target_s > 0.0] / render_out["weight_sum"][target_s > 0.0]
477 |
478 | intensity_error = self.criterion(intensity_fine.squeeze(), target_s.squeeze())*(1/n_pixels)
479 |
480 | loss = intensity_error + eikonal_loss * self.igr_weight + variation_regularization*self.variation_reg_weight
481 | loss += weight_norm_loss
482 | loss += weight_sparse_loss
483 | loss += dark_weight_norm_loss
484 | if self.neus_conf is not None:
485 | if self.mode_tradeoff_schedule == "step":
486 | if self.iter_step < self.mode_tradeoff_step_iter:
487 | neus_loss = torch.tensor([0.])
488 | else:
489 | neus_loss = self.neus_runner.do_one_iter(img_i % self.neus_runner.dataset.n_images)
490 | loss = (1 - self.rgb_weight) * loss + self.rgb_weight * neus_loss
491 | else:
492 | neus_loss = self.neus_runner.do_one_iter(img_i % self.neus_runner.dataset.n_images)
493 | loss += neus_loss * 2 # TODO: fix this (add config?)
494 |
495 | self.optimizer.zero_grad()
496 | loss.backward()
497 | self.optimizer.step()
498 |
499 | with torch.no_grad():
500 | lossNG = intensity_error + eikonal_loss * self.igr_weight
501 | loss_total += lossNG.cpu().numpy().item()
502 | sum_intensity_loss += intensity_error.cpu().numpy().item()
503 | sum_eikonal_loss += eikonal_loss.cpu().numpy().item()
504 | sum_total_variational += variation_regularization.cpu().numpy().item()
505 | if self.neus_conf is not None:
506 | sum_neus_loss += neus_loss.cpu().numpy().item()
507 |
508 |
509 | self.iter_step += 1
510 | self.update_learning_rate()
511 |
512 | del(target)
513 | del(target_s)
514 | del(rays_d)
515 | del(pts)
516 | del(dists)
517 | del(render_out)
518 | del(coords)
519 | # break
520 | log_dict["sonar_intensity_loss"] = intensity_error.item()
521 |
522 | # end of epoch
523 | if j == len(i_train) - 1:
524 | epoch_num = i // len(self.data[self.image_setkeyname]) # duplicated with below
525 | log_dict["epoch_sonar_intensity_loss"] = sum_intensity_loss/len(i_train)
526 | log_dict["epoch_num"] = epoch_num
527 | if (epoch_num+1) % self.val_img_freq == 0:
528 | tqdm.write("validation\n")
529 | val_metric = 0
530 | for i in trange(len(i_val)):
531 | val_ind = i_val[i]
532 | curr_img_val = render_image(self, val_ind, self.estimator)
533 | curr_gt_val = self.data[self.image_setkeyname][val_ind]
534 | val_metric += np.mean((curr_img_val - curr_gt_val) ** 2)
535 | val_metric = val_metric / len(i_val)
536 | log_dict["mean_val_mse"] = val_metric
537 |
538 | img = render_image(self, i_val[len(i_val)//2], self.estimator)
539 | if self.use_wandb:
540 | log_dict["val_vis"] = wandb.Image((np.clip(img, 0, 1)*255).astype(np.uint8))
541 | img_train = render_image(self, i_train[len(i_train)//2], self.estimator)
542 | if self.use_wandb:
543 | log_dict["train_vis"] = wandb.Image((np.clip(img_train, 0, 1)*255).astype(np.uint8))
544 | train_gt_img = self.data[self.image_setkeyname][i_train[len(i_train)//2]]
545 | if self.use_wandb:
546 | log_dict["train_gt_vis"] = wandb.Image((np.clip(train_gt_img, 0, 1)*255).astype(np.uint8))
547 | gt_img = self.data[self.image_setkeyname][i_val[len(i_val)//2]]
548 | if self.use_wandb:
549 | log_dict["val_gt_vis"] = wandb.Image((np.clip(gt_img, 0, 1)*255).astype(np.uint8))
550 | log_dict["epoch_num_val"] = epoch_num
551 |
552 | # saving mesh + novel view synthesis for neus
553 | if epoch_num == 0 or epoch_num % self.val_mesh_freq == 0:
554 | mesh_path = self.validate_mesh(threshold = self.level_set)
555 | if self.neus_conf is not None:
556 | # self.neus_runner.validate_mesh()
557 | self.neus_runner.validate_image()
558 | if self.use_wandb:
559 | log_dict["mesh_recon"] = wandb.Object3D(open(mesh_path))
560 | if self.use_wandb:
561 | wandb.log(log_dict)
562 |
563 | with torch.no_grad():
564 | l = loss_total/len(i_train)
565 | iL = sum_intensity_loss/len(i_train)
566 | eikL = sum_eikonal_loss/len(i_train)
567 | varL = sum_total_variational/len(i_train)
568 | if self.neus_conf is not None:
569 | nl = sum_neus_loss / len(i_train)
570 | loss_arr.append(l)
571 | # breakpoint()
572 | epoch_num = i // len(self.data[self.image_setkeyname])
573 |
574 | # saving checkpoint
575 | if epoch_num == 0 or epoch_num % self.save_freq == 0:
576 | logging.info('iter:{} ********************* SAVING CHECKPOINT ****************'.format(self.optimizer.param_groups[0]['lr']))
577 | self.save_checkpoint()
578 | if self.neus_conf is not None:
579 | self.neus_runner.save_checkpoint()
580 |
581 | # write to terminal
582 | if epoch_num % self.report_freq == 0:
583 | report_str = f"iter:{self.iter_step:8>d} Loss={l} | intensity Loss={iL} | eikonal loss={eikL} | total variation loss = {varL} | lr = {self.optimizer.param_groups[0]['lr']}"
584 | if self.neus_conf is not None:
585 | report_str = f"{report_str} | neus loss = {nl}"
586 | report_str = f"{report_str} | weight_norm_loss = {weight_norm_loss.item()}"
587 | report_str = f"{report_str} | dark_weight_norm_loss = {dark_weight_norm_loss.item()}"
588 | # report_str = f"{report_str} | weight_sparse_loss = {weight_sparse_loss.item()}"
589 | # print(report_str)
590 | tqdm.write(report_str)
591 |
592 | self.save_checkpoint()
593 | self.validate_mesh(threshold = self.level_set)
594 |
595 |
596 | def save_checkpoint(self):
597 | checkpoint = {
598 | 'sdf_network_fine': self.sdf_network.state_dict(),
599 | 'variance_network_fine': self.deviation_network.state_dict(),
600 | 'color_network_fine': self.color_network.state_dict(),
601 | 'optimizer': self.optimizer.state_dict(),
602 | 'iter_step': self.iter_step,
603 | }
604 |
605 | os.makedirs(os.path.join(self.base_exp_dir, 'checkpoints'), exist_ok=True)
606 | torch.save(checkpoint, os.path.join(self.base_exp_dir, 'checkpoints', 'ckpt_{:0>6d}.pth'.format(self.iter_step)))
607 |
608 | def load_checkpoint(self, checkpoint_name):
609 | # checkpoint = torch.load(os.path.join(self.base_exp_dir, 'checkpoints', checkpoint_name), map_location=self.device)
610 | checkpoint = torch.load(checkpoint_name, map_location=self.device)
611 | self.sdf_network.load_state_dict(checkpoint['sdf_network_fine'])
612 | self.deviation_network.load_state_dict(checkpoint['variance_network_fine'])
613 | self.color_network.load_state_dict(checkpoint['color_network_fine'])
614 | self.optimizer.load_state_dict(checkpoint['optimizer'])
615 | self.iter_step = checkpoint['iter_step']
616 |
617 | def update_learning_rate(self):
618 | if self.iter_step <= self.warm_up_end: # do i really need <=?
619 | learning_factor = self.iter_step / self.warm_up_end
620 | else:
621 | alpha = self.learning_rate_alpha
622 | progress = (self.iter_step - self.warm_up_end) / (self.end_iter - self.warm_up_end)
623 | learning_factor = (np.cos(np.pi * progress) + 1.0) * 0.5 * (1 - alpha) + alpha
624 |
625 | for g in self.optimizer.param_groups:
626 | g['lr'] = self.learning_rate * learning_factor
627 |
628 | def get_cos_anneal_ratio(self):
629 | if self.anneal_end == 0.0:
630 | return 1.0
631 | else:
632 | return np.min([1.0, self.iter_step / self.anneal_end])
633 |
634 | def validate_mesh(self, world_space=False, resolution=64, threshold=0.0):
635 | # breakpoint()
636 | bound_min = torch.tensor(self.object_bbox_min, dtype=torch.float32)
637 | bound_max = torch.tensor(self.object_bbox_max, dtype=torch.float32)
638 |
639 | vertices, triangles =\
640 | self.renderer.extract_geometry(bound_min, bound_max, resolution=resolution, threshold=threshold)
641 |
642 | os.makedirs(os.path.join(self.base_exp_dir, 'meshes'), exist_ok=True)
643 |
644 | if world_space:
645 | vertices = vertices * self.dataset.scale_mats_np[0][0, 0] + self.dataset.scale_mats_np[0][:3, 3][None]
646 |
647 | mesh = trimesh.Trimesh(vertices, triangles)
648 | mesh_path = os.path.join(self.base_exp_dir, 'meshes', '{:0>8d}.obj'.format(self.iter_step))
649 | mesh.export(mesh_path)
650 | return mesh_path
651 |
652 |
653 | if __name__=='__main__':
654 | torch.set_default_tensor_type('torch.cuda.FloatTensor')
655 | FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
656 | logging.getLogger('matplotlib.font_manager').disabled = True
657 | logging.basicConfig(level=logging.DEBUG, format=FORMAT)
658 |
659 | parser = argparse.ArgumentParser()
660 | parser.add_argument('--conf', type=str, default="./confs/conf.conf")
661 | parser.add_argument('--neus_conf', type=str)
662 | parser.add_argument('--is_continue', default=False, action="store_true")
663 | parser.add_argument('--gpu', type=int, default=0)
664 | parser.add_argument("--testing", action="store_true")
665 | parser.add_argument("--random_seed", type=int, default=0)
666 | parser.add_argument("--disable_wandb", action="store_true")
667 |
668 | args = parser.parse_args()
669 |
670 | torch.cuda.set_device(args.gpu)
671 | runner = Runner(args.conf, args.is_continue, testing=args.testing, neus_conf=args.neus_conf, random_seed=args.random_seed, use_wandb=not args.disable_wandb)
672 | runner.set_params()
673 | runner.train()
674 |
--------------------------------------------------------------------------------