├── .gitignore ├── .gitmodules ├── Dockerfile ├── LICENSE ├── README.md ├── __pycache__ ├── losses.cpython-38.pyc ├── opt.cpython-38.pyc └── utils.cpython-38.pyc ├── datasets ├── CLNerf │ ├── __pycache__ │ │ ├── nerfpp.cpython-38.pyc │ │ ├── nsvf.cpython-38.pyc │ │ ├── nsvf_MEILNERF.cpython-38.pyc │ │ ├── nsvf_MEILNERF_paper.cpython-38.pyc │ │ └── nsvf_TaTSeq.cpython-38.pyc │ ├── nerfpp.py │ ├── nsvf.py │ └── nsvf_TaTSeq.py ├── MEILNerf │ ├── __pycache__ │ │ ├── nerfpp.cpython-38.pyc │ │ ├── nsvf_MEILNERF_paper.cpython-38.pyc │ │ ├── nsvf_MEILNeRF.cpython-38.pyc │ │ ├── nsvf_TaTSeq.cpython-38.pyc │ │ ├── nsvf_TaTSeq100.cpython-38.pyc │ │ └── phototour_nerfw_minScale.cpython-38.pyc │ ├── nerfpp.py │ ├── nsvf_MEILNeRF.py │ └── nsvf_TaTSeq.py ├── NGPA │ ├── __pycache__ │ │ ├── colmap.cpython-38.pyc │ │ ├── colmap_render.cpython-38.pyc │ │ ├── nsvf.cpython-38.pyc │ │ ├── phototour.cpython-38.pyc │ │ ├── phototour_nerfw.cpython-38.pyc │ │ └── phototour_nerfw_minScale.cpython-38.pyc │ ├── colmap.py │ └── colmap_render.py ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-38.pyc │ ├── base.cpython-38.pyc │ ├── colmap.cpython-38.pyc │ ├── colmap_utils.cpython-38.pyc │ ├── color_utils.cpython-38.pyc │ ├── nerf.cpython-38.pyc │ ├── nerfpp.cpython-38.pyc │ ├── nsvf.cpython-38.pyc │ ├── nsvf_MEILNERF.cpython-38.pyc │ ├── nsvf_MEILNERF_paper.cpython-38.pyc │ ├── ray_utils.cpython-38.pyc │ └── rtmv.cpython-38.pyc ├── base.py ├── colmap.py ├── colmap_utils.py ├── color_utils.py ├── depth_utils.py ├── lb │ ├── __pycache__ │ │ ├── nerfpp.cpython-38.pyc │ │ ├── nsvf.cpython-38.pyc │ │ ├── nsvf_MEILNERF.cpython-38.pyc │ │ └── nsvf_MEILNERF_paper.cpython-38.pyc │ ├── nerfpp.py │ └── nsvf.py ├── nerfpp.py ├── nsvf.py ├── nsvf_MEILNERF.py └── ray_utils.py ├── demo └── teaser.png ├── img2video.py ├── img2video.sh ├── install_cuda_module.sh ├── losses.py ├── merge_video.py ├── merge_video.sh ├── models ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-38.pyc │ ├── custom_functions.cpython-38.pyc │ ├── networks.cpython-38.pyc │ ├── rendering.cpython-38.pyc │ └── rendering_NGPA.cpython-38.pyc ├── csrc │ ├── binding.cpp │ ├── build │ │ ├── lib.linux-x86_64-cpython-38 │ │ │ └── vren.cpython-38-x86_64-linux-gnu.so │ │ └── temp.linux-x86_64-cpython-38 │ │ │ ├── binding.o │ │ │ ├── intersection.o │ │ │ ├── losses.o │ │ │ ├── raymarching.o │ │ │ └── volumerendering.o │ ├── include │ │ ├── helper_math.h │ │ └── utils.h │ ├── intersection.cu │ ├── losses.cu │ ├── raymarching.cu │ ├── setup.py │ ├── volumerendering.cu │ └── vren.egg-info │ │ ├── PKG-INFO │ │ ├── SOURCES.txt │ │ ├── dependency_links.txt │ │ └── top_level.txt ├── custom_functions.py ├── networks.py ├── rendering.py └── rendering_NGPA.py ├── opt.py ├── prepare_dataests.sh ├── render_NGP_WAT.py ├── render_video_EWC.py ├── render_video_NeRF.py ├── run_CLNeRF.sh ├── run_ER.sh ├── run_ER_NGP.sh ├── run_EWC.sh ├── run_LB.sh ├── run_MEIL.sh ├── run_NT.sh ├── run_NT_NGP.sh ├── run_UB.sh ├── scripts ├── CLNeRF │ ├── SynthNeRF │ │ └── benchmark_synth_nerf.sh │ ├── WAT │ │ ├── breville.sh │ │ ├── car.sh │ │ ├── community.sh │ │ ├── grill.sh │ │ ├── kitchen.sh │ │ ├── living_room.sh │ │ ├── mac.sh │ │ ├── ninja.sh │ │ ├── render_video.sh │ │ ├── spa.sh │ │ └── street.sh │ └── nerfpp │ │ └── benchmark_nerfpp.sh ├── ER_NGP │ ├── SynthNeRF │ │ └── benchmark_synth_nerf.sh │ ├── WAT │ │ └── breville.sh │ └── nerfpp │ │ └── benchmark_nerfpp.sh ├── EWC │ ├── SynthNeRF │ │ └── nerf_baseline.sh │ ├── WAT │ │ ├── nerf_baseline.sh │ │ └── video_render.sh │ └── nerfpp │ │ 
└── nerf_baseline.sh ├── MEIL │ ├── SynthNeRF │ │ └── benchmark_synth_nerf.sh │ ├── WAT │ │ ├── breville.sh │ │ ├── car.sh │ │ ├── community.sh │ │ ├── grill.sh │ │ ├── kitchen.sh │ │ ├── living_room.sh │ │ ├── mac.sh │ │ ├── ninja.sh │ │ ├── spa.sh │ │ └── street.sh │ └── nerfpp │ │ └── benchmark_nerfpp.sh ├── NT │ ├── SynthNeRF │ │ ├── benchmark_synth_nerf.sh │ │ └── nerf_baseline.sh │ ├── WAT │ │ ├── breville.sh │ │ ├── car.sh │ │ ├── community.sh │ │ ├── grill.sh │ │ ├── kitchen.sh │ │ ├── living_room.sh │ │ ├── mac.sh │ │ ├── nerf_baseline.sh │ │ ├── ninja.sh │ │ ├── spa.sh │ │ ├── street.sh │ │ └── video_render.sh │ └── nerfpp │ │ ├── benchmark_nerfpp.sh │ │ └── nerf_baseline.sh ├── UB │ ├── SynthNeRF │ │ └── benchmark_synthetic_nerf.sh │ ├── WAT │ │ └── UB_WAT.sh │ └── nerfpp │ │ └── benchmark_nerfpp.sh └── data_prepare │ ├── build_WAT_from_video.sh │ ├── prepare_SynthNeRF.sh │ ├── prepare_WAT.sh │ ├── prepare_nerfpp.sh │ └── resize_videos.py ├── setup_env.sh ├── setup_env_docker.sh ├── source_cuda.sh ├── test_pose_interpolation.py ├── train.py ├── train_CLNerf.py ├── train_MEIL.py ├── train_SynthNeRF_EWC.py ├── train_SynthNeRF_NT.py ├── train_WAT_EWC.py ├── train_WAT_NT.py ├── train_lb.py ├── train_nerfpp_EWC.py ├── train_nerfpp_NT.py ├── train_ngpgv2.py ├── train_ngpgv2_CLNerf.py ├── train_ngpgv2_MEIL.py ├── train_ngpgv2_lb.py └── utils ├── __pycache__ └── utils.cpython-38.pyc ├── data_prepare_utils └── poses │ ├── __init__.py │ ├── __pycache__ │ ├── colmap_read_model.cpython-38.pyc │ ├── colmap_wrapper.cpython-38.pyc │ └── pose_utils.cpython-38.pyc │ ├── colmap_read_model.py │ ├── colmap_wrapper.py │ ├── imgs2poses.py │ └── pose_utils.py ├── nerfacc_radiance_fields ├── __pycache__ │ ├── mlp.cpython-38.pyc │ └── utils.cpython-38.pyc ├── datasets │ ├── __pycache__ │ │ ├── colmap_utils.cpython-38.pyc │ │ ├── ray_utils.cpython-38.pyc │ │ └── utils.cpython-38.pyc │ ├── colmap_utils.py │ ├── color_utils.py │ ├── depth_utils.py │ ├── lb │ │ ├── __pycache__ │ │ │ ├── colmap.cpython-38.pyc │ │ │ ├── colmap_render.cpython-38.pyc │ │ │ ├── nerf_synthetic.cpython-38.pyc │ │ │ └── nerfpp.cpython-38.pyc │ │ ├── colmap.py │ │ ├── colmap_render.py │ │ ├── nerf_synthetic.py │ │ └── nerfpp.py │ ├── nerf_synthetic.py │ ├── nerfpp.py │ ├── ray_utils.py │ └── utils.py ├── mlp.py └── utils.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | dataset 2 | dataset/ 3 | 4 | results 5 | results/ 6 | 7 | logs 8 | logs/ 9 | 10 | ckpts 11 | ckpts/ 12 | 13 | scripts/data_prepare/job_prepare_WOT.sh 14 | scripts/data_prepare/job_prepare_WAT.sh 15 | 16 | job_run_CLNeRF.sh 17 | 18 | job*.sh 19 | test_run.sh 20 | 21 | demo/*.mp4 -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "utils/pycolmap"] 2 | path = utils/pycolmap 3 | url = https://github.com/rmbrualla/pycolmap.git 4 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Define base image 2 | FROM nvidia/cuda:11.3.1-devel-ubuntu20.04 3 | 4 | # Install required apt packages and clear cache afterwards 5 | RUN apt-get update && \ 6 | apt-get install -y --no-install-recommends \ 7 | libgl1-mesa-glx \ 8 | dos2unix \ 9 | git \ 10 | ninja-build \ 11 | screen \ 12 | wget && \ 13 | rm -rf /var/lib/apt/lists/* 14 | 15 | # Copy required files 
and folders 16 | COPY setup_env_docker.sh . 17 | COPY install_cuda_module.sh . 18 | COPY models models/ 19 | RUN dos2unix setup_env_docker.sh && \ 20 | dos2unix install_cuda_module.sh 21 | 22 | # Install miniconda 23 | RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O /opt/miniconda-installer.sh && \ 24 | bash /opt/miniconda-installer.sh -b -u -p /opt/miniconda3 25 | 26 | # Set up conda environment and activate by default 27 | ENV PATH=/opt/miniconda3/bin:$PATH 28 | RUN conda init bash 29 | 30 | # Install required packages 31 | RUN bash setup_env_docker.sh && \ 32 | echo "conda activate CLNeRF" >> ~/.bashrc 33 | 34 | # Clean up 35 | RUN rm setup_env_docker.sh && \ 36 | rm install_cuda_module.sh && \ 37 | rm -rf models 38 | 39 | # Switch to workspace folder (this is where the code will be mounted) 40 | WORKDIR /workspace/CLNeRF 41 | 42 | # Bash as default entrypoint 43 | CMD ["/bin/bash"] 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | 3 | Copyright 2023 Intel Labs 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 7 | You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CLNeRF 2 | Official implementation of 'CLNeRF: Continual Learning Meets NeRF' (accepted to ICCV'23) 3 | 4 | [[Paper](https://arxiv.org/abs/2308.14816)] [[Video](https://youtu.be/nLRt6OoDGq0)] [[Dataset](https://huggingface.co/datasets/IntelLabs/WAT-WorldAcrossTime)] [Web Demo (coming soon)] 5 | 6 | ![Example Image](https://github.com/ZhipengCai/CLNeRF/blob/main/demo/teaser.png) 7 | 8 | We study the problem of continual learning in the context of NeRFs. We propose a new dataset, World Across Time (WAT), for this purpose, where the scene appearance and geometry can change over time (at different time steps/tasks of continual learning). We also propose a simple yet effective method, CLNeRF, which combines generative replay with advanced NeRF architectures so that a single NeRF model can efficiently adapt to gradually revealed new data, i.e., render scenes at different times with potential appearance and geometry changes, without the need to store historical images. 9 | 10 | To facilitate future research on continual NeRF, we provide the code to run different continual learning methods on different NeRF datasets (including WAT). 11 | 12 | Please give us a star or cite our paper if you find it useful. 13 | 14 | # Contact 15 | Please contact Zhipeng Cai (homepage: https://zhipengcai.github.io/, email: czptc2h@gmail.com) if you have questions or comments, or want to collaborate on this repository to make it better. 16 | 17 | We are actively looking for good research interns; contact Zhipeng if you are interested (multiple bases are possible, e.g., US, Munich, China). 
18 | 19 | ```bibtex 20 | @inproceedings{iccv23clnerf, 21 | title={CLNeRF: Continual Learning Meets NeRF}, 22 | author={Zhipeng Cai and Matthias Müller}, 23 | year={2023}, 24 | booktitle={ICCV}, 25 | } 26 | ``` 27 | 28 | # Installation 29 | 30 | ## Hardware 31 | 32 | * OS: Ubuntu 20.04 33 | * NVIDIA GPU with Compute Capability >= 75 and memory > 12GB (tested with RTX 3090 Ti and RTX 6000), CUDA 11.3 (might work with older versions) 34 | 35 | ## Environment setup 36 | * Clone this repo and its submodules (pycolmap): `git clone --recurse-submodules https://github.com/IntelLabs/CLNeRF.git` 37 | * Run the commands in `setup_env.sh` line by line (running them one at a time prevents a failure in a single line from damaging your existing environment) 38 | 39 | ### Docker (optional) 40 | First make sure you have [installed Docker](https://docs.docker.com/engine/install/), cloned the repository, and made it your current working directory. 41 | ```bash 42 | git clone --recursive https://github.com/IntelLabs/CLNeRF.git 43 | cd CLNeRF 44 | docker pull joaquingajardo/clnerf:latest 45 | docker run -d --name CLNeRF --gpus=all --shm-size=24g -w /workspace/CLNeRF -v ${PWD}:/workspace/CLNeRF -t joaquingajardo/clnerf:latest 46 | docker exec -it CLNeRF bash 47 | # Optionally, in VS Code you can attach to the newly created container for easier debugging and development 48 | ``` 49 | 50 | ## Dataset preparation (naming follows Fig. 4 of the main paper; WAT, SynthNeRF and NeRF++ are currently supported) 51 | 52 | ```bash 53 | bash prepare_datasets.sh 54 | ``` 55 | 56 | # Run experiments 57 | 58 | ```bash 59 | # run experiments on CLNeRF (WAT, SynthNeRF and NeRF++ datasets are currently supported) 60 | bash run_CLNeRF.sh 61 | # run experiments on MEIL-NeRF 62 | bash run_MEIL.sh 63 | # run experiments on ER (experience replay) 64 | bash run_ER.sh 65 | # run experiments on EWC 66 | bash run_EWC.sh 67 | # run experiments on NT (naive training/finetuning on the sequential data) 68 | bash run_NT.sh 69 | # render a video using a trained CLNeRF model 70 | scene=breville 71 | task_number=5 72 | task_curr=4 73 | rep=10 74 | scale=8.0 # change to the right scale according to the corresponding training script (scripts/NT/WAT/breville.sh) 75 | ckpt_path=/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/ckpts/NGPGv2_CL/colmap_ngpa_CLNerf/${scene}_10/epoch=19-v4.ckpt # change to your ckpt path 76 | bash scripts/CLNeRF/WAT/render_video.sh $task_number $task_curr $scene $ckpt_path $rep $scale $render_fname 77 | ``` 78 | # License 79 | 80 | This repository is under the Apache 2.0 License and is free for non-commercial use. Please contact Zhipeng for other use cases. 
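# Using the dataset loaders directly (optional)

The data loaders registered in `datasets/__init__.py` can also be instantiated from Python, as `img2video.py` does for rendering. Below is a minimal sketch of this usage; the `root_dir` path and the task settings are illustrative placeholders, not values taken from the provided scripts. It shows how a replay-based loader such as `nerfpp_lb` splits a scene into sequential tasks and mixes in replayed images from earlier tasks:

```python
# Minimal sketch: load the training images of one continual-learning task.
# The root_dir below is a placeholder; point it to a prepared NeRF++ scene.
from datasets import dataset_dict

dataset_cls = dataset_dict['nerfpp_lb']  # NeRF++ loader with a simple replay buffer
train_set = dataset_cls(
    root_dir='dataset/nerfpp/scene',   # placeholder path
    split='train',
    downsample=1.0,
    task_number=5,            # total number of sequential tasks
    task_curr=2,              # index of the task being trained now
    task_split_method='seq',  # split images into tasks by image index
    rep_size=10,              # number of replayed images from earlier tasks
)
print(f'{len(train_set.id_train_final)} training images selected for this task')
```

Note that when `task_curr == 0` or `rep_size == 0`, only the current task's images are used, since there is nothing to replay.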
81 | -------------------------------------------------------------------------------- /__pycache__/losses.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/__pycache__/losses.cpython-38.pyc -------------------------------------------------------------------------------- /__pycache__/opt.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/__pycache__/opt.cpython-38.pyc -------------------------------------------------------------------------------- /__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/CLNerf/__pycache__/nerfpp.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/CLNerf/__pycache__/nerfpp.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/CLNerf/__pycache__/nsvf.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/CLNerf/__pycache__/nsvf.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/CLNerf/__pycache__/nsvf_MEILNERF.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/CLNerf/__pycache__/nsvf_MEILNERF.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/CLNerf/__pycache__/nsvf_MEILNERF_paper.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/CLNerf/__pycache__/nsvf_MEILNERF_paper.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/CLNerf/__pycache__/nsvf_TaTSeq.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/CLNerf/__pycache__/nsvf_TaTSeq.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/MEILNerf/__pycache__/nerfpp.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/MEILNerf/__pycache__/nerfpp.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/MEILNerf/__pycache__/nsvf_MEILNERF_paper.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/MEILNerf/__pycache__/nsvf_MEILNERF_paper.cpython-38.pyc 
-------------------------------------------------------------------------------- /datasets/MEILNerf/__pycache__/nsvf_MEILNeRF.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/MEILNerf/__pycache__/nsvf_MEILNeRF.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/MEILNerf/__pycache__/nsvf_TaTSeq.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/MEILNerf/__pycache__/nsvf_TaTSeq.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/MEILNerf/__pycache__/nsvf_TaTSeq100.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/MEILNerf/__pycache__/nsvf_TaTSeq100.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/MEILNerf/__pycache__/phototour_nerfw_minScale.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/MEILNerf/__pycache__/phototour_nerfw_minScale.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/NGPA/__pycache__/colmap.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/NGPA/__pycache__/colmap.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/NGPA/__pycache__/colmap_render.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/NGPA/__pycache__/colmap_render.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/NGPA/__pycache__/nsvf.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/NGPA/__pycache__/nsvf.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/NGPA/__pycache__/phototour.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/NGPA/__pycache__/phototour.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/NGPA/__pycache__/phototour_nerfw.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/NGPA/__pycache__/phototour_nerfw.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/NGPA/__pycache__/phototour_nerfw_minScale.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/NGPA/__pycache__/phototour_nerfw_minScale.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .nsvf import NSVFDataset 2 | from .lb.nsvf import NSVFDataset_lb 3 | from .CLNerf.nsvf import NSVFDataset_CLNerf 4 | from .MEILNerf.nsvf_MEILNeRF import NSVFDataset_MEILNeRF 5 | from .CLNerf.nsvf_TaTSeq import NSVFDataset_TaTSeq_CLNerf 6 | from .MEILNerf.nsvf_TaTSeq import NSVFDataset_TaTSeq_MEIL 7 | from .MEILNerf.nerfpp import NeRFPPDataset_MEIL 8 | 9 | from .colmap import ColmapDataset 10 | from .nerfpp import NeRFPPDataset 11 | from .lb.nerfpp import NeRFPPDataset_lb 12 | from .CLNerf.nerfpp import NeRFPPDataset_CLNerf 13 | from .NGPA.colmap import ColmapDataset_NGPA, ColmapDataset_NGPA_lb, ColmapDataset_NGPA_CLNerf, ColmapDataset_NGPA_MEIL 14 | from .NGPA.colmap_render import ColmapDataset_NGPA_CLNerf_render 15 | 16 | dataset_dict = { 17 | 'nsvf': NSVFDataset, # check 18 | 'nsvf_lb': NSVFDataset_lb, 19 | 'nsvf_CLNerf': NSVFDataset_CLNerf, # check 20 | 'nsvf_MEILNERF': NSVFDataset_MEILNeRF, # check 21 | 'nsvf_TaTSeq_CLNerf': NSVFDataset_TaTSeq_CLNerf, 22 | 'nsvf_TaTSeq_MEILNERF': NSVFDataset_TaTSeq_MEIL, 23 | 'colmap': ColmapDataset, 24 | 'colmap_ngpa': ColmapDataset_NGPA, # check 25 | 'colmap_ngpa_lb': ColmapDataset_NGPA_lb, 26 | 'colmap_ngpa_CLNerf': ColmapDataset_NGPA_CLNerf, # check 27 | 'colmap_ngpa_CLNerf_render': ColmapDataset_NGPA_CLNerf_render, # check 28 | 'colmap_ngpa_MEIL': ColmapDataset_NGPA_MEIL, # check 29 | 'nerfpp': NeRFPPDataset, # check 30 | 'nerfpp_lb': NeRFPPDataset_lb, # check 31 | 'nerfpp_CLNerf': NeRFPPDataset_CLNerf, # check 32 | 'nerfpp_MEIL': NeRFPPDataset_MEIL # check 33 | } 34 | -------------------------------------------------------------------------------- /datasets/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/__pycache__/base.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/__pycache__/base.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/__pycache__/colmap.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/__pycache__/colmap.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/__pycache__/colmap_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/__pycache__/colmap_utils.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/__pycache__/color_utils.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/__pycache__/color_utils.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/__pycache__/nerf.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/__pycache__/nerf.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/__pycache__/nerfpp.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/__pycache__/nerfpp.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/__pycache__/nsvf.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/__pycache__/nsvf.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/__pycache__/nsvf_MEILNERF.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/__pycache__/nsvf_MEILNERF.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/__pycache__/nsvf_MEILNERF_paper.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/__pycache__/nsvf_MEILNERF_paper.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/__pycache__/ray_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/__pycache__/ray_utils.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/__pycache__/rtmv.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/__pycache__/rtmv.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/base.py: -------------------------------------------------------------------------------- 1 | from torch.utils.data import Dataset 2 | import numpy as np 3 | 4 | 5 | class BaseDataset(Dataset): 6 | """ 7 | Define length and sampling method 8 | """ 9 | def __init__(self, root_dir, split='train', downsample=1.0): 10 | self.root_dir = root_dir 11 | self.split = split 12 | self.downsample = downsample 13 | 14 | def read_intrinsics(self): 15 | raise NotImplementedError 16 | 17 | def __len__(self): 18 | if self.split.startswith('train'): 19 | return 1000 20 | return len(self.poses) 21 | 22 | def __getitem__(self, idx): 23 | if self.split.startswith('train'): 24 | # training pose is retrieved in train.py 25 | if self.ray_sampling_strategy == 'all_images': # randomly select images 26 | img_idxs = np.random.choice(len(self.poses), self.batch_size) 27 | elif self.ray_sampling_strategy == 
'same_image': # randomly select ONE image 28 | img_idxs = np.random.choice(len(self.poses), 1)[0] 29 | # randomly select pixels 30 | pix_idxs = np.random.choice(self.img_wh[0]*self.img_wh[1], self.batch_size) 31 | rays = self.rays[img_idxs, pix_idxs] 32 | sample = {'img_idxs': img_idxs, 'pix_idxs': pix_idxs, 33 | 'rgb': rays[:, :3]} 34 | if self.rays.shape[-1] == 4: # HDR-NeRF data 35 | sample['exposure'] = rays[:, 3:] 36 | else: 37 | sample = {'pose': self.poses[idx], 'img_idxs': idx} 38 | if len(self.rays)>0: # if ground truth available 39 | rays = self.rays[idx] 40 | sample['rgb'] = rays[:, :3] 41 | if rays.shape[1] == 4: # HDR-NeRF data 42 | sample['exposure'] = rays[0, 3] # same exposure for all rays 43 | 44 | return sample -------------------------------------------------------------------------------- /datasets/colmap.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import os 4 | import glob 5 | from tqdm import tqdm 6 | 7 | from .ray_utils import * 8 | from .color_utils import read_image 9 | from .colmap_utils import \ 10 | read_cameras_binary, read_images_binary, read_points3d_binary 11 | 12 | from .base import BaseDataset 13 | 14 | 15 | class ColmapDataset(BaseDataset): 16 | def __init__(self, root_dir, split='train', downsample=1.0, **kwargs): 17 | super().__init__(root_dir, split, downsample) 18 | 19 | self.read_intrinsics() 20 | 21 | if kwargs.get('read_meta', True): 22 | self.read_meta(split, **kwargs) 23 | 24 | def read_intrinsics(self): 25 | # Step 1: read and scale intrinsics (same for all images) 26 | camdata = read_cameras_binary(os.path.join(self.root_dir, 'sparse/0/cameras.bin')) 27 | h = int(camdata[1].height*self.downsample) 28 | w = int(camdata[1].width*self.downsample) 29 | self.img_wh = (w, h) 30 | 31 | if camdata[1].model == 'SIMPLE_RADIAL': 32 | fx = fy = camdata[1].params[0]*self.downsample 33 | cx = camdata[1].params[1]*self.downsample 34 | cy = camdata[1].params[2]*self.downsample 35 | elif camdata[1].model in ['PINHOLE', 'OPENCV']: 36 | fx = camdata[1].params[0]*self.downsample 37 | fy = camdata[1].params[1]*self.downsample 38 | cx = camdata[1].params[2]*self.downsample 39 | cy = camdata[1].params[3]*self.downsample 40 | else: 41 | raise ValueError(f"Please parse the intrinsics for camera model {camdata[1].model}!") 42 | self.K = torch.FloatTensor([[fx, 0, cx], 43 | [0, fy, cy], 44 | [0, 0, 1]]) 45 | self.directions = get_ray_directions(h, w, self.K) 46 | 47 | def read_meta(self, split, **kwargs): 48 | # Step 2: correct poses 49 | # read extrinsics (of successfully reconstructed images) 50 | imdata = read_images_binary(os.path.join(self.root_dir, 'sparse/0/images.bin')) 51 | img_names = [imdata[k].name for k in imdata] 52 | perm = np.argsort(img_names) 53 | if '360_v2' in self.root_dir and self.downsample<1: # mipnerf360 data 54 | folder = f'images_{int(1/self.downsample)}' 55 | else: 56 | folder = 'images' 57 | # read successfully reconstructed images and ignore others 58 | img_paths = [os.path.join(self.root_dir, folder, name) 59 | for name in sorted(img_names)] 60 | w2c_mats = [] 61 | bottom = np.array([[0, 0, 0, 1.]]) 62 | for k in imdata: 63 | im = imdata[k] 64 | R = im.qvec2rotmat(); t = im.tvec.reshape(3, 1) 65 | w2c_mats += [np.concatenate([np.concatenate([R, t], 1), bottom], 0)] 66 | w2c_mats = np.stack(w2c_mats, 0) 67 | poses = np.linalg.inv(w2c_mats)[perm, :3] # (N_images, 3, 4) cam2world matrices 68 | 69 | pts3d = read_points3d_binary(os.path.join(self.root_dir, 
'sparse/0/points3D.bin')) 70 | pts3d = np.array([pts3d[k].xyz for k in pts3d]) # (N, 3) 71 | 72 | self.poses, self.pts3d = center_poses(poses, pts3d) 73 | 74 | scale = np.linalg.norm(self.poses[..., 3], axis=-1).min() 75 | self.poses[..., 3] /= scale 76 | self.pts3d /= scale 77 | 78 | self.rays = [] 79 | if split == 'test_traj': # use precomputed test poses 80 | self.poses = create_spheric_poses(1.2, self.poses[:, 1, 3].mean()) 81 | self.poses = torch.FloatTensor(self.poses) 82 | return 83 | 84 | if 'HDR-NeRF' in self.root_dir: # HDR-NeRF data 85 | if 'syndata' in self.root_dir: # synthetic 86 | # first 17 are test, last 18 are train 87 | self.unit_exposure_rgb = 0.73 88 | if split=='train': 89 | img_paths = sorted(glob.glob(os.path.join(self.root_dir, 90 | f'train/*[024].png'))) 91 | self.poses = np.repeat(self.poses[-18:], 3, 0) 92 | elif split=='test': 93 | img_paths = sorted(glob.glob(os.path.join(self.root_dir, 94 | f'test/*[13].png'))) 95 | self.poses = np.repeat(self.poses[:17], 2, 0) 96 | else: 97 | raise ValueError(f"split {split} is invalid for HDR-NeRF!") 98 | else: # real 99 | self.unit_exposure_rgb = 0.5 100 | # even numbers are train, odd numbers are test 101 | if split=='train': 102 | img_paths = sorted(glob.glob(os.path.join(self.root_dir, 103 | f'input_images/*0.jpg')))[::2] 104 | img_paths+= sorted(glob.glob(os.path.join(self.root_dir, 105 | f'input_images/*2.jpg')))[::2] 106 | img_paths+= sorted(glob.glob(os.path.join(self.root_dir, 107 | f'input_images/*4.jpg')))[::2] 108 | self.poses = np.tile(self.poses[::2], (3, 1, 1)) 109 | elif split=='test': 110 | img_paths = sorted(glob.glob(os.path.join(self.root_dir, 111 | f'input_images/*1.jpg')))[1::2] 112 | img_paths+= sorted(glob.glob(os.path.join(self.root_dir, 113 | f'input_images/*3.jpg')))[1::2] 114 | self.poses = np.tile(self.poses[1::2], (2, 1, 1)) 115 | else: 116 | raise ValueError(f"split {split} is invalid for HDR-NeRF!") 117 | else: 118 | # use every 8th image as test set 119 | if split=='train': 120 | img_paths = [x for i, x in enumerate(img_paths) if i%8!=0] 121 | self.poses = np.array([x for i, x in enumerate(self.poses) if i%8!=0]) 122 | elif split=='test': 123 | img_paths = [x for i, x in enumerate(img_paths) if i%8==0] 124 | self.poses = np.array([x for i, x in enumerate(self.poses) if i%8==0]) 125 | 126 | print(f'Loading {len(img_paths)} {split} images ...') 127 | for img_path in tqdm(img_paths): 128 | buf = [] # buffer for ray attributes: rgb, etc 129 | 130 | img = read_image(img_path, self.img_wh, blend_a=False) 131 | img = torch.FloatTensor(img) 132 | buf += [img] 133 | 134 | if 'HDR-NeRF' in self.root_dir: # get exposure 135 | folder = self.root_dir.split('/') 136 | scene = folder[-1] if folder[-1] != '' else folder[-2] 137 | if scene in ['bathroom', 'bear', 'chair', 'desk']: 138 | e_dict = {e: 1/8*4**e for e in range(5)} 139 | elif scene in ['diningroom', 'dog']: 140 | e_dict = {e: 1/16*4**e for e in range(5)} 141 | elif scene in ['sofa']: 142 | e_dict = {0:0.25, 1:1, 2:2, 3:4, 4:16} 143 | elif scene in ['sponza']: 144 | e_dict = {0:0.5, 1:2, 2:4, 3:8, 4:32} 145 | elif scene in ['box']: 146 | e_dict = {0:2/3, 1:1/3, 2:1/6, 3:0.1, 4:0.05} 147 | elif scene in ['computer']: 148 | e_dict = {0:1/3, 1:1/8, 2:1/15, 3:1/30, 4:1/60} 149 | elif scene in ['flower']: 150 | e_dict = {0:1/3, 1:1/6, 2:0.1, 3:0.05, 4:1/45} 151 | elif scene in ['luckycat']: 152 | e_dict = {0:2, 1:1, 2:0.5, 3:0.25, 4:0.125} 153 | e = int(img_path.split('.')[0][-1]) 154 | buf += [e_dict[e]*torch.ones_like(img[:, :1])] 155 | 156 | 
self.rays += [torch.cat(buf, 1)] 157 | 158 | self.rays = torch.stack(self.rays) # (N_images, hw, ?) 159 | self.poses = torch.FloatTensor(self.poses) # (N_images, 3, 4) -------------------------------------------------------------------------------- /datasets/color_utils.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | from einops import rearrange 3 | import imageio 4 | import numpy as np 5 | from PIL import Image, ImageDraw 6 | import os 7 | 8 | 9 | def srgb_to_linear(img): 10 | limit = 0.04045 11 | return np.where(img > limit, ((img + 0.055) / 1.055)**2.4, img / 12.92) 12 | 13 | 14 | def linear_to_srgb(img): 15 | limit = 0.0031308 16 | img = np.where(img > limit, 1.055 * img**(1 / 2.4) - 0.055, 12.92 * img) 17 | img[img > 1] = 1 # "clamp" tonemapper 18 | return img 19 | 20 | 21 | def read_image(img_path, img_wh, blend_a=True, test_img_gen=False, img_id=0): 22 | img = imageio.imread(img_path).astype(np.float32) / 255.0 23 | 24 | if test_img_gen: 25 | print("[test after train]: img_id= {}, img = {}/{}/{}".format( 26 | img_id, img.shape, img.min(), img.max())) 27 | # save the training image 28 | print("saving training image to {}".format( 29 | 'test/train_img_rep{}.jpg'.format(img_id))) 30 | rgb_img = Image.fromarray((255 * img).astype(np.uint8)) 31 | rgb_img = rgb_img.convert('RGB') 32 | os.makedirs('./test/', exist_ok=True) 33 | rgb_img.save('test/train_img_rep{}.jpeg'.format(img_id)) 34 | 35 | # img[..., :3] = srgb_to_linear(img[..., :3]) 36 | if img.shape[2] == 4: # blend A to RGB 37 | if blend_a: 38 | img = img[..., :3] * img[..., -1:] + (1 - img[..., -1:]) 39 | else: 40 | img = img[..., :3] * img[..., -1:] 41 | 42 | img = cv2.resize(img, img_wh) 43 | img = rearrange(img, 'h w c -> (h w) c') 44 | 45 | return img 46 | 47 | 48 | def add_perturbation(img, perturbation, seed, decent_occ=0): 49 | if 'color' in perturbation: 50 | np.random.seed(seed) 51 | # img_np = np.array(img) / 255.0 52 | s = np.random.uniform(0.8, 1.2, size=3) 53 | b = np.random.uniform(-0.2, 0.2, size=3) 54 | img[..., :3] = np.clip(s * img[..., :3] + b, 0, 1) 55 | # img = Image.fromarray((255 * img_np).astype(np.uint8)) 56 | if 'occ' in perturbation: 57 | 58 | draw = ImageDraw.Draw(img) 59 | np.random.seed(seed) 60 | if decent_occ: 61 | left = np.random.randint(0, 600) 62 | top = np.random.randint(0, 600) 63 | else: 64 | left = np.random.randint(200, 400) 65 | top = np.random.randint(200, 400) 66 | for i in range(10): 67 | np.random.seed(10 * seed + i) 68 | random_color = tuple(np.random.choice(range(256), 3)) 69 | draw.rectangle( 70 | ((left + 20 * i, top), (left + 20 * (i + 1), top + 200)), 71 | fill=random_color) 72 | return img 73 | 74 | 75 | def read_image_ngpa(img_path, 76 | img_wh, 77 | blend_a=True, 78 | split='train', 79 | t=0, 80 | test_img_gen=False, 81 | img_id=0): 82 | img = imageio.imread(img_path).astype(np.float32) / 255.0 83 | 84 | # add perturbations 85 | if t != 0 and split == 'train': # perturb everything except the first image. 86 | # cf. 
Section D in the supplementary material 87 | img = add_perturbation(img, ['color'], t) 88 | 89 | if test_img_gen and split == 'train': 90 | print("[test after train]: t = {}, img_id= {}, img = {}/{}/{}".format( 91 | t, img_id, img.shape, img.min(), img.max())) 92 | # exit() 93 | # save the training image 94 | print("saving training image to {}".format( 95 | 'test/train_img{}.jpg'.format(img_id))) 96 | rgb_img = Image.fromarray((255 * img).astype(np.uint8)) 97 | rgb_img = rgb_img.convert('RGB') 98 | os.makedirs('./test/', exist_ok=True) 99 | rgb_img.save('test/train_img{}.jpeg'.format(img_id)) 100 | 101 | # img[..., :3] = srgb_to_linear(img[..., :3]) 102 | if img.shape[2] == 4: # blend A to RGB 103 | if blend_a: 104 | img = img[..., :3] * img[..., -1:] + (1 - img[..., -1:]) 105 | else: 106 | img = img[..., :3] * img[..., -1:] 107 | 108 | img = cv2.resize(img, img_wh) 109 | img = rearrange(img, 'h w c -> (h w) c') 110 | 111 | return img 112 | 113 | 114 | def read_image_phototour(img_path, 115 | blend_a=True, 116 | test_img_gen=False, 117 | img_id=0, 118 | downscale=1, 119 | crop_region='full'): 120 | img = imageio.imread(img_path).astype(np.float32) / 255.0 121 | 122 | if test_img_gen: 123 | print("[test after train]: img_id= {}, img = {}/{}/{}".format( 124 | img_id, img.shape, img.min(), img.max())) 125 | # save the training image 126 | print("saving training image to {}".format( 127 | 'test/train_img_rep{}.jpg'.format(img_id))) 128 | rgb_img = Image.fromarray((255 * img).astype(np.uint8)) 129 | rgb_img = rgb_img.convert('RGB') 130 | os.makedirs('./test/', exist_ok=True) 131 | rgb_img.save('test/train_img_rep{}.jpeg'.format(img_id)) 132 | 133 | # img[..., :3] = srgb_to_linear(img[..., :3]) 134 | if img.shape[2] == 4: # blend A to RGB 135 | if blend_a: 136 | img = img[..., :3] * img[..., -1:] + (1 - img[..., -1:]) 137 | else: 138 | img = img[..., :3] * img[..., -1:] 139 | 140 | # height and width 141 | img_hw = (img.shape[0] // downscale, img.shape[1] // downscale) 142 | if downscale > 1: 143 | img = cv2.resize(img, (img_hw[1], img_hw[0])) 144 | 145 | if crop_region == 'left': 146 | img = img[:, :img_hw[1] // 2] 147 | elif crop_region == 'right': 148 | img = img[:, img_hw[1] // 2:] 149 | 150 | img = rearrange(img, 'h w c -> (h w) c') 151 | 152 | return img -------------------------------------------------------------------------------- /datasets/depth_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import re 3 | 4 | 5 | def read_pfm(path): 6 | """Read pfm file. 
7 | 8 | Args: 9 | path (str): path to file 10 | 11 | Returns: 12 | tuple: (data, scale) 13 | """ 14 | with open(path, "rb") as file: 15 | 16 | color = None 17 | width = None 18 | height = None 19 | scale = None 20 | endian = None 21 | 22 | header = file.readline().rstrip() 23 | if header.decode("ascii") == "PF": 24 | color = True 25 | elif header.decode("ascii") == "Pf": 26 | color = False 27 | else: 28 | raise Exception("Not a PFM file: " + path) 29 | 30 | dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii")) 31 | if dim_match: 32 | width, height = list(map(int, dim_match.groups())) 33 | else: 34 | raise Exception("Malformed PFM header.") 35 | 36 | scale = float(file.readline().decode("ascii").rstrip()) 37 | if scale < 0: 38 | # little-endian 39 | endian = "<" 40 | scale = -scale 41 | else: 42 | # big-endian 43 | endian = ">" 44 | 45 | data = np.fromfile(file, endian + "f") 46 | shape = (height, width, 3) if color else (height, width) 47 | 48 | data = np.reshape(data, shape) 49 | data = np.flipud(data) 50 | 51 | return data, scale -------------------------------------------------------------------------------- /datasets/lb/__pycache__/nerfpp.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/lb/__pycache__/nerfpp.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/lb/__pycache__/nsvf.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/lb/__pycache__/nsvf.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/lb/__pycache__/nsvf_MEILNERF.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/lb/__pycache__/nsvf_MEILNERF.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/lb/__pycache__/nsvf_MEILNERF_paper.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/datasets/lb/__pycache__/nsvf_MEILNERF_paper.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/lb/nerfpp.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import glob 3 | import numpy as np 4 | import os 5 | from PIL import Image 6 | from tqdm import tqdm 7 | 8 | from ..ray_utils import get_ray_directions 9 | from ..color_utils import read_image 10 | 11 | from ..base import BaseDataset 12 | import random 13 | 14 | class NeRFPPDataset_lb(BaseDataset): 15 | def __init__(self, root_dir, split='train', downsample=1.0, **kwargs): 16 | super().__init__(root_dir, split, downsample) 17 | 18 | self.read_intrinsics() 19 | 20 | self.task_number = kwargs.get('task_number', 5) 21 | self.task_curr = kwargs.get('task_curr', 4) 22 | self.task_split_method = kwargs.get('task_split_method', 'seq') 23 | self.rep_size = kwargs.get('rep_size', 0) 24 | 25 | if kwargs.get('read_meta', True): 26 | self.read_meta(split) 27 | 28 | def read_intrinsics(self): 29 | K = np.loadtxt(glob.glob(os.path.join(self.root_dir, 
'train/intrinsics/*.txt'))[0], 30 | dtype=np.float32).reshape(4, 4)[:3, :3] 31 | K[:2] *= self.downsample 32 | w, h = Image.open(glob.glob(os.path.join(self.root_dir, 'train/rgb/*'))[0]).size 33 | w, h = int(w*self.downsample), int(h*self.downsample) 34 | self.K = torch.FloatTensor(K) 35 | self.directions = get_ray_directions(h, w, self.K) 36 | self.img_wh = (w, h) 37 | 38 | def split_tasks(self, poses, task_number, task_split_method): 39 | # return task id for each element in poses 40 | task_id = [] 41 | if task_split_method == 'random': 42 | for i in range(len(poses)): 43 | task_id.append(random.randint(0, task_number - 1)) 44 | else: 45 | # equally split task according to the id 46 | imgs_per_task = len(poses) // task_number 47 | for j in range(task_number): 48 | task_id += ([j] * imgs_per_task) 49 | # task_id.append() 50 | task_id += ([task_number-1] * (len(poses)- imgs_per_task * task_number)) 51 | return task_id 52 | 53 | def read_meta(self, split): 54 | self.rays = [] 55 | self.poses = [] 56 | 57 | if split == 'test_traj': 58 | poses_path = \ 59 | sorted(glob.glob(os.path.join(self.root_dir, 'camera_path/pose/*.txt'))) 60 | self.poses = [np.loadtxt(p).reshape(4, 4)[:3] for p in poses_path] 61 | else: 62 | if split=='trainval': 63 | img_paths = sorted(glob.glob(os.path.join(self.root_dir, 'train/rgb/*')))+\ 64 | sorted(glob.glob(os.path.join(self.root_dir, 'val/rgb/*'))) 65 | poses = sorted(glob.glob(os.path.join(self.root_dir, 'train/pose/*.txt')))+\ 66 | sorted(glob.glob(os.path.join(self.root_dir, 'val/pose/*.txt'))) 67 | else: 68 | img_paths = sorted(glob.glob(os.path.join(self.root_dir, split, 'rgb/*'))) 69 | poses = sorted(glob.glob(os.path.join(self.root_dir, split, 'pose/*.txt'))) 70 | 71 | if split == 'train': 72 | # split the training data into 5 tasks 73 | random.seed(0) 74 | self.task_ids = self.split_tasks(poses, self.task_number, self.task_split_method) 75 | # prepare training data 76 | self.id_task_curr = [] 77 | self.id_rep = [] 78 | for i in range(len(self.task_ids)): 79 | if self.task_ids[i] == self.task_curr: 80 | self.id_task_curr.append(i) 81 | elif self.task_ids[i] < self.task_curr: 82 | self.id_rep.append(i) 83 | if self.rep_size == 0 or self.task_curr == 0: 84 | self.id_train_final = self.id_task_curr 85 | else: 86 | # set random seed 87 | self.id_train_final = self.id_task_curr + random.choices(self.id_rep, k = self.rep_size) 88 | else: 89 | self.id_train_final = list(range(len(poses))) 90 | 91 | self.id_train_final.sort() 92 | 93 | print(f'Loading {len(img_paths)} {split} images ...') 94 | for id_train in tqdm(self.id_train_final): 95 | # for img_path, pose in tqdm(zip(img_paths, poses)): 96 | img_path, pose = img_paths[id_train], poses[id_train] 97 | self.poses += [np.loadtxt(pose).reshape(4, 4)[:3]] 98 | 99 | img = read_image(img_path, self.img_wh) 100 | self.rays += [img] 101 | 102 | self.rays = torch.FloatTensor(np.stack(self.rays)) # (N_images, hw, ?) 
103 | self.poses = torch.FloatTensor(self.poses) # (N_images, 3, 4) 104 | -------------------------------------------------------------------------------- /datasets/lb/nsvf.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import glob 3 | import numpy as np 4 | import os 5 | from tqdm import tqdm 6 | 7 | from ..ray_utils import get_ray_directions 8 | from ..color_utils import read_image 9 | 10 | from ..base import BaseDataset 11 | import random 12 | 13 | class NSVFDataset_lb(BaseDataset): 14 | def __init__(self, root_dir, split='train', downsample=1.0, **kwargs): 15 | super().__init__(root_dir, split, downsample) 16 | 17 | self.read_intrinsics() 18 | 19 | self.task_number = kwargs.get('task_number', 5) 20 | self.task_curr = kwargs.get('task_curr', 4) 21 | self.task_split_method = kwargs.get('task_split_method', 'seq') 22 | self.rep_size = kwargs.get('rep_size', 0) 23 | 24 | 25 | if kwargs.get('read_meta', True): 26 | xyz_min, xyz_max = \ 27 | np.loadtxt(os.path.join(root_dir, 'bbox.txt'))[:6].reshape(2, 3) 28 | self.shift = (xyz_max+xyz_min)/2 29 | self.scale = (xyz_max-xyz_min).max()/2 * 1.05 # enlarge a little 30 | 31 | # hard-code fix the bound error for some scenes... 32 | if 'Mic' in self.root_dir: self.scale *= 1.2 33 | elif 'Lego' in self.root_dir: self.scale *= 1.1 34 | 35 | self.read_meta(split) 36 | 37 | def read_intrinsics(self): 38 | if 'Synthetic' in self.root_dir or 'Ignatius' in self.root_dir: 39 | with open(os.path.join(self.root_dir, 'intrinsics.txt')) as f: 40 | fx = fy = float(f.readline().split()[0]) * self.downsample 41 | if 'Synthetic' in self.root_dir: 42 | w = h = int(800*self.downsample) 43 | else: 44 | w, h = int(1920*self.downsample), int(1080*self.downsample) 45 | 46 | K = np.float32([[fx, 0, w/2], 47 | [0, fy, h/2], 48 | [0, 0, 1]]) 49 | else: 50 | K = np.loadtxt(os.path.join(self.root_dir, 'intrinsics.txt'), 51 | dtype=np.float32)[:3, :3] 52 | if 'BlendedMVS' in self.root_dir: 53 | w, h = int(768*self.downsample), int(576*self.downsample) 54 | elif 'Tanks' in self.root_dir: 55 | w, h = int(1920*self.downsample), int(1080*self.downsample) 56 | K[:2] *= self.downsample 57 | 58 | self.K = torch.FloatTensor(K) 59 | self.directions = get_ray_directions(h, w, self.K) 60 | self.img_wh = (w, h) 61 | 62 | def split_tasks(self, poses, task_number, task_split_method): 63 | # return task id for each element in poses 64 | task_id = [] 65 | if task_split_method == 'random': 66 | for i in range(len(poses)): 67 | task_id.append(random.randint(0, task_number - 1)) 68 | else: 69 | # equally split task according to the id 70 | imgs_per_task = len(poses) // task_number 71 | for j in range(task_number): 72 | task_id += ([j] * imgs_per_task) 73 | # task_id.append() 74 | task_id += ([task_number-1] * (len(poses)- imgs_per_task * task_number)) 75 | return task_id 76 | 77 | def read_meta(self, split): 78 | self.rays = [] 79 | self.poses = [] 80 | 81 | if split == 'test_traj': # BlendedMVS and TanksAndTemple 82 | if 'Ignatius' in self.root_dir: 83 | poses_path = \ 84 | sorted(glob.glob(os.path.join(self.root_dir, 'test_pose/*.txt'))) 85 | poses = [np.loadtxt(p) for p in poses_path] 86 | else: 87 | poses = np.loadtxt(os.path.join(self.root_dir, 'test_traj.txt')) 88 | poses = poses.reshape(-1, 4, 4) 89 | for pose in poses: 90 | c2w = pose[:3] 91 | c2w[:, 0] *= -1 # [left down front] to [right down front] 92 | c2w[:, 3] -= self.shift 93 | c2w[:, 3] /= 2*self.scale # to bound the scene inside [-0.5, 0.5] 94 | self.poses += [c2w] 95 | 
else: 96 | if split == 'train': prefix = '0_' 97 | elif split == 'trainval': prefix = '[0-1]_' 98 | elif split == 'trainvaltest': prefix = '[0-2]_' 99 | elif split == 'val': prefix = '1_' 100 | elif 'Synthetic' in self.root_dir: prefix = '2_' # test set for synthetic scenes 101 | elif split == 'test': prefix = '1_' # test set for real scenes 102 | else: raise ValueError(f'{split} split not recognized!') 103 | img_paths = sorted(glob.glob(os.path.join(self.root_dir, 'rgb', prefix+'*.png'))) 104 | poses = sorted(glob.glob(os.path.join(self.root_dir, 'pose', prefix+'*.txt'))) 105 | 106 | if split == 'train': 107 | # split the training data into 5 tasks 108 | random.seed(0) 109 | self.task_ids = self.split_tasks(poses, self.task_number, self.task_split_method) 110 | # prepare training data 111 | self.id_task_curr = [] 112 | self.id_rep = [] 113 | for i in range(len(self.task_ids)): 114 | if self.task_ids[i] == self.task_curr: 115 | self.id_task_curr.append(i) 116 | elif self.task_ids[i] < self.task_curr: 117 | self.id_rep.append(i) 118 | if self.rep_size == 0 or self.task_curr == 0: 119 | self.id_train_final = self.id_task_curr 120 | else: 121 | # set random seed 122 | self.id_train_final = self.id_task_curr + random.choices(self.id_rep, k = self.rep_size) 123 | else: 124 | self.id_train_final = list(range(len(poses))) 125 | 126 | self.id_train_final.sort() 127 | 128 | print(f'Loading {len(self.id_train_final)} {split} images ...') 129 | print('id_train_final = {}'.format(self.id_train_final)) 130 | for id_train in tqdm(self.id_train_final): 131 | img_path, pose = img_paths[id_train], poses[id_train] 132 | c2w = np.loadtxt(pose)[:3] 133 | c2w[:, 3] -= self.shift 134 | c2w[:, 3] /= 2*self.scale # to bound the scene inside [-0.5, 0.5] 135 | self.poses += [c2w] 136 | 137 | img = read_image(img_path, self.img_wh) 138 | if 'Jade' in self.root_dir or 'Fountain' in self.root_dir: 139 | # these scenes have black background, changing to white 140 | img[torch.all(img<=0.1, dim=-1)] = 1.0 141 | 142 | self.rays += [img] 143 | 144 | self.rays = torch.FloatTensor(np.stack(self.rays)) # (N_images, hw, ?) 
145 | self.poses = torch.FloatTensor(self.poses) # (N_images, 3, 4) 146 | 147 | 148 | 149 | 150 | -------------------------------------------------------------------------------- /datasets/nerfpp.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import glob 3 | import numpy as np 4 | import os 5 | from PIL import Image 6 | from tqdm import tqdm 7 | 8 | from .ray_utils import get_ray_directions 9 | from .color_utils import read_image 10 | 11 | from .base import BaseDataset 12 | 13 | 14 | class NeRFPPDataset(BaseDataset): 15 | def __init__(self, root_dir, split='train', downsample=1.0, **kwargs): 16 | super().__init__(root_dir, split, downsample) 17 | 18 | self.read_intrinsics() 19 | 20 | if kwargs.get('read_meta', True): 21 | self.read_meta(split) 22 | 23 | def read_intrinsics(self): 24 | K = np.loadtxt(glob.glob(os.path.join(self.root_dir, 'train/intrinsics/*.txt'))[0], 25 | dtype=np.float32).reshape(4, 4)[:3, :3] 26 | K[:2] *= self.downsample 27 | w, h = Image.open(glob.glob(os.path.join(self.root_dir, 'train/rgb/*'))[0]).size 28 | w, h = int(w*self.downsample), int(h*self.downsample) 29 | self.K = torch.FloatTensor(K) 30 | self.directions = get_ray_directions(h, w, self.K) 31 | self.img_wh = (w, h) 32 | 33 | def read_meta(self, split): 34 | self.rays = [] 35 | self.poses = [] 36 | 37 | if split == 'test_traj': 38 | poses_path = \ 39 | sorted(glob.glob(os.path.join(self.root_dir, 'camera_path/pose/*.txt'))) 40 | self.poses = [np.loadtxt(p).reshape(4, 4)[:3] for p in poses_path] 41 | else: 42 | if split=='trainval': 43 | img_paths = sorted(glob.glob(os.path.join(self.root_dir, 'train/rgb/*')))+\ 44 | sorted(glob.glob(os.path.join(self.root_dir, 'val/rgb/*'))) 45 | poses = sorted(glob.glob(os.path.join(self.root_dir, 'train/pose/*.txt')))+\ 46 | sorted(glob.glob(os.path.join(self.root_dir, 'val/pose/*.txt'))) 47 | else: 48 | img_paths = sorted(glob.glob(os.path.join(self.root_dir, split, 'rgb/*'))) 49 | poses = sorted(glob.glob(os.path.join(self.root_dir, split, 'pose/*.txt'))) 50 | 51 | print(f'Loading {len(img_paths)} {split} images ...') 52 | for img_path, pose in tqdm(zip(img_paths, poses)): 53 | self.poses += [np.loadtxt(pose).reshape(4, 4)[:3]] 54 | 55 | img = read_image(img_path, self.img_wh) 56 | self.rays += [img] 57 | 58 | self.rays = torch.FloatTensor(np.stack(self.rays)) # (N_images, hw, ?) 59 | self.poses = torch.FloatTensor(self.poses) # (N_images, 3, 4) 60 | -------------------------------------------------------------------------------- /datasets/nsvf_MEILNERF.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import glob 3 | import numpy as np 4 | import os 5 | from tqdm import tqdm 6 | 7 | from .ray_utils import get_ray_directions 8 | from .color_utils import read_image 9 | 10 | from .base import BaseDataset 11 | 12 | 13 | class NSVFDataset_MEILNERF(BaseDataset): 14 | def __init__(self, root_dir, split='train', downsample=1.0, **kwargs): 15 | super().__init__(root_dir, split, downsample) 16 | 17 | self.read_intrinsics() 18 | 19 | if kwargs.get('read_meta', True): 20 | xyz_min, xyz_max = \ 21 | np.loadtxt(os.path.join(root_dir, 'bbox.txt'))[:6].reshape(2, 3) 22 | self.shift = (xyz_max+xyz_min)/2 23 | self.scale = (xyz_max-xyz_min).max()/2 * 1.05 # enlarge a little 24 | 25 | # hard-code fix the bound error for some scenes... 
26 | if 'Mic' in self.root_dir: self.scale *= 1.2 27 | elif 'Lego' in self.root_dir: self.scale *= 1.1 28 | 29 | self.read_meta(split) 30 | 31 | def read_intrinsics(self): 32 | if 'Synthetic' in self.root_dir or 'Ignatius' in self.root_dir: 33 | with open(os.path.join(self.root_dir, 'intrinsics.txt')) as f: 34 | fx = fy = float(f.readline().split()[0]) * self.downsample 35 | if 'Synthetic' in self.root_dir: 36 | w = h = int(800*self.downsample) 37 | else: 38 | w, h = int(1920*self.downsample), int(1080*self.downsample) 39 | 40 | K = np.float32([[fx, 0, w/2], 41 | [0, fy, h/2], 42 | [0, 0, 1]]) 43 | else: 44 | K = np.loadtxt(os.path.join(self.root_dir, 'intrinsics.txt'), 45 | dtype=np.float32)[:3, :3] 46 | if 'BlendedMVS' in self.root_dir: 47 | w, h = int(768*self.downsample), int(576*self.downsample) 48 | elif 'Tanks' in self.root_dir: 49 | w, h = int(1920*self.downsample), int(1080*self.downsample) 50 | K[:2] *= self.downsample 51 | 52 | self.K = torch.FloatTensor(K) 53 | self.directions = get_ray_directions(h, w, self.K) 54 | self.img_wh = (w, h) 55 | 56 | def read_meta(self, split): 57 | self.rays = [] 58 | self.poses = [] 59 | 60 | if split == 'train': prefix = '0_' 61 | elif split == 'trainval': prefix = '[0-1]_' 62 | elif split == 'trainvaltest': prefix = '[0-2]_' 63 | elif split == 'val': prefix = '1_' 64 | elif 'Synthetic' in self.root_dir: prefix = '2_' # test set for synthetic scenes 65 | elif split == 'test': prefix = '1_' # test set for real scenes 66 | else: raise ValueError(f'{split} split not recognized!') 67 | img_paths = sorted(glob.glob(os.path.join(self.root_dir, 'rgb', prefix+'*.png')), key=lambda x: x[-8:-4]) 68 | poses = sorted(glob.glob(os.path.join(self.root_dir, 'pose', prefix+'*.txt')), key=lambda x: x[-8:-4]) 69 | 70 | # MEIL-NERF 71 | if split == 'train': 72 | img_paths = img_paths[:100] 73 | poses = poses[:100] 74 | print("[train test] img_paths[-1] = {}".format(img_paths[-1])) 75 | else: 76 | # get the max id of train data 77 | prefix_train = '0_' 78 | id_final = sorted(glob.glob(os.path.join(self.root_dir, 'rgb', prefix_train+'*.png')), key=lambda x: x[-8:-4])[99][-8:-4] 79 | # get the first 100 training images 80 | id_selected = 0 81 | for i in range(len(img_paths)): 82 | if img_paths[i][-8:-4] <= id_final: 83 | id_selected += 1 84 | else: 85 | break 86 | img_paths = img_paths[:id_selected] 87 | poses = poses[:id_selected] 88 | print("[test test] img_paths[-1] = {}".format(img_paths[-1])) 89 | 90 | 91 | print(f'Loading {len(img_paths)} {split} images ...') 92 | for img_path, pose in tqdm(zip(img_paths, poses)): 93 | c2w = np.loadtxt(pose)[:3] 94 | c2w[:, 3] -= self.shift 95 | c2w[:, 3] /= 2*self.scale # to bound the scene inside [-0.5, 0.5] 96 | self.poses += [c2w] 97 | 98 | img = read_image(img_path, self.img_wh) 99 | if 'Jade' in self.root_dir or 'Fountain' in self.root_dir: 100 | # these scenes have black background, changing to white 101 | img[torch.all(img<=0.1, dim=-1)] = 1.0 102 | 103 | self.rays += [img] 104 | 105 | self.rays = torch.FloatTensor(np.stack(self.rays)) # (N_images, hw, ?) 
106 | self.poses = torch.FloatTensor(self.poses) # (N_images, 3, 4) 107 | 108 | 109 | 110 | -------------------------------------------------------------------------------- /demo/teaser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/demo/teaser.png -------------------------------------------------------------------------------- /img2video.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import imageio 4 | from PIL import ImageFont, ImageDraw, Image 5 | import sys 6 | from datasets import dataset_dict 7 | import math, os 8 | 9 | def get_task_ids(root_dir, task_number): 10 | dataset = dataset_dict['colmap_ngpa_CLNerf_render'] 11 | kwargs = {'root_dir': root_dir, 12 | 'downsample': 1.0, 13 | 'task_number': task_number, 14 | 'task_curr': task_number-1, 15 | } 16 | test_dataset = dataset(split='test', **kwargs) 17 | return test_dataset.task_ids_interpolate.copy() 18 | 19 | # The first command line argument is the folder path 20 | folder = sys.argv[1] 21 | root_dir = sys.argv[2] 22 | task_number = int(sys.argv[3]) 23 | 24 | # get the number of frames from the corresponding dataset and check whether we have the same number of frames 25 | task_ids = get_task_ids(root_dir, task_number) 26 | 27 | # filenames = sorted((fn for fn in os.listdir(folder) if fn.endswith('.png'))) 28 | filenames = sorted((fn for fn in os.listdir(folder) if fn.endswith('.png')), key=lambda f: int(os.path.splitext(f)[0])) 29 | 30 | if len(filenames) != len(task_ids): 31 | print("[error] len(fnames) = {}, task_ids = {}".format(len(filenames), len(task_ids))) 32 | exit() 33 | 34 | print("filenames = {}".format(filenames[:20])) 35 | # define properties of the output video 36 | fps = 60 # frames per second, you can change it to your desired value 37 | output_file = folder+'/rgb.mp4' # output file name, you can change it 38 | 39 | # create video from images 40 | with imageio.get_writer(output_file, mode='I', fps=fps) as writer: 41 | for filename in filenames: 42 | print("processing {}".format(filename)) 43 | image = imageio.imread(os.path.join(folder, filename)) 44 | writer.append_data(image) 45 | -------------------------------------------------------------------------------- /img2video.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # inputs: 4 | # 1. scene name 5 | # 2. 
task_number 6 | 7 | export ROOT_DIR=dataset/WAT 8 | 9 | scene_name=$1 10 | python img2video.py \ 11 | '/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/results/WAT/NT_ER/'${scene_name}'_0/video' \ 12 | ${ROOT_DIR}/${scene_name} \ 13 | $2 14 | 15 | 16 | python img2video.py \ 17 | '/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/results/WAT/NT_ER/'${scene_name}'_10/video' \ 18 | ${ROOT_DIR}/${scene_name} \ 19 | $2 20 | 21 | python img2video.py \ 22 | '/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/results/WAT/EWC/'${scene_name}'_0/video' \ 23 | ${ROOT_DIR}/${scene_name} \ 24 | $2 25 | 26 | -------------------------------------------------------------------------------- /install_cuda_module.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | pip install --force-reinstall models/csrc/ 3 | 4 | -------------------------------------------------------------------------------- /losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | import vren 4 | 5 | 6 | class DistortionLoss(torch.autograd.Function): 7 | """ 8 | Distortion loss proposed in Mip-NeRF 360 (https://arxiv.org/pdf/2111.12077.pdf) 9 | Implementation is based on DVGO-v2 (https://arxiv.org/pdf/2206.05085.pdf) 10 | 11 | Inputs: 12 | ws: (N) sample point weights 13 | deltas: (N) considered as intervals 14 | ts: (N) considered as midpoints 15 | rays_a: (N_rays, 3) ray_idx, start_idx, N_samples 16 | meaning each entry corresponds to the @ray_idx th ray, 17 | whose samples are [start_idx:start_idx+N_samples] 18 | 19 | Outputs: 20 | loss: (N_rays) 21 | """ 22 | 23 | @staticmethod 24 | def forward(ctx, ws, deltas, ts, rays_a): 25 | loss, ws_inclusive_scan, wts_inclusive_scan = \ 26 | vren.distortion_loss_fw(ws, deltas, ts, rays_a) 27 | ctx.save_for_backward(ws_inclusive_scan, wts_inclusive_scan, ws, 28 | deltas, ts, rays_a) 29 | return loss 30 | 31 | @staticmethod 32 | def backward(ctx, dL_dloss): 33 | (ws_inclusive_scan, wts_inclusive_scan, ws, deltas, ts, 34 | rays_a) = ctx.saved_tensors 35 | dL_dws = vren.distortion_loss_bw(dL_dloss, ws_inclusive_scan, 36 | wts_inclusive_scan, ws, deltas, ts, 37 | rays_a) 38 | return dL_dws, None, None, None 39 | 40 | 41 | class NeRFLoss(nn.Module): 42 | 43 | def __init__(self, lambda_opacity=1e-3, lambda_distortion=1e-3): 44 | super().__init__() 45 | 46 | self.lambda_opacity = lambda_opacity 47 | self.lambda_distortion = lambda_distortion 48 | 49 | def forward(self, results, target, **kwargs): 50 | d = {} 51 | d['rgb'] = (results['rgb'] - target['rgb'])**2 52 | 53 | o = results['opacity'] + 1e-10 54 | # encourage opacity to be either 0 or 1 to avoid floater 55 | d['opacity'] = self.lambda_opacity * (-o * torch.log(o)) 56 | 57 | if self.lambda_distortion > 0: 58 | d['distortion'] = self.lambda_distortion * \ 59 | DistortionLoss.apply(results['ws'], results['deltas'], 60 | results['ts'], results['rays_a']) 61 | 62 | return d 63 | 64 | 65 | class MEILNeRFLoss(nn.Module): 66 | 67 | def __init__(self, lambda_opacity=1e-3, lambda_distortion=1e-3): 68 | super().__init__() 69 | 70 | self.lambda_opacity = lambda_opacity 71 | self.lambda_distortion = lambda_distortion 72 | 73 | def forward(self, results, target, lambda_p, **kwargs): 74 | d = {} 75 | is_rep = target['is_rep'] 76 | id_new = torch.where(is_rep == 0)[0] 77 | id_old = torch.where(is_rep == 1)[0] 78 | loss = ((results['rgb'][id_new] - target['rgb'][id_new])** 79 | 2).sum() / float(id_new.shape[0]) 80 | if id_old.shape[0] > 0: 81 | loss += 
torch.sum( 82 | torch.sqrt( 83 | (results['rgb'][id_old] - target['rgb'][id_old]).pow(2) + 84 | (1e-3)**2)) * lambda_p / float(id_old.shape[0]) 85 | d['rgb'] = loss 86 | 87 | o = results['opacity'] + 1e-10 88 | # encourage opacity to be either 0 or 1 to avoid floater 89 | d['opacity'] = self.lambda_opacity * (-o * torch.log(o)) 90 | 91 | if self.lambda_distortion > 0: 92 | d['distortion'] = self.lambda_distortion * \ 93 | DistortionLoss.apply(results['ws'], results['deltas'], 94 | results['ts'], results['rays_a']) 95 | 96 | return d 97 | -------------------------------------------------------------------------------- /merge_video.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # inputs: 4 | # 1. scene name 5 | # 2. task_number 6 | 7 | export ROOT_DIR=dataset/WAT 8 | 9 | 10 | scene_name=$1 11 | python merge_video.py \ 12 | "/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/results/WAT/NT_ER/${scene_name}_0/video/rgb.mp4" \ 13 | "/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/results/WAT/EWC/${scene_name}_0/video/rgb.mp4" \ 14 | "/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/results/WAT/NT_ER/${scene_name}_10/video/rgb.mp4" \ 15 | "/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/results/video_demo/MEIL/colmap_ngpa_CLNerf_render/${scene_name}_0_MEIL/rgb.mp4" \ 16 | "/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/results/video_demo/CLNeRF/colmap_ngpa_CLNerf_render/${scene_name}_10_CLNeRF/rgb.mp4" \ 17 | "/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/results/video_demo/UB/colmap_ngpa_CLNerf_render/${scene_name}_0_UB/rgb.mp4" \ 18 | "/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/results/video_demo/colmap_ngpa_CLNerf_render/${scene_name}_10_CLNeRF/comparisons_UB.mp4" \ 19 | 'NT' \ 20 | 'EWC' \ 21 | 'ER' \ 22 | 'MEIL-NeRF' \ 23 | ${ROOT_DIR}/${scene_name} \ 24 | $2 \ 25 | 1 26 | -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/models/__init__.py -------------------------------------------------------------------------------- /models/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/models/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /models/__pycache__/custom_functions.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/models/__pycache__/custom_functions.cpython-38.pyc -------------------------------------------------------------------------------- /models/__pycache__/networks.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/models/__pycache__/networks.cpython-38.pyc -------------------------------------------------------------------------------- /models/__pycache__/rendering.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/models/__pycache__/rendering.cpython-38.pyc 
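The loss modules defined in losses.py above (NeRFLoss, MEILNeRFLoss) return a dictionary of per-component loss tensors rather than a single scalar. The actual trainers (train.py and its variants) run their own loops, so the snippet below is only a minimal sketch of how such a dictionary is typically reduced to a scalar for backpropagation: the batch tensors and N_rays are placeholders (a real batch comes from the datasets and models.rendering.render), and importing losses requires the vren CUDA extension to have been built (install_cuda_module.sh). Setting lambda_distortion=0 skips the CUDA-only distortion term so the sketch stays device-agnostic.

import torch
from losses import NeRFLoss  # losses.py imports the vren extension at module level

# Placeholder batch for illustration only; in the real pipeline `results`
# comes from models.rendering.render() and `target` from a dataset batch.
N_rays = 4096
results = {'rgb': torch.rand(N_rays, 3, requires_grad=True),
           'opacity': torch.rand(N_rays)}
target = {'rgb': torch.rand(N_rays, 3)}

loss_fn = NeRFLoss(lambda_opacity=1e-3, lambda_distortion=0)  # 0 skips the distortion term
loss_d = loss_fn(results, target)              # {'rgb': (N_rays, 3), 'opacity': (N_rays,)}
loss = sum(v.mean() for v in loss_d.values())  # reduce each component to one scalar
loss.backward()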
-------------------------------------------------------------------------------- /models/__pycache__/rendering_NGPA.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/models/__pycache__/rendering_NGPA.cpython-38.pyc -------------------------------------------------------------------------------- /models/csrc/build/lib.linux-x86_64-cpython-38/vren.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/models/csrc/build/lib.linux-x86_64-cpython-38/vren.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /models/csrc/build/temp.linux-x86_64-cpython-38/binding.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/models/csrc/build/temp.linux-x86_64-cpython-38/binding.o -------------------------------------------------------------------------------- /models/csrc/build/temp.linux-x86_64-cpython-38/intersection.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/models/csrc/build/temp.linux-x86_64-cpython-38/intersection.o -------------------------------------------------------------------------------- /models/csrc/build/temp.linux-x86_64-cpython-38/losses.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/models/csrc/build/temp.linux-x86_64-cpython-38/losses.o -------------------------------------------------------------------------------- /models/csrc/build/temp.linux-x86_64-cpython-38/raymarching.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/models/csrc/build/temp.linux-x86_64-cpython-38/raymarching.o -------------------------------------------------------------------------------- /models/csrc/build/temp.linux-x86_64-cpython-38/volumerendering.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/models/csrc/build/temp.linux-x86_64-cpython-38/volumerendering.o -------------------------------------------------------------------------------- /models/csrc/include/utils.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor") 5 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 6 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 7 | 8 | 9 | std::vector ray_aabb_intersect_cu( 10 | const torch::Tensor rays_o, 11 | const torch::Tensor rays_d, 12 | const torch::Tensor centers, 13 | const torch::Tensor half_sizes, 14 | const int max_hits 15 | ); 16 | 17 | 18 | std::vector ray_sphere_intersect_cu( 19 | const torch::Tensor rays_o, 20 | const torch::Tensor rays_d, 21 | const torch::Tensor centers, 22 | const torch::Tensor radii, 23 | const int max_hits 24 | ); 25 | 26 | 
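// Host-side declarations for the CUDA kernels implemented in the .cu files of
// this directory; binding.cpp exposes them to Python as the `vren` extension
// module (built via setup.py / install_cuda_module.sh), which losses.py and
// models/rendering*.py import.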
27 | void packbits_cu( 28 | torch::Tensor density_grid, 29 | const float density_threshold, 30 | torch::Tensor density_bitfield 31 | ); 32 | 33 | 34 | torch::Tensor morton3D_cu(const torch::Tensor coords); 35 | torch::Tensor morton3D_invert_cu(const torch::Tensor indices); 36 | 37 | 38 | std::vector raymarching_train_cu( 39 | const torch::Tensor rays_o, 40 | const torch::Tensor rays_d, 41 | const torch::Tensor hits_t, 42 | const torch::Tensor density_bitfield, 43 | const int cascades, 44 | const float scale, 45 | const float exp_step_factor, 46 | const torch::Tensor noise, 47 | const int grid_size, 48 | const int max_samples 49 | ); 50 | 51 | std::vector raymarching_train_ngpa_cu( 52 | const torch::Tensor rays_o, 53 | const torch::Tensor rays_d, 54 | const torch::Tensor img_idxs, 55 | const torch::Tensor hits_t, 56 | const torch::Tensor density_bitfield, 57 | const int cascades, 58 | const float scale, 59 | const float exp_step_factor, 60 | const torch::Tensor noise, 61 | const int grid_size, 62 | const int max_samples 63 | ); 64 | 65 | std::vector raymarching_test_cu( 66 | const torch::Tensor rays_o, 67 | const torch::Tensor rays_d, 68 | torch::Tensor hits_t, 69 | const torch::Tensor alive_indices, 70 | const torch::Tensor density_bitfield, 71 | const int cascades, 72 | const float scale, 73 | const float exp_step_factor, 74 | const int grid_size, 75 | const int max_samples, 76 | const int N_samples 77 | ); 78 | 79 | 80 | std::vector composite_train_fw_cu( 81 | const torch::Tensor sigmas, 82 | const torch::Tensor rgbs, 83 | const torch::Tensor deltas, 84 | const torch::Tensor ts, 85 | const torch::Tensor rays_a, 86 | const float T_threshold 87 | ); 88 | 89 | 90 | std::vector composite_train_bw_cu( 91 | const torch::Tensor dL_dopacity, 92 | const torch::Tensor dL_ddepth, 93 | const torch::Tensor dL_drgb, 94 | const torch::Tensor dL_dws, 95 | const torch::Tensor sigmas, 96 | const torch::Tensor rgbs, 97 | const torch::Tensor ws, 98 | const torch::Tensor deltas, 99 | const torch::Tensor ts, 100 | const torch::Tensor rays_a, 101 | const torch::Tensor opacity, 102 | const torch::Tensor depth, 103 | const torch::Tensor rgb, 104 | const float T_threshold 105 | ); 106 | 107 | 108 | void composite_test_fw_cu( 109 | const torch::Tensor sigmas, 110 | const torch::Tensor rgbs, 111 | const torch::Tensor deltas, 112 | const torch::Tensor ts, 113 | const torch::Tensor hits_t, 114 | const torch::Tensor alive_indices, 115 | const float T_threshold, 116 | const torch::Tensor N_eff_samples, 117 | torch::Tensor opacity, 118 | torch::Tensor depth, 119 | torch::Tensor rgb 120 | ); 121 | 122 | 123 | std::vector distortion_loss_fw_cu( 124 | const torch::Tensor ws, 125 | const torch::Tensor deltas, 126 | const torch::Tensor ts, 127 | const torch::Tensor rays_a 128 | ); 129 | 130 | 131 | torch::Tensor distortion_loss_bw_cu( 132 | const torch::Tensor dL_dloss, 133 | const torch::Tensor ws_inclusive_scan, 134 | const torch::Tensor wts_inclusive_scan, 135 | const torch::Tensor ws, 136 | const torch::Tensor deltas, 137 | const torch::Tensor ts, 138 | const torch::Tensor rays_a 139 | ); -------------------------------------------------------------------------------- /models/csrc/losses.cu: -------------------------------------------------------------------------------- 1 | #include "utils.h" 2 | #include 3 | 4 | 5 | // for details of the formulae, please see https://arxiv.org/pdf/2206.05085.pdf 6 | 7 | template 8 | __global__ void prefix_sums_kernel( 9 | const scalar_t* __restrict__ ws, 10 | const scalar_t* 
__restrict__ wts, 11 | const torch::PackedTensorAccessor64 rays_a, 12 | scalar_t* __restrict__ ws_inclusive_scan, 13 | scalar_t* __restrict__ ws_exclusive_scan, 14 | scalar_t* __restrict__ wts_inclusive_scan, 15 | scalar_t* __restrict__ wts_exclusive_scan 16 | ){ 17 | const int n = blockIdx.x * blockDim.x + threadIdx.x; 18 | if (n >= rays_a.size(0)) return; 19 | 20 | const int start_idx = rays_a[n][1], N_samples = rays_a[n][2]; 21 | 22 | // compute prefix sum of ws and ws*ts 23 | // [a0, a1, a2, a3, ...] -> [a0, a0+a1, a0+a1+a2, a0+a1+a2+a3, ...] 24 | thrust::inclusive_scan(thrust::device, 25 | ws+start_idx, 26 | ws+start_idx+N_samples, 27 | ws_inclusive_scan+start_idx); 28 | thrust::inclusive_scan(thrust::device, 29 | wts+start_idx, 30 | wts+start_idx+N_samples, 31 | wts_inclusive_scan+start_idx); 32 | // [a0, a1, a2, a3, ...] -> [0, a0, a0+a1, a0+a1+a2, ...] 33 | thrust::exclusive_scan(thrust::device, 34 | ws+start_idx, 35 | ws+start_idx+N_samples, 36 | ws_exclusive_scan+start_idx); 37 | thrust::exclusive_scan(thrust::device, 38 | wts+start_idx, 39 | wts+start_idx+N_samples, 40 | wts_exclusive_scan+start_idx); 41 | } 42 | 43 | 44 | template 45 | __global__ void distortion_loss_fw_kernel( 46 | const scalar_t* __restrict__ _loss, 47 | const torch::PackedTensorAccessor64 rays_a, 48 | torch::PackedTensorAccessor loss 49 | ){ 50 | const int n = blockIdx.x * blockDim.x + threadIdx.x; 51 | if (n >= rays_a.size(0)) return; 52 | 53 | const int ray_idx = rays_a[n][0], start_idx = rays_a[n][1], N_samples = rays_a[n][2]; 54 | 55 | loss[ray_idx] = thrust::reduce(thrust::device, 56 | _loss+start_idx, 57 | _loss+start_idx+N_samples, 58 | (scalar_t)0); 59 | } 60 | 61 | 62 | std::vector distortion_loss_fw_cu( 63 | const torch::Tensor ws, 64 | const torch::Tensor deltas, 65 | const torch::Tensor ts, 66 | const torch::Tensor rays_a 67 | ){ 68 | const int N_rays = rays_a.size(0), N = ws.size(0); 69 | 70 | auto wts = ws * ts; 71 | 72 | auto ws_inclusive_scan = torch::zeros({N}, ws.options()); 73 | auto ws_exclusive_scan = torch::zeros({N}, ws.options()); 74 | auto wts_inclusive_scan = torch::zeros({N}, ws.options()); 75 | auto wts_exclusive_scan = torch::zeros({N}, ws.options()); 76 | 77 | const int threads = 256, blocks = (N_rays+threads-1)/threads; 78 | 79 | AT_DISPATCH_FLOATING_TYPES_AND_HALF(ws.type(), "distortion_loss_fw_cu_prefix_sums", 80 | ([&] { 81 | prefix_sums_kernel<<>>( 82 | ws.data_ptr(), 83 | wts.data_ptr(), 84 | rays_a.packed_accessor64(), 85 | ws_inclusive_scan.data_ptr(), 86 | ws_exclusive_scan.data_ptr(), 87 | wts_inclusive_scan.data_ptr(), 88 | wts_exclusive_scan.data_ptr() 89 | ); 90 | })); 91 | 92 | auto _loss = 2*(wts_inclusive_scan*ws_exclusive_scan- 93 | ws_inclusive_scan*wts_exclusive_scan) + 1.0f/3*ws*ws*deltas; 94 | 95 | auto loss = torch::zeros({N_rays}, ws.options()); 96 | 97 | AT_DISPATCH_FLOATING_TYPES_AND_HALF(ws.type(), "distortion_loss_fw_cu", 98 | ([&] { 99 | distortion_loss_fw_kernel<<>>( 100 | _loss.data_ptr(), 101 | rays_a.packed_accessor64(), 102 | loss.packed_accessor() 103 | ); 104 | })); 105 | 106 | return {loss, ws_inclusive_scan, wts_inclusive_scan}; 107 | } 108 | 109 | 110 | template 111 | __global__ void distortion_loss_bw_kernel( 112 | const torch::PackedTensorAccessor dL_dloss, 113 | const torch::PackedTensorAccessor ws_inclusive_scan, 114 | const torch::PackedTensorAccessor wts_inclusive_scan, 115 | const torch::PackedTensorAccessor ws, 116 | const torch::PackedTensorAccessor deltas, 117 | const torch::PackedTensorAccessor ts, 118 | const 
torch::PackedTensorAccessor64 rays_a, 119 | torch::PackedTensorAccessor dL_dws 120 | ){ 121 | const int n = blockIdx.x * blockDim.x + threadIdx.x; 122 | if (n >= rays_a.size(0)) return; 123 | 124 | const int ray_idx = rays_a[n][0], start_idx = rays_a[n][1], N_samples = rays_a[n][2]; 125 | const int end_idx = start_idx+N_samples-1; 126 | 127 | const scalar_t ws_sum = ws_inclusive_scan[end_idx]; 128 | const scalar_t wts_sum = wts_inclusive_scan[end_idx]; 129 | // fill in dL_dws from start_idx to end_idx 130 | for (int s=start_idx; s<=end_idx; s++){ 131 | dL_dws[s] = dL_dloss[ray_idx] * 2 * ( 132 | (s==start_idx? 133 | (scalar_t)0: 134 | (ts[s]*ws_inclusive_scan[s-1]-wts_inclusive_scan[s-1]) 135 | ) + 136 | (wts_sum-wts_inclusive_scan[s]-ts[s]*(ws_sum-ws_inclusive_scan[s])) 137 | ); 138 | dL_dws[s] += dL_dloss[ray_idx] * (scalar_t)2/3*ws[s]*deltas[s]; 139 | } 140 | } 141 | 142 | 143 | torch::Tensor distortion_loss_bw_cu( 144 | const torch::Tensor dL_dloss, 145 | const torch::Tensor ws_inclusive_scan, 146 | const torch::Tensor wts_inclusive_scan, 147 | const torch::Tensor ws, 148 | const torch::Tensor deltas, 149 | const torch::Tensor ts, 150 | const torch::Tensor rays_a 151 | ){ 152 | const int N_rays = rays_a.size(0), N = ws.size(0); 153 | 154 | auto dL_dws = torch::zeros({N}, dL_dloss.options()); 155 | 156 | const int threads = 256, blocks = (N_rays+threads-1)/threads; 157 | 158 | AT_DISPATCH_FLOATING_TYPES_AND_HALF(ws.type(), "distortion_loss_bw_cu", 159 | ([&] { 160 | distortion_loss_bw_kernel<<>>( 161 | dL_dloss.packed_accessor(), 162 | ws_inclusive_scan.packed_accessor(), 163 | wts_inclusive_scan.packed_accessor(), 164 | ws.packed_accessor(), 165 | deltas.packed_accessor(), 166 | ts.packed_accessor(), 167 | rays_a.packed_accessor64(), 168 | dL_dws.packed_accessor() 169 | ); 170 | })); 171 | 172 | return dL_dws; 173 | } -------------------------------------------------------------------------------- /models/csrc/setup.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os.path as osp 3 | from setuptools import setup 4 | from torch.utils.cpp_extension import CUDAExtension, BuildExtension 5 | 6 | 7 | ROOT_DIR = osp.dirname(osp.abspath(__file__)) 8 | include_dirs = [osp.join(ROOT_DIR, "include")] 9 | # "helper_math.h" is copied from https://github.com/NVIDIA/cuda-samples/blob/master/Common/helper_math.h 10 | 11 | sources = glob.glob('*.cpp')+glob.glob('*.cu') 12 | 13 | 14 | setup( 15 | name='vren', 16 | version='2.0', 17 | author='ZhipengCai', 18 | author_email='zhipeng.cai@intel.com', 19 | description='cuda volume rendering library', 20 | long_description='cuda volume rendering library', 21 | ext_modules=[ 22 | CUDAExtension( 23 | name='vren', 24 | sources=sources, 25 | include_dirs=include_dirs, 26 | extra_compile_args={'cxx': ['-O2'], 27 | 'nvcc': ['-O2']} 28 | ) 29 | ], 30 | cmdclass={ 31 | 'build_ext': BuildExtension 32 | } 33 | ) -------------------------------------------------------------------------------- /models/csrc/vren.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: vren 3 | Version: 2.0 4 | Summary: cuda volume rendering library 5 | Author: ZhipengCai 6 | Author-email: zhipeng.cai@intel.com 7 | 8 | cuda volume rendering library 9 | -------------------------------------------------------------------------------- /models/csrc/vren.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 
1 | binding.cpp 2 | intersection.cu 3 | losses.cu 4 | raymarching.cu 5 | setup.py 6 | volumerendering.cu 7 | vren.egg-info/PKG-INFO 8 | vren.egg-info/SOURCES.txt 9 | vren.egg-info/dependency_links.txt 10 | vren.egg-info/top_level.txt -------------------------------------------------------------------------------- /models/csrc/vren.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /models/csrc/vren.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | vren 2 | -------------------------------------------------------------------------------- /models/rendering.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .custom_functions import \ 3 | RayAABBIntersector, RayMarcher, VolumeRenderer 4 | from einops import rearrange 5 | import vren 6 | 7 | MAX_SAMPLES = 1024 8 | NEAR_DISTANCE = 0.01 9 | 10 | 11 | @torch.cuda.amp.autocast() 12 | def render(model, rays_o, rays_d, **kwargs): 13 | """ 14 | Render rays by 15 | 1. Compute the intersection of the rays with the scene bounding box 16 | 2. Follow the process in @render_func (different for train/test) 17 | 18 | Inputs: 19 | model: NGP 20 | rays_o: (N_rays, 3) ray origins 21 | rays_d: (N_rays, 3) ray directions 22 | 23 | Outputs: 24 | result: dictionary containing final rgb and depth 25 | """ 26 | rays_o = rays_o.contiguous(); rays_d = rays_d.contiguous() 27 | _, hits_t, _ = \ 28 | RayAABBIntersector.apply(rays_o, rays_d, model.center, model.half_size, 1) 29 | hits_t[(hits_t[:, 0, 0]>=0)&(hits_t[:, 0, 0] (n1 n2) c') 90 | dirs = rearrange(dirs, 'n1 n2 c -> (n1 n2) c') 91 | valid_mask = ~torch.all(dirs==0, dim=1) 92 | if valid_mask.sum()==0: break 93 | 94 | sigmas = torch.zeros(len(xyzs), device=device) 95 | rgbs = torch.zeros(len(xyzs), 3, device=device) 96 | sigmas[valid_mask], _rgbs = model(xyzs[valid_mask], dirs[valid_mask], **kwargs) 97 | rgbs[valid_mask] = _rgbs.float() 98 | sigmas = rearrange(sigmas, '(n1 n2) -> n1 n2', n2=N_samples) 99 | rgbs = rearrange(rgbs, '(n1 n2) c -> n1 n2 c', n2=N_samples) 100 | 101 | vren.composite_test_fw( 102 | sigmas, rgbs, deltas, ts, 103 | hits_t[:, 0], alive_indices, kwargs.get('T_threshold', 1e-4), 104 | N_eff_samples, opacity, depth, rgb) 105 | alive_indices = alive_indices[alive_indices>=0] # remove converged rays 106 | 107 | results['opacity'] = opacity 108 | results['depth'] = depth 109 | results['rgb'] = rgb 110 | results['total_samples'] = total_samples # total samples for all rays 111 | 112 | if exp_step_factor==0: # synthetic 113 | rgb_bg = torch.ones(3, device=device) 114 | else: # real 115 | rgb_bg = torch.zeros(3, device=device) 116 | results['rgb'] += rgb_bg*rearrange(1-opacity, 'n -> n 1') 117 | 118 | return results 119 | 120 | 121 | def __render_rays_train(model, rays_o, rays_d, hits_t, **kwargs): 122 | """ 123 | Render rays by 124 | 1. March the rays along their directions, querying @density_bitfield 125 | to skip empty space, and get the effective sample points (where 126 | there is object) 127 | 2. Infer the NN at these positions and view directions to get properties 128 | (currently sigmas and rgbs) 129 | 3. Use volume rendering to combine the result (front to back compositing 130 | and early stop the ray if its transmittance is below a threshold) 131 | """ 132 | exp_step_factor = kwargs.get('exp_step_factor', 0.) 
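    # exp_step_factor == 0 keeps uniform marching steps and a white background
    # (synthetic scenes); a positive value lets the step size grow with distance
    # along the ray and switches to a black or random background (real scenes).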
133 | results = {} 134 | 135 | (rays_a, xyzs, dirs, 136 | results['deltas'], results['ts'], results['rm_samples']) = \ 137 | RayMarcher.apply( 138 | rays_o, rays_d, hits_t[:, 0], model.density_bitfield, 139 | model.cascades, model.scale, 140 | exp_step_factor, model.grid_size, MAX_SAMPLES) 141 | 142 | for k, v in kwargs.items(): # supply additional inputs, repeated per ray 143 | if isinstance(v, torch.Tensor): 144 | kwargs[k] = torch.repeat_interleave(v[rays_a[:, 0]], rays_a[:, 2], 0) 145 | sigmas, rgbs = model(xyzs, dirs, **kwargs) 146 | 147 | (results['vr_samples'], results['opacity'], 148 | results['depth'], results['rgb'], results['ws']) = \ 149 | VolumeRenderer.apply(sigmas, rgbs.contiguous(), results['deltas'], results['ts'], 150 | rays_a, kwargs.get('T_threshold', 1e-4)) 151 | results['rays_a'] = rays_a 152 | 153 | if exp_step_factor==0: # synthetic 154 | rgb_bg = torch.ones(3, device=rays_o.device) 155 | else: # real 156 | if kwargs.get('random_bg', False): 157 | rgb_bg = torch.rand(3, device=rays_o.device) 158 | else: 159 | rgb_bg = torch.zeros(3, device=rays_o.device) 160 | results['rgb'] = results['rgb'] + \ 161 | rgb_bg*rearrange(1-results['opacity'], 'n -> n 1') 162 | 163 | return results 164 | -------------------------------------------------------------------------------- /models/rendering_NGPA.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .custom_functions import \ 3 | RayAABBIntersector, RayMarcher, RayMarcher_NGPA, VolumeRenderer 4 | from einops import rearrange 5 | import vren 6 | 7 | MAX_SAMPLES = 1024 8 | NEAR_DISTANCE = 0.01 9 | 10 | 11 | @torch.cuda.amp.autocast() 12 | def render(model, rays_o, rays_d, img_idxs, **kwargs): 13 | """ 14 | Render rays by 15 | 1. Compute the intersection of the rays with the scene bounding box 16 | 2. 
Follow the process in @render_func (different for train/test) 17 | 18 | Inputs: 19 | model: NGP 20 | rays_o: (N_rays, 3) ray origins 21 | rays_d: (N_rays, 3) ray directions 22 | 23 | Outputs: 24 | result: dictionary containing final rgb and depth 25 | """ 26 | rays_o = rays_o.contiguous(); rays_d = rays_d.contiguous() 27 | _, hits_t, _ = \ 28 | RayAABBIntersector.apply(rays_o, rays_d, model.center, model.half_size, 1) 29 | hits_t[(hits_t[:, 0, 0]>=0)&(hits_t[:, 0, 0] (n1 n2) c') 90 | dirs = rearrange(dirs, 'n1 n2 c -> (n1 n2) c') 91 | valid_mask = ~torch.all(dirs==0, dim=1) 92 | if valid_mask.sum()==0: break 93 | 94 | sigmas = torch.zeros(len(xyzs), device=device) 95 | rgbs = torch.zeros(len(xyzs), 3, device=device) 96 | sigmas[valid_mask], _rgbs = model(xyzs[valid_mask], dirs[valid_mask], img_id = img_idxs[0].item() * torch.ones((xyzs.size()[0], 1), device = img_idxs.device).int()[valid_mask], embed_test = None, **kwargs) 97 | rgbs[valid_mask] = _rgbs.float() 98 | sigmas = rearrange(sigmas, '(n1 n2) -> n1 n2', n2=N_samples) 99 | rgbs = rearrange(rgbs, '(n1 n2) c -> n1 n2 c', n2=N_samples) 100 | 101 | vren.composite_test_fw( 102 | sigmas, rgbs, deltas, ts, 103 | hits_t[:, 0], alive_indices, kwargs.get('T_threshold', 1e-4), 104 | N_eff_samples, opacity, depth, rgb) 105 | alive_indices = alive_indices[alive_indices>=0] # remove converged rays 106 | 107 | results['opacity'] = opacity 108 | results['depth'] = depth 109 | results['rgb'] = rgb 110 | results['total_samples'] = total_samples # total samples for all rays 111 | 112 | if exp_step_factor==0: # synthetic 113 | rgb_bg = torch.ones(3, device=device) 114 | else: # real 115 | rgb_bg = torch.zeros(3, device=device) 116 | results['rgb'] += rgb_bg*rearrange(1-opacity, 'n -> n 1') 117 | 118 | return results 119 | 120 | 121 | def __render_rays_train(model, rays_o, rays_d, img_idxs, hits_t, **kwargs): 122 | """ 123 | Render rays by 124 | 1. March the rays along their directions, querying @density_bitfield 125 | to skip empty space, and get the effective sample points (where 126 | there is object) 127 | 2. Infer the NN at these positions and view directions to get properties 128 | (currently sigmas and rgbs) 129 | 3. Use volume rendering to combine the result (front to back compositing 130 | and early stop the ray if its transmittance is below a threshold) 131 | """ 132 | exp_step_factor = kwargs.get('exp_step_factor', 0.) 
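    # Compared to rendering.py, this NGPA variant also feeds the per-ray indices
    # img_idxs to RayMarcher_NGPA, which returns one embedding index per sample
    # (embed_idxs); the model receives it as img_id to look up the per-task
    # appearance/geometry embedding for that sample.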
133 | results = {} 134 | 135 | (rays_a, xyzs, dirs, 136 | results['deltas'], results['ts'], embed_idxs, results['rm_samples']) = \ 137 | RayMarcher_NGPA.apply( 138 | rays_o, rays_d, img_idxs, hits_t[:, 0], model.density_bitfield, 139 | model.cascades, model.scale, 140 | exp_step_factor, model.grid_size, MAX_SAMPLES) 141 | 142 | for k, v in kwargs.items(): # supply additional inputs, repeated per ray 143 | if isinstance(v, torch.Tensor): 144 | kwargs[k] = torch.repeat_interleave(v[rays_a[:, 0]], rays_a[:, 2], 0) 145 | # print("[test render] embed_idxs = {}".format(embed_idxs)) 146 | sigmas, rgbs = model(xyzs, dirs, img_id = embed_idxs, embed_test = None, **kwargs) 147 | 148 | (results['vr_samples'], results['opacity'], 149 | results['depth'], results['rgb'], results['ws']) = \ 150 | VolumeRenderer.apply(sigmas, rgbs.contiguous(), results['deltas'], results['ts'], 151 | rays_a, kwargs.get('T_threshold', 1e-4)) 152 | results['rays_a'] = rays_a 153 | 154 | if exp_step_factor==0: # synthetic 155 | rgb_bg = torch.ones(3, device=rays_o.device) 156 | else: # real 157 | if kwargs.get('random_bg', False): 158 | rgb_bg = torch.rand(3, device=rays_o.device) 159 | else: 160 | rgb_bg = torch.zeros(3, device=rays_o.device) 161 | results['rgb'] = results['rgb'] + \ 162 | rgb_bg*rearrange(1-results['opacity'], 'n -> n 1') 163 | 164 | return results 165 | -------------------------------------------------------------------------------- /prepare_dataests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | cd scripts/data_prepare 3 | bash prepare_nerfpp.sh 4 | bash prepare_SynthNeRF.sh 5 | bash prepare_WAT.sh 6 | cd ../.. -------------------------------------------------------------------------------- /render_video_EWC.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022 Ruilong Li, UC Berkeley. 
3 | """ 4 | 5 | import argparse 6 | import math 7 | import os, sys 8 | import time 9 | 10 | import imageio 11 | import numpy as np 12 | import torch 13 | import torch.nn.functional as F 14 | import tqdm 15 | 16 | import sys 17 | from utils.nerfacc_radiance_fields.mlp import VanillaNeRFRadianceFieldG 18 | from utils.nerfacc_radiance_fields.utils import render_image, set_random_seed 19 | 20 | # metrics 21 | from torchmetrics import ( 22 | PeakSignalNoiseRatio, 23 | StructuralSimilarityIndexMeasure 24 | ) 25 | from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity 26 | from einops import rearrange 27 | 28 | from nerfacc import ContractionType, OccupancyGrid 29 | 30 | # os.environ["CUDA_LAUNCH_BLOCKING"] = "1" 31 | 32 | if __name__ == "__main__": 33 | 34 | device = "cuda:0" 35 | 36 | parser = argparse.ArgumentParser() 37 | parser.add_argument( 38 | "--train_split", 39 | type=str, 40 | default="train", 41 | choices=["train", "trainval"], 42 | help="which train split to use", 43 | ) 44 | parser.add_argument( 45 | "--scene", 46 | type=str, 47 | default="Barn", 48 | help="which scene to use", 49 | ) 50 | parser.add_argument( 51 | "--aabb", 52 | type=lambda s: [float(item) for item in s.split(",")], 53 | default="-1.5,-1.5,-1.5,1.5,1.5,1.5", 54 | help="delimited list input", 55 | ) 56 | parser.add_argument( 57 | "--test_chunk_size", 58 | type=int, 59 | default=8192, 60 | ) 61 | parser.add_argument( 62 | "--unbounded", 63 | action="store_true", 64 | help="whether to use unbounded rendering", 65 | ) 66 | parser.add_argument("--cone_angle", type=float, default=0.0) 67 | # CL params 68 | parser.add_argument('--task_number', type=int, default=10, 69 | help='task_number') 70 | parser.add_argument('--task_curr', type=int, default=9, 71 | help='task_number [0, N-1]') 72 | parser.add_argument('--task_split_method', type=str, default='seq', 73 | help='seq or random') 74 | parser.add_argument('--rep_size', type=int, default=0, 75 | help='0 to number of images') 76 | parser.add_argument('--seed', type=int, default=42, 77 | help='random seed, wrong random seed can lead to nan loss') 78 | parser.add_argument( 79 | "--max_steps", 80 | type=int, 81 | default=50000, 82 | ) 83 | parser.add_argument( 84 | "--dim", 85 | type=int, 86 | default=256, 87 | ) 88 | parser.add_argument( 89 | "--smallAABB", 90 | type=int, 91 | default=0, 92 | help="whether to use a small bounding box", 93 | ) 94 | 95 | parser.add_argument( 96 | "--dim_a", 97 | type=int, 98 | default=48, 99 | help="dimension of appearance code", 100 | ) 101 | parser.add_argument( 102 | "--dim_g", 103 | type=int, 104 | default=16, 105 | help="dimension of geometry code", 106 | ) 107 | parser.add_argument( 108 | "--vocab_size", 109 | type=int, 110 | default=10, 111 | help="total number of tasks", 112 | ) 113 | parser.add_argument( 114 | "--data_root", 115 | type=str, 116 | default='dataset/WAT', 117 | help="total number of tasks", 118 | ) 119 | parser.add_argument( 120 | "--frame_start", 121 | type=int, 122 | default=0, 123 | help="starting frame to render", 124 | ) 125 | parser.add_argument( 126 | "--frame_end", 127 | type=int, 128 | default=10000, 129 | help="end frame to render", 130 | ) 131 | args = parser.parse_args() 132 | 133 | set_random_seed(args.seed) 134 | 135 | if os.path.isfile("/home/zcai/.cache/torch_extensions/py39_cu117/nerfacc_cuda/lock"): 136 | print("lock file exists in cache") 137 | os.remove("/home/zcai/.cache/torch_extensions/py39_cu117/nerfacc_cuda/lock") 138 | else: 139 | print("lock file not exists") 140 | 141 | 
render_n_samples = 1024 142 | psnr_func = PeakSignalNoiseRatio(data_range=1) 143 | ssim_func = StructuralSimilarityIndexMeasure(data_range=1) 144 | lpip_func = LearnedPerceptualImagePatchSimilarity('vgg') 145 | for p in lpip_func.net.parameters(): 146 | p.requires_grad = False 147 | 148 | # setup the radiance field we want to train. 149 | # max_steps = args.max_steps 150 | max_steps = 100 # test 151 | 152 | grad_scaler = torch.cuda.amp.GradScaler(1) 153 | 154 | # just read out the model 155 | model_dir = f'results/WAT/EWC/{args.scene}_{args.rep_size}' 156 | out_dict_read = torch.load(model_dir+'/model.torchSave') 157 | radiance_field = out_dict_read['model'].to(device).eval() 158 | occupancy_grid = out_dict_read['occupancy_grid'].to(device) 159 | from utils.nerfacc_radiance_fields.datasets.lb.colmap_render import SubjectLoader_lb as SubjectLoader_render 160 | data_root_fp = args.data_root 161 | target_sample_batch_size = 1 << 16 162 | grid_resolution = 128 163 | 164 | contraction_type = ContractionType.AABB 165 | 166 | scene_aabb = torch.tensor(args.aabb, dtype=torch.float32, device=device) 167 | near_plane = None 168 | far_plane = None 169 | 170 | render_step_size = ( 171 | (scene_aabb[3:] - scene_aabb[:3]).max() 172 | * math.sqrt(3) 173 | / render_n_samples 174 | ).item() 175 | 176 | test_dataset_kwargs = {} 177 | test_dataset = SubjectLoader_render( 178 | subject_id=args.scene, 179 | root_fp=data_root_fp, 180 | split="render", 181 | num_rays=None, 182 | **test_dataset_kwargs, 183 | ) 184 | test_dataset.images = test_dataset.images.to(device) 185 | test_dataset.camtoworlds = test_dataset.camtoworlds.to(device) 186 | test_dataset.K = test_dataset.K.to(device) 187 | test_dataset.task_ids = test_dataset.task_ids.to(device) 188 | 189 | 190 | # evaluation 191 | result_dir = f'results/WAT/EWC/{args.scene}_{args.rep_size}/video' 192 | os.makedirs(result_dir, exist_ok=True) 193 | 194 | 195 | if args.frame_start >= len(test_dataset): 196 | print("rendering already finished") 197 | exit() 198 | 199 | args.frame_end = min(len(test_dataset)-1, args.frame_end) 200 | 201 | with torch.no_grad(): 202 | for i in tqdm.tqdm(range(args.frame_start, args.frame_end+1)): 203 | data = test_dataset[i] 204 | render_bkgd = data["color_bkgd"] 205 | rays = data["rays"] 206 | task_id = data['task_id'].flatten() 207 | 208 | # rendering 209 | rgb, acc, depth, _ = render_image( 210 | radiance_field, 211 | occupancy_grid, 212 | rays, 213 | task_id, 214 | scene_aabb, 215 | # rendering options 216 | near_plane=None, 217 | far_plane=None, 218 | render_step_size=render_step_size, 219 | render_bkgd=render_bkgd, 220 | cone_angle=args.cone_angle, 221 | # test options 222 | test_chunk_size=args.test_chunk_size, 223 | ) 224 | 225 | rgb_save = (rgb.cpu().numpy()*255).astype(np.uint8) 226 | imageio.imsave(os.path.join(result_dir, '{}.png'.format(i)), rgb_save) 227 | 228 | -------------------------------------------------------------------------------- /render_video_NeRF.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022 Ruilong Li, UC Berkeley. 
3 | """ 4 | 5 | import argparse 6 | import math 7 | import os, sys 8 | import time 9 | 10 | import imageio 11 | import numpy as np 12 | import torch 13 | import torch.nn.functional as F 14 | import tqdm 15 | 16 | import sys 17 | from utils.nerfacc_radiance_fields.mlp import VanillaNeRFRadianceFieldG 18 | from utils.nerfacc_radiance_fields.utils import render_image, set_random_seed 19 | 20 | # metrics 21 | from torchmetrics import ( 22 | PeakSignalNoiseRatio, 23 | StructuralSimilarityIndexMeasure 24 | ) 25 | from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity 26 | from einops import rearrange 27 | 28 | from nerfacc import ContractionType, OccupancyGrid 29 | 30 | # os.environ["CUDA_LAUNCH_BLOCKING"] = "1" 31 | 32 | if __name__ == "__main__": 33 | 34 | device = "cuda:0" 35 | 36 | parser = argparse.ArgumentParser() 37 | parser.add_argument( 38 | "--train_split", 39 | type=str, 40 | default="train", 41 | choices=["train", "trainval"], 42 | help="which train split to use", 43 | ) 44 | parser.add_argument( 45 | "--scene", 46 | type=str, 47 | default="Barn", 48 | help="which scene to use", 49 | ) 50 | parser.add_argument( 51 | "--aabb", 52 | type=lambda s: [float(item) for item in s.split(",")], 53 | default="-1.5,-1.5,-1.5,1.5,1.5,1.5", 54 | help="delimited list input", 55 | ) 56 | parser.add_argument( 57 | "--test_chunk_size", 58 | type=int, 59 | default=8192, 60 | ) 61 | parser.add_argument( 62 | "--unbounded", 63 | action="store_true", 64 | help="whether to use unbounded rendering", 65 | ) 66 | parser.add_argument("--cone_angle", type=float, default=0.0) 67 | # CL params 68 | parser.add_argument('--task_number', type=int, default=10, 69 | help='task_number') 70 | parser.add_argument('--task_curr', type=int, default=9, 71 | help='task_number [0, N-1]') 72 | parser.add_argument('--task_split_method', type=str, default='seq', 73 | help='seq or random') 74 | parser.add_argument('--rep_size', type=int, default=0, 75 | help='0 to number of images') 76 | parser.add_argument('--seed', type=int, default=42, 77 | help='random seed, wrong random seed can lead to nan loss') 78 | parser.add_argument( 79 | "--max_steps", 80 | type=int, 81 | default=50000, 82 | ) 83 | parser.add_argument( 84 | "--dim", 85 | type=int, 86 | default=256, 87 | ) 88 | parser.add_argument( 89 | "--smallAABB", 90 | type=int, 91 | default=0, 92 | help="whether to use a small bounding box", 93 | ) 94 | 95 | parser.add_argument( 96 | "--dim_a", 97 | type=int, 98 | default=48, 99 | help="dimension of appearance code", 100 | ) 101 | parser.add_argument( 102 | "--dim_g", 103 | type=int, 104 | default=16, 105 | help="dimension of geometry code", 106 | ) 107 | parser.add_argument( 108 | "--vocab_size", 109 | type=int, 110 | default=10, 111 | help="total number of tasks", 112 | ) 113 | parser.add_argument( 114 | "--data_root", 115 | type=str, 116 | default='dataset/WAT', 117 | help="total number of tasks", 118 | ) 119 | parser.add_argument( 120 | "--frame_start", 121 | type=int, 122 | default=0, 123 | help="starting frame to render", 124 | ) 125 | parser.add_argument( 126 | "--frame_end", 127 | type=int, 128 | default=10000, 129 | help="end frame to render", 130 | ) 131 | args = parser.parse_args() 132 | 133 | set_random_seed(args.seed) 134 | 135 | if os.path.isfile("/home/zcai/.cache/torch_extensions/py39_cu117/nerfacc_cuda/lock"): 136 | print("lock file exists in cache") 137 | os.remove("/home/zcai/.cache/torch_extensions/py39_cu117/nerfacc_cuda/lock") 138 | else: 139 | print("lock file not exists") 140 | 141 | 
render_n_samples = 1024 142 | psnr_func = PeakSignalNoiseRatio(data_range=1) 143 | ssim_func = StructuralSimilarityIndexMeasure(data_range=1) 144 | lpip_func = LearnedPerceptualImagePatchSimilarity('vgg') 145 | for p in lpip_func.net.parameters(): 146 | p.requires_grad = False 147 | 148 | # setup the radiance field we want to train. 149 | # max_steps = args.max_steps 150 | max_steps = 100 # test 151 | 152 | grad_scaler = torch.cuda.amp.GradScaler(1) 153 | 154 | # just read out the model 155 | model_dir = f'results/WAT/NT_ER/{args.scene}_{args.rep_size}' 156 | out_dict_read = torch.load(model_dir+'/model.torchSave') 157 | radiance_field = out_dict_read['model'].to(device).eval() 158 | occupancy_grid = out_dict_read['occupancy_grid'].to(device) 159 | from utils.nerfacc_radiance_fields.datasets.lb.colmap_render import SubjectLoader_lb as SubjectLoader_render 160 | data_root_fp = args.data_root 161 | target_sample_batch_size = 1 << 16 162 | grid_resolution = 128 163 | 164 | contraction_type = ContractionType.AABB 165 | 166 | scene_aabb = torch.tensor(args.aabb, dtype=torch.float32, device=device) 167 | near_plane = None 168 | far_plane = None 169 | 170 | render_step_size = ( 171 | (scene_aabb[3:] - scene_aabb[:3]).max() 172 | * math.sqrt(3) 173 | / render_n_samples 174 | ).item() 175 | 176 | test_dataset_kwargs = {} 177 | test_dataset = SubjectLoader_render( 178 | subject_id=args.scene, 179 | root_fp=data_root_fp, 180 | split="render", 181 | num_rays=None, 182 | **test_dataset_kwargs, 183 | ) 184 | test_dataset.images = test_dataset.images.to(device) 185 | test_dataset.camtoworlds = test_dataset.camtoworlds.to(device) 186 | test_dataset.K = test_dataset.K.to(device) 187 | test_dataset.task_ids = test_dataset.task_ids.to(device) 188 | 189 | # evaluation 190 | result_dir = f'results/WAT/NT_ER/{args.scene}_{args.rep_size}/video' 191 | os.makedirs(result_dir, exist_ok=True) 192 | 193 | 194 | if args.frame_start >= len(test_dataset): 195 | print("rendering already finished") 196 | exit() 197 | 198 | args.frame_end = min(len(test_dataset)-1, args.frame_end) 199 | 200 | with torch.no_grad(): 201 | for i in tqdm.tqdm(range(args.frame_start, args.frame_end+1)): 202 | data = test_dataset[i] 203 | render_bkgd = data["color_bkgd"] 204 | rays = data["rays"] 205 | task_id = data['task_id'].flatten() 206 | 207 | # rendering 208 | rgb, acc, depth, _ = render_image( 209 | radiance_field, 210 | occupancy_grid, 211 | rays, 212 | task_id, 213 | scene_aabb, 214 | # rendering options 215 | near_plane=None, 216 | far_plane=None, 217 | render_step_size=render_step_size, 218 | render_bkgd=render_bkgd, 219 | cone_angle=args.cone_angle, 220 | # test options 221 | test_chunk_size=args.test_chunk_size, 222 | ) 223 | 224 | rgb_save = (rgb.cpu().numpy()*255).astype(np.uint8) 225 | imageio.imsave(os.path.join(result_dir, '{}.png'.format(i)), rgb_save) -------------------------------------------------------------------------------- /run_CLNeRF.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # CLNeRF on WAT dataset 4 | rep=10 5 | bash scripts/CLNeRF/WAT/breville.sh ${rep} 6 | bash scripts/CLNeRF/WAT/community.sh ${rep} 7 | bash scripts/CLNeRF/WAT/kitchen.sh ${rep} 8 | bash scripts/CLNeRF/WAT/living_room.sh ${rep} 9 | bash scripts/CLNeRF/WAT/spa.sh ${rep} 10 | bash scripts/CLNeRF/WAT/street.sh ${rep} 11 | bash scripts/CLNeRF/WAT/car.sh ${rep} 12 | bash scripts/CLNeRF/WAT/grill.sh ${rep} 13 | bash scripts/CLNeRF/WAT/mac.sh ${rep} 14 | bash scripts/CLNeRF/WAT/ninja.sh 
${rep} 15 | 16 | 17 | # CLNeRF on Synth-NeRF dataset 18 | rep=10 19 | bash scripts/CLNeRF/SynthNeRF/benchmark_synth_nerf.sh ${rep} 20 | 21 | # CLNeRF on NeRF++ dataset 22 | rep=10 23 | bash scripts/CLNeRF/nerfpp/benchmark_nerfpp.sh ${rep} 24 | 25 | -------------------------------------------------------------------------------- /run_ER.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # ER on WOT dataset 4 | rep=10 5 | bash scripts/NT/WOT/nerf_baseline.sh breville ${rep} 5 dataset/WOT 6 | # bash scripts/NT/WOT/nerf_baseline.sh community ${rep} 10 dataset/WOT 7 | # bash scripts/NT/WOT/nerf_baseline.sh kitchen ${rep} 5 dataset/WOT 8 | # bash scripts/NT/WOT/nerf_baseline.sh living_room ${rep} 5 dataset/WOT 9 | # bash scripts/NT/WOT/nerf_baseline.sh spa ${rep} 5 dataset/WOT 10 | # bash scripts/NT/WOT/nerf_baseline.sh street ${rep} 5 dataset/WOT 11 | 12 | # # # ER on Synth-NeRF dataset 13 | # bash scripts/NT/SynthNeRF/nerf_baseline.sh chair ${rep} 10 dataset/nerf_synthetic 14 | # bash scripts/NT/SynthNeRF/nerf_baseline.sh drums ${rep} 10 dataset/nerf_synthetic 15 | # bash scripts/NT/SynthNeRF/nerf_baseline.sh ficus ${rep} 10 dataset/nerf_synthetic 16 | # bash scripts/NT/SynthNeRF/nerf_baseline.sh hotdog ${rep} 10 dataset/nerf_synthetic 17 | # bash scripts/NT/SynthNeRF/nerf_baseline.sh lego ${rep} 10 dataset/nerf_synthetic 18 | # bash scripts/NT/SynthNeRF/nerf_baseline.sh materials ${rep} 10 dataset/nerf_synthetic 19 | # bash scripts/NT/SynthNeRF/nerf_baseline.sh mic ${rep} 10 dataset/nerf_synthetic 20 | # bash scripts/NT/SynthNeRF/nerf_baseline.sh ship ${rep} 10 dataset/nerf_synthetic 21 | 22 | # # # # CLNeRF on NeRF++ dataset 23 | # bash scripts/NT/nerfpp/nerf_baseline.sh tat_intermediate_M60 ${rep} 10 dataset/tanks_and_temples 24 | # bash scripts/NT/nerfpp/nerf_baseline.sh tat_intermediate_Playground ${rep} 10 dataset/tanks_and_temples 25 | # bash scripts/NT/nerfpp/nerf_baseline.sh tat_intermediate_Train ${rep} 10 dataset/tanks_and_temples 26 | # bash scripts/NT/nerfpp/nerf_baseline.sh tat_training_Truck ${rep} 10 dataset/tanks_and_temples 27 | -------------------------------------------------------------------------------- /run_ER_NGP.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # LB on WAT dataset 4 | rep=0 5 | bash scripts/ER_NGP/WAT/breville.sh 6 | bash scripts/NT/WAT/nerf_baseline.sh community ${rep} 10 dataset/WAT 7 | bash scripts/NT/WAT/nerf_baseline.sh kitchen ${rep} 5 dataset/WAT 8 | bash scripts/NT/WAT/nerf_baseline.sh living_room ${rep} 5 dataset/WAT 9 | bash scripts/NT/WAT/nerf_baseline.sh spa ${rep} 5 dataset/WAT 10 | bash scripts/NT/WAT/nerf_baseline.sh street ${rep} 5 dataset/WAT 11 | 12 | # # # # Synth-NeRF dataset 13 | bash scripts/NT/SynthNeRF/benchmark_synth_nerf.sh 14 | 15 | 16 | # # # # NeRF++ dataset 17 | bash scripts/NT/nerfpp/benchmark_nerfpp.sh 18 | -------------------------------------------------------------------------------- /run_EWC.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # EWC on WAT dataset 4 | rep=0 5 | bash scripts/EWC/WAT/nerf_baseline.sh breville ${rep} 5 dataset/WAT 6 | bash scripts/EWC/WAT/nerf_baseline.sh community ${rep} 10 dataset/WAT 7 | bash scripts/EWC/WAT/nerf_baseline.sh kitchen ${rep} 5 dataset/WAT 8 | bash scripts/EWC/WAT/nerf_baseline.sh living_room ${rep} 5 dataset/WAT 9 | bash scripts/EWC/WAT/nerf_baseline.sh spa ${rep} 5 dataset/WAT 10 | bash 
scripts/EWC/WAT/nerf_baseline.sh street ${rep} 5 dataset/WAT 11 | 12 | # # # # EWC on Synth-NeRF dataset 13 | bash scripts/EWC/SynthNeRF/nerf_baseline.sh chair ${rep} 10 dataset/nerf_synthetic 14 | bash scripts/EWC/SynthNeRF/nerf_baseline.sh drums ${rep} 10 dataset/nerf_synthetic 15 | bash scripts/EWC/SynthNeRF/nerf_baseline.sh ficus ${rep} 10 dataset/nerf_synthetic 16 | bash scripts/EWC/SynthNeRF/nerf_baseline.sh hotdog ${rep} 10 dataset/nerf_synthetic 17 | bash scripts/EWC/SynthNeRF/nerf_baseline.sh lego ${rep} 10 dataset/nerf_synthetic 18 | bash scripts/EWC/SynthNeRF/nerf_baseline.sh materials ${rep} 10 dataset/nerf_synthetic 19 | bash scripts/EWC/SynthNeRF/nerf_baseline.sh mic ${rep} 10 dataset/nerf_synthetic 20 | bash scripts/EWC/SynthNeRF/nerf_baseline.sh ship ${rep} 10 dataset/nerf_synthetic 21 | 22 | # # # # EWC on NeRF++ dataset 23 | bash scripts/EWC/nerfpp/nerf_baseline.sh tat_intermediate_M60 ${rep} 10 dataset/tanks_and_temples 24 | bash scripts/EWC/nerfpp/nerf_baseline.sh tat_intermediate_Playground ${rep} 10 dataset/tanks_and_temples 25 | bash scripts/EWC/nerfpp/nerf_baseline.sh tat_intermediate_Train ${rep} 10 dataset/tanks_and_temples 26 | bash scripts/EWC/nerfpp/nerf_baseline.sh tat_training_Truck ${rep} 10 dataset/tanks_and_temples 27 | -------------------------------------------------------------------------------- /run_LB.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # LB on WAT dataset 4 | rep=0 5 | bash scripts/LB/WAT/breville.sh ${rep} 6 | bash scripts/LB/WAT/community.sh ${rep} 7 | bash scripts/LB/WAT/kitchen.sh ${rep} 8 | bash scripts/LB/WAT/living_room.sh ${rep} 9 | bash scripts/LB/WAT/spa.sh ${rep} 10 | bash scripts/LB/WAT/street.sh ${rep} 11 | 12 | # LB on Synth-NeRF dataset 13 | bash scripts/LB/SynthNeRF/benchmark_synth_nerf.sh 14 | 15 | # # CLNeRF on NeRF++ dataset 16 | bash scripts/LB/nerfpp/benchmark_nerfpp.sh 17 | 18 | -------------------------------------------------------------------------------- /run_MEIL.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Note: with the un-optimized code, MEIL-NeRF sometimes needs 48gb memory, so please prepare a high-end GPU for this experiment 3 | # MEIL-NeRF on WAT dataset 4 | rep=10 5 | bash scripts/MEIL/WAT/breville.sh ${rep} 6 | bash scripts/MEIL/WAT/community.sh ${rep} 7 | bash scripts/MEIL/WAT/kitchen.sh ${rep} 8 | bash scripts/MEIL/WAT/living_room.sh ${rep} 9 | bash scripts/MEIL/WAT/spa.sh ${rep} 10 | bash scripts/MEIL/WAT/street.sh ${rep} 11 | 12 | # # MEIL-NeRF on Synth-NeRF dataset 13 | rep=10 14 | bash scripts/MEIL/SynthNeRF/benchmark_synth_nerf.sh ${rep} 15 | 16 | # MEIL-NeRF on NeRF++ dataset 17 | rep=10 18 | bash scripts/MEIL/nerfpp/benchmark_nerfpp.sh ${rep} 19 | 20 | -------------------------------------------------------------------------------- /run_NT.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # LB on WAT dataset 4 | rep=0 5 | bash scripts/NT/WAT/nerf_baseline.sh breville ${rep} 5 dataset/WAT 6 | bash scripts/NT/WAT/nerf_baseline.sh community ${rep} 10 dataset/WAT 7 | bash scripts/NT/WAT/nerf_baseline.sh kitchen ${rep} 5 dataset/WAT 8 | bash scripts/NT/WAT/nerf_baseline.sh living_room ${rep} 5 dataset/WAT 9 | bash scripts/NT/WAT/nerf_baseline.sh spa ${rep} 5 dataset/WAT 10 | bash scripts/NT/WAT/nerf_baseline.sh street ${rep} 5 dataset/WAT 11 | bash scripts/NT/WAT/nerf_baseline.sh car_resized ${rep} 5 dataset/WAT 12 | 13 
| # # # # Synth-NeRF dataset 14 | bash scripts/NT/SynthNeRF/nerf_baseline.sh chair ${rep} 10 dataset/nerf_synthetic 15 | bash scripts/NT/SynthNeRF/nerf_baseline.sh drums ${rep} 10 dataset/nerf_synthetic 16 | bash scripts/NT/SynthNeRF/nerf_baseline.sh ficus ${rep} 10 dataset/nerf_synthetic 17 | bash scripts/NT/SynthNeRF/nerf_baseline.sh hotdog ${rep} 10 dataset/nerf_synthetic 18 | bash scripts/NT/SynthNeRF/nerf_baseline.sh lego ${rep} 10 dataset/nerf_synthetic 19 | bash scripts/NT/SynthNeRF/nerf_baseline.sh materials ${rep} 10 dataset/nerf_synthetic 20 | bash scripts/NT/SynthNeRF/nerf_baseline.sh mic ${rep} 10 dataset/nerf_synthetic 21 | bash scripts/NT/SynthNeRF/nerf_baseline.sh ship ${rep} 10 dataset/nerf_synthetic 22 | 23 | # # # # NeRF++ dataset 24 | bash scripts/NT/nerfpp/nerf_baseline.sh tat_intermediate_M60 ${rep} 10 dataset/tanks_and_temples 25 | bash scripts/NT/nerfpp/nerf_baseline.sh tat_intermediate_Playground ${rep} 10 dataset/tanks_and_temples 26 | bash scripts/NT/nerfpp/nerf_baseline.sh tat_intermediate_Train ${rep} 10 dataset/tanks_and_temples 27 | bash scripts/NT/nerfpp/nerf_baseline.sh tat_training_Truck ${rep} 10 dataset/tanks_and_temples 28 | -------------------------------------------------------------------------------- /run_NT_NGP.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # LB on WAT dataset 4 | rep=0 5 | # bash scripts/NT/WAT/breville.sh 0 6 | # bash scripts/NT/WAT/nerf_baseline.sh community ${rep} 10 dataset/WAT 7 | # bash scripts/NT/WAT/nerf_baseline.sh kitchen ${rep} 5 dataset/WAT 8 | # bash scripts/NT/WAT/nerf_baseline.sh living_room ${rep} 5 dataset/WAT 9 | # bash scripts/NT/WAT/nerf_baseline.sh spa ${rep} 5 dataset/WAT 10 | # bash scripts/NT/WAT/nerf_baseline.sh street ${rep} 5 dataset/WAT 11 | 12 | # # # # Synth-NeRF dataset 13 | bash scripts/NT/SynthNeRF/benchmark_synth_nerf.sh 14 | 15 | 16 | # # # # NeRF++ dataset 17 | bash scripts/NT/nerfpp/benchmark_nerfpp.sh 18 | -------------------------------------------------------------------------------- /run_UB.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | bash scripts/UB/WAT/UB_WAT.sh 5 | 6 | bash scripts/UB/SynthNeRF/benchmark_synthetic_nerf.sh 7 | 8 | bash scripts/UB/nerfpp/benchmark_nerfpp.sh 9 | -------------------------------------------------------------------------------- /scripts/CLNeRF/SynthNeRF/benchmark_synth_nerf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/Synthetic_NeRF 4 | 5 | rep=$1 6 | task_number=10 7 | 8 | # Lego 9 | data=Lego 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_CLNerf.py \ 13 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_CLNerf \ 14 | --exp_name $data'_'$task_number'_'$rep \ 15 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number 17 | done 18 | 19 | # Chair 20 | data=Chair 21 | for ((i=0; i<$task_number; i++)) 22 | do 23 | python train_CLNerf.py \ 24 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_CLNerf \ 25 | --exp_name $data'_'$task_number'_'$rep \ 26 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 27 | --task_curr $i --task_number $task_number 28 | done 29 | 30 | # Drums 31 | data=Drums 32 | for ((i=0; i<$task_number; i++)) 33 | do 34 | python train_CLNerf.py \ 35 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_CLNerf \ 36 | --exp_name 
$data'_'$task_number'_'$rep \ 37 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 38 | --task_curr $i --task_number $task_number 39 | done 40 | 41 | 42 | # Ficus 43 | data=Ficus 44 | for ((i=0; i<$task_number; i++)) 45 | do 46 | python train_CLNerf.py \ 47 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_CLNerf \ 48 | --exp_name $data'_'$task_number'_'$rep \ 49 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 50 | --task_curr $i --task_number $task_number 51 | done 52 | 53 | # Hotdog 54 | data=Hotdog 55 | for ((i=0; i<$task_number; i++)) 56 | do 57 | python train_CLNerf.py \ 58 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_CLNerf \ 59 | --exp_name $data'_'$task_number'_'$rep \ 60 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 61 | --task_curr $i --task_number $task_number 62 | done 63 | 64 | 65 | # Materials 66 | data=Materials 67 | for ((i=0; i<$task_number; i++)) 68 | do 69 | python train_CLNerf.py \ 70 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_CLNerf \ 71 | --exp_name $data'_'$task_number'_'$rep \ 72 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 73 | --task_curr $i --task_number $task_number 74 | done 75 | 76 | 77 | # Mic 78 | data=Mic 79 | for ((i=0; i<$task_number; i++)) 80 | do 81 | python train_CLNerf.py \ 82 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_CLNerf \ 83 | --exp_name $data'_'$task_number'_'$rep \ 84 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 85 | --task_curr $i --task_number $task_number 86 | done 87 | 88 | 89 | # Ship 90 | data=Ship 91 | for ((i=0; i<$task_number; i++)) 92 | do 93 | python train_CLNerf.py \ 94 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_CLNerf \ 95 | --exp_name $data'_'$task_number'_'$rep \ 96 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 97 | --task_curr $i --task_number $task_number 98 | done -------------------------------------------------------------------------------- /scripts/CLNeRF/WAT/breville.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=breville 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_CLNerf.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_CLNerf \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 8.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- /scripts/CLNeRF/WAT/car.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=car_resized 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_CLNerf.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_CLNerf \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 16.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- 
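Each of the per-scene CLNeRF WAT scripts in this folder follows the same pattern: it loops over the capture sessions (tasks) of one scene and calls train_ngpgv2_CLNerf.py once per task, changing only the scene name, the task count and the scene scale (--scale). The sketch below shows how that pattern could be adapted to a new scene; my_scene and its task_number and scale values are placeholders for illustration only, not files or settings shipped with this repository.

#!/bin/bash
# Sketch only: a hypothetical custom WAT-style scene trained with CLNeRF.
export ROOT_DIR=dataset/WAT

task_number=5        # placeholder: number of capture sessions (tasks) for the scene
scene_name=my_scene  # placeholder: scene folder under dataset/WAT
downsample=1.0
scale=8.0            # placeholder: the existing WAT scripts use 8.0-32.0 depending on the scene

rep=$1               # replay buffer size, passed in by the caller
for ((i=0; i<$task_number; i++))
do
    python train_ngpgv2_CLNerf.py \
        --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_CLNerf \
        --exp_name ${scene_name}_${rep} \
        --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \
        --task_curr $i --task_number $task_number --dim_a 48 --scale ${scale} \
        --downsample ${downsample} --vocab_size ${task_number}
done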
/scripts/CLNeRF/WAT/community.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=10 6 | scene_name=community 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_CLNerf.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_CLNerf \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 32.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | 19 | -------------------------------------------------------------------------------- /scripts/CLNeRF/WAT/grill.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=grill_resized 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_CLNerf.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_CLNerf \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 16.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- /scripts/CLNeRF/WAT/kitchen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=kitchen 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_CLNerf.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_CLNerf \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 8.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- /scripts/CLNeRF/WAT/living_room.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=living_room 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_CLNerf.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_CLNerf \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 8.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- /scripts/CLNeRF/WAT/mac.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=6 6 | scene_name=mac 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_CLNerf.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_CLNerf \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number 
$task_number --dim_a 48 --scale 8.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- /scripts/CLNeRF/WAT/ninja.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=ninja 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_CLNerf.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_CLNerf \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 8.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- /scripts/CLNeRF/WAT/render_video.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export ROOT_DIR=dataset/WAT 3 | 4 | task_number=$1 5 | task_curr=$2 6 | scene_name=$3 7 | model_path=$4 8 | rep=$5 9 | scale=$6 10 | render_fname=$7 11 | downsample=1.0 12 | 13 | export CUDA_HOME=/usr/local/cuda-11.6 14 | export PATH=/usr/local/cuda-11.6/bin:$PATH 15 | export LD_LIBRARY_PATH=/usr/local/cuda-11.6/lib64:$LD_LIBRARY_PATH 16 | 17 | 18 | python render_NGP_WAT.py \ 19 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_CLNerf_render \ 20 | --exp_name ${scene_name}_${rep} \ 21 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 22 | --task_curr ${task_curr} --task_number $task_number --dim_a 48 --scale ${scale} --downsample ${downsample} --vocab_size ${task_number} \ 23 | --weight_path ${model_path} --render_fname ${render_fname} --val_only 24 | 25 | -------------------------------------------------------------------------------- /scripts/CLNeRF/WAT/spa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=spa 7 | downsample=1.0 8 | scale=16.0 9 | 10 | rep=$1 11 | for ((i=0; i<$task_number; i++)) 12 | do 13 | python train_ngpgv2_CLNerf.py \ 14 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_CLNerf \ 15 | --exp_name ${scene_name}_${rep} \ 16 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 17 | --task_curr $i --task_number $task_number --dim_a 48 --scale ${scale} --downsample ${downsample} --vocab_size ${task_number} 18 | done 19 | -------------------------------------------------------------------------------- /scripts/CLNeRF/WAT/street.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=street 7 | downsample=1.0 8 | scale=32.0 9 | 10 | rep=$1 11 | for ((i=0; i<$task_number; i++)) 12 | do 13 | python train_ngpgv2_CLNerf.py \ 14 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_CLNerf \ 15 | --exp_name ${scene_name}_${rep} \ 16 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 17 | --task_curr $i --task_number $task_number --dim_a 48 --scale ${scale} --downsample ${downsample} --vocab_size ${task_number} 18 | done 19 | -------------------------------------------------------------------------------- /scripts/CLNeRF/nerfpp/benchmark_nerfpp.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export ROOT_DIR=dataset/tanks_and_temples 3 | 4 | task_number=10 5 | rep=$1 6 | 7 | data=tat_intermediate_M60 8 | for ((i=0; i<$task_number; i++)) 9 | do 10 | python train_CLNerf.py \ 11 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_CLNerf \ 12 | --exp_name $data'_'$task_number'task_'$rep \ 13 | --num_epochs 20 --scale 4.0 --rep_size $rep --eval_lpips \ 14 | --task_curr $i --task_number $task_number 15 | done 16 | 17 | data=tat_intermediate_Playground 18 | for ((i=0; i<$task_number; i++)) 19 | do 20 | python train_CLNerf.py \ 21 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_CLNerf \ 22 | --exp_name $data'_'$task_number'task_'$rep \ 23 | --num_epochs 20 --scale 4.0 --rep_size $rep --eval_lpips \ 24 | --task_curr $i --task_number $task_number 25 | done 26 | 27 | 28 | data=tat_intermediate_Train 29 | for ((i=0; i<$task_number; i++)) 30 | do 31 | python train_CLNerf.py \ 32 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_CLNerf \ 33 | --exp_name $data'_'$task_number'task_'$rep \ 34 | --num_epochs 20 --scale 16.0 --batch_size 4096 --rep_size $rep --eval_lpips \ 35 | --task_curr $i --task_number $task_number 36 | done 37 | 38 | data=tat_training_Truck 39 | for ((i=0; i<$task_number; i++)) 40 | do 41 | python train_CLNerf.py \ 42 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_CLNerf \ 43 | --exp_name $data'_'$task_number'task_'$rep \ 44 | --num_epochs 20 --scale 16.0 --batch_size 4096 --rep_size $rep --eval_lpips \ 45 | --task_curr $i --task_number $task_number 46 | done 47 | 48 | -------------------------------------------------------------------------------- /scripts/ER_NGP/SynthNeRF/benchmark_synth_nerf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/Synthetic_NeRF 4 | 5 | 6 | rep=10 7 | task_number=10 8 | task_curr=9 9 | 10 | python train_lb.py \ 11 | --root_dir $ROOT_DIR/Chair --dataset_name nsvf_lb \ 12 | --exp_name Chair_10task --no_save_test \ 13 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 14 | --task_number $task_number --task_curr $task_curr --rep_size $rep 15 | 16 | python train_lb.py \ 17 | --root_dir $ROOT_DIR/Drums --dataset_name nsvf_lb \ 18 | --exp_name Drums_10task --no_save_test \ 19 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 20 | --task_number $task_number --task_curr $task_curr --rep_size $rep 21 | 22 | python train_lb.py \ 23 | --root_dir $ROOT_DIR/Ficus --dataset_name nsvf_lb \ 24 | --exp_name Ficus_10task --no_save_test \ 25 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 26 | --task_number $task_number --task_curr $task_curr --rep_size $rep 27 | 28 | python train_lb.py \ 29 | --root_dir $ROOT_DIR/Hotdog --dataset_name nsvf_lb \ 30 | --exp_name Hotdog_10task --no_save_test \ 31 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 32 | --task_number $task_number --task_curr $task_curr --rep_size $rep 33 | 34 | python train_lb.py \ 35 | --root_dir $ROOT_DIR/Lego --dataset_name nsvf_lb \ 36 | --exp_name Lego_10task --no_save_test \ 37 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 38 | --task_number $task_number --task_curr $task_curr --rep_size $rep 39 | 40 | python train_lb.py \ 41 | --root_dir $ROOT_DIR/Materials --dataset_name nsvf_lb \ 42 | --exp_name Materials_10task --no_save_test \ 43 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 44 | --task_number $task_number --task_curr 
$task_curr --rep_size $rep 45 | 46 | python train_lb.py \ 47 | --root_dir $ROOT_DIR/Mic --dataset_name nsvf_lb \ 48 | --exp_name Mic_10task --no_save_test \ 49 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 50 | --task_number $task_number --task_curr $task_curr --rep_size $rep 51 | 52 | python train_lb.py \ 53 | --root_dir $ROOT_DIR/Ship --dataset_name nsvf_lb \ 54 | --exp_name Ship_10task --no_save_test \ 55 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 56 | --task_number $task_number --task_curr $task_curr --rep_size $rep -------------------------------------------------------------------------------- /scripts/ER_NGP/WAT/breville.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | task_curr=4 7 | scene_name=breville 8 | 9 | 10 | downsample=1.0 11 | rep=10 12 | python train_ngpgv2_lb.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_lb \ 14 | --exp_name ${scene_name}_${rep}_autoScale8.0_8.0 \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --eval_lpips \ 16 | --task_number $task_number --task_curr $task_curr --dim_a 48 --dim_g 16 --scale 8.0 --downsample ${downsample} --rep_size $rep --vocab_size=5 17 | -------------------------------------------------------------------------------- /scripts/ER_NGP/nerfpp/benchmark_nerfpp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/tanks_and_temples 4 | 5 | rep=10 6 | task_number=10 7 | task_curr=9 8 | 9 | data=tat_intermediate_M60 10 | python train_lb.py \ 11 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_lb \ 12 | --exp_name $data'_'$task_number'task_'$rep --no_save_test \ 13 | --num_epochs 20 --scale 4.0 \ 14 | --task_number $task_number --task_curr $task_curr --rep_size $rep 15 | 16 | data=tat_intermediate_Playground 17 | python train_lb.py \ 18 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_lb \ 19 | --exp_name $data'_'$task_number'task_'$rep --no_save_test \ 20 | --num_epochs 20 --scale 4.0 \ 21 | --task_number $task_number --task_curr $task_curr --rep_size $rep 22 | 23 | 24 | data=tat_intermediate_Train 25 | python train_lb.py \ 26 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_lb \ 27 | --exp_name $data'_'$task_number'task_'$rep --no_save_test \ 28 | --num_epochs 20 --scale 16.0 --batch_size 4096 \ 29 | --task_number $task_number --task_curr $task_curr --rep_size $rep 30 | 31 | 32 | data=tat_training_Truck 33 | python train_lb.py \ 34 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_lb \ 35 | --exp_name $data'_'$task_number'task_'$rep --no_save_test \ 36 | --num_epochs 20 --scale 16.0 --batch_size 4096 \ 37 | --task_number $task_number --task_curr $task_curr --rep_size $rep -------------------------------------------------------------------------------- /scripts/EWC/SynthNeRF/nerf_baseline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python train_SynthNeRF_EWC.py --train_split train --scene $1 --rep_size $2 --max_steps 50000 --task_number $3 --data_root $4 --EWC_weight 1e5 4 | -------------------------------------------------------------------------------- /scripts/EWC/WAT/nerf_baseline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python train_WAT_EWC.py --train_split train --scene $1 --rep_size $2 --vocab_size $3 --max_steps 50000 --task_number $3 
--data_root $4 --EWC_weight 1e5 4 | -------------------------------------------------------------------------------- /scripts/EWC/WAT/video_render.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python render_video_EWC.py --train_split train --scene $1 --rep_size $2 --vocab_size $3 --max_steps 50000 --task_number $3 --data_root $4 --frame_start $5 --frame_end $6 4 | -------------------------------------------------------------------------------- /scripts/EWC/nerfpp/nerf_baseline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python train_nerfpp_EWC.py --train_split train --scene $1 --rep_size $2 --max_steps 50000 --task_number $3 --data_root $4 --EWC_weight 1e5 4 | -------------------------------------------------------------------------------- /scripts/MEIL/SynthNeRF/benchmark_synth_nerf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/Synthetic_NeRF 4 | rep=$1 5 | task_number=10 6 | 7 | # Lego 8 | data=Lego 9 | for ((i=0; i<$task_number; i++)) 10 | do 11 | python train_MEIL.py \ 12 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_MEILNERF \ 13 | --exp_name $data'_'$task_number'_'$rep \ 14 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 15 | --task_curr $i --task_number $task_number 16 | done 17 | 18 | # Chair 19 | data=Chair 20 | for ((i=0; i<$task_number; i++)) 21 | do 22 | python train_MEIL.py \ 23 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_MEILNERF \ 24 | --exp_name $data'_'$task_number'_'$rep \ 25 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 26 | --task_curr $i --task_number $task_number 27 | done 28 | 29 | # Drums 30 | data=Drums 31 | for ((i=0; i<$task_number; i++)) 32 | do 33 | python train_MEIL.py \ 34 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_MEILNERF \ 35 | --exp_name $data'_'$task_number'_'$rep \ 36 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 37 | --task_curr $i --task_number $task_number 38 | done 39 | 40 | 41 | # Ficus 42 | data=Ficus 43 | for ((i=0; i<$task_number; i++)) 44 | do 45 | python train_MEIL.py \ 46 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_MEILNERF \ 47 | --exp_name $data'_'$task_number'_'$rep \ 48 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 49 | --task_curr $i --task_number $task_number 50 | done 51 | 52 | # Hotdog 53 | data=Hotdog 54 | for ((i=0; i<$task_number; i++)) 55 | do 56 | python train_MEIL.py \ 57 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_MEILNERF \ 58 | --exp_name $data'_'$task_number'_'$rep \ 59 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 60 | --task_curr $i --task_number $task_number 61 | done 62 | 63 | 64 | # Materials 65 | data=Materials 66 | for ((i=0; i<$task_number; i++)) 67 | do 68 | python train_MEIL.py \ 69 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_MEILNERF \ 70 | --exp_name $data'_'$task_number'_'$rep \ 71 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 72 | --task_curr $i --task_number $task_number 73 | done 74 | 75 | 76 | # Mic 77 | data=Mic 78 | for ((i=0; i<$task_number; i++)) 79 | do 80 | python train_MEIL.py \ 81 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_MEILNERF \ 82 | --exp_name $data'_'$task_number'_'$rep \ 83 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 84 | 
--task_curr $i --task_number $task_number 85 | done 86 | 87 | 88 | # Ship 89 | data=Ship 90 | for ((i=0; i<$task_number; i++)) 91 | do 92 | python train_MEIL.py \ 93 | --root_dir $ROOT_DIR/$data --dataset_name nsvf_MEILNERF \ 94 | --exp_name $data'_'$task_number'_'$rep \ 95 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --rep_size $rep --eval_lpips \ 96 | --task_curr $i --task_number $task_number 97 | done -------------------------------------------------------------------------------- /scripts/MEIL/WAT/breville.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=breville 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_MEIL.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_MEIL \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 8.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- /scripts/MEIL/WAT/car.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=car_resized 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_MEIL.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_MEIL \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 16.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- /scripts/MEIL/WAT/community.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=10 6 | scene_name=community 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_MEIL.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_MEIL \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 32.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- /scripts/MEIL/WAT/grill.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=grill_resized 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_MEIL.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_MEIL \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 16.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- /scripts/MEIL/WAT/kitchen.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=kitchen 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_MEIL.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_MEIL \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 8.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- /scripts/MEIL/WAT/living_room.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=living_room 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_MEIL.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_MEIL \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 8.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- /scripts/MEIL/WAT/mac.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | export CUDA_HOME=/usr/local/cuda-11.6 5 | export PATH=/usr/local/cuda-11.6/bin:$PATH 6 | export LD_LIBRARY_PATH=/usr/local/cuda-11.6/lib64:$LD_LIBRARY_PATH 7 | 8 | task_number=6 9 | scene_name=mac 10 | downsample=1.0 11 | 12 | rep=$1 13 | for ((i=0; i<$task_number; i++)) 14 | do 15 | python train_ngpgv2_MEIL.py \ 16 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_MEIL \ 17 | --exp_name ${scene_name}_${rep} \ 18 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 19 | --task_curr $i --task_number $task_number --dim_a 48 --scale 8.0 --downsample ${downsample} --vocab_size ${task_number} 20 | done 21 | -------------------------------------------------------------------------------- /scripts/MEIL/WAT/ninja.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=ninja 7 | downsample=1.0 8 | 9 | rep=$1 10 | for ((i=0; i<$task_number; i++)) 11 | do 12 | python train_ngpgv2_MEIL.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_MEIL \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 16 | --task_curr $i --task_number $task_number --dim_a 48 --scale 8.0 --downsample ${downsample} --vocab_size ${task_number} 17 | done 18 | -------------------------------------------------------------------------------- /scripts/MEIL/WAT/spa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=spa 7 | downsample=1.0 8 | scale=16.0 9 | 10 | rep=$1 11 | for ((i=0; i<$task_number; i++)) 12 | do 13 | python train_ngpgv2_MEIL.py \ 14 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_MEIL \ 15 | --exp_name ${scene_name}_${rep} \ 16 | --num_epochs 20 --batch_size 
8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 17 | --task_curr $i --task_number $task_number --dim_a 48 --scale ${scale} --downsample ${downsample} --vocab_size ${task_number} 18 | done 19 | -------------------------------------------------------------------------------- /scripts/MEIL/WAT/street.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | scene_name=street 7 | downsample=1.0 8 | scale=32.0 9 | 10 | rep=$1 11 | for ((i=0; i<$task_number; i++)) 12 | do 13 | python train_ngpgv2_MEIL.py \ 14 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_MEIL \ 15 | --exp_name ${scene_name}_${rep} \ 16 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --rep_size $rep --eval_lpips \ 17 | --task_curr $i --task_number $task_number --dim_a 48 --scale ${scale} --downsample ${downsample} --vocab_size ${task_number} 18 | done 19 | -------------------------------------------------------------------------------- /scripts/MEIL/nerfpp/benchmark_nerfpp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export ROOT_DIR=dataset/tanks_and_temples 3 | 4 | task_number=10 5 | rep=$1 6 | 7 | data=tat_intermediate_M60 8 | for ((i=0; i<$task_number; i++)) 9 | do 10 | python train_MEIL.py \ 11 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_MEIL \ 12 | --exp_name $data'_'$task_number'task_'$rep \ 13 | --num_epochs 20 --scale 4.0 --rep_size $rep --eval_lpips \ 14 | --task_curr $i --task_number $task_number 15 | done 16 | 17 | data=tat_intermediate_Playground 18 | for ((i=0; i<$task_number; i++)) 19 | do 20 | python train_MEIL.py \ 21 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_MEIL \ 22 | --exp_name $data'_'$task_number'task_'$rep \ 23 | --num_epochs 20 --scale 4.0 --rep_size $rep --eval_lpips \ 24 | --task_curr $i --task_number $task_number 25 | done 26 | 27 | 28 | data=tat_intermediate_Train 29 | for ((i=0; i<$task_number; i++)) 30 | do 31 | python train_MEIL.py \ 32 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_MEIL \ 33 | --exp_name $data'_'$task_number'task_'$rep \ 34 | --num_epochs 20 --scale 16.0 --batch_size 4096 --rep_size $rep --eval_lpips \ 35 | --task_curr $i --task_number $task_number 36 | done 37 | 38 | 39 | data=tat_training_Truck 40 | for ((i=0; i<$task_number; i++)) 41 | do 42 | python train_MEIL.py \ 43 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_MEIL \ 44 | --exp_name $data'_'$task_number'task_'$rep \ 45 | --num_epochs 20 --scale 16.0 --batch_size 4096 --rep_size $rep --eval_lpips \ 46 | --task_curr $i --task_number $task_number 47 | done 48 | 49 | -------------------------------------------------------------------------------- /scripts/NT/SynthNeRF/benchmark_synth_nerf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/Synthetic_NeRF 4 | 5 | task_number=10 6 | task_curr=9 7 | 8 | python train_lb.py \ 9 | --root_dir $ROOT_DIR/Chair --dataset_name nsvf_lb \ 10 | --exp_name Chair_10task --no_save_test \ 11 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 12 | --task_number $task_number --task_curr $task_curr 13 | 14 | python train_lb.py \ 15 | --root_dir $ROOT_DIR/Drums --dataset_name nsvf_lb \ 16 | --exp_name Drums_10task --no_save_test \ 17 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 18 | --task_number $task_number --task_curr $task_curr 19 | 20 | python train_lb.py \ 21 | --root_dir 
$ROOT_DIR/Ficus --dataset_name nsvf_lb \ 22 | --exp_name Ficus_10task --no_save_test \ 23 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 24 | --task_number $task_number --task_curr $task_curr 25 | 26 | python train_lb.py \ 27 | --root_dir $ROOT_DIR/Hotdog --dataset_name nsvf_lb \ 28 | --exp_name Hotdog_10task --no_save_test \ 29 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 30 | --task_number $task_number --task_curr $task_curr 31 | 32 | python train_lb.py \ 33 | --root_dir $ROOT_DIR/Lego --dataset_name nsvf_lb \ 34 | --exp_name Lego_10task --no_save_test \ 35 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 36 | --task_number $task_number --task_curr $task_curr 37 | 38 | python train_lb.py \ 39 | --root_dir $ROOT_DIR/Materials --dataset_name nsvf_lb \ 40 | --exp_name Materials_10task --no_save_test \ 41 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 42 | --task_number $task_number --task_curr $task_curr 43 | 44 | python train_lb.py \ 45 | --root_dir $ROOT_DIR/Mic --dataset_name nsvf_lb \ 46 | --exp_name Mic_10task --no_save_test \ 47 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 48 | --task_number $task_number --task_curr $task_curr 49 | 50 | python train_lb.py \ 51 | --root_dir $ROOT_DIR/Ship --dataset_name nsvf_lb \ 52 | --exp_name Ship_10task --no_save_test \ 53 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips \ 54 | --task_number $task_number --task_curr $task_curr -------------------------------------------------------------------------------- /scripts/NT/SynthNeRF/nerf_baseline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python train_SynthNeRF_NT.py --train_split train --scene $1 --rep_size $2 --max_steps 50000 --task_number $3 --data_root $4 4 | -------------------------------------------------------------------------------- /scripts/NT/WAT/breville.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | task_curr=4 7 | scene_name=breville 8 | 9 | 10 | downsample=1.0 11 | rep=$1 12 | python train_ngpgv2_lb.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_lb \ 14 | --exp_name ${scene_name}_${rep}_autoScale8.0_8.0 \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --eval_lpips \ 16 | --task_number $task_number --task_curr $task_curr --dim_a 48 --dim_g 16 --scale 8.0 --downsample ${downsample} --rep_size $rep --vocab_size ${task_number} 17 | 18 | -------------------------------------------------------------------------------- /scripts/NT/WAT/car.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | task_curr=4 7 | scene_name=car_resized 8 | 9 | 10 | downsample=1.0 11 | rep=$1 12 | python train_ngpgv2_lb.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_lb \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --eval_lpips \ 16 | --task_number $task_number --task_curr $task_curr --dim_a 48 --dim_g 16 --scale 16.0 --downsample ${downsample} --rep_size $rep --vocab_size ${task_number} 17 | -------------------------------------------------------------------------------- /scripts/NT/WAT/community.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | 
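# Unlike the per-scene CLNeRF scripts, the per-scene NT scripts in this folder call
# train_ngpgv2_lb.py a single time with task_curr fixed to the final session (here
# task 9 of 10) instead of looping over tasks; the per-scene settings below
# (task_number, scale) mirror the CLNeRF and MEIL scripts for the same scene.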
task_number=10 6 | task_curr=9 7 | scene_name=community 8 | 9 | downsample=1.0 10 | rep=$1 11 | python train_ngpgv2_lb.py \ 12 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_lb \ 13 | --exp_name ${scene_name}_${rep}_autoscale8.0_32.0 \ 14 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --eval_lpips \ 15 | --task_number $task_number --task_curr $task_curr --dim_a 48 --scale 32.0 --downsample ${downsample} --rep_size $rep --vocab_size ${task_number} 16 | -------------------------------------------------------------------------------- /scripts/NT/WAT/grill.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export ROOT_DIR=dataset/WAT 3 | 4 | task_number=5 5 | task_curr=4 6 | scene_name=grill_resized 7 | 8 | 9 | downsample=1.0 10 | rep=$1 11 | python train_ngpgv2_lb.py \ 12 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_lb \ 13 | --exp_name ${scene_name}_${rep} \ 14 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --eval_lpips \ 15 | --task_number $task_number --task_curr $task_curr --dim_a 48 --dim_g 16 --scale 16.0 --downsample ${downsample} --rep_size $rep --vocab_size ${task_number} 16 | -------------------------------------------------------------------------------- /scripts/NT/WAT/kitchen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | task_curr=4 7 | scene_name=kitchen 8 | 9 | downsample=1.0 10 | 11 | rep=$1 12 | python train_ngpgv2_lb.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_lb \ 14 | --exp_name ${scene_name}_${rep}_autoScale8.0_8.0 \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --eval_lpips \ 16 | --task_number $task_number --task_curr $task_curr --dim_a 48 --dim_g 16 --scale 8.0 --downsample ${downsample} --rep_size $rep --vocab_size=5 17 | -------------------------------------------------------------------------------- /scripts/NT/WAT/living_room.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | task_curr=4 7 | scene_name=living_room 8 | 9 | downsample=1.0 10 | rep=$1 11 | python train_ngpgv2_lb.py \ 12 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_lb \ 13 | --exp_name ${scene_name}_${rep}_autoscale8.0_8.0 \ 14 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --eval_lpips \ 15 | --task_number $task_number --task_curr $task_curr --dim_a 48 --scale 8.0 --downsample ${downsample} --rep_size $rep --vocab_size=5 16 | -------------------------------------------------------------------------------- /scripts/NT/WAT/mac.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=6 6 | task_curr=5 7 | scene_name=mac 8 | 9 | 10 | downsample=1.0 11 | rep=$1 12 | python train_ngpgv2_lb.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_lb \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --eval_lpips \ 16 | --task_number $task_number --task_curr $task_curr --dim_a 48 --dim_g 16 --scale 8.0 --downsample ${downsample} --rep_size $rep --vocab_size ${task_number} 17 | -------------------------------------------------------------------------------- /scripts/NT/WAT/nerf_baseline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | 
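# Positional arguments (this matches how run_NT.sh invokes the script, e.g.
#   bash scripts/NT/WAT/nerf_baseline.sh breville 0 5 dataset/WAT):
#   $1 = scene name, $2 = replay buffer size (rep_size),
#   $3 = number of tasks (also used as vocab_size), $4 = dataset root directory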
python train_WAT_NT.py --train_split train --scene $1 --rep_size $2 --vocab_size $3 --max_steps 50000 --task_number $3 --data_root $4 5 | -------------------------------------------------------------------------------- /scripts/NT/WAT/ninja.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | task_curr=4 7 | scene_name=ninja 8 | 9 | 10 | downsample=1.0 11 | rep=$1 12 | python train_ngpgv2_lb.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_lb \ 14 | --exp_name ${scene_name}_${rep} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --eval_lpips \ 16 | --task_number $task_number --task_curr $task_curr --dim_a 48 --dim_g 16 --scale 8.0 --downsample ${downsample} --rep_size $rep --vocab_size ${task_number} 17 | -------------------------------------------------------------------------------- /scripts/NT/WAT/spa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | task_curr=4 7 | scene_name=spa 8 | 9 | downsample=1.0 10 | scale=32.0 11 | rep=$1 12 | python train_ngpgv2_lb.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_lb \ 14 | --exp_name ${scene_name}_${rep}_autoScale8.0_${scale} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --eval_lpips \ 16 | --task_number $task_number --task_curr $task_curr --dim_a 48 --scale ${scale} --downsample ${downsample} --rep_size $rep --vocab_size=5 17 | -------------------------------------------------------------------------------- /scripts/NT/WAT/street.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | 5 | task_number=5 6 | task_curr=4 7 | scene_name=street 8 | 9 | downsample=1.0 10 | scale=32.0 11 | rep=$1 12 | python train_ngpgv2_lb.py \ 13 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa_lb \ 14 | --exp_name ${scene_name}_${rep}_autoscale8.0_${scale} \ 15 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --eval_lpips \ 16 | --task_number $task_number --task_curr $task_curr --dim_a 48 --scale ${scale} --downsample ${downsample} --rep_size $rep --vocab_size=${task_number} 17 | -------------------------------------------------------------------------------- /scripts/NT/WAT/video_render.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | python render_video_NeRF.py --train_split train --scene $1 --rep_size $2 --vocab_size $3 --max_steps 50000 --task_number $3 --data_root $4 --frame_start $5 --frame_end $6 5 | -------------------------------------------------------------------------------- /scripts/NT/nerfpp/benchmark_nerfpp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/tanks_and_temples 4 | 5 | task_number=10 6 | task_curr=9 7 | 8 | data=tat_intermediate_M60 9 | python train_lb.py \ 10 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_lb \ 11 | --exp_name $data'_'$task_number'task' --no_save_test \ 12 | --num_epochs 20 --scale 4.0 \ 13 | --task_number $task_number --task_curr $task_curr 14 | 15 | data=tat_intermediate_Playground 16 | python train_lb.py \ 17 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_lb \ 18 | --exp_name $data'_'$task_number'task' --no_save_test \ 19 | --num_epochs 20 --scale 4.0 \ 20 | --task_number $task_number 
--task_curr $task_curr 21 | 22 | 23 | data=tat_intermediate_Train 24 | python train_lb.py \ 25 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_lb \ 26 | --exp_name $data'_'$task_number'task' --no_save_test \ 27 | --num_epochs 20 --scale 16.0 --batch_size 4096 \ 28 | --task_number $task_number --task_curr $task_curr 29 | 30 | 31 | data=tat_training_Truck 32 | python train_lb.py \ 33 | --root_dir $ROOT_DIR'/'$data --dataset_name nerfpp_lb \ 34 | --exp_name $data'_'$task_number'task' --no_save_test \ 35 | --num_epochs 20 --scale 16.0 --batch_size 4096 \ 36 | --task_number $task_number --task_curr $task_curr 37 | -------------------------------------------------------------------------------- /scripts/NT/nerfpp/nerf_baseline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python train_nerfpp_NT.py --train_split train --scene $1 --rep_size $2 --max_steps 50000 --task_number $3 --data_root $4 4 | -------------------------------------------------------------------------------- /scripts/UB/SynthNeRF/benchmark_synthetic_nerf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/Synthetic_NeRF 4 | 5 | python train.py \ 6 | --root_dir $ROOT_DIR/Chair \ 7 | --exp_name Chair \ 8 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips 9 | 10 | python train.py \ 11 | --root_dir $ROOT_DIR/Drums \ 12 | --exp_name Drums \ 13 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips 14 | 15 | python train.py \ 16 | --root_dir $ROOT_DIR/Ficus \ 17 | --exp_name Ficus \ 18 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips 19 | 20 | python train.py \ 21 | --root_dir $ROOT_DIR/Hotdog \ 22 | --exp_name Hotdog \ 23 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips 24 | 25 | python train.py \ 26 | --root_dir $ROOT_DIR/Lego \ 27 | --exp_name Lego \ 28 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips 29 | 30 | python train.py \ 31 | --root_dir $ROOT_DIR/Materials \ 32 | --exp_name Materials \ 33 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips 34 | 35 | python train.py \ 36 | --root_dir $ROOT_DIR/Mic \ 37 | --exp_name Mic \ 38 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips 39 | 40 | python train.py \ 41 | --root_dir $ROOT_DIR/Ship \ 42 | --exp_name Ship \ 43 | --num_epochs 20 --batch_size 16384 --lr 2e-2 --eval_lpips -------------------------------------------------------------------------------- /scripts/UB/WAT/UB_WAT.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/WAT 4 | export CUDA_HOME=/usr/local/cuda-11.6 5 | export PATH=/usr/local/cuda-11.6/bin:$PATH 6 | export LD_LIBRARY_PATH=/usr/local/cuda-11.6/lib64:$LD_LIBRARY_PATH 7 | 8 | scene_name=breville 9 | python train_ngpgv2.py \ 10 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa \ 11 | --exp_name ${scene_name} --downsample 1.0 \ 12 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --dim_a 48 --dim_g 16 --scale 8.0 --eval_lpips --vocab_size=5 13 | 14 | scene_name=car_resized 15 | python train_ngpgv2.py \ 16 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa \ 17 | --exp_name ${scene_name} --downsample 1.0 \ 18 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --dim_a 48 --dim_g 16 --scale 16.0 --eval_lpips --vocab_size=5 19 | 20 | scene_name=community 21 | python train_ngpgv2.py \ 22 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa \ 23 | 
--exp_name ${scene_name} --downsample 1.0 \ 24 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --dim_a 48 --dim_g 16 --scale 32.0 --eval_lpips --vocab_size=10 25 | 26 | scene_name=grill_resized 27 | python train_ngpgv2.py \ 28 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa \ 29 | --exp_name ${scene_name} --downsample 1.0 \ 30 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --dim_a 48 --dim_g 16 --scale 16.0 --eval_lpips --vocab_size=5 31 | 32 | scene_name=kitchen 33 | python train_ngpgv2.py \ 34 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa \ 35 | --exp_name ${scene_name} --downsample 1.0 \ 36 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --dim_a 48 --dim_g 16 --scale 8.0 --eval_lpips --vocab_size=5 37 | 38 | scene_name=living_room 39 | python train_ngpgv2.py \ 40 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa \ 41 | --exp_name ${scene_name} --downsample 1.0 \ 42 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --dim_a 48 --dim_g 16 --scale 8.0 --eval_lpips --vocab_size=5 43 | 44 | scene_name=mac 45 | python train_ngpgv2.py \ 46 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa \ 47 | --exp_name ${scene_name} --downsample 1.0 \ 48 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --dim_a 48 --dim_g 16 --scale 8.0 --eval_lpips --vocab_size=6 49 | 50 | scene_name=ninja 51 | python train_ngpgv2.py \ 52 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa \ 53 | --exp_name ${scene_name} --downsample 1.0 \ 54 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --dim_a 48 --dim_g 16 --scale 8.0 --eval_lpips --vocab_size=5 55 | 56 | scene_name=spa 57 | python train_ngpgv2.py \ 58 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa \ 59 | --exp_name ${scene_name} --downsample 1.0 \ 60 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --dim_a 48 --dim_g 16 --scale 16.0 --eval_lpips --vocab_size=5 61 | 62 | scene_name=street 63 | python train_ngpgv2.py \ 64 | --root_dir $ROOT_DIR/${scene_name} --dataset_name colmap_ngpa \ 65 | --exp_name ${scene_name} --downsample 1.0 \ 66 | --num_epochs 20 --batch_size 8192 --lr 1e-2 --dim_a 48 --dim_g 16 --scale 32.0 --eval_lpips --vocab_size=5 67 | -------------------------------------------------------------------------------- /scripts/UB/nerfpp/benchmark_nerfpp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export ROOT_DIR=dataset/tanks_and_temples 4 | 5 | python train.py \ 6 | --root_dir $ROOT_DIR/tat_intermediate_M60 --dataset_name nerfpp \ 7 | --exp_name tat_intermediate_M60 \ 8 | --num_epochs 20 --scale 4.0 9 | 10 | python train.py \ 11 | --root_dir $ROOT_DIR/tat_intermediate_Playground --dataset_name nerfpp \ 12 | --exp_name tat_intermediate_Playground \ 13 | --num_epochs 20 --scale 4.0 14 | 15 | python train.py \ 16 | --root_dir $ROOT_DIR/tat_intermediate_Train --dataset_name nerfpp \ 17 | --exp_name tat_intermediate_Train \ 18 | --num_epochs 20 --scale 16.0 --batch_size 4096 19 | 20 | python train.py \ 21 | --root_dir $ROOT_DIR/tat_training_Truck --dataset_name nerfpp \ 22 | --exp_name tat_training_Truck \ 23 | --num_epochs 20 --scale 16.0 --batch_size 4096 24 | -------------------------------------------------------------------------------- /scripts/data_prepare/build_WAT_from_video.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | python ../../utils/data_prepare_utils/poses/imgs2poses.py --is_video 1 --frame_rate $1 $2 3 | 
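# Usage sketch (inferred from the imgs2poses.py call above and from the commented
# examples in prepare_WAT.sh; the relative paths suggest the script is meant to be
# run from scripts/data_prepare):
#   $1 = frame rate at which images are extracted from the captured videos
#   $2 = scene folder containing the videos, e.g.
#   cd scripts/data_prepare && bash build_WAT_from_video.sh 20 ../../dataset/WAT/breville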
-------------------------------------------------------------------------------- /scripts/data_prepare/prepare_SynthNeRF.sh: -------------------------------------------------------------------------------- 1 | wget -O ../../dataset/Synth_NeRF.zip https://dl.fbaipublicfiles.com/nsvf/dataset/Synthetic_NeRF.zip 2 | unzip ../../dataset/Synth_NeRF.zip -d ../../dataset 3 | rm ../../dataset/Synth_NeRF.zip 4 | 5 | gdown --id 18JxhpWD-4ZmuFKLzKlAw-w5PpzZxXOcG -O ../../dataset/Synthetic_NeRF_nerfacc.zip 6 | unzip ../../dataset/Synthetic_NeRF_nerfacc.zip -d ../../dataset 7 | rm ../../dataset/Synthetic_NeRF_nerfacc.zip 8 | -------------------------------------------------------------------------------- /scripts/data_prepare/prepare_WAT.sh: -------------------------------------------------------------------------------- 1 | # gdown --id 1iAE8iWtUokQ6waZYc9ysy2OFkXyptdso -O ../../dataset/WAT.zip 2 | wget -O ../../dataset/WAT.zip https://huggingface.co/datasets/zcai/WAT-WorldOverTime/resolve/main/WAT.zip 3 | unzip ../../dataset/WAT.zip -d ../../dataset 4 | rm ../../dataset/WAT.zip 5 | 6 | # if you want to rerun the colmap reconstruction, please uncomment (and change the name to your customized dataset folder) this code if you want to prepare WAT-type dataset from a sequence of videos 7 | # python ../../utils/data_prepare_utils/poses/imgs2poses.py --is_video 1 --frame_rate 20 ../../dataset/WAT/breville 8 | # python ../../utils/data_prepare_utils/poses/imgs2poses.py --is_video 1 --frame_rate 20 ../../dataset/WAT/community 9 | # python ../../utils/data_prepare_utils/poses/imgs2poses.py --is_video 1 --frame_rate 20 ../../dataset/WAT/kitchen 10 | # python ../../utils/data_prepare_utils/poses/imgs2poses.py --is_video 1 --frame_rate 20 ../../dataset/WAT/living_room 11 | # python ../../utils/data_prepare_utils/poses/imgs2poses.py --is_video 1 --frame_rate 20 ../../dataset/WAT/spa 12 | # python ../../utils/data_prepare_utils/poses/imgs2poses.py --is_video 1 --frame_rate 20 ../../dataset/WAT/street 13 | -------------------------------------------------------------------------------- /scripts/data_prepare/prepare_nerfpp.sh: -------------------------------------------------------------------------------- 1 | gdown --id 11KRfN91W1AxAW6lOFs4EeYDbeoQZCi87 -O ../../dataset/tanks_and_temples.zip 2 | unzip ../../dataset/tanks_and_temples.zip -d ../../dataset 3 | rm ../../dataset/tanks_and_temples.zip 4 | -------------------------------------------------------------------------------- /scripts/data_prepare/resize_videos.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import os 3 | from pathlib import Path 4 | 5 | def resize_video(input_file_path, output_file_path): 6 | # Open the video file 7 | video = cv2.VideoCapture(input_file_path) 8 | 9 | # Check if the video file is opened successfully 10 | if not video.isOpened(): 11 | print("Error opening video file") 12 | 13 | # Get the original video frame width and height 14 | frame_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) 15 | frame_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) 16 | 17 | # Calculate the new size: half the original width and height 18 | new_width = int(frame_width / 2) 19 | new_height = int(frame_height / 2) 20 | 21 | # Define the codec and create a VideoWriter object 22 | fourcc = cv2.VideoWriter_fourcc(*'mp4v') 23 | out = cv2.VideoWriter(output_file_path, fourcc, 30.0, (new_width, new_height)) 24 | 25 | while(video.isOpened()): 26 | ret, frame = video.read() 27 | if ret: 28 | # Resize frame 
29 | frame = cv2.resize(frame, (new_width, new_height)) 30 | 31 | # Write the resized frame 32 | out.write(frame) 33 | else: 34 | break 35 | 36 | # Release everything when the job is finished 37 | video.release() 38 | out.release() 39 | cv2.destroyAllWindows() 40 | 41 | 42 | def process_videos(input_directory_path, output_directory_path): 43 | video_extensions = ['.mp4', '.avi', '.MOV', '.flv', '.mkv'] # add more if needed 44 | 45 | # Convert to Path objects 46 | input_directory_path = Path(input_directory_path) 47 | output_directory_path = Path(output_directory_path) 48 | 49 | # Iterate over all files in the directory and subdirectories 50 | for input_file_path in input_directory_path.rglob('*'): 51 | if input_file_path.suffix in video_extensions: 52 | relative_path = input_file_path.relative_to(input_directory_path) 53 | output_file_path = output_directory_path / relative_path.with_suffix('.mov') 54 | 55 | # Create output directories if they don't exist 56 | output_file_path.parent.mkdir(parents=True, exist_ok=True) 57 | 58 | resize_video(str(input_file_path), str(output_file_path)) 59 | 60 | 61 | # Usage: 62 | process_videos('/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/dataset/WAT/car', '/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/dataset/WAT/car_resized') 63 | process_videos('/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/dataset/WAT/grill', '/export/work/zcai/WorkSpace/CLNeRF/CLNeRF/dataset/WAT/grill_resized') 64 | -------------------------------------------------------------------------------- /setup_env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # initialize environment 3 | conda create -n CLNeRF python=3.8 4 | conda activate CLNeRF 5 | # link to your cuda 11 folder 6 | export CUDA_HOME=/usr/local/cuda-11.6 # change to your own cuda 11 repository (11.3 will be the best) 7 | 8 | # install pytorch 9 | pip install torch==1.11.0 torchvision==0.12.0 --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu113 10 | pip install pytorch-lightning==1.9.5 # must use old version to be compatible with the code 11 | # torch scatter (see https://github.com/rusty1s/pytorch_scatter#installation for more details) 12 | pip install --force-reinstall torch-scatter -f https://data.pyg.org/whl/torch-1.11.0+cu113.html 13 | # tinycudann (see https://github.com/NVlabs/tiny-cuda-nn#requirements) 14 | pip install --force-reinstall git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch 15 | # apex (see https://github.com/NVIDIA/apex#linux) 16 | # note that you can comment out the cuda version check at line 32 of setup.py in apex folder so that if you don't have exactly cuda 11.3 it will still work 17 | git clone https://github.com/NVIDIA/apex 18 | cd apex 19 | pip install --force-reinstall packaging 20 | pip install flit_core 21 | pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ 22 | 23 | cd .. 
24 | rm -rf apex 25 | # cuda extension (modified NGP architecture with appearance and geometric embeddings, please run this line each time you pull the code) 26 | bash install_cuda_module.sh 27 | 28 | # nerfacc (for vanilla NeRF baselines) 29 | pip install nerfacc==0.3.5 -f https://nerfacc-bucket.s3.us-west-2.amazonaws.com/whl/torch-1.11.0_cu113.html 30 | 31 | # install some necessary libraries 32 | pip install imageio==2.19.3 33 | pip install opencv-python 34 | pip install einops 35 | pip install tqdm 36 | pip install kornia 37 | pip install pandas 38 | pip install torchmetrics 39 | pip install torchmetrics[image] 40 | pip install -U 'tensorboardX' 41 | 42 | # the followings are for creating WAT dataset for your custom scenes, can ignore if you dont want them 43 | pip install scikit-image==0.19.3 44 | conda install -c conda-forge colmap 45 | -------------------------------------------------------------------------------- /setup_env_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e # exit on error 3 | # initialize environment 4 | source /opt/miniconda3/etc/profile.d/conda.sh 5 | conda create -n CLNeRF python=3.8 -y 6 | conda activate CLNeRF 7 | 8 | # link to your cuda 11 folder 9 | export CUDA_HOME=/usr/local/cuda # change to your own cuda 11 repository (11.3 will be the best) 10 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CUDA_HOME/lib64:$CUDA_HOME/extras/CUPTI/lib64 11 | export TCNN_CUDA_ARCHITECTURES="86" # needed by tinycudann, adjust to your own GPU architecture 12 | export TORCH_CUDA_ARCH_LIST="8.6" # needed by install_cuda_module.sh, adjust to your own GPU architecture 13 | 14 | # install pytorch 15 | pip install torch==1.11.0 torchvision==0.12.0 --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu113 16 | pip install pytorch-lightning==1.9.5 # must use old version to be compatible with the code 17 | # torch scatter (see https://github.com/rusty1s/pytorch_scatter#installation for more details) 18 | pip install --force-reinstall torch-scatter -f https://data.pyg.org/whl/torch-1.11.0+cu113.html 19 | # tinycudann (see https://github.com/NVlabs/tiny-cuda-nn#requirements) 20 | pip install --force-reinstall git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch 21 | # apex (see https://github.com/NVIDIA/apex#linux) 22 | # note that you can comment out the cuda version check at line 32 of setup.py in apex folder so that if you don't have exactly cuda 11.3 it will still work 23 | git clone https://github.com/NVIDIA/apex 24 | cd apex 25 | pip install --force-reinstall packaging 26 | pip install flit_core 27 | git checkout 2386a912164b0c5cfcd8be7a2b890fbac5607c82 # FIX for ninja issue in next step: https://github.com/NVIDIA/apex/issues/1735#issuecomment-1751917444 28 | #pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ 29 | # For pip version > 21.3, the following command is recommended in https://github.com/NVIDIA/apex as of 21/08/2024. It fixes torch not found issue 30 | pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./ 31 | 32 | cd .. 
33 | rm -rf apex 34 | # cuda extension (modified NGP architecture with appearance and geometric embeddings, please run this line each time you pull the code) 35 | bash install_cuda_module.sh 36 | 37 | # nerfacc (for vanilla NeRF baselines) 38 | pip install nerfacc==0.3.5 -f https://nerfacc-bucket.s3.us-west-2.amazonaws.com/whl/torch-1.11.0_cu113.html 39 | 40 | # install some necessary libraries 41 | pip install imageio==2.19.3 42 | pip install imageio[ffmpeg] 43 | pip install opencv-python 44 | pip install einops 45 | pip install tqdm 46 | pip install kornia 47 | pip install pandas 48 | pip install torchmetrics 49 | pip install torchmetrics[image] 50 | pip install -U 'tensorboardX' 51 | 52 | # the following packages are only needed to create a WAT dataset from your own custom scenes; skip them if you don't need that 53 | pip install scikit-image==0.19.3 54 | conda install -c conda-forge colmap -y 55 | -------------------------------------------------------------------------------- /source_cuda.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export CUDA_HOME=/usr/local/cuda-11.6 3 | export PATH=/usr/local/cuda-11.6/bin:$PATH 4 | export LD_LIBRARY_PATH=/usr/local/cuda-11.6/lib64:$LD_LIBRARY_PATH 5 | -------------------------------------------------------------------------------- /test_pose_interpolation.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import scipy.spatial.transform 3 | 4 | def interpolate_poses(start_pose, end_pose, N): 5 | assert start_pose.shape == (3, 4) 6 | assert end_pose.shape == (3, 4) 7 | 8 | start_rot = start_pose[:3, :3] 9 | start_trans = start_pose[:3, 3] 10 | end_rot = end_pose[:3, :3] 11 | end_trans = end_pose[:3, 3] 12 | 13 | start_rot_q = torch.tensor(scipy.spatial.transform.Rotation.from_matrix(start_rot.numpy()).as_quat(), dtype=torch.float32) 14 | end_rot_q = torch.tensor(scipy.spatial.transform.Rotation.from_matrix(end_rot.numpy()).as_quat(), dtype=torch.float32) 15 | 16 | # Ensure the quaternions have the shortest arc for interpolation 17 | if torch.dot(start_rot_q, end_rot_q) < 0: 18 | end_rot_q = -end_rot_q 19 | 20 | intermediate_poses = [] 21 | 22 | for i in range(1, N + 1): 23 | t = i / (N + 1) # Normalized interpolation factor 24 | 25 | # Lerp for translation 26 | inter_trans = start_trans * (1 - t) + end_trans * t 27 | 28 | # Normalized lerp (nlerp) on quaternions for rotation (approximates slerp) 29 | inter_rot_q = torch.lerp(start_rot_q, end_rot_q, t) 30 | inter_rot_q /= inter_rot_q.norm() 31 | inter_rot = torch.tensor(scipy.spatial.transform.Rotation.from_quat(inter_rot_q.numpy()).as_matrix(), dtype=torch.float32) 32 | 33 | inter_pose = torch.cat((inter_rot, inter_trans.unsqueeze(-1)), dim=-1) 34 | intermediate_poses.append(inter_pose) 35 | 36 | return intermediate_poses 37 | 38 | # # Usage example: 39 | # start_pose = torch.eye(3, 4) 40 | # end_pose = torch.eye(3, 4) 41 | # end_pose[0, 3] = 1.0 42 | 43 | # interpolated_poses = interpolate_poses(start_pose, end_pose, 5) 44 | 45 | # for pose in interpolated_poses: 46 | # print(pose) 47 | import torch 48 | import numpy as np 49 | import scipy.spatial.transform 50 | 51 | # Define the start and end poses 52 | start_pose = torch.eye(3, 4) 53 | end_rot = scipy.spatial.transform.Rotation.from_euler('z', 30, degrees=True).as_matrix() 54 | end_pose = torch.eye(3, 4) 55 | end_pose[:3, :3] = torch.tensor(end_rot, dtype=torch.float32) 56 | end_pose[0, 3], end_pose[1, 3], end_pose[2, 3] = 1.0, 1.0, 1.0 57 | 58 | # Interpolation function 59 | # ...
(as defined previously) 60 | 61 | # Generate interpolated poses 62 | interpolated_poses = interpolate_poses(start_pose, end_pose, 5) 63 | 64 | # Verification steps 65 | def angle_from_quaternion_dot(dot_val): 66 | """Compute the angle of rotation based on the dot product of two quaternions.""" 67 | return 2 * np.arccos(dot_val) 68 | 69 | # 1. Verify Translation 70 | differences = [] 71 | for i in range(1, len(interpolated_poses)): 72 | trans_diff = torch.norm(interpolated_poses[i][:3, 3] - interpolated_poses[i-1][:3, 3]).item() 73 | differences.append(trans_diff) 74 | 75 | print("Translation differences between consecutive poses:", differences) 76 | print("Are translations consistently interpolated?", len(set(differences)) == 1) 77 | 78 | # 2. Verify Rotation 79 | angles = [] 80 | start_rot_q = scipy.spatial.transform.Rotation.from_matrix(start_pose[:3, :3].numpy()).as_quat() 81 | for pose in interpolated_poses: 82 | interp_rot_q = scipy.spatial.transform.Rotation.from_matrix(pose[:3, :3].numpy()).as_quat() 83 | dot_product = np.dot(start_rot_q, interp_rot_q) 84 | angle = angle_from_quaternion_dot(dot_product) 85 | angles.append(np.degrees(angle)) # Convert angle from radians to degrees 86 | 87 | print("\nAngles (in degrees) from start pose to each interpolated pose:", angles) 88 | print("Are all angles <= 180 degrees?", all(angle <= 180 for angle in angles)) 89 | -------------------------------------------------------------------------------- /utils/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /utils/data_prepare_utils/poses/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/data_prepare_utils/poses/__init__.py -------------------------------------------------------------------------------- /utils/data_prepare_utils/poses/__pycache__/colmap_read_model.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/data_prepare_utils/poses/__pycache__/colmap_read_model.cpython-38.pyc -------------------------------------------------------------------------------- /utils/data_prepare_utils/poses/__pycache__/colmap_wrapper.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/data_prepare_utils/poses/__pycache__/colmap_wrapper.cpython-38.pyc -------------------------------------------------------------------------------- /utils/data_prepare_utils/poses/__pycache__/pose_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/data_prepare_utils/poses/__pycache__/pose_utils.cpython-38.pyc -------------------------------------------------------------------------------- /utils/data_prepare_utils/poses/colmap_wrapper.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | 4 | def run_colmap(basedir, 
match_type): 5 | 6 | logfile_name = os.path.join(basedir, 'colmap_output.txt') 7 | logfile = open(logfile_name, 'w') 8 | 9 | feature_extractor_args = [ 10 | 'colmap', 'feature_extractor', 11 | '--database_path', os.path.join(basedir, 'database.db'), 12 | '--image_path', os.path.join(basedir, 'images'), 13 | '--ImageReader.single_camera', '1', 14 | '--SiftExtraction.use_gpu', '0', 15 | ] 16 | feat_output = ( subprocess.check_output(feature_extractor_args, universal_newlines=True) ) 17 | logfile.write(feat_output) 18 | print('Features extracted') 19 | 20 | exhaustive_matcher_args = [ 21 | 'colmap', match_type, 22 | '--SiftMatching.use_gpu', '0', 23 | '--SiftMatching.min_num_inliers', '15', 24 | '--database_path', os.path.join(basedir, 'database.db'), 25 | ] 26 | 27 | match_output = ( subprocess.check_output(exhaustive_matcher_args, universal_newlines=True) ) 28 | logfile.write(match_output) 29 | print('Features matched') 30 | 31 | p = os.path.join(basedir, 'sparse') 32 | if not os.path.exists(p): 33 | os.makedirs(p) 34 | 35 | mapper_args = [ 36 | 'colmap', 'mapper', 37 | '--database_path', os.path.join(basedir, 'database.db'), 38 | '--image_path', os.path.join(basedir, 'images'), 39 | '--output_path', os.path.join(basedir, 'sparse'), # --export_path changed to --output_path in colmap 3.6 40 | '--Mapper.num_threads', '16', 41 | '--Mapper.init_min_tri_angle', '16', 42 | '--Mapper.multiple_models', '1', 43 | '--Mapper.extract_colors', '0' 44 | # '--init_image_id1', '0', 45 | # '--init_image_id2', '1' 46 | ] 47 | 48 | map_output = ( subprocess.check_output(mapper_args, universal_newlines=True) ) 49 | logfile.write(map_output) 50 | logfile.close() 51 | print('Sparse map created') 52 | 53 | print( 'Finished running COLMAP, see {} for logs'.format(logfile_name) ) 54 | 55 | 56 | -------------------------------------------------------------------------------- /utils/data_prepare_utils/poses/imgs2poses.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | # sys.path.append(os.path.join(os.path.dirname(__file__),'../../..')) 3 | from pose_utils import gen_poses 4 | 5 | 6 | import argparse 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument('--match_type', type=str, 9 | default='exhaustive_matcher', help='type of matcher used. Valid options: \ 10 | exhaustive_matcher sequential_matcher. Other matchers not supported at this time') 11 | parser.add_argument('--is_video', type=int, 12 | default=0, help='whether the input is video') 13 | 14 | parser.add_argument('--frame_rate', type=int, 15 | default=10, help='frame rate to sample images') 16 | 17 | parser.add_argument('scenedir', type=str, 18 | help='input scene directory') 19 | args = parser.parse_args() 20 | 21 | if args.match_type != 'exhaustive_matcher' and args.match_type != 'sequential_matcher': 22 | print('ERROR: matcher type ' + args.match_type + ' is not valid. 
Aborting') 23 | sys.exit() 24 | 25 | if __name__=='__main__': 26 | gen_poses(args.scenedir, args.match_type, args.is_video, args.frame_rate) -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/__pycache__/mlp.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/nerfacc_radiance_fields/__pycache__/mlp.cpython-38.pyc -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/nerfacc_radiance_fields/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/datasets/__pycache__/colmap_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/nerfacc_radiance_fields/datasets/__pycache__/colmap_utils.cpython-38.pyc -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/datasets/__pycache__/ray_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/nerfacc_radiance_fields/datasets/__pycache__/ray_utils.cpython-38.pyc -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/datasets/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/nerfacc_radiance_fields/datasets/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/datasets/color_utils.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | from einops import rearrange 3 | import imageio 4 | import numpy as np 5 | from PIL import Image, ImageDraw 6 | import os 7 | 8 | def srgb_to_linear(img): 9 | limit = 0.04045 10 | return np.where(img>limit, ((img+0.055)/1.055)**2.4, img/12.92) 11 | 12 | 13 | def linear_to_srgb(img): 14 | limit = 0.0031308 15 | img = np.where(img>limit, 1.055*img**(1/2.4)-0.055, 12.92*img) 16 | img[img>1] = 1 # "clamp" tonemapper 17 | return img 18 | 19 | 20 | def read_image(img_path, img_wh, blend_a=True, test_img_gen = False, img_id = 0): 21 | img = imageio.imread(img_path).astype(np.float32)/255.0 22 | 23 | if test_img_gen: 24 | print("[test after train]: img_id= {}, img = {}/{}/{}".format(img_id, img.shape, img.min(), img.max())) 25 | # exit() 26 | # save the training image 27 | print("saving training image to {}".format( 28 | 'test/train_img_rep{}.jpg'.format(img_id))) 29 | rgb_img = Image.fromarray((255 * img).astype(np.uint8)) 30 | rgb_img = rgb_img.convert('RGB') 31 | os.makedirs('./test/', exist_ok = True) 32 | rgb_img.save('test/train_img_rep{}.jpeg'.format(img_id)) 33 | 34 | # img[..., :3] = srgb_to_linear(img[..., :3]) 35 | if img.shape[2] == 4: # blend A to RGB 36 | if blend_a: 37 
| img = img[..., :3]*img[..., -1:]+(1-img[..., -1:]) 38 | else: 39 | img = img[..., :3]*img[..., -1:] 40 | 41 | img = cv2.resize(img, img_wh) 42 | img = rearrange(img, 'h w c -> (h w) c') 43 | 44 | return img 45 | 46 | def add_perturbation(img, perturbation, seed, decent_occ=0): 47 | if 'color' in perturbation: 48 | np.random.seed(seed) 49 | # img_np = np.array(img) / 255.0 50 | s = np.random.uniform(0.8, 1.2, size=3) 51 | b = np.random.uniform(-0.2, 0.2, size=3) 52 | img[..., :3] = np.clip(s * img[..., :3] + b, 0, 1) 53 | # img = Image.fromarray((255 * img_np).astype(np.uint8)) 54 | if 'occ' in perturbation: 55 | 56 | draw = ImageDraw.Draw(img) 57 | np.random.seed(seed) 58 | if decent_occ: 59 | left = np.random.randint(0, 600) 60 | top = np.random.randint(0, 600) 61 | else: 62 | left = np.random.randint(200, 400) 63 | top = np.random.randint(200, 400) 64 | for i in range(10): 65 | np.random.seed(10 * seed + i) 66 | random_color = tuple(np.random.choice(range(256), 3)) 67 | draw.rectangle( 68 | ((left + 20 * i, top), (left + 20 * (i + 1), top + 200)), 69 | fill=random_color) 70 | return img 71 | 72 | def read_image_ngpa(img_path, img_wh, blend_a=True, split='train', t = 0, test_img_gen = False, img_id = 0): 73 | img = imageio.imread(img_path).astype(np.float32)/255.0 74 | 75 | # add perturbations 76 | if t != 0 and split == 'train': # perturb everything except the first image. 77 | # cf. Section D in the supplementary material 78 | img = add_perturbation(img, ['color'], t) 79 | 80 | if test_img_gen and split == 'train': 81 | print("[test after train]: t = {}, img_id= {}, img = {}/{}/{}".format(t, img_id, img.shape, img.min(), img.max())) 82 | # exit() 83 | # save the training image 84 | print("saving training image to {}".format( 85 | 'test/train_img{}.jpg'.format(img_id))) 86 | rgb_img = Image.fromarray((255 * img).astype(np.uint8)) 87 | rgb_img = rgb_img.convert('RGB') 88 | os.makedirs('./test/', exist_ok = True) 89 | rgb_img.save('test/train_img{}.jpeg'.format(img_id)) 90 | 91 | # img[..., :3] = srgb_to_linear(img[..., :3]) 92 | if img.shape[2] == 4: # blend A to RGB 93 | if blend_a: 94 | img = img[..., :3]*img[..., -1:]+(1-img[..., -1:]) 95 | else: 96 | img = img[..., :3]*img[..., -1:] 97 | 98 | img = cv2.resize(img, img_wh) 99 | img = rearrange(img, 'h w c -> (h w) c') 100 | 101 | return img 102 | 103 | 104 | def read_image_phototour(img_path, blend_a=True, test_img_gen = False, img_id = 0, downscale = 1, crop_region = 'full'): 105 | img = imageio.imread(img_path).astype(np.float32)/255.0 106 | 107 | if test_img_gen: 108 | print("[test after train]: img_id= {}, img = {}/{}/{}".format(img_id, img.shape, img.min(), img.max())) 109 | # save the training image 110 | print("saving training image to {}".format( 111 | 'test/train_img_rep{}.jpg'.format(img_id))) 112 | rgb_img = Image.fromarray((255 * img).astype(np.uint8)) 113 | rgb_img = rgb_img.convert('RGB') 114 | os.makedirs('./test/', exist_ok = True) 115 | rgb_img.save('test/train_img_rep{}.jpeg'.format(img_id)) 116 | 117 | # img[..., :3] = srgb_to_linear(img[..., :3]) 118 | if img.shape[2] == 4: # blend A to RGB 119 | if blend_a: 120 | img = img[..., :3]*img[..., -1:]+(1-img[..., -1:]) 121 | else: 122 | img = img[..., :3]*img[..., -1:] 123 | 124 | # height and width 125 | img_hw = (img.shape[0]//downscale, img.shape[1]//downscale) 126 | if downscale > 1: 127 | img = cv2.resize(img, (img_hw[1], img_hw[0])) 128 | 129 | if crop_region == 'left': 130 | img = img[:, : img_hw[1]//2] 131 | elif crop_region == 'right': 132 | img = img[:, 
img_hw[1]//2 :] 133 | 134 | img = rearrange(img, 'h w c -> (h w) c') 135 | 136 | return img -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/datasets/depth_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import re 3 | 4 | 5 | def read_pfm(path): 6 | """Read pfm file. 7 | 8 | Args: 9 | path (str): path to file 10 | 11 | Returns: 12 | tuple: (data, scale) 13 | """ 14 | with open(path, "rb") as file: 15 | 16 | color = None 17 | width = None 18 | height = None 19 | scale = None 20 | endian = None 21 | 22 | header = file.readline().rstrip() 23 | if header.decode("ascii") == "PF": 24 | color = True 25 | elif header.decode("ascii") == "Pf": 26 | color = False 27 | else: 28 | raise Exception("Not a PFM file: " + path) 29 | 30 | dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii")) 31 | if dim_match: 32 | width, height = list(map(int, dim_match.groups())) 33 | else: 34 | raise Exception("Malformed PFM header.") 35 | 36 | scale = float(file.readline().decode("ascii").rstrip()) 37 | if scale < 0: 38 | # little-endian 39 | endian = "<" 40 | scale = -scale 41 | else: 42 | # big-endian 43 | endian = ">" 44 | 45 | data = np.fromfile(file, endian + "f") 46 | shape = (height, width, 3) if color else (height, width) 47 | 48 | data = np.reshape(data, shape) 49 | data = np.flipud(data) 50 | 51 | return data, scale -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/datasets/lb/__pycache__/colmap.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/nerfacc_radiance_fields/datasets/lb/__pycache__/colmap.cpython-38.pyc -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/datasets/lb/__pycache__/colmap_render.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/nerfacc_radiance_fields/datasets/lb/__pycache__/colmap_render.cpython-38.pyc -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/datasets/lb/__pycache__/nerf_synthetic.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/nerfacc_radiance_fields/datasets/lb/__pycache__/nerf_synthetic.cpython-38.pyc -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/datasets/lb/__pycache__/nerfpp.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IntelLabs/CLNeRF/d5573a7bca33e55da0ed2f3e637dd065e594d4a2/utils/nerfacc_radiance_fields/datasets/lb/__pycache__/nerfpp.cpython-38.pyc -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/datasets/nerfpp.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022 Ruilong Li, UC Berkeley. 
3 | """ 4 | 5 | import collections 6 | import json 7 | import os 8 | from tqdm import tqdm 9 | 10 | from PIL import Image 11 | import imageio.v2 as imageio 12 | import numpy as np 13 | import torch 14 | import torch.nn.functional as F 15 | 16 | from .utils import Rays 17 | import glob 18 | from .ray_utils import center_poses 19 | # from .color_utils import read_image 20 | 21 | 22 | def _load_renderings(data_dir: str, split: str): 23 | 24 | img_paths = sorted(glob.glob(os.path.join(data_dir, split, 'rgb/*'))) 25 | poses = sorted(glob.glob(os.path.join(data_dir, split, 'pose/*.txt'))) 26 | 27 | images = [] 28 | camtoworlds = [] 29 | print(f'Loading {len(img_paths)} {split} images ...') 30 | for img_path, pose in tqdm(zip(img_paths, poses)): 31 | camtoworlds += [np.loadtxt(pose).reshape(4, 4)[:3]] 32 | 33 | rgba = imageio.imread(img_path) 34 | # print("rgba.shape = {}".format(rgba.shape)) 35 | images.append(rgba) 36 | 37 | images = np.stack(images, axis=0) 38 | camtoworlds = np.stack(camtoworlds, axis=0) 39 | return images, camtoworlds 40 | 41 | 42 | class SubjectLoader(torch.utils.data.Dataset): 43 | """Single subject data loader for training and evaluation.""" 44 | 45 | SPLITS = ["train", "val", "trainval", "test"] 46 | 47 | # WIDTH, HEIGHT = 800, 800 48 | NEAR, FAR = 0.01, 16.0 49 | OPENGL_CAMERA = False 50 | 51 | def __init__( 52 | self, 53 | subject_id: str, 54 | root_fp: str, 55 | split: str, 56 | color_bkgd_aug: str = "random", 57 | num_rays: int = None, 58 | near: float = None, 59 | far: float = None, 60 | batch_over_images: bool = True, 61 | ): 62 | super().__init__() 63 | assert split in self.SPLITS, "%s" % split 64 | # assert subject_id in self.SUBJECT_IDS, "%s" % subject_id 65 | assert color_bkgd_aug in ["white", "black", "random"] 66 | self.split = split 67 | self.num_rays = num_rays 68 | self.near = self.NEAR if near is None else near 69 | self.far = self.FAR if far is None else far 70 | self.training = (num_rays is not None) and (split 71 | in ["train", "trainval"]) 72 | self.color_bkgd_aug = color_bkgd_aug 73 | self.batch_over_images = batch_over_images 74 | """Load images from disk.""" 75 | if not root_fp.startswith("/"): 76 | # allow relative path. 
e.g., "./data/nerf_synthetic/" 77 | root_fp = os.path.join( 78 | os.path.dirname(os.path.abspath(__file__)), 79 | "..", 80 | "..", 81 | root_fp, 82 | ) 83 | 84 | self.root_dir = os.path.join(root_fp, subject_id) 85 | 86 | self.images, self.camtoworlds = _load_renderings(self.root_dir, split) 87 | self.images = torch.from_numpy(self.images).to(torch.uint8) 88 | self.camtoworlds = torch.from_numpy(self.camtoworlds).to(torch.float32) 89 | self.read_intrinsics() 90 | assert self.images.shape[1:3] == (self.HEIGHT, self.WIDTH) 91 | 92 | def read_intrinsics(self): 93 | K = np.loadtxt(glob.glob( 94 | os.path.join(self.root_dir, 'train/intrinsics/*.txt'))[0], 95 | dtype=np.float32).reshape(4, 4)[:3, :3] 96 | w, h = Image.open( 97 | glob.glob(os.path.join(self.root_dir, 'train/rgb/*'))[0]).size 98 | # w, h = int(w), int(h) 99 | self.K = torch.FloatTensor(K) 100 | self.HEIGHT = int(h) 101 | self.WIDTH = int(w) 102 | 103 | def __len__(self): 104 | return len(self.images) 105 | 106 | @torch.no_grad() 107 | def __getitem__(self, index): 108 | data = self.fetch_data(index) 109 | data = self.preprocess(data) 110 | return data 111 | 112 | def preprocess(self, data): 113 | """Process the fetched / cached data with randomness.""" 114 | rgba, rays = data["rgba"], data["rays"] 115 | if rgba.shape[-1] == 4: 116 | pixels, alpha = torch.split(rgba, [3, 1], dim=-1) 117 | else: 118 | pixels = rgba 119 | 120 | device = "cuda:0" 121 | 122 | if self.training: 123 | if self.color_bkgd_aug == "random": 124 | color_bkgd = torch.rand(3, device=device) 125 | elif self.color_bkgd_aug == "white": 126 | color_bkgd = torch.ones(3, device=device) 127 | elif self.color_bkgd_aug == "black": 128 | color_bkgd = torch.zeros(3, device=device) 129 | else: 130 | # just use white during inference 131 | color_bkgd = torch.ones(3, device=device) 132 | 133 | if rgba.shape[-1] == 4: 134 | pixels = pixels * alpha + color_bkgd * (1.0 - alpha) 135 | return { 136 | "pixels": pixels, # [n_rays, 3] or [h, w, 3] 137 | "rays": rays, # [n_rays,] or [h, w] 138 | "color_bkgd": color_bkgd, # [3,] 139 | **{k: v 140 | for k, v in data.items() if k not in ["rgba", "rays"]}, 141 | } 142 | 143 | def update_num_rays(self, num_rays): 144 | self.num_rays = num_rays 145 | 146 | def fetch_data(self, index): 147 | """Fetch the data (it maybe cached for multiple batches).""" 148 | num_rays = self.num_rays 149 | 150 | device = "cuda:0" 151 | 152 | if self.training: 153 | if self.batch_over_images: 154 | image_id = torch.randint( 155 | 0, 156 | len(self.images), 157 | size=(num_rays, ), 158 | ) 159 | else: 160 | image_id = [index] 161 | x = torch.randint(0, self.WIDTH, size=(num_rays, )) 162 | y = torch.randint(0, self.HEIGHT, size=(num_rays, )) 163 | else: 164 | image_id = [index] 165 | x, y = torch.meshgrid( 166 | torch.arange(self.WIDTH), 167 | torch.arange(self.HEIGHT), 168 | indexing="xy", 169 | ) 170 | x = x.flatten() 171 | y = y.flatten() 172 | 173 | # generate rays 174 | rgba = self.images[image_id, y, x] / 255.0 # (num_rays, 4) 175 | c2w = self.camtoworlds[image_id] # (num_rays, 3, 4) 176 | camera_dirs = F.pad( 177 | torch.stack( 178 | [ 179 | (x - self.K[0, 2] + 0.5) / self.K[0, 0], 180 | (y - self.K[1, 2] + 0.5) / self.K[1, 1] * 181 | (-1.0 if self.OPENGL_CAMERA else 1.0), 182 | ], 183 | dim=-1, 184 | ), 185 | (0, 1), 186 | value=(-1.0 if self.OPENGL_CAMERA else 1.0), 187 | ) # [num_rays, 3] 188 | 189 | # [n_cams, height, width, 3] 190 | directions = (camera_dirs[:, None, :] * c2w[:, :3, :3]).sum(dim=-1) 191 | origins = torch.broadcast_to(c2w[:, :3, -1], 
directions.shape) 192 | viewdirs = directions / torch.linalg.norm( 193 | directions, dim=-1, keepdims=True) 194 | 195 | if self.training: 196 | origins = torch.reshape(origins, (num_rays, 3)) 197 | viewdirs = torch.reshape(viewdirs, (num_rays, 3)) 198 | rgba = torch.reshape(rgba, (num_rays, rgba.shape[-1])) 199 | else: 200 | origins = torch.reshape(origins, (self.HEIGHT, self.WIDTH, 3)) 201 | viewdirs = torch.reshape(viewdirs, (self.HEIGHT, self.WIDTH, 3)) 202 | rgba = torch.reshape(rgba, 203 | (self.HEIGHT, self.WIDTH, rgba.shape[-1])) 204 | 205 | rays = Rays(origins=origins.to(device), viewdirs=viewdirs.to(device)) 206 | 207 | return { 208 | "rgba": rgba.to(device), # [h, w, 3] or [num_rays, 3] 209 | "rays": rays, # [h, w, 3 or 4] or [num_rays, 3] 210 | } 211 | -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/datasets/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022 Ruilong Li, UC Berkeley. 3 | """ 4 | 5 | import collections 6 | 7 | Rays = collections.namedtuple("Rays", ("origins", "viewdirs")) 8 | 9 | 10 | def namedtuple_map(fn, tup): 11 | """Apply `fn` to each element of `tup` and cast to `tup`'s namedtuple.""" 12 | return type(tup)(*(None if x is None else fn(x) for x in tup)) 13 | -------------------------------------------------------------------------------- /utils/nerfacc_radiance_fields/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022 Ruilong Li, UC Berkeley. 3 | """ 4 | 5 | import random 6 | from typing import Optional 7 | 8 | import numpy as np 9 | import torch 10 | from .datasets.utils import Rays, namedtuple_map 11 | 12 | from nerfacc import OccupancyGrid, ray_marching, rendering 13 | 14 | 15 | def set_random_seed(seed): 16 | random.seed(seed) 17 | np.random.seed(seed) 18 | torch.manual_seed(seed) 19 | 20 | 21 | def render_image( 22 | # scene 23 | radiance_field: torch.nn.Module, 24 | occupancy_grid: OccupancyGrid, 25 | rays: Rays, 26 | task_id: torch.Tensor, 27 | scene_aabb: torch.Tensor, 28 | # rendering options 29 | near_plane: Optional[float] = None, 30 | far_plane: Optional[float] = None, 31 | render_step_size: float = 1e-3, 32 | render_bkgd: Optional[torch.Tensor] = None, 33 | cone_angle: float = 0.0, 34 | alpha_thre: float = 0.0, 35 | # test options 36 | test_chunk_size: int = 8192, 37 | # only useful for dnerf 38 | timestamps: Optional[torch.Tensor] = None, 39 | ): 40 | """Render the pixels of an image.""" 41 | rays_shape = rays.origins.shape 42 | if len(rays_shape) == 3: 43 | height, width, _ = rays_shape 44 | num_rays = height * width 45 | rays = namedtuple_map( 46 | lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays 47 | ) 48 | else: 49 | num_rays, _ = rays_shape 50 | 51 | def sigma_fn(t_starts, t_ends, ray_indices): 52 | t_origins = chunk_rays.origins[ray_indices] 53 | t_dirs = chunk_rays.viewdirs[ray_indices] 54 | positions = t_origins + t_dirs * (t_starts + t_ends) / 2.0 55 | # print("t_origins = {}/{}, positions = {}/{}, task_id = {}/{}, ray_indices = {}/{}, origins = {}/{}".format(t_origins, t_origins.shape, positions, positions.shape, task_id, task_id.shape, ray_indices, ray_indices.shape, chunk_rays.origins, chunk_rays.origins.shape)) 56 | # exit() 57 | if timestamps is not None: 58 | # dnerf 59 | t = ( 60 | timestamps[ray_indices] 61 | if radiance_field.training 62 | else timestamps.expand_as(positions[:, :1]) 63 | ) 64 | return 
radiance_field.query_density(positions, task_id[ray_indices], t) 65 | # print("positions = {}/{}, task_id = {}/{}, ray_indices = {}/{}".format(positions, positions.shape, task_id, task_id.shape, ray_indices, ray_indices.shape)) 66 | return radiance_field.query_density(positions, task_id[ray_indices]) 67 | 68 | def rgb_sigma_fn(t_starts, t_ends, ray_indices): 69 | t_origins = chunk_rays.origins[ray_indices] 70 | t_dirs = chunk_rays.viewdirs[ray_indices] 71 | positions = t_origins + t_dirs * (t_starts + t_ends) / 2.0 72 | if timestamps is not None: 73 | # dnerf 74 | t = ( 75 | timestamps[ray_indices] 76 | if radiance_field.training 77 | else timestamps.expand_as(positions[:, :1]) 78 | ) 79 | return radiance_field(positions, task_id[ray_indices], t, t_dirs) 80 | return radiance_field(positions, task_id[ray_indices], t_dirs) 81 | 82 | results = [] 83 | chunk = ( 84 | torch.iinfo(torch.int32).max 85 | if radiance_field.training 86 | else test_chunk_size 87 | ) 88 | for i in range(0, num_rays, chunk): 89 | chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays) 90 | ray_indices, t_starts, t_ends = ray_marching( 91 | chunk_rays.origins, 92 | chunk_rays.viewdirs, 93 | scene_aabb=scene_aabb, 94 | grid=occupancy_grid, 95 | sigma_fn=sigma_fn, 96 | near_plane=near_plane, 97 | far_plane=far_plane, 98 | render_step_size=render_step_size, 99 | stratified=radiance_field.training, 100 | cone_angle=cone_angle, 101 | alpha_thre=alpha_thre, 102 | ) 103 | rgb, opacity, depth = rendering( 104 | t_starts, 105 | t_ends, 106 | ray_indices, 107 | n_rays=chunk_rays.origins.shape[0], 108 | rgb_sigma_fn=rgb_sigma_fn, 109 | render_bkgd=render_bkgd, 110 | ) 111 | chunk_results = [rgb, opacity, depth, len(t_starts)] 112 | results.append(chunk_results) 113 | colors, opacities, depths, n_rendering_samples = [ 114 | torch.cat(r, dim=0) if isinstance(r[0], torch.Tensor) else r 115 | for r in zip(*results) 116 | ] 117 | return ( 118 | colors.view((*rays_shape[:-1], -1)), 119 | opacities.view((*rays_shape[:-1], -1)), 120 | depths.view((*rays_shape[:-1], -1)), 121 | sum(n_rendering_samples), 122 | ) 123 | 124 | 125 | 126 | def render_image_ori( 127 | # scene 128 | radiance_field: torch.nn.Module, 129 | occupancy_grid: OccupancyGrid, 130 | rays: Rays, 131 | scene_aabb: torch.Tensor, 132 | # rendering options 133 | near_plane: Optional[float] = None, 134 | far_plane: Optional[float] = None, 135 | render_step_size: float = 1e-3, 136 | render_bkgd: Optional[torch.Tensor] = None, 137 | cone_angle: float = 0.0, 138 | alpha_thre: float = 0.0, 139 | # test options 140 | test_chunk_size: int = 8192, 141 | # only useful for dnerf 142 | timestamps: Optional[torch.Tensor] = None, 143 | ): 144 | """Render the pixels of an image.""" 145 | rays_shape = rays.origins.shape 146 | if len(rays_shape) == 3: 147 | height, width, _ = rays_shape 148 | num_rays = height * width 149 | rays = namedtuple_map( 150 | lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays 151 | ) 152 | else: 153 | num_rays, _ = rays_shape 154 | 155 | def sigma_fn(t_starts, t_ends, ray_indices): 156 | t_origins = chunk_rays.origins[ray_indices] 157 | t_dirs = chunk_rays.viewdirs[ray_indices] 158 | positions = t_origins + t_dirs * (t_starts + t_ends) / 2.0 159 | # print("t_origins = {}/{}, positions = {}/{}, task_id = {}/{}, ray_indices = {}/{}, origins = {}/{}".format(t_origins, t_origins.shape, positions, positions.shape, task_id, task_id.shape, ray_indices, ray_indices.shape, chunk_rays.origins, chunk_rays.origins.shape)) 160 | # exit() 161 | if timestamps is not 
None: 162 | # dnerf 163 | t = ( 164 | timestamps[ray_indices] 165 | if radiance_field.training 166 | else timestamps.expand_as(positions[:, :1]) 167 | ) 168 | return radiance_field.query_density(positions, t) 169 | return radiance_field.query_density(positions) 170 | 171 | def rgb_sigma_fn(t_starts, t_ends, ray_indices): 172 | t_origins = chunk_rays.origins[ray_indices] 173 | t_dirs = chunk_rays.viewdirs[ray_indices] 174 | positions = t_origins + t_dirs * (t_starts + t_ends) / 2.0 175 | if timestamps is not None: 176 | # dnerf 177 | t = ( 178 | timestamps[ray_indices] 179 | if radiance_field.training 180 | else timestamps.expand_as(positions[:, :1]) 181 | ) 182 | return radiance_field(positions, t, t_dirs) 183 | return radiance_field(positions, t_dirs) 184 | 185 | results = [] 186 | chunk = ( 187 | torch.iinfo(torch.int32).max 188 | if radiance_field.training 189 | else test_chunk_size 190 | ) 191 | for i in range(0, num_rays, chunk): 192 | chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays) 193 | ray_indices, t_starts, t_ends = ray_marching( 194 | chunk_rays.origins, 195 | chunk_rays.viewdirs, 196 | scene_aabb=scene_aabb, 197 | grid=occupancy_grid, 198 | sigma_fn=sigma_fn, 199 | near_plane=near_plane, 200 | far_plane=far_plane, 201 | render_step_size=render_step_size, 202 | stratified=radiance_field.training, 203 | cone_angle=cone_angle, 204 | alpha_thre=alpha_thre, 205 | ) 206 | rgb, opacity, depth = rendering( 207 | t_starts, 208 | t_ends, 209 | ray_indices, 210 | n_rays=chunk_rays.origins.shape[0], 211 | rgb_sigma_fn=rgb_sigma_fn, 212 | render_bkgd=render_bkgd, 213 | ) 214 | chunk_results = [rgb, opacity, depth, len(t_starts)] 215 | results.append(chunk_results) 216 | colors, opacities, depths, n_rendering_samples = [ 217 | torch.cat(r, dim=0) if isinstance(r[0], torch.Tensor) else r 218 | for r in zip(*results) 219 | ] 220 | return ( 221 | colors.view((*rays_shape[:-1], -1)), 222 | opacities.view((*rays_shape[:-1], -1)), 223 | depths.view((*rays_shape[:-1], -1)), 224 | sum(n_rendering_samples), 225 | ) -------------------------------------------------------------------------------- /utils/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def extract_model_state_dict(ckpt_path, model_name='model', prefixes_to_ignore=[]): 5 | checkpoint = torch.load(ckpt_path, map_location='cpu') 6 | checkpoint_ = {} 7 | if 'state_dict' in checkpoint: # if it's a pytorch-lightning checkpoint 8 | checkpoint = checkpoint['state_dict'] 9 | for k, v in checkpoint.items(): 10 | if not k.startswith(model_name): 11 | continue 12 | k = k[len(model_name)+1:] 13 | for prefix in prefixes_to_ignore: 14 | if k.startswith(prefix): 15 | break 16 | else: 17 | checkpoint_[k] = v 18 | return checkpoint_ 19 | 20 | 21 | def load_ckpt(model, ckpt_path, model_name='model', prefixes_to_ignore=[]): 22 | if not ckpt_path: return 23 | model_dict = model.state_dict() 24 | print("loading checkpoint...") 25 | checkpoint_ = extract_model_state_dict(ckpt_path, model_name, prefixes_to_ignore) 26 | model_dict.update(checkpoint_) 27 | model.load_state_dict(model_dict) 28 | 29 | 30 | def slim_ckpt(ckpt_path, save_poses=False): 31 | ckpt = torch.load(ckpt_path, map_location='cpu') 32 | # pop unused parameters 33 | keys_to_pop = ['directions', 'model.density_grid', 'model.grid_coords'] 34 | if not save_poses: keys_to_pop += ['poses'] 35 | for k in ckpt['state_dict']: 36 | if k.startswith('val_lpips'): 37 | keys_to_pop += [k] 38 | for k in keys_to_pop: 39 | 
ckpt['state_dict'].pop(k, None) 40 | return ckpt['state_dict'] 41 | 42 | 43 | import math 44 | import torch 45 | from torch.optim.lr_scheduler import _LRScheduler 46 | 47 | class CosineAnnealingWarmupRestarts(_LRScheduler): 48 | """ 49 | optimizer (Optimizer): Wrapped optimizer. 50 | first_cycle_steps (int): First cycle step size. 51 | cycle_mult(float): Cycle steps magnification. Default: -1. 52 | max_lr(float): First cycle's max learning rate. Default: 0.1. 53 | min_lr(float): Min learning rate. Default: 0.001. 54 | warmup_steps(int): Linear warmup step size. Default: 0. 55 | gamma(float): Decrease rate of max learning rate by cycle. Default: 1. 56 | last_epoch (int): The index of last epoch. Default: -1. 57 | """ 58 | 59 | def __init__(self, 60 | optimizer : torch.optim.Optimizer, 61 | first_cycle_steps : int, 62 | cycle_mult : float = 1., 63 | max_lr : float = 0.1, 64 | min_lr : float = 0.001, 65 | warmup_steps : int = 0, 66 | gamma : float = 1., 67 | last_epoch : int = -1 68 | ): 69 | assert warmup_steps < first_cycle_steps 70 | 71 | self.first_cycle_steps = first_cycle_steps # first cycle step size 72 | self.cycle_mult = cycle_mult # cycle steps magnification 73 | self.base_max_lr = max_lr # first max learning rate 74 | self.max_lr = max_lr # max learning rate in the current cycle 75 | self.min_lr = min_lr # min learning rate 76 | self.warmup_steps = warmup_steps # warmup step size 77 | self.gamma = gamma # decrease rate of max learning rate by cycle 78 | 79 | self.cur_cycle_steps = first_cycle_steps # first cycle step size 80 | self.cycle = 0 # cycle count 81 | self.step_in_cycle = last_epoch # step size of the current cycle 82 | 83 | super(CosineAnnealingWarmupRestarts, self).__init__(optimizer, last_epoch) 84 | 85 | # set learning rate min_lr 86 | self.init_lr() 87 | 88 | def init_lr(self): 89 | self.base_lrs = [] 90 | for param_group in self.optimizer.param_groups: 91 | param_group['lr'] = self.min_lr 92 | self.base_lrs.append(self.min_lr) 93 | 94 | def get_lr(self): 95 | if self.step_in_cycle == -1: 96 | return self.base_lrs 97 | elif self.step_in_cycle < self.warmup_steps: 98 | return [(self.max_lr - base_lr)*self.step_in_cycle / self.warmup_steps + base_lr for base_lr in self.base_lrs] 99 | else: 100 | return [base_lr + (self.max_lr - base_lr) \ 101 | * (1 + math.cos(math.pi * (self.step_in_cycle-self.warmup_steps) \ 102 | / (self.cur_cycle_steps - self.warmup_steps))) / 2 103 | for base_lr in self.base_lrs] 104 | 105 | def step(self, epoch=None): 106 | if epoch is None: 107 | epoch = self.last_epoch + 1 108 | self.step_in_cycle = self.step_in_cycle + 1 109 | if self.step_in_cycle >= self.cur_cycle_steps: 110 | self.cycle += 1 111 | self.step_in_cycle = self.step_in_cycle - self.cur_cycle_steps 112 | self.cur_cycle_steps = int((self.cur_cycle_steps - self.warmup_steps) * self.cycle_mult) + self.warmup_steps 113 | else: 114 | if epoch >= self.first_cycle_steps: 115 | if self.cycle_mult == 1.: 116 | self.step_in_cycle = epoch % self.first_cycle_steps 117 | self.cycle = epoch // self.first_cycle_steps 118 | else: 119 | n = int(math.log((epoch / self.first_cycle_steps * (self.cycle_mult - 1) + 1), self.cycle_mult)) 120 | self.cycle = n 121 | self.step_in_cycle = epoch - int(self.first_cycle_steps * (self.cycle_mult ** n - 1) / (self.cycle_mult - 1)) 122 | self.cur_cycle_steps = self.first_cycle_steps * self.cycle_mult ** (n) 123 | else: 124 | self.cur_cycle_steps = self.first_cycle_steps 125 | self.step_in_cycle = epoch 126 | 127 | self.max_lr = self.base_max_lr * 
(self.gamma**self.cycle) 128 | self.last_epoch = math.floor(epoch) 129 | for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): 130 | param_group['lr'] = lr --------------------------------------------------------------------------------
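A minimal usage sketch for the CosineAnnealingWarmupRestarts scheduler defined in utils/utils.py above; the model, optimizer, and every hyperparameter value below are illustrative placeholders rather than settings taken from this repository:

import torch
from utils.utils import CosineAnnealingWarmupRestarts

model = torch.nn.Linear(10, 1)  # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

scheduler = CosineAnnealingWarmupRestarts(
    optimizer,
    first_cycle_steps=1000,  # steps in the first cosine cycle
    cycle_mult=1.0,          # keep every cycle the same length
    max_lr=1e-2,             # peak learning rate reached after warmup
    min_lr=1e-5,             # learning-rate floor at the end of each cycle
    warmup_steps=100,        # linear warmup at the start of each cycle (must be < first_cycle_steps)
    gamma=0.5,               # halve the peak learning rate after every cycle
)

for step in range(3000):
    # ... forward / backward pass would go here ...
    optimizer.step()
    scheduler.step()  # advance the schedule once per optimization step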