├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bug-report.md │ └── feature_request.md └── workflows │ ├── core_code_checks.yml │ ├── publish.yml │ └── viewer_build_deploy.yml ├── .gitignore ├── .prettierrc.js ├── .readthedocs.yaml ├── .run_training_monocular.sh.swp ├── .vscode ├── c_cpp_properties.json ├── launch.json └── settings.json ├── Dockerfile ├── LICENSE ├── README.md ├── colab └── demo.ipynb ├── docs ├── Makefile ├── _pygments │ └── style.py ├── _static │ ├── custom.js │ ├── imgs │ │ ├── logo-dark.png │ │ ├── logo.png │ │ ├── readme_colab.png │ │ ├── readme_documentation.png │ │ └── readme_viewer.png │ └── require.min.js ├── _templates │ ├── base.html │ └── sidebar │ │ └── brand.html ├── conf.py ├── developer_guides │ ├── config.md │ ├── debugging_tools │ │ ├── benchmarking.md │ │ ├── index.rst │ │ ├── local_logger.md │ │ └── profiling.md │ ├── pipelines │ │ ├── datamanagers.md │ │ ├── dataparsers.md │ │ ├── fields.md │ │ ├── imgs │ │ │ ├── pipeline_datamanager-dark.png │ │ │ ├── pipeline_datamanager-light.png │ │ │ ├── pipeline_field-dark.png │ │ │ ├── pipeline_field-light.png │ │ │ ├── pipeline_model-dark.png │ │ │ ├── pipeline_model-light.png │ │ │ ├── pipeline_overview-dark.png │ │ │ ├── pipeline_overview-light.png │ │ │ ├── pipeline_parser-dark.png │ │ │ ├── pipeline_parser-light.png │ │ │ ├── pipeline_pipeline-dark.png │ │ │ └── pipeline_pipeline-light.png │ │ ├── index.rst │ │ ├── models.md │ │ └── pipelines.md │ └── viewer │ │ ├── imgs │ │ └── viewer_figure.png │ │ └── viewer_overview.md ├── index.md ├── make.bat ├── nerfology │ ├── methods │ │ ├── imgs │ │ │ ├── mipnerf │ │ │ │ ├── models_mipnerf_field-dark.png │ │ │ │ ├── models_mipnerf_field-light.png │ │ │ │ ├── models_mipnerf_pipeline-dark.png │ │ │ │ └── models_mipnerf_pipeline-light.png │ │ │ ├── models_nerf-field-dark.png │ │ │ ├── models_nerf-field-light.png │ │ │ ├── models_nerf-pipeline-dark.png │ │ │ ├── models_nerf-pipeline-field-dark.png │ │ │ ├── models_nerf-pipeline-field-light.png │ │ │ ├── models_nerf-pipeline-light.png │ │ │ ├── models_nerf-pipeline-renderer-dark.png │ │ │ ├── models_nerf-pipeline-renderer-light.png │ │ │ ├── models_nerf-pipeline-sampler-dark.png │ │ │ ├── models_nerf-pipeline-sampler-light.png │ │ │ └── nerfacto │ │ │ │ ├── models_nerfacto_field-dark.png │ │ │ │ ├── models_nerfacto_field-light.png │ │ │ │ ├── models_nerfacto_pipeline-dark.png │ │ │ │ └── models_nerfacto_pipeline-light.png │ │ ├── index.md │ │ ├── instant_ngp.md │ │ ├── mipnerf.md │ │ ├── nerf.md │ │ ├── nerfacto.md │ │ └── semantic_nerfw.md │ └── model_components │ │ ├── imgs │ │ ├── frustums-dark.png │ │ ├── frustums.png │ │ ├── samplers_stratified-dark.png │ │ ├── samplers_stratified-light.png │ │ ├── samplers_type-dark.png │ │ └── samplers_type-light.png │ │ ├── index.md │ │ ├── visualize_cameras.ipynb │ │ ├── visualize_encoders.ipynb │ │ ├── visualize_samplers.ipynb │ │ ├── visualize_samples.ipynb │ │ └── visualize_spatial_distortions.ipynb ├── quickstart │ ├── custom_dataset.md │ ├── data_conventions.md │ ├── export_geometry.md │ ├── first_nerf.md │ ├── imgs │ │ ├── polycam_export.png │ │ ├── polycam_settings.png │ │ ├── record3d_promo.png │ │ ├── record_3d_export_selection.png │ │ ├── record_3d_video_selection.png │ │ └── viewer_link.png │ ├── installation.md │ └── viewer_quickstart.rst ├── reference │ ├── api │ │ ├── config.rst │ │ ├── data.rst │ │ ├── field_components │ │ │ ├── embeddings.rst │ │ │ ├── encodings.rst │ │ │ ├── field_heads.rst │ │ │ ├── index.rst │ │ │ ├── mlp.rst │ │ │ └── 
spatial_distortions.rst │ │ ├── fields.rst │ │ ├── index.rst │ │ ├── model_components │ │ │ ├── index.rst │ │ │ ├── losses.rst │ │ │ ├── ray_sampler.rst │ │ │ └── renderers.rst │ │ ├── models.rst │ │ ├── optimizers.rst │ │ ├── utils │ │ │ ├── colormaps.rst │ │ │ ├── colors.rst │ │ │ ├── index.rst │ │ │ ├── math.rst │ │ │ └── tensor_dataclass.rst │ │ └── viewer.rst │ ├── cli │ │ ├── index.md │ │ ├── ns_download_data.md │ │ ├── ns_eval.md │ │ ├── ns_export.md │ │ ├── ns_process_data.md │ │ ├── ns_render.md │ │ └── ns_train.md │ └── contributing.md ├── sdfstudio-data.md ├── sdfstudio-examples.md └── sdfstudio-methods.md ├── media ├── help-output.png ├── overview.png ├── overview.svg ├── sdf_studio_4.png ├── sdf_studio_4.svg ├── training-process.png └── viewer_screenshot.png ├── nerfstudio ├── __init__.py ├── cameras │ ├── __init__.py │ ├── camera_optimizers.py │ ├── camera_paths.py │ ├── camera_utils.py │ ├── cameras.py │ ├── lie_groups.py │ └── rays.py ├── configs │ ├── __init__.py │ ├── base_config.py │ ├── config_utils.py │ └── method_configs.py ├── data │ ├── __init__.py │ ├── datamanagers │ │ ├── __init__.py │ │ ├── base_datamanager.py │ │ ├── semantic_datamanager.py │ │ └── variable_res_datamanager.py │ ├── dataparsers │ │ ├── __init__.py │ │ ├── base_dataparser.py │ │ ├── blender_dataparser.py │ │ ├── dnerf_dataparser.py │ │ ├── friends_dataparser.py │ │ ├── heritage_dataparser.py │ │ ├── instant_ngp_dataparser.py │ │ ├── mipnerf360_dataparser.py │ │ ├── monosdf_dataparser.py │ │ ├── nerfstudio_dataparser.py │ │ ├── nuscenes_dataparser.py │ │ ├── phototourism_dataparser.py │ │ ├── record3d_dataparser.py │ │ └── sdfstudio_dataparser.py │ ├── datasets │ │ ├── __init__.py │ │ ├── base_dataset.py │ │ └── semantic_dataset.py │ ├── pixel_samplers.py │ ├── scene_box.py │ └── utils │ │ ├── __init__.py │ │ ├── colmap_utils.py │ │ ├── data_utils.py │ │ ├── dataloaders.py │ │ └── nerfstudio_collate.py ├── engine │ ├── __init__.py │ ├── callbacks.py │ ├── optimizers.py │ ├── schedulers.py │ └── trainer.py ├── exporter │ ├── __init__.py │ ├── exporter_utils.py │ ├── texture_utils.py │ └── tsdf_utils.py ├── field_components │ ├── __init__.py │ ├── activations.py │ ├── base_field_component.py │ ├── embedding.py │ ├── encodings.py │ ├── field_heads.py │ ├── mlp.py │ ├── spatial_distortions.py │ └── temporal_distortions.py ├── fields │ ├── __init__.py │ ├── base_field.py │ ├── density_fields.py │ ├── instant_ngp_field.py │ ├── nerfacto_field.py │ ├── nerfw_field.py │ ├── sdf_field.py │ ├── semantic_nerf_field.py │ ├── tensorf_field.py │ └── vanilla_nerf_field.py ├── model_components │ ├── __init__.py │ ├── losses.py │ ├── patch_warping.py │ ├── ray_generators.py │ ├── ray_samplers.py │ ├── renderers.py │ └── scene_colliders.py ├── models │ ├── __init__.py │ ├── bakedangelo.py │ ├── bakedsdf.py │ ├── base_model.py │ ├── base_surface_model.py │ ├── dto.py │ ├── instant_ngp.py │ ├── mipnerf.py │ ├── monosdf.py │ ├── nerfacto.py │ ├── neuralangelo.py │ ├── neuralreconW.py │ ├── neus.py │ ├── neus_acc.py │ ├── neus_facto.py │ ├── semantic_nerfw.py │ ├── tensorf.py │ ├── unisurf.py │ ├── vanilla_nerf.py │ └── volsdf.py ├── pipelines │ ├── __init__.py │ ├── base_pipeline.py │ └── dynamic_batch.py ├── process_data │ ├── __init__.py │ ├── colmap_utils.py │ ├── hloc_utils.py │ ├── insta360_utils.py │ ├── metashape_utils.py │ ├── polycam_utils.py │ ├── process_data_utils.py │ └── record3d_utils.py ├── py.typed ├── utils │ ├── __init__.py │ ├── colormaps.py │ ├── colors.py │ ├── comms.py │ ├── decorators.py │ ├── 
eval_utils.py │ ├── images.py │ ├── install_checks.py │ ├── io.py │ ├── marching_cubes.py │ ├── math.py │ ├── misc.py │ ├── plotly_utils.py │ ├── poses.py │ ├── printing.py │ ├── profiler.py │ ├── rich_utils.py │ ├── scripts.py │ ├── tensor_dataclass.py │ └── writer.py └── viewer │ ├── __init__.py │ ├── app │ ├── .env.development │ ├── .eslintrc.json │ ├── .gitignore │ ├── package.json │ ├── public │ │ ├── electron.js │ │ ├── favicon.png │ │ ├── index.html │ │ ├── manifest.json │ │ └── robots.txt │ ├── requirements.txt │ ├── run_deploy.py │ ├── src │ │ ├── App.jsx │ │ ├── SceneNode.js │ │ ├── index.jsx │ │ ├── index.scss │ │ ├── modules │ │ │ ├── Banner │ │ │ │ ├── Banner.jsx │ │ │ │ └── index.jsx │ │ │ ├── ConfigPanel │ │ │ │ ├── ConfigPanel.jsx │ │ │ │ └── ConfigPanelSlice.js │ │ │ ├── LandingModal │ │ │ │ ├── LandingModal.jsx │ │ │ │ └── index.jsx │ │ │ ├── LogPanel │ │ │ │ └── LogPanel.jsx │ │ │ ├── RenderModal │ │ │ │ ├── RenderModal.jsx │ │ │ │ └── index.jsx │ │ │ ├── RenderWindow │ │ │ │ └── RenderWindow.jsx │ │ │ ├── Scene │ │ │ │ ├── Scene.jsx │ │ │ │ └── drawing.js │ │ │ ├── SidePanel │ │ │ │ ├── CameraPanel │ │ │ │ │ ├── CameraHelper.js │ │ │ │ │ ├── CameraPanel.jsx │ │ │ │ │ ├── curve.js │ │ │ │ │ └── index.jsx │ │ │ │ ├── ExportPanel │ │ │ │ │ ├── ExportPanel.jsx │ │ │ │ │ ├── MeshSubPanel.jsx │ │ │ │ │ ├── PointcloudSubPanel.jsx │ │ │ │ │ └── index.jsx │ │ │ │ ├── ScenePanel │ │ │ │ │ ├── ScenePanel.jsx │ │ │ │ │ └── index.jsx │ │ │ │ ├── SidePanel.jsx │ │ │ │ └── StatusPanel │ │ │ │ │ ├── StatusPanel.jsx │ │ │ │ │ └── index.jsx │ │ │ ├── ViewerWindow │ │ │ │ ├── ViewerWindow.jsx │ │ │ │ └── ViewerWindowSlice.js │ │ │ ├── ViewportControlsModal │ │ │ │ ├── ViewportControlsModal.jsx │ │ │ │ └── index.jsx │ │ │ ├── WebSocket │ │ │ │ └── WebSocket.jsx │ │ │ └── WebSocketUrlField.jsx │ │ ├── reducer.js │ │ ├── setupTests.js │ │ ├── store.js │ │ ├── subscriber.js │ │ ├── themes │ │ │ ├── leva_theme.json │ │ │ └── theme.ts │ │ └── utils.js │ └── yarn.lock │ └── server │ ├── README.md │ ├── __init__.py │ ├── path.py │ ├── server.py │ ├── state │ ├── node.py │ └── state_node.py │ ├── subprocess.py │ ├── utils.py │ ├── viewer_utils.py │ └── visualizer.py ├── pyproject.toml ├── scripts ├── __init__.py ├── benchmarking │ ├── launch_eval_blender.sh │ └── launch_train_blender.sh ├── completions │ ├── .gitignore │ ├── __init__.py │ ├── install.py │ ├── setup.bash │ └── setup.zsh ├── datasets │ ├── extract_monocular_cues.py │ ├── process_nerfstudio_to_sdfstudio.py │ ├── process_neuralrgbd_to_sdfstudio.py │ ├── process_nuscenes_masks.py │ └── process_scannet_to_sdfstudio.py ├── docs │ ├── __init__.py │ ├── add_nb_tags.py │ └── build_docs.py ├── downloads │ ├── __init__.py │ └── download_data.py ├── eval.py ├── exporter.py ├── extract_mesh.py ├── generate_kitti360_trainsplit.py ├── github │ ├── __init__.py │ └── run_actions.py ├── heritage_to_nerfstudio.py ├── licensing │ ├── copyright.txt │ └── license_headers.sh ├── process_data.py ├── render.json ├── render.py ├── render_mesh.py ├── texture.py ├── train.py └── viewer │ └── view_dataset.py ├── setup.cfg └── tests ├── cameras ├── test_cameras.py └── test_rays.py ├── data └── lego_test │ ├── train │ └── r_0.png │ ├── transforms_train.json │ ├── transforms_val.json │ └── val │ └── r_0.png ├── field_components ├── test_embedding.py ├── test_encodings.py ├── test_field_outputs.py ├── test_fields.py ├── test_mlp.py └── test_temporal_distortions.py ├── model_components ├── test_ray_sampler.py └── test_renderers.py ├── test_train.py └── utils ├── 
test_poses.py ├── test_tensor_dataclass.py └── test_visualization.py /.gitattributes: -------------------------------------------------------------------------------- 1 | *.ipynb linguist-documentation -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Additional context** 27 | Add any other context about the problem here. 28 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/workflows/core_code_checks.yml: -------------------------------------------------------------------------------- 1 | name: Core Tests. 
2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v3 18 | - name: Set up Python 3.7.13 19 | uses: actions/setup-python@v4 20 | with: 21 | python-version: '3.7.13' 22 | - uses: actions/cache@v2 23 | with: 24 | path: ${{ env.pythonLocation }} 25 | key: ${{ env.pythonLocation }}-${{ hashFiles('pyproject.toml') }} 26 | - name: Install dependencies 27 | run: | 28 | pip install --upgrade --upgrade-strategy eager -e .[dev] 29 | - name: Run license checks 30 | run: | 31 | ./scripts/licensing/license_headers.sh --check 32 | - name: Check notebook cell metadata 33 | run: | 34 | python ./scripts/docs/add_nb_tags.py --check 35 | - name: Run isort 36 | run: isort docs/ nerfstudio/ scripts/ tests/ --profile black --check 37 | - name: Run Black 38 | run: black docs/ nerfstudio/ scripts/ tests/ --check 39 | - name: Python Pylint 40 | run: | 41 | pylint nerfstudio tests scripts 42 | - name: Test with pytest 43 | run: | 44 | pytest 45 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python package using twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | name: Upload Python Package 5 | 6 | on: 7 | release: 8 | types: [created] 9 | 10 | jobs: 11 | deploy: 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v2 16 | - name: Set up Python 17 | uses: actions/setup-python@v1 18 | with: 19 | python-version: '3.8' 20 | - name: Install dependencies 21 | run: | 22 | python -m pip install build twine 23 | - name: Strip unsupported tags in README 24 | run: | 25 | sed -i '//,//d' README.md 26 | - name: Build and publish 27 | env: 28 | PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} 29 | run: | 30 | python -m build 31 | twine upload --username __token__ --password $PYPI_TOKEN dist/* 32 | -------------------------------------------------------------------------------- /.github/workflows/viewer_build_deploy.yml: -------------------------------------------------------------------------------- 1 | name: Viewer Build and Deploy.
2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | build: 11 | name: Build 12 | runs-on: ubuntu-latest 13 | defaults: 14 | run: 15 | working-directory: ./nerfstudio/viewer/app 16 | steps: 17 | - name: Checkout code 18 | uses: actions/checkout@master 19 | 20 | - name: Install Node.js 21 | uses: actions/setup-node@v3 22 | with: 23 | node-version: 17.8.0 24 | cache: 'yarn' 25 | cache-dependency-path: ./nerfstudio/viewer/app/yarn.lock 26 | 27 | - name: Install packages 28 | run: yarn install 29 | 30 | - name: Build project 31 | run: CI=false yarn build 32 | 33 | - name: Upload production-ready build files 34 | uses: actions/upload-artifact@v2 35 | with: 36 | name: production-files 37 | path: ./nerfstudio/viewer/app/build 38 | 39 | deploy: 40 | name: Deploy 41 | needs: build 42 | runs-on: ubuntu-latest 43 | 44 | env: 45 | SSH_KEY: ${{secrets.SSH_KEY}} 46 | 47 | steps: 48 | - uses: actions/checkout@v3 49 | - name: Set up Python 3.8.12 50 | uses: actions/setup-python@v4 51 | with: 52 | python-version: '3.8.12' 53 | 54 | - name: Install dependencies 55 | run: | 56 | pip install -r ./nerfstudio/viewer/app/requirements.txt 57 | 58 | - name: Download artifact 59 | uses: actions/download-artifact@v2 60 | with: 61 | name: production-files 62 | path: ./nerfstudio/viewer/app/build 63 | 64 | - name: Get branch name (merge) 65 | if: github.event_name != 'pull_request' 66 | shell: bash 67 | run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/} | tr / -)" >> $GITHUB_ENV 68 | 69 | - name: Get branch name (pull request) 70 | if: github.event_name == 'pull_request' 71 | shell: bash 72 | run: echo "BRANCH_NAME=$(echo ${GITHUB_HEAD_REF} | tr / -)" >> $GITHUB_ENV 73 | 74 | # TODO: detect file or scheme changes of the viewer and only 75 | # increment the version.txt file when there is a change. 76 | # Update the version.txt code and push to master when things change. 
77 | # https://github.com/marketplace/actions/changed-files 78 | # - name: Run changed-files with defaults on the dir1 79 | # id: changed-files-for-dir1 80 | # uses: tj-actions/changed-files@v29.0.3 81 | # with: 82 | # path: nerfstudio/viewer/app 83 | 84 | # - name: List all added files in dir1 85 | # run: | 86 | # for file in ${{ steps.changed-files-for-dir1.outputs.modified_files }}; do 87 | # echo "$file was modified" 88 | # done 89 | 90 | - run: | 91 | python ./nerfstudio/viewer/app/run_deploy.py \ 92 | --branch-name ${{ env.BRANCH_NAME }} \ 93 | --ssh-key-string "$SSH_KEY" \ 94 | --local-folder ./nerfstudio/viewer/app/build \ 95 | --package-json-filename ./nerfstudio/viewer/app/package.json \ 96 | --increment-version "False" 97 | - run: cat ~/.ssh/config 98 | -------------------------------------------------------------------------------- /.prettierrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | trailingComma: 'all', 3 | arrowParens: 'always', 4 | singleQuote: true, 5 | jsxSingleQuote: false, 6 | }; -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-20.04 11 | tools: 12 | python: '3.9' 13 | # You can also specify other tool versions: 14 | # nodejs: "16" 15 | # rust: "1.55" 16 | # golang: "1.17" 17 | 18 | # Build documentation in the docs/ directory with Sphinx 19 | sphinx: 20 | fail_on_warning: true 21 | configuration: docs/conf.py 22 | 23 | # If using Sphinx, optionally build your docs in additional formats such as PDF 24 | # formats: 25 | # - pdf 26 | 27 | # Optionally declare the Python requirements required to build your docs 28 | python: 29 | install: 30 | # Equivalent to 'pip install .' 31 | - method: pip 32 | path: . 33 | # Equivalent to 'pip install .[docs]' 34 | - method: pip 35 | path: . 36 | extra_requirements: 37 | - docs 38 | -------------------------------------------------------------------------------- /.run_training_monocular.sh.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/.run_training_monocular.sh.swp -------------------------------------------------------------------------------- /.vscode/c_cpp_properties.json: -------------------------------------------------------------------------------- 1 | { 2 | "configurations": [ 3 | { 4 | "name": "Linux", 5 | "includePath": [ 6 | "${workspaceFolder}/**" 7 | ], 8 | "defines": [], 9 | "compilerPath": "/usr/bin/gcc", 10 | "cStandard": "c11", 11 | "intelliSenseMode": "linux-gcc-x64", 12 | "configurationProvider": "ms-vscode.makefile-tools" 13 | } 14 | ], 15 | "version": 4 16 | } -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 
9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/_pygments/style.py: -------------------------------------------------------------------------------- 1 | """Custom Pygments styles for the Sphinx documentation.""" 2 | 3 | from pygments.style import Style 4 | from pygments.token import ( 5 | Comment, 6 | Error, 7 | Generic, 8 | Keyword, 9 | Name, 10 | Number, 11 | Operator, 12 | Punctuation, 13 | String, 14 | Token, 15 | Whitespace, 16 | ) 17 | 18 | 19 | class NerfstudioStyleLight(Style): 20 | """ 21 | A style based on the manni pygments style. 22 | """ 23 | 24 | background_color = "#f8f9fb" 25 | 26 | styles = { 27 | Whitespace: "#bbbbbb", 28 | Comment: "italic #d34600", 29 | Comment.Preproc: "noitalic #009999", 30 | Comment.Special: "bold", 31 | Keyword: "bold #006699", 32 | Keyword.Pseudo: "nobold", 33 | Keyword.Type: "#007788", 34 | Operator: "#555555", 35 | Operator.Word: "bold #000000", 36 | Name.Builtin: "#336666", 37 | Name.Function: "#CC00FF", 38 | Name.Class: "bold #00AA88", 39 | Name.Namespace: "bold #00CCFF", 40 | Name.Exception: "bold #CC0000", 41 | Name.Variable: "#003333", 42 | Name.Constant: "#336600", 43 | Name.Label: "#9999FF", 44 | Name.Entity: "bold #999999", 45 | Name.Attribute: "#330099", 46 | Name.Tag: "bold #330099", 47 | Name.Decorator: "#9999FF", 48 | String: "#CC3300", 49 | String.Doc: "italic", 50 | String.Interpol: "#AA0000", 51 | String.Escape: "bold #CC3300", 52 | String.Regex: "#33AAAA", 53 | String.Symbol: "#FFCC33", 54 | String.Other: "#CC3300", 55 | Number: "#FF6600", 56 | Generic.Heading: "bold #003300", 57 | Generic.Subheading: "bold #003300", 58 | Generic.Deleted: "border:#CC0000 bg:#FFCCCC", 59 | Generic.Inserted: "border:#00CC00 bg:#CCFFCC", 60 | Generic.Error: "#FF0000", 61 | Generic.Emph: "italic", 62 | Generic.Strong: "bold", 63 | Generic.Prompt: "bold #000099", 64 | Generic.Output: "#AAAAAA", 65 | Generic.Traceback: "#99CC66", 66 | Error: "bg:#FFAAAA #AA0000", 67 | } 68 | 69 | 70 | class NerfstudioStyleDark(Style): 71 | """ 72 | A style based on the one-dark style. 
73 | """ 74 | 75 | background_color = "#282C34" 76 | 77 | styles = { 78 | Token: "#ABB2BF", 79 | Punctuation: "#ABB2BF", 80 | Punctuation.Marker: "#ABB2BF", 81 | Keyword: "#C678DD", 82 | Keyword.Constant: "#fdd06c", 83 | Keyword.Declaration: "#C678DD", 84 | Keyword.Namespace: "#C678DD", 85 | Keyword.Reserved: "#C678DD", 86 | Keyword.Type: "#fdd06c", 87 | Name: "#ff8c58", 88 | Name.Attribute: "#ff8c58", 89 | Name.Builtin: "#fdd06c", 90 | Name.Class: "#fdd06c", 91 | Name.Function: "bold #61AFEF", 92 | Name.Function.Magic: "bold #56B6C2", 93 | Name.Other: "#ff8c58", 94 | Name.Tag: "#ff8c58", 95 | Name.Decorator: "#61AFEF", 96 | Name.Variable.Class: "", 97 | String: "#bde3a1", 98 | Number: "#D19A66", 99 | Operator: "#56B6C2", 100 | Comment: "#7F848E", 101 | } 102 | -------------------------------------------------------------------------------- /docs/_static/custom.js: -------------------------------------------------------------------------------- 1 | requirejs.config({ 2 | paths: { 3 | base: '/static/base', 4 | plotly: 'https://cdn.plot.ly/plotly-2.12.1.min.js?noext', 5 | }, 6 | }); -------------------------------------------------------------------------------- /docs/_static/imgs/logo-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/_static/imgs/logo-dark.png -------------------------------------------------------------------------------- /docs/_static/imgs/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/_static/imgs/logo.png -------------------------------------------------------------------------------- /docs/_static/imgs/readme_colab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/_static/imgs/readme_colab.png -------------------------------------------------------------------------------- /docs/_static/imgs/readme_documentation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/_static/imgs/readme_documentation.png -------------------------------------------------------------------------------- /docs/_static/imgs/readme_viewer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/_static/imgs/readme_viewer.png -------------------------------------------------------------------------------- /docs/developer_guides/debugging_tools/index.rst: -------------------------------------------------------------------------------- 1 | Debugging tools 2 | ==================== 3 | 4 | We document a few of the tooling systems and pipelines we support for debugging our models (e.g., profiling to debug speed). 5 | As we grow, we hope to provide more updated and extensive tooling support. 6 | 7 | ..
toctree:: 8 | :maxdepth: 1 9 | 10 | local_logger 11 | profiling 12 | benchmarking -------------------------------------------------------------------------------- /docs/developer_guides/debugging_tools/local_logger.md: -------------------------------------------------------------------------------- 1 | # Local writer 2 | 3 | The `LocalWriter` simply outputs numerical stats to the terminal. 4 | You can specify additional parameters to customize your logging experience. 5 | A skeleton of the local writer config is defined below. 6 | 7 | ```python 8 | """nerfstudio/configs/base_config.py""" 9 | 10 | @dataclass 11 | class LocalWriterConfig(InstantiateConfig): 12 | """Local Writer config""" 13 | 14 | _target: Type = writer.LocalWriter 15 | enable: bool = False 16 | stats_to_track: Tuple[writer.EventName, ...] = ( 17 | writer.EventName.ITER_TRAIN_TIME, 18 | ... 19 | ) 20 | max_log_size: int = 10 21 | 22 | ``` 23 | 24 | You can customize the local writer by editing the attributes: 25 | - `enable`: enable/disable the logger. 26 | - `stats_to_track`: all the stats that you want to print to the terminal (see the list under `EventName` in `utils/writer.py`). You can add or remove any of the defined enums. 27 | - `max_log_size`: how much content to print onto the screen (by default, only 10 lines are printed at a time). If 0, everything is printed without deleting any previous lines. 28 | 29 | :::{admonition} Tip 30 | :class: info 31 | 32 | If you want to create a new stat to track, simply add the stat name to the `EventName` enum. 33 | - Remember to call a put event (e.g., `put_scalar` from `utils/writer.py`) to place the value in the `EVENT_STORAGE`. 34 | - Remember to add the new enum to the `stats_to_track` list. 35 | ::: 36 | 37 | The local writer is easily configurable via CLI. 38 | A few common commands to use: 39 | 40 | - Disable local writer 41 | ```bash 42 | ns-train {METHOD_NAME} --logging.local-writer.no-enable 43 | ``` 44 | 45 | - Disable line wrapping 46 | ```bash 47 | ns-train {METHOD_NAME} --logging.local-writer.max-log-size=0 48 | ``` -------------------------------------------------------------------------------- /docs/developer_guides/debugging_tools/profiling.md: -------------------------------------------------------------------------------- 1 | # Code profiling support 2 | 3 | We provide built-in performance profiling capabilities to make it easier for you to debug and assess the performance of your code. 4 | 5 | #### In-house profiler 6 | 7 | You can use our built-in profiler. By default, it is enabled and will print at the termination of the program. You can disable it via CLI using the flag `--logging.no-enable-profiler`. 8 | 9 | 10 | The profiler computes the average total time of execution for any function with the `@profiler.time_function` decorator. 11 | For instance, if you wanted to profile the total time it takes to generate rays given pixel and camera indices via the `RayGenerator` class, you might want to time its `forward()` function. In that case, you would need to add the decorator to the function. 12 | 13 | ```python 14 | """nerfstudio/model_components/ray_generators.py""" 15 | 16 | class RayGenerator(nn.Module): 17 | 18 | ... 19 | 20 | @profiler.time_function # <-- add the profiler decorator before the function 21 | def forward(self, ray_indices: TensorType["num_rays", 3]) -> RayBundle: 22 | # implementation here 23 | ...
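        # Note: the decorator only records timings; when the program exits, the
        # profiler reports the average execution time of every tagged function.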
24 | ``` 25 | 26 | At the end of the training run, the profiler will print out the average execution time for all of the functions that have the profiler tag. 27 | 28 | :::{admonition} Tip 29 | :class: info 30 | 31 | Use this profiler when there are *specific, individual functions* whose runtime you want to measure. 32 | ::: 33 | 34 | 35 | #### Profiling with PySpy 36 | 37 | If you want to profile the entire codebase, consider using [PySpy](https://github.com/benfred/py-spy). 38 | 39 | Install PySpy: 40 | 41 | ```bash 42 | pip install py-spy 43 | ``` 44 | 45 | To perform the profiling, you can either generate a flame graph or a live view of the profiler. 46 | 47 | - Flame graph (with wandb logging enabled and our in-house profiler disabled): 48 | ```bash 49 | program="ns-train nerfacto -- --vis=wandb --logging.no-enable-profiler blender-data" 50 | py-spy record -o {PATH_TO_OUTPUT_SVG} $program 51 | ``` 52 | - Top-down live stats (same program configuration as above): 53 | ```bash 54 | py-spy top $program 55 | ``` 56 | 57 | :::{admonition} Attention 58 | :class: attention 59 | 60 | In defining `program`, you will need to add an extra `--` before you specify your program's arguments. 61 | ::: -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/imgs/pipeline_datamanager-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/developer_guides/pipelines/imgs/pipeline_datamanager-dark.png -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/imgs/pipeline_datamanager-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/developer_guides/pipelines/imgs/pipeline_datamanager-light.png -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/imgs/pipeline_field-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/developer_guides/pipelines/imgs/pipeline_field-dark.png -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/imgs/pipeline_field-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/developer_guides/pipelines/imgs/pipeline_field-light.png -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/imgs/pipeline_model-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/developer_guides/pipelines/imgs/pipeline_model-dark.png -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/imgs/pipeline_model-light.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/developer_guides/pipelines/imgs/pipeline_model-light.png -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/imgs/pipeline_overview-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/developer_guides/pipelines/imgs/pipeline_overview-dark.png -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/imgs/pipeline_overview-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/developer_guides/pipelines/imgs/pipeline_overview-light.png -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/imgs/pipeline_parser-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/developer_guides/pipelines/imgs/pipeline_parser-dark.png -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/imgs/pipeline_parser-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/developer_guides/pipelines/imgs/pipeline_parser-light.png -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/imgs/pipeline_pipeline-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/developer_guides/pipelines/imgs/pipeline_pipeline-dark.png -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/imgs/pipeline_pipeline-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/developer_guides/pipelines/imgs/pipeline_pipeline-light.png -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/index.rst: -------------------------------------------------------------------------------- 1 | Pipelines overview 2 | ------------------------- 3 | 4 | Here we describe what a Pipeline is and how it works. You can see an overview figure with the major Pipeline components below. 5 | 6 | .. image:: imgs/pipeline_overview-light.png 7 | :width: 600 8 | :align: center 9 | :alt: pipeline figure 10 | :class: only-light 11 | 12 | .. image:: imgs/pipeline_overview-dark.png 13 | :width: 600 14 | :align: center 15 | :alt: pipeline figure 16 | :class: only-dark 17 | 18 | 19 | .. admonition:: Note 20 | 21 | RayGT and RayOutputs are currently dictionaries. In the future, they will be typed objects. 22 | 23 | 24 | Why Pipelines? 25 | ========================== 26 | 27 | Our goal is for any NeRF paper to be implemented as a Pipeline. 
28 | 29 | The Pipeline is composed of two major components, namely the DataManager and the Model. The DataManager is responsible for loading data and generating RayBundle and RayGT objects. RayBundles are the input to the forward pass of the Model. These are needed at both training and inference time. RayGT objects, however, are needed only during training to calculate the losses in the Loss Dict. 30 | 31 | RayBundle objects describe origins and viewing directions. The model will take these rays and render them into output quantities, returned as RayOutputs. RayGT contains the necessary ground truth (GT) information needed to compute losses. For example, the GT pixel values can be used to supervise the rendered rays with an L2 loss. 32 | 33 | In the following sections, we describe the Pipeline components and look at their code. 34 | 35 | .. toctree:: 36 | :maxdepth: 1 37 | 38 | dataparsers 39 | datamanagers 40 | models 41 | fields 42 | pipelines 43 | 44 | Implementing NeRF Papers 45 | ========================== 46 | 47 | Let's say you want to create a custom Pipeline that has a custom DataManager and a custom Model. Perhaps you care about dynamically adding cameras to the DataManager during training or you want to importance sample and generate rays from pixels where the loss is high. This can be accomplished by mixing and matching components into a Pipeline. The following guide will take you through an example of this. 48 | 49 | This guide is coming soon! 50 | -------------------------------------------------------------------------------- /docs/developer_guides/pipelines/pipelines.md: -------------------------------------------------------------------------------- 1 | # Pipelines 2 | 3 | ```{image} imgs/pipeline_pipeline-light.png 4 | :align: center 5 | :class: only-light 6 | :width: 600 7 | ``` 8 | 9 | ```{image} imgs/pipeline_pipeline-dark.png 10 | :align: center 11 | :class: only-dark 12 | :width: 600 13 | ``` 14 | 15 | ## What is a Pipeline? 16 | 17 | The Pipeline contains all the code you need to implement a NeRF method. There are two main functions that you need to implement for the Pipeline. 18 | 19 | ```python 20 | class Pipeline(nn.Module): 21 | 22 | datamanager: DataManager 23 | model: Model 24 | 25 | @profiler.time_function 26 | def get_train_loss_dict(self, step: int): 27 | """This function gets your training loss dict. This will be responsible for 28 | getting the next batch of data from the DataManager and interfacing with the 29 | Model class, feeding the data to the model's forward function. 30 | 31 | Args: 32 | step: current iteration step to update sampler if using DDP (distributed) 33 | """ 34 | 35 | @profiler.time_function 36 | def get_eval_loss_dict(self, step: int): 37 | """This function gets your evaluation loss dict. It needs to get the data 38 | from the DataManager and feed it to the model's forward function. 39 | 40 | Args: 41 | step: current iteration step 42 | """ 43 | ``` 44 | 45 | ## Vanilla Implementation 46 | 47 | Here you can see a simple implementation of the get_train_loss_dict from the VanillaPipeline. Essentially, all the pipeline has to do is route data from the DataManager to the Model.
48 | 49 | ```python 50 | @profiler.time_function 51 | def get_train_loss_dict(self, step: int): 52 | ray_bundle, batch = self.datamanager.next_train(step) 53 | model_outputs = self.model(ray_bundle) 54 | metrics_dict = self.model.get_metrics_dict(model_outputs, batch) 55 | loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict) 56 | return model_outputs, loss_dict, metrics_dict 57 | ``` 58 | 59 | ## Creating Custom Methods 60 | 61 | :::{admonition} Note 62 | :class: info 63 | 64 | The VanillaPipeline works for most of our methods. 65 | ::: 66 | 67 | We also have a DynamicBatchPipeline that is used with InstantNGP to dynamically choose the number of rays to use per training and evaluation iteration. 68 | 69 | ```{button-link} https://github.com/nerfstudio-project/nerfstudio/blob/master/nerfstudio/pipelines/dynamic_batch.py 70 | :color: primary 71 | :outline: 72 | See the code! 73 | ``` -------------------------------------------------------------------------------- /docs/developer_guides/viewer/imgs/viewer_figure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/developer_guides/viewer/imgs/viewer_figure.png -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 
21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/mipnerf/models_mipnerf_field-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/mipnerf/models_mipnerf_field-dark.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/mipnerf/models_mipnerf_field-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/mipnerf/models_mipnerf_field-light.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/mipnerf/models_mipnerf_pipeline-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/mipnerf/models_mipnerf_pipeline-dark.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/mipnerf/models_mipnerf_pipeline-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/mipnerf/models_mipnerf_pipeline-light.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/models_nerf-field-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/models_nerf-field-dark.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/models_nerf-field-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/models_nerf-field-light.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/models_nerf-pipeline-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/models_nerf-pipeline-dark.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/models_nerf-pipeline-field-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/models_nerf-pipeline-field-dark.png 
-------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/models_nerf-pipeline-field-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/models_nerf-pipeline-field-light.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/models_nerf-pipeline-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/models_nerf-pipeline-light.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/models_nerf-pipeline-renderer-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/models_nerf-pipeline-renderer-dark.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/models_nerf-pipeline-renderer-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/models_nerf-pipeline-renderer-light.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/models_nerf-pipeline-sampler-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/models_nerf-pipeline-sampler-dark.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/models_nerf-pipeline-sampler-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/models_nerf-pipeline-sampler-light.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/nerfacto/models_nerfacto_field-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/nerfacto/models_nerfacto_field-dark.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/nerfacto/models_nerfacto_field-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/nerfacto/models_nerfacto_field-light.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/nerfacto/models_nerfacto_pipeline-dark.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/nerfacto/models_nerfacto_pipeline-dark.png -------------------------------------------------------------------------------- /docs/nerfology/methods/imgs/nerfacto/models_nerfacto_pipeline-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/methods/imgs/nerfacto/models_nerfacto_pipeline-light.png -------------------------------------------------------------------------------- /docs/nerfology/methods/index.md: -------------------------------------------------------------------------------- 1 | # Methods 2 | 3 | We provide a set of pre-implemented nerfstudio methods. 4 | 5 | **The goal of nerfstudio is to modularize the various NeRF techniques as much as possible.** 6 | 7 | As a result, many of the techniques from these pre-implemented methods can be mixed 🎨. 8 | 9 | ## Running a method 10 | 11 | It's easy! 12 | 13 | ```bash 14 | ns-train {METHOD_NAME} 15 | ``` 16 | 17 | To list the available methods run: 18 | 19 | ```bash 20 | ns-train --help 21 | ``` 22 | 23 | ## Guides 24 | 25 | In addition to their implementations, we have provided guides that walk through each of these methods. 26 | 27 | ```{toctree} 28 | :maxdepth: 1 29 | NeRF <nerf.md> 30 | Mip-NeRF <mipnerf.md> 31 | Nerfacto <nerfacto.md> 32 | Instant-NGP <instant_ngp.md> 33 | Semantic NeRF-W <semantic_nerfw.md> 34 | ``` 35 | -------------------------------------------------------------------------------- /docs/nerfology/methods/instant_ngp.md: -------------------------------------------------------------------------------- 1 | # Instant-NGP 2 | 3 |

*Instant Neural Graphics Primitives with a Multiresolution Hash Encoding*
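Until the guide below is filled in, here is the core idea in miniature: instead of one large MLP, Instant-NGP looks up learned features in multiresolution hash tables and feeds them to a tiny MLP. The sketch below is illustrative only (a single level, nearest-corner lookup, hypothetical shapes); the implementation this repo relies on uses tiny-cuda-nn's fused CUDA kernels (see `nerfstudio/fields/instant_ngp_field.py`).

```python
import torch

def hash_encode(xyz: torch.Tensor, table: torch.Tensor, resolution: int) -> torch.Tensor:
    """Illustrative single-level hash lookup (nearest corner only).

    xyz: [N, 3] points scaled to [0, 1); table: [T, F] learnable feature table.
    The real encoding interpolates the 8 surrounding corners and concatenates
    features from many geometrically growing resolutions before the tiny MLP.
    """
    primes = torch.tensor([1, 2654435761, 805459861], device=xyz.device)
    corner = (xyz * resolution).long()  # integer grid coordinates per point
    # Spatial hash: XOR the coordinates multiplied by large primes, then wrap.
    h = (corner[:, 0] * primes[0]) ^ (corner[:, 1] * primes[1]) ^ (corner[:, 2] * primes[2])
    return table[h % table.shape[0]]  # [N, F] features fed to the tiny MLP
```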
4 | 5 | ```{button-link} https://nvlabs.github.io/instant-ngp/ 6 | :color: primary 7 | :outline: 8 | Paper Website 9 | ``` 10 | 11 | ### Running Model 12 | 13 | ```bash 14 | ns-train instant-ngp 15 | ``` 16 | 17 | 18 | ```{admonition} Coming Soon 19 | This guide is coming soon. 20 | ``` -------------------------------------------------------------------------------- /docs/nerfology/methods/mipnerf.md: -------------------------------------------------------------------------------- 1 | # Mip-NeRF 2 | 3 |

*A Multiscale Representation for Anti-Aliasing Neural Radiance Fields*
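Mip-NeRF's key change, explained under Overview below, is replacing the point-based positional encoding with an integrated positional encoding (IPE) over a Gaussian that models the volume of each sample. As a preview, here is a minimal sketch of the closed form under a diagonal-covariance assumption (illustrative; the version this repo actually uses lives in `nerfstudio/field_components/encodings.py`). It relies on the identity E[sin(x)] = sin(mu) * exp(-sigma^2 / 2) for x ~ N(mu, sigma^2), so high frequencies are damped for large samples.

```python
import torch

def integrated_pos_enc(mean: torch.Tensor, var: torch.Tensor, num_freqs: int = 8) -> torch.Tensor:
    """Sketch of an integrated positional encoding for diagonal Gaussians.

    mean: [..., 3] sample means; var: [..., 3] per-axis variances.
    Each frequency band of the usual sin/cos encoding is attenuated by
    exp(-0.5 * (2**level)**2 * var), so big samples lose high frequencies.
    """
    scales = 2.0 ** torch.arange(num_freqs)             # [L] frequency scales
    scaled_mean = mean[..., None, :] * scales[:, None]  # [..., L, 3]
    damping = torch.exp(-0.5 * var[..., None, :] * scales[:, None] ** 2)
    enc = torch.cat([torch.sin(scaled_mean) * damping,
                     torch.cos(scaled_mean) * damping], dim=-1)  # [..., L, 6]
    return enc.flatten(-2)                              # [..., L * 6]
```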
4 | 5 | ```{button-link} https://jonbarron.info/mipnerf/ 6 | :color: primary 7 | :outline: 8 | Paper Website 9 | ``` 10 | 11 | ### Running Model 12 | 13 | ```bash 14 | ns-train mipnerf 15 | ``` 16 | 17 | ## Overview 18 | 19 | ```{image} imgs/mipnerf/models_mipnerf_pipeline-light.png 20 | :align: center 21 | :class: only-light 22 | ``` 23 | 24 | ```{image} imgs/mipnerf/models_mipnerf_pipeline-dark.png 25 | :align: center 26 | :class: only-dark 27 | ``` 28 | 29 | The primary modification in Mip-NeRF is in the encoding for the field representation. With the modification, the same _mip-NeRF_ field can be used for the coarse and fine steps of the rendering hierarchy. 30 | 31 | ```{image} imgs/mipnerf/models_mipnerf_field-light.png 32 | :align: center 33 | :class: only-light 34 | :width: 400 35 | ``` 36 | 37 | ```{image} imgs/mipnerf/models_mipnerf_field-dark.png 38 | :align: center 39 | :class: only-dark 40 | :width: 400 41 | ``` 42 | 43 | In the field, the Positional Encoding (PE) is replaced with an Integrated Positional Encoding (IPE) that takes into account the size of the sample. 44 | -------------------------------------------------------------------------------- /docs/nerfology/methods/semantic_nerfw.md: -------------------------------------------------------------------------------- 1 | # Semantic NeRF-W 2 | 3 |

*Semantic NeRF*
4 | 5 | ```{button-link} https://shuaifengzhi.com/Semantic-NeRF/ 6 | :color: primary 7 | :outline: 8 | Paper Website 9 | ``` 10 | 11 |

*NeRF in the Wild*
12 | 13 | ```{button-link} https://nerf-w.github.io/ 14 | :color: primary 15 | :outline: 16 | Paper Website 17 | ``` 18 | 19 | ```{admonition} Coming Soon 20 | The transient embeddings are still under development. Please stay tuned. 21 | ``` 22 | 23 | ### Running Model 24 | 25 | Download the Friends dataset: 26 | 27 | ```bash 28 | ns-download-data friends 29 | ``` 30 | 31 | ```bash 32 | ns-train semantic-nerfw 33 | ``` 34 | 35 | This model defaults to using the "friends" dataset from the paper ["The One Where They Reconstructed 3D Humans and Environments in TV Shows"](https://ethanweber.me/sitcoms3D/). 36 | 37 | -------------------------------------------------------------------------------- /docs/nerfology/model_components/imgs/frustums-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/model_components/imgs/frustums-dark.png -------------------------------------------------------------------------------- /docs/nerfology/model_components/imgs/frustums.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/model_components/imgs/frustums.png -------------------------------------------------------------------------------- /docs/nerfology/model_components/imgs/samplers_stratified-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/model_components/imgs/samplers_stratified-dark.png -------------------------------------------------------------------------------- /docs/nerfology/model_components/imgs/samplers_stratified-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/model_components/imgs/samplers_stratified-light.png -------------------------------------------------------------------------------- /docs/nerfology/model_components/imgs/samplers_type-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/model_components/imgs/samplers_type-dark.png -------------------------------------------------------------------------------- /docs/nerfology/model_components/imgs/samplers_type-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/nerfology/model_components/imgs/samplers_type-light.png -------------------------------------------------------------------------------- /docs/nerfology/model_components/index.md: -------------------------------------------------------------------------------- 1 | # Model components 2 | 3 | Getting started with NeRFs can be difficult. The research field is still quite new and most of the key nuggets are buried in academic papers. For this reason, we have consolidated many of the key concepts into a series of guides.
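As a small taste of what these guides cover, here is a minimal sketch of one such component, stratified sampling along a ray. It is illustrative only; the samplers actually used by the repo live in `nerfstudio/model_components/ray_samplers.py` and are covered in the Ray samplers guide below.

```python
import torch

def stratified_samples(near: float, far: float, num_samples: int, num_rays: int) -> torch.Tensor:
    """Jittered ("stratified") sample depths along each ray.

    [near, far] is split into equal bins and one uniform sample is drawn per
    bin, covering the ray without the aliasing artifacts of a fixed grid.
    """
    edges = torch.linspace(near, far, num_samples + 1)  # [S + 1] bin edges
    lower, upper = edges[:-1], edges[1:]                # [S] per-bin bounds
    jitter = torch.rand(num_rays, num_samples)          # uniform in [0, 1)
    return lower + (upper - lower) * jitter             # [num_rays, S] depths
```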
4 | 5 | ```{toctree} 6 | :maxdepth: 1 7 | Camera models <visualize_cameras.ipynb> 8 | Sample representation <visualize_samples.ipynb> 9 | Ray samplers <visualize_samplers.ipynb> 10 | Spatial distortions <visualize_spatial_distortions.ipynb> 11 | Encoders <visualize_encoders.ipynb> 12 | ``` 13 | -------------------------------------------------------------------------------- /docs/quickstart/data_conventions.md: -------------------------------------------------------------------------------- 1 | # Data conventions 2 | 3 | ## Coordinate conventions 4 | 5 | Here we explain the coordinate conventions for using our repo. 6 | 7 | ### Camera/view space 8 | 9 | We use the OpenGL/Blender (and original NeRF) coordinate convention for cameras. +X is right, +Y is up, and +Z is pointing back and away from the camera. -Z is the look-at direction. Other codebases may use the COLMAP/OpenCV convention, where the Y and Z axes are flipped from ours but the +X axis remains the same. 10 | 11 | ### World space 12 | 13 | Our world space is oriented such that the up vector is +Z. The XY plane is parallel to the ground plane. In the viewer, you'll notice that red, green, and blue vectors correspond to X, Y, and Z respectively. 14 | 15 |
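Because the COLMAP/OpenCV convention differs from ours only in the orientation of the Y and Z camera axes, converting a camera-to-world pose between the two is a sign flip on those two columns. A minimal sketch, assuming a 4x4 camera-to-world matrix (this helper is illustrative, not a function the repo exports):

```python
import numpy as np

def opencv_to_opengl(c2w: np.ndarray) -> np.ndarray:
    """Convert a 4x4 camera-to-world pose from the OpenCV/COLMAP convention
    (+Y down, +Z forward) to the OpenGL/Blender convention used here
    (+Y up, -Z forward) by negating the Y and Z columns of the rotation."""
    converted = c2w.copy()
    converted[0:3, 1:3] *= -1  # flip the Y and Z camera axes
    return converted
```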
16 | 17 | ## Dataset format 18 | 19 | Our explanation here is for the nerfstudio data format. The `transforms.json` file has a format similar to [Instant NGP](https://github.com/NVlabs/instant-ngp). 20 | 21 | **Camera intrinsics** 22 | 23 | At the top of the file, we specify the camera intrinsics. We assume that all the intrinsics parameters are the same for every camera in the dataset. The following is an example: 24 | 25 | ```json 26 | { 27 | "fl_x": 1072.281897246229, // focal length x 28 | "fl_y": 1068.6906965388932, // focal length y 29 | "cx": 1504.0, // principal point x 30 | "cy": 1000.0, // principal point y 31 | "w": 3008, // image width 32 | "h": 2000, // image height 33 | "camera_model": "OPENCV_FISHEYE", // camera model type 34 | "k1": 0.03126218448029553, // first radial distortion parameter 35 | "k2": 0.005177020067511987, // second radial distortion parameter 36 | "k3": 0.0006640977794272005, // third radial distortion parameter 37 | "k4": 0.00010067035656515042, // fourth radial distortion parameter 38 | "p1": -6.472477652140879e-5, // first tangential distortion parameter 39 | "p2": -1.374647851912992e-7, // second tangential distortion parameter 40 | "frames": // ... extrinsics parameters explained below 41 | } 42 | ``` 43 | 44 | The valid `camera_model` strings are currently "OPENCV" and "OPENCV_FISHEYE". "OPENCV" (i.e., perspective) uses k1-2 and p1-2. "OPENCV_FISHEYE" uses k1-4. 45 | 46 | **Camera extrinsics** 47 | 48 | For a transform matrix, the first 3 columns are the +X, +Y, and +Z axes defining the camera orientation, and the X, Y, Z values of the last column define the camera origin. The last row keeps the matrix compatible with homogeneous coordinates. 49 | 50 | ```json 51 | { 52 | // ... intrinsics parameters 53 | "frames": [ 54 | { 55 | "file_path": "images/frame_00001.jpeg", 56 | "transform_matrix": [ 57 | // [+X0 +Y0 +Z0 X] 58 | // [+X1 +Y1 +Z1 Y] 59 | // [+X2 +Y2 +Z2 Z] 60 | // [0.0 0.0 0.0 1] 61 | [1.0, 0.0, 0.0, 0.0], 62 | [0.0, 1.0, 0.0, 0.0], 63 | [0.0, 0.0, 1.0, 0.0], 64 | [0.0, 0.0, 0.0, 1.0] 65 | ] 66 | } 67 | ] 68 | } 69 | ``` 70 | -------------------------------------------------------------------------------- /docs/quickstart/export_geometry.md: -------------------------------------------------------------------------------- 1 | # Export geometry 2 | 3 | Here we document how to export point clouds and meshes from nerfstudio. The main command you'll be working with is `ns-export`. Our point clouds are exported as `.ply` files and the textured meshes are exported as `.obj` files. 4 | 5 | ## Exporting a mesh 6 | 7 | ### 1. TSDF Fusion 8 | 9 | TSDF (truncated signed distance function) fusion is a meshing algorithm that uses depth maps to extract a surface as a mesh. This method works for all models. 10 | 11 | ```bash 12 | ns-export tsdf --load-config CONFIG.yml --output-dir OUTPUT_DIR 13 | ``` 14 | 15 | ### 2. Poisson surface reconstruction 16 | 17 | Poisson surface reconstruction gives the highest-quality meshes. See the steps below to use Poisson surface reconstruction in our repo. 18 | 19 | :::{admonition} Note 20 | :class: info 21 | 22 | This will only work with a Model that computes or predicts normals, e.g., nerfacto. 23 | ::: 24 | 25 | 1. Train nerfacto with network settings that predict normals. 26 | 27 | ```bash 28 | ns-train nerfacto --pipeline.model.predict-normals True 29 | ``` 30 | 31 | 2. Export a mesh with the Poisson meshing algorithm.
32 | 33 | ```bash 34 | ns-export poisson --load-config CONFIG.yml --output-dir OUTPUT_DIR 35 | ``` 36 | 37 | ## Exporting a point cloud 38 | 39 | ```bash 40 | ns-export pointcloud --help 41 | ``` 42 | 43 | ## Other exporting methods 44 | 45 | Run the following command to see the other export methods that are available. 46 | 47 | ```bash 48 | ns-export --help 49 | ``` 50 | 51 | ## Texturing an existing mesh with NeRF 52 | 53 | Say you want to simplify and/or smooth a mesh offline, and then texture it with NeRF. You can do that with the following command. It will work for any mesh filetype that [PyMeshLab](https://pymeshlab.readthedocs.io/en/latest/) supports, for example a `.ply`. 54 | 55 | ```bash 56 | python scripts/texture.py --load-config CONFIG.yml --input-mesh-filename FILENAME --output-dir OUTPUT_DIR 57 | ``` 58 | 59 | ## Dependencies 60 | 61 | Our dependencies are declared in the `pyproject.toml` file and shipped with the pip package. They are: 62 | 63 | - [xatlas-python](https://github.com/mworchel/xatlas-python) for unwrapping meshes to a UV map 64 | - [pymeshlab](https://pymeshlab.readthedocs.io/en/latest/) for reducing the number of faces in a mesh 65 | -------------------------------------------------------------------------------- /docs/quickstart/imgs/polycam_export.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/quickstart/imgs/polycam_export.png -------------------------------------------------------------------------------- /docs/quickstart/imgs/polycam_settings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/quickstart/imgs/polycam_settings.png -------------------------------------------------------------------------------- /docs/quickstart/imgs/record3d_promo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/quickstart/imgs/record3d_promo.png -------------------------------------------------------------------------------- /docs/quickstart/imgs/record_3d_export_selection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/quickstart/imgs/record_3d_export_selection.png -------------------------------------------------------------------------------- /docs/quickstart/imgs/record_3d_video_selection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/quickstart/imgs/record_3d_video_selection.png -------------------------------------------------------------------------------- /docs/quickstart/imgs/viewer_link.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/docs/quickstart/imgs/viewer_link.png -------------------------------------------------------------------------------- /docs/reference/api/config.rst: -------------------------------------------------------------------------------- 1 | ..
_configs: 2 | 3 | Configs 4 | ============ 5 | 6 | .. automodule:: nerfstudio.configs.base_config 7 | :members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /docs/reference/api/data.rst: -------------------------------------------------------------------------------- 1 | .. _dataset: 2 | 3 | Data 4 | ============ 5 | 6 | Data Parsers 7 | ---------------- 8 | 9 | .. automodule:: nerfstudio.data.dataparsers 10 | :members: 11 | :show-inheritance: 12 | 13 | Datasets 14 | ---------------- 15 | 16 | .. automodule:: nerfstudio.data.datasets 17 | :members: 18 | :show-inheritance: 19 | 20 | Dataloaders 21 | ---------------- 22 | 23 | .. automodule:: nerfstudio.data.utils.dataloaders 24 | :members: 25 | :show-inheritance: 26 | 27 | Pixel Samplers 28 | ---------------- 29 | 30 | .. automodule:: nerfstudio.data.pixel_samplers 31 | :members: 32 | :show-inheritance: 33 | 34 | Scene Box 35 | ---------------- 36 | 37 | .. automodule:: nerfstudio.data.scene_box 38 | :members: 39 | :show-inheritance: -------------------------------------------------------------------------------- /docs/reference/api/field_components/embeddings.rst: -------------------------------------------------------------------------------- 1 | .. _embeddings: 2 | 3 | Embeddings 4 | =================== 5 | 6 | .. automodule:: nerfstudio.field_components.embedding 7 | :members: 8 | :show-inheritance: -------------------------------------------------------------------------------- /docs/reference/api/field_components/encodings.rst: -------------------------------------------------------------------------------- 1 | .. _encodings: 2 | 3 | Encodings 4 | =================== 5 | 6 | .. automodule:: nerfstudio.field_components.encodings 7 | :members: 8 | :show-inheritance: -------------------------------------------------------------------------------- /docs/reference/api/field_components/field_heads.rst: -------------------------------------------------------------------------------- 1 | .. _field_heads: 2 | 3 | Field Heads 4 | =================== 5 | 6 | .. automodule:: nerfstudio.field_components.field_heads 7 | :members: 8 | :show-inheritance: -------------------------------------------------------------------------------- /docs/reference/api/field_components/index.rst: -------------------------------------------------------------------------------- 1 | .. _field_modules: 2 | 3 | Field Modules 4 | =================== 5 | 6 | TODO: High level description of field modules and how they connect together. 7 | 8 | .. toctree:: 9 | 10 | encodings 11 | embeddings 12 | field_heads 13 | mlp 14 | spatial_distortions -------------------------------------------------------------------------------- /docs/reference/api/field_components/mlp.rst: -------------------------------------------------------------------------------- 1 | .. _mlp: 2 | 3 | MLP 4 | =================== 5 | 6 | .. automodule:: nerfstudio.field_components.mlp 7 | :members: 8 | :show-inheritance: -------------------------------------------------------------------------------- /docs/reference/api/field_components/spatial_distortions.rst: -------------------------------------------------------------------------------- 1 | .. _spatial_distortions: 2 | 3 | Spatial Distortions 4 | ===================== 5 | 6 | .. 
automodule:: nerfstudio.field_components.spatial_distortions 7 | :members: 8 | :show-inheritance: -------------------------------------------------------------------------------- /docs/reference/api/fields.rst: -------------------------------------------------------------------------------- 1 | .. _fields: 2 | 3 | Fields 4 | ============ 5 | 6 | Base 7 | ---------------- 8 | 9 | .. automodule:: nerfstudio.fields.base_field 10 | :members: 11 | :show-inheritance: 12 | 13 | Instant NGP 14 | ---------------- 15 | 16 | .. automodule:: nerfstudio.fields.instant_ngp_field 17 | :members: 18 | :show-inheritance: 19 | 20 | Vanilla NeRF 21 | ---------------- 22 | 23 | .. automodule:: nerfstudio.fields.vanilla_nerf_field 24 | :members: 25 | :show-inheritance: 26 | -------------------------------------------------------------------------------- /docs/reference/api/index.rst: -------------------------------------------------------------------------------- 1 | .. _reference: 2 | 3 | API 4 | ============ 5 | 6 | TODO: Explanation of each component 7 | 8 | .. toctree:: 9 | 10 | config 11 | data 12 | fields 13 | field_components/index 14 | models 15 | model_components/index 16 | optimizers 17 | utils/index 18 | viewer 19 | -------------------------------------------------------------------------------- /docs/reference/api/model_components/index.rst: -------------------------------------------------------------------------------- 1 | .. _graph_modules: 2 | 3 | Model components 4 | =================== 5 | 6 | .. toctree:: 7 | 8 | ray_sampler 9 | losses 10 | renderers -------------------------------------------------------------------------------- /docs/reference/api/model_components/losses.rst: -------------------------------------------------------------------------------- 1 | .. _losses: 2 | 3 | Losses 4 | =================== 5 | 6 | .. automodule:: nerfstudio.model_components.losses 7 | :members: 8 | :show-inheritance: -------------------------------------------------------------------------------- /docs/reference/api/model_components/ray_sampler.rst: -------------------------------------------------------------------------------- 1 | .. _ray_sampler: 2 | 3 | Ray Sampler 4 | =================== 5 | 6 | .. automodule:: nerfstudio.model_components.ray_samplers 7 | :members: 8 | :show-inheritance: -------------------------------------------------------------------------------- /docs/reference/api/model_components/renderers.rst: -------------------------------------------------------------------------------- 1 | .. _renderers: 2 | 3 | Renderers 4 | ============ 5 | 6 | .. automodule:: nerfstudio.model_components.renderers 7 | :members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /docs/reference/api/models.rst: -------------------------------------------------------------------------------- 1 | .. _graphs: 2 | 3 | Models 4 | ============ 5 | 6 | Base 7 | ---------------- 8 | 9 | .. automodule:: nerfstudio.models.base_model 10 | :members: 11 | :show-inheritance: 12 | 13 | Instant NGP 14 | ---------------- 15 | 16 | .. automodule:: nerfstudio.models.instant_ngp 17 | :members: 18 | :show-inheritance: 19 | 20 | Semantic NeRF-W 21 | ---------------- 22 | 23 | .. automodule:: nerfstudio.models.semantic_nerfw 24 | :members: 25 | :show-inheritance: 26 | 27 | NeRF 28 | ---------------- 29 | 30 | .. 
automodule:: nerfstudio.models.vanilla_nerf 31 | :members: 32 | :show-inheritance: -------------------------------------------------------------------------------- /docs/reference/api/optimizers.rst: -------------------------------------------------------------------------------- 1 | .. _engine: 2 | 3 | Engine 4 | ============ 5 | 6 | Optimizers 7 | ---------------- 8 | 9 | .. automodule:: nerfstudio.engine.optimizers 10 | :members: 11 | :show-inheritance: 12 | 13 | Schedulers 14 | ---------------- 15 | 16 | .. automodule:: nerfstudio.engine.schedulers 17 | :members: 18 | :show-inheritance: 19 | -------------------------------------------------------------------------------- /docs/reference/api/utils/colormaps.rst: -------------------------------------------------------------------------------- 1 | .. _colormaps: 2 | 3 | Colormaps 4 | ---------------- 5 | 6 | .. automodule:: nerfstudio.utils.colormaps 7 | :members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /docs/reference/api/utils/colors.rst: -------------------------------------------------------------------------------- 1 | .. _colors: 2 | 3 | Colors 4 | ------------ 5 | 6 | .. automodule:: nerfstudio.utils.colors 7 | :members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /docs/reference/api/utils/index.rst: -------------------------------------------------------------------------------- 1 | .. _utils: 2 | 3 | Utils 4 | =================== 5 | 6 | .. toctree:: 7 | 8 | colors 9 | math 10 | colormaps 11 | tensor_dataclass -------------------------------------------------------------------------------- /docs/reference/api/utils/math.rst: -------------------------------------------------------------------------------- 1 | .. _math: 2 | 3 | Math 4 | ============ 5 | 6 | .. automodule:: nerfstudio.utils.math 7 | :members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /docs/reference/api/utils/tensor_dataclass.rst: -------------------------------------------------------------------------------- 1 | .. _tensor_dataclass: 2 | 3 | TensorDataclass 4 | ================= 5 | 6 | .. automodule:: nerfstudio.utils.tensor_dataclass 7 | :members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /docs/reference/api/viewer.rst: -------------------------------------------------------------------------------- 1 | .. _viewer: 2 | 3 | Viewer 4 | ============ 5 | 6 | .. automodule:: nerfstudio.viewer 7 | :members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /docs/reference/cli/index.md: -------------------------------------------------------------------------------- 1 | # CLI 2 | 3 | We provide a command line interface for training your own NeRFs (no coding necessary). You can learn more about each command by using the `--help` argument. 4 | 5 | ## Commands 6 | 7 | Here are the popular commands that we offer. If you've cloned the repo, you can also look at the [pyproject.toml file](https://github.com/nerfstudio-project/nerfstudio/blob/main/pyproject.toml) at the `[project.scripts]` section for details. 
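Since these commands are ordinary console-script entry points, you can also enumerate them from an installed environment. This is a small sketch using `importlib.metadata` (assumes Python 3.10+ for the `group=` keyword), not something the CLI itself provides:

```python
from importlib.metadata import entry_points

# Print each ns-* console script and the module:function it dispatches to.
for script in entry_points(group="console_scripts"):
    if script.name.startswith("ns-"):
        print(f"{script.name} -> {script.value}")
```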
8 | 9 | | Command | Description | Filename | 10 | | ---------------- | -------------------------------------- | ---------------------------------- | 11 | | ns-install-cli | Install tab completion for all scripts | scripts/completions/install.py | 12 | | ns-process-data | Generate a dataset from your own data | scripts/process_data.py | 13 | | ns-download-data | Download existing captures | scripts/downloads/download_data.py | 14 | | ns-train | Generate a NeRF | scripts/train.py | 15 | | ns-eval | Run evaluation metrics for your Model | scripts/eval.py | 16 | | ns-render | Render out a video of your NeRF | scripts/render.py | 17 | | ns-export | Export a NeRF into other formats | scripts/exporter.py | 18 | 19 | ```{toctree} 20 | :maxdepth: 1 21 | :hidden: 22 | 23 | ns_process_data 24 | ns_download_data 25 | ns_train 26 | ns_render 27 | ns_export 28 | ns_eval 29 | ``` 30 | -------------------------------------------------------------------------------- /docs/reference/cli/ns_download_data.md: -------------------------------------------------------------------------------- 1 | # ns-download-data 2 | 3 | ```{eval-rst} 4 | .. argparse:: 5 | :module: scripts.downloads.download_data 6 | :func: get_parser_fn 7 | :prog: ns-download-data 8 | :nodefault: 9 | ``` 10 | -------------------------------------------------------------------------------- /docs/reference/cli/ns_eval.md: -------------------------------------------------------------------------------- 1 | # ns-eval 2 | 3 | ```{eval-rst} 4 | .. argparse:: 5 | :module: scripts.eval 6 | :func: get_parser_fn 7 | :prog: ns-eval 8 | :nodefault: 9 | ``` 10 | -------------------------------------------------------------------------------- /docs/reference/cli/ns_export.md: -------------------------------------------------------------------------------- 1 | # ns-export 2 | 3 | ```{eval-rst} 4 | .. argparse:: 5 | :module: scripts.exporter 6 | :func: get_parser_fn 7 | :prog: ns-export 8 | :nodefault: 9 | ``` 10 | -------------------------------------------------------------------------------- /docs/reference/cli/ns_process_data.md: -------------------------------------------------------------------------------- 1 | # ns-process-data 2 | 3 | :::{admonition} Note 4 | :class: warning 5 | Make sure to have [COLMAP](https://colmap.github.io) and [FFmpeg](https://ffmpeg.org/download.html) installed. 6 | You may also want to install [hloc](https://github.com/cvg/Hierarchical-Localization) (optional) for more feature detector and matcher options. 7 | ::: 8 | 9 | ```{eval-rst} 10 | .. argparse:: 11 | :module: scripts.process_data 12 | :func: get_parser_fn 13 | :prog: ns-process-data 14 | :nodefault: 15 | ``` 16 | -------------------------------------------------------------------------------- /docs/reference/cli/ns_render.md: -------------------------------------------------------------------------------- 1 | # ns-render 2 | 3 | :::{admonition} Note 4 | :class: warning 5 | Make sure to have [FFmpeg](https://ffmpeg.org/download.html) installed. 6 | ::: 7 | 8 | ```{eval-rst} 9 | .. argparse:: 10 | :module: scripts.render 11 | :func: get_parser_fn 12 | :prog: ns-render 13 | :nodefault: 14 | ``` 15 | -------------------------------------------------------------------------------- /docs/reference/cli/ns_train.md: -------------------------------------------------------------------------------- 1 | # ns-train 2 | 3 | Primary interface for training a NeRF model. `--help` is your friend when navigating command arguments. 
We also recommend installing tab completion with `ns-install-cli`. 4 | 5 | ```bash 6 | usage: ns-train {method} [method args] {dataparser} [dataparser args] 7 | ``` 8 | 9 | If you are using a nerfstudio dataset, the minimal command is: 10 | 11 | ```bash 12 | ns-train nerfacto --data YOUR_DATA 13 | ``` 14 | 15 | To learn about the available methods: 16 | 17 | ```bash 18 | ns-train --help 19 | ``` 20 | 21 | To learn about a method's parameters: 22 | 23 | ```bash 24 | ns-train {method} --help 25 | ``` 26 | 27 | By default, the nerfstudio dataparser is used. If you would like to use a different dataparser, it can be specified after all of the method arguments. For a list of dataparser options: 28 | 29 | ```bash 30 | ns-train {method} {dataparser} --help 31 | ``` 32 | -------------------------------------------------------------------------------- /media/help-output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/media/help-output.png -------------------------------------------------------------------------------- /media/overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/media/overview.png -------------------------------------------------------------------------------- /media/sdf_studio_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/media/sdf_studio_4.png -------------------------------------------------------------------------------- /media/training-process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/media/training-process.png -------------------------------------------------------------------------------- /media/viewer_screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/media/viewer_screenshot.png -------------------------------------------------------------------------------- /nerfstudio/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/cameras/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved.
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/configs/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/configs/config_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Some utility code for configs. 17 | """ 18 | 19 | from __future__ import annotations 20 | 21 | from dataclasses import field 22 | from typing import Any, Dict 23 | 24 | from rich.console import Console 25 | 26 | CONSOLE = Console() 27 | # pylint: disable=import-outside-toplevel 28 | 29 | # cannot use mutable types directly within dataclass; abstracting default factory calls 30 | def to_immutable_dict(d: Dict[str, Any]): 31 | """Method to convert mutable dict to default factory dict 32 | 33 | Args: 34 | d: dictionary to convert into default factory dict for dataclass 35 | """ 36 | return field(default_factory=lambda: dict(d)) 37 | 38 | 39 | def convert_markup_to_ansi(markup_string: str) -> str: 40 | """Convert rich-style markup to ANSI sequences for command-line formatting. 41 | 42 | Args: 43 | markup_string: Text with rich-style markup. 44 | 45 | Returns: 46 | Text formatted via ANSI sequences. 
47 | """ 48 | with CONSOLE.capture() as out: 49 | CONSOLE.print(markup_string, soft_wrap=True) 50 | return out.get() 51 | -------------------------------------------------------------------------------- /nerfstudio/data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/data/datamanagers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/data/datamanagers/semantic_datamanager.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Semantic datamanager. 17 | """ 18 | 19 | from dataclasses import dataclass, field 20 | from typing import Type 21 | 22 | from nerfstudio.data.datamanagers.base_datamanager import ( 23 | VanillaDataManager, 24 | VanillaDataManagerConfig, 25 | ) 26 | from nerfstudio.data.datasets.semantic_dataset import SemanticDataset 27 | 28 | 29 | @dataclass 30 | class SemanticDataManagerConfig(VanillaDataManagerConfig): 31 | """A semantic datamanager - required to use with .setup()""" 32 | 33 | _target: Type = field(default_factory=lambda: SemanticDataManager) 34 | 35 | 36 | class SemanticDataManager(VanillaDataManager): # pylint: disable=abstract-method 37 | """Data manager implementation for data that also requires processing semantic data. 
38 | 39 | Args: 40 | config: the DataManagerConfig used to instantiate class 41 | """ 42 | 43 | def create_train_dataset(self) -> SemanticDataset: 44 | return SemanticDataset( 45 | dataparser_outputs=self.dataparser.get_dataparser_outputs(split="train"), 46 | scale_factor=self.config.camera_res_scale_factor, 47 | ) 48 | 49 | def create_eval_dataset(self) -> SemanticDataset: 50 | return SemanticDataset( 51 | dataparser_outputs=self.dataparser.get_dataparser_outputs(split=self.test_split), 52 | scale_factor=self.config.camera_res_scale_factor, 53 | ) 54 | -------------------------------------------------------------------------------- /nerfstudio/data/datamanagers/variable_res_datamanager.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Data loader for variable resolution datasets, where batching raw image tensors isn't possible. 17 | """ 18 | 19 | from __future__ import annotations 20 | 21 | from dataclasses import dataclass 22 | from typing import Dict, List 23 | 24 | from nerfstudio.data.datamanagers.base_datamanager import VanillaDataManagerConfig 25 | from nerfstudio.data.utils.nerfstudio_collate import nerfstudio_collate 26 | 27 | 28 | def variable_res_collate(batch: List[Dict]) -> Dict: 29 | """Default collate function for the cached dataloader. 30 | Args: 31 | batch: Batch of samples from the dataset. 32 | Returns: 33 | Collated batch. 34 | """ 35 | images = [] 36 | masks = [] 37 | for data in batch: 38 | image = data.pop("image") 39 | mask = data.pop("mask", None) 40 | images.append(image) 41 | if mask is not None: 42 | masks.append(mask) 43 | 44 | new_batch: dict = nerfstudio_collate(batch) 45 | new_batch["image"] = images 46 | if masks: 47 | new_batch["mask"] = masks 48 | 49 | return new_batch 50 | 51 | 52 | @dataclass 53 | class VariableResDataManagerConfig(VanillaDataManagerConfig): 54 | """A datamanager for variable resolution datasets, with presets to optimize 55 | for the fact that we are now dealing with lists of images and masks. 56 | """ 57 | 58 | train_num_images_to_sample_from: int = 40 59 | """Number of images to sample during training iteration.""" 60 | train_num_times_to_repeat_images: int = 100 61 | """When not training on all images, number of iterations before picking new 62 | images. If -1, never pick new images.""" 63 | eval_num_images_to_sample_from: int = 40 64 | """Number of images to sample during eval iteration.""" 65 | eval_num_times_to_repeat_images: int = 100 66 | collate_fn = staticmethod(variable_res_collate) 67 | -------------------------------------------------------------------------------- /nerfstudio/data/dataparsers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved.
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/data/datasets/semantic_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Semantic dataset. 17 | """ 18 | 19 | from typing import Dict 20 | 21 | import torch 22 | 23 | from nerfstudio.data.dataparsers.base_dataparser import DataparserOutputs, Semantics 24 | from nerfstudio.data.datasets.base_dataset import InputDataset 25 | from nerfstudio.data.utils.data_utils import get_semantics_and_mask_tensors_from_path 26 | 27 | 28 | class SemanticDataset(InputDataset): 29 | """Dataset that returns images, semantics, and masks. 30 | 31 | Args: 32 | dataparser_outputs: description of where and how to read input images.
33 | """ 34 | 35 | def __init__(self, dataparser_outputs: DataparserOutputs, scale_factor: float = 1.0): 36 | super().__init__(dataparser_outputs, scale_factor) 37 | assert "semantics" in dataparser_outputs.metadata.keys() and isinstance(self.metadata["semantics"], Semantics) 38 | self.semantics = self.metadata["semantics"] 39 | self.mask_indices = torch.tensor( 40 | [self.semantics.classes.index(mask_class) for mask_class in self.semantics.mask_classes] 41 | ).view(1, 1, -1) 42 | 43 | def get_metadata(self, data: Dict) -> Dict: 44 | # handle mask 45 | filepath = self.semantics.filenames[data["image_idx"]] 46 | semantic_label, mask = get_semantics_and_mask_tensors_from_path( 47 | filepath=filepath, mask_indices=self.mask_indices, scale_factor=self.scale_factor 48 | ) 49 | if "mask" in data.keys(): 50 | mask = mask & data["mask"] 51 | return {"mask": mask, "semantics": semantic_label} 52 | -------------------------------------------------------------------------------- /nerfstudio/data/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/data/utils/data_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | """Utility functions to allow easy re-use of common operations across dataloaders""" 16 | from pathlib import Path 17 | from typing import List, Tuple, Union 18 | 19 | import numpy as np 20 | import torch 21 | from PIL import Image 22 | 23 | 24 | def get_image_mask_tensor_from_path(filepath: Path, scale_factor: float = 1.0) -> torch.Tensor: 25 | """ 26 | Utility function to read a mask image from the given path and return a boolean tensor 27 | """ 28 | pil_mask = Image.open(filepath) 29 | if scale_factor != 1.0: 30 | width, height = pil_mask.size 31 | newsize = (int(width * scale_factor), int(height * scale_factor)) 32 | pil_mask = pil_mask.resize(newsize, resample=Image.NEAREST) 33 | mask_tensor = torch.from_numpy(np.array(pil_mask)).unsqueeze(-1).bool() 34 | return mask_tensor 35 | 36 | 37 | def get_semantics_and_mask_tensors_from_path( 38 | filepath: Path, mask_indices: Union[List, torch.Tensor], scale_factor: float = 1.0 39 | ) -> Tuple[torch.Tensor, torch.Tensor]: 40 | """ 41 | Utility function to read segmentation from the given filepath. 42 | If no mask is required, use mask_indices = []. 43 | """ 44 | if isinstance(mask_indices, List): 45 | mask_indices = torch.tensor(mask_indices, dtype=torch.int64).view(1, 1, -1) 46 | pil_image = Image.open(filepath) 47 | if scale_factor != 1.0: 48 | width, height = pil_image.size 49 | newsize = (int(width * scale_factor), int(height * scale_factor)) 50 | pil_image = pil_image.resize(newsize, resample=Image.NEAREST) 51 | semantics = torch.from_numpy(np.array(pil_image, dtype="int64"))[..., None] 52 | mask = torch.sum(semantics == mask_indices, dim=-1, keepdim=True) == 0 53 | return semantics, mask 54 | -------------------------------------------------------------------------------- /nerfstudio/engine/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/exporter/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
14 | -------------------------------------------------------------------------------- /nerfstudio/field_components/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """init field modules""" 16 | from .base_field_component import FieldComponent 17 | from .encodings import Encoding, ScalingAndOffset 18 | from .mlp import MLP 19 | -------------------------------------------------------------------------------- /nerfstudio/field_components/activations.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Special activation functions. 17 | """ 18 | 19 | import torch 20 | from torch.autograd import Function 21 | from torch.cuda.amp import custom_bwd, custom_fwd 22 | 23 | 24 | class _TruncExp(Function): # pylint: disable=abstract-method 25 | # Implementation from torch-ngp: 26 | # https://github.com/ashawkey/torch-ngp/blob/93b08a0d4ec1cc6e69d85df7f0acdfb99603b628/activation.py 27 | @staticmethod 28 | @custom_fwd(cast_inputs=torch.float32) 29 | def forward(ctx, x): # pylint: disable=arguments-differ 30 | ctx.save_for_backward(x) 31 | return torch.exp(x) 32 | 33 | @staticmethod 34 | @custom_bwd 35 | def backward(ctx, g): # pylint: disable=arguments-differ 36 | x = ctx.saved_tensors[0] 37 | return g * torch.exp(x.clamp(-15, 15)) 38 | 39 | 40 | trunc_exp = _TruncExp.apply 41 | """Same as torch.exp, but with the backward pass clipped to prevent vanishing/exploding 42 | gradients.""" 43 | -------------------------------------------------------------------------------- /nerfstudio/field_components/base_field_component.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | The field module baseclass. 17 | """ 18 | from abc import abstractmethod 19 | from typing import Optional 20 | 21 | from torch import nn 22 | from torchtyping import TensorType 23 | 24 | 25 | class FieldComponent(nn.Module): 26 | """Field modules that can be combined to store and compute the fields. 27 | 28 | Args: 29 | in_dim: Input dimension to module. 30 | out_dim: Output dimension to module. 31 | """ 32 | 33 | def __init__(self, in_dim: Optional[int] = None, out_dim: Optional[int] = None) -> None: 34 | super().__init__() 35 | self.in_dim = in_dim 36 | self.out_dim = out_dim 37 | 38 | def build_nn_modules(self) -> None: 39 | """Function instantiates any torch.nn members within the module. 40 | If none exist, do nothing.""" 41 | 42 | def set_in_dim(self, in_dim: int) -> None: 43 | """Sets input dimension of encoding 44 | 45 | Args: 46 | in_dim: input dimension 47 | """ 48 | if in_dim <= 0: 49 | raise ValueError("Input dimension should be greater than zero") 50 | self.in_dim = in_dim 51 | 52 | def get_out_dim(self) -> int: 53 | """Calculates output dimension of encoding.""" 54 | if self.out_dim is None: 55 | raise ValueError("Output dimension has not been set") 56 | return self.out_dim 57 | 58 | @abstractmethod 59 | def forward(self, in_tensor: TensorType["bs":..., "input_dim"]) -> TensorType["bs":..., "output_dim"]: 60 | """ 61 | Returns processed tensor 62 | 63 | Args: 64 | in_tensor: Input tensor to process 65 | """ 66 | raise NotImplementedError 67 | -------------------------------------------------------------------------------- /nerfstudio/field_components/embedding.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Code for embeddings. 17 | """ 18 | 19 | 20 | import torch 21 | from torchtyping import TensorType 22 | 23 | from nerfstudio.field_components.base_field_component import FieldComponent 24 | 25 | 26 | class Embedding(FieldComponent): 27 | """Index into embeddings.
28 | # TODO: add different types of initializations 29 | 30 | Args: 31 | in_dim: Number of embeddings 32 | out_dim: Dimension of the embedding vectors 33 | """ 34 | 35 | def __init__(self, in_dim: int, out_dim: int) -> None: 36 | super().__init__() 37 | self.in_dim = in_dim 38 | self.out_dim = out_dim 39 | self.build_nn_modules() 40 | 41 | def build_nn_modules(self) -> None: 42 | self.embedding = torch.nn.Embedding(self.in_dim, self.out_dim) 43 | 44 | def mean(self, dim=0): 45 | """Return the mean of the embedding weights along a dim.""" 46 | return self.embedding.weight.mean(dim) 47 | 48 | def forward(self, in_tensor: TensorType[..., "input_dim"]) -> TensorType[..., "output_dim"]: 49 | """Call forward 50 | 51 | Args: 52 | in_tensor: input tensor to process 53 | """ 54 | return self.embedding(in_tensor) 55 | -------------------------------------------------------------------------------- /nerfstudio/fields/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/model_components/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/model_components/ray_generators.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Ray generator. 
17 | """ 18 | from torch import nn 19 | from torchtyping import TensorType 20 | 21 | from nerfstudio.cameras.camera_optimizers import CameraOptimizer 22 | from nerfstudio.cameras.cameras import Cameras 23 | from nerfstudio.cameras.rays import RayBundle 24 | 25 | 26 | class RayGenerator(nn.Module): 27 | """torch.nn Module for generating rays. 28 | This class is the interface between the scene's cameras/camera optimizer and the ray sampler. 29 | 30 | Args: 31 | cameras: Camera objects containing camera info. 32 | pose_optimizer: pose optimization module, for optimizing noisy camera intrinsics/extrinsics. 33 | """ 34 | 35 | def __init__(self, cameras: Cameras, pose_optimizer: CameraOptimizer) -> None: 36 | super().__init__() 37 | self.cameras = cameras 38 | self.pose_optimizer = pose_optimizer 39 | self.image_coords = nn.Parameter(cameras.get_image_coords(), requires_grad=False) 40 | 41 | def forward(self, ray_indices: TensorType["num_rays", 3]) -> RayBundle: 42 | """Index into the cameras to generate the rays. 43 | 44 | Args: 45 | ray_indices: Contains camera, row, and col indices for target rays. 46 | """ 47 | c = ray_indices[:, 0] # camera indices 48 | y = ray_indices[:, 1] # row indices 49 | x = ray_indices[:, 2] # col indices 50 | coords = self.image_coords[y, x] 51 | 52 | camera_opt_to_camera = self.pose_optimizer(c) 53 | 54 | ray_bundle = self.cameras.generate_rays( 55 | camera_indices=c.unsqueeze(-1), 56 | coords=coords, 57 | camera_opt_to_camera=camera_opt_to_camera, 58 | ) 59 | return ray_bundle 60 | -------------------------------------------------------------------------------- /nerfstudio/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/models/monosdf.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Implementation of MonoSDF.
17 | """ 18 | 19 | from __future__ import annotations 20 | 21 | from dataclasses import dataclass, field 22 | from typing import Type 23 | 24 | from nerfstudio.models.volsdf import VolSDFModel, VolSDFModelConfig 25 | 26 | 27 | @dataclass 28 | class MonoSDFModelConfig(VolSDFModelConfig): 29 | """MonoSDF Model Config""" 30 | 31 | _target: Type = field(default_factory=lambda: MonoSDFModel) 32 | mono_normal_loss_mult: float = 0.1 33 | """Monocular normal consistency loss multiplier.""" 34 | mono_depth_loss_mult: float = 0.05 35 | """Monocular depth consistency loss multiplier.""" 36 | 37 | 38 | class MonoSDFModel(VolSDFModel): 39 | """MonoSDF model 40 | 41 | Args: 42 | config: MonoSDF configuration to instantiate model 43 | """ 44 | 45 | config: MonoSDFModelConfig 46 | -------------------------------------------------------------------------------- /nerfstudio/models/neuralreconW.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Implementation of NeuralReconW. 17 | """ 18 | 19 | from __future__ import annotations 20 | 21 | from dataclasses import dataclass, field 22 | from typing import List, Type 23 | 24 | from nerfstudio.engine.callbacks import ( 25 | TrainingCallback, 26 | TrainingCallbackAttributes, 27 | TrainingCallbackLocation, 28 | ) 29 | from nerfstudio.model_components.ray_samplers import NeuralReconWSampler 30 | from nerfstudio.model_components.scene_colliders import SphereCollider 31 | from nerfstudio.models.neus import NeuSModel, NeuSModelConfig 32 | 33 | 34 | @dataclass 35 | class NeuralReconWModelConfig(NeuSModelConfig): 36 | """NeuralReconW Model Config""" 37 | 38 | _target: Type = field(default_factory=lambda: NeuralReconWModel) 39 | 40 | 41 | class NeuralReconWModel(NeuSModel): 42 | """NeuralReconW model 43 | 44 | Args: 45 | config: NeuralReconW configuration to instantiate model 46 | """ 47 | 48 | config: NeuralReconWModelConfig 49 | 50 | def populate_modules(self): 51 | """Set the fields and modules.""" 52 | super().populate_modules() 53 | 54 | # voxel-surface hybrid sampler from NeuralReconW 55 | self.sampler = NeuralReconWSampler( 56 | aabb=self.scene_box.aabb, coarse_binary_grid=self.scene_box.coarse_binary_gird 57 | ) 58 | # NeuralReconW uses a sphere collider, so we override the default collider here 59 | self.collider = SphereCollider(radius=1.0, soft_intersection=False) 60 | 61 | def get_training_callbacks( 62 | self, training_callback_attributes: TrainingCallbackAttributes 63 | ) -> List[TrainingCallback]: 64 | callbacks = super().get_training_callbacks(training_callback_attributes) 65 | 66 | # add sampler callbacks 67 | sdf_fn = lambda x: self.field.forward_geonetwork(x)[:, 0].contiguous() 68 | callbacks.append( 69 | TrainingCallback( 70 | where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], 71 | update_every_num_iters=1, 72 | func=self.sampler.update_binary_grid, 73 |
kwargs={"sdf_fn": sdf_fn}, 74 | ) 75 | ) 76 | 77 | return callbacks 78 | -------------------------------------------------------------------------------- /nerfstudio/models/volsdf.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Implementation of VolSDF. 17 | """ 18 | 19 | from __future__ import annotations 20 | 21 | from dataclasses import dataclass, field 22 | from typing import Dict, Type 23 | 24 | from nerfstudio.cameras.rays import RayBundle 25 | from nerfstudio.field_components.field_heads import FieldHeadNames 26 | from nerfstudio.model_components.ray_samplers import ErrorBoundedSampler 27 | from nerfstudio.models.base_surface_model import SurfaceModel, SurfaceModelConfig 28 | 29 | 30 | @dataclass 31 | class VolSDFModelConfig(SurfaceModelConfig): 32 | """VolSDF Model Config""" 33 | 34 | _target: Type = field(default_factory=lambda: VolSDFModel) 35 | num_samples: int = 64 36 | """Number of samples after error bounded sampling""" 37 | num_samples_eval: int = 128 38 | """Number of samples per iteration used in error bounded sampling""" 39 | num_samples_extra: int = 32 40 | """Number of uniformly sampled points for training""" 41 | 42 | 43 | class VolSDFModel(SurfaceModel): 44 | """VolSDF model 45 | 46 | Args: 47 | config: VolSDF configuration to instantiate model 48 | """ 49 | 50 | config: VolSDFModelConfig 51 | 52 | def populate_modules(self): 53 | """Set the fields and modules.""" 54 | super().populate_modules() 55 | 56 | self.sampler = ErrorBoundedSampler( 57 | num_samples=self.config.num_samples, 58 | num_samples_eval=self.config.num_samples_eval, 59 | num_samples_extra=self.config.num_samples_extra, 60 | ) 61 | 62 | def sample_and_forward_field(self, ray_bundle: RayBundle) -> Dict: 63 | ray_samples, eik_points = self.sampler( 64 | ray_bundle, density_fn=self.field.laplace_density, sdf_fn=self.field.get_sdf 65 | ) 66 | field_outputs = self.field(ray_samples) 67 | weights, transmittance = ray_samples.get_weights_and_transmittance(field_outputs[FieldHeadNames.DENSITY]) 68 | bg_transmittance = transmittance[:, -1, :] 69 | 70 | samples_and_field_outputs = { 71 | "ray_samples": ray_samples, 72 | "eik_points": eik_points, 73 | "field_outputs": field_outputs, 74 | "weights": weights, 75 | "bg_transmittance": bg_transmittance, 76 | } 77 | return samples_and_field_outputs 78 | 79 | def get_metrics_dict(self, outputs, batch) -> Dict: 80 | metrics_dict = super().get_metrics_dict(outputs, batch) 81 | if self.training: 82 | # training statics 83 | metrics_dict["beta"] = self.field.laplace_density.get_beta().item() 84 | metrics_dict["alpha"] = 1.0 / self.field.laplace_density.get_beta().item() 85 | 86 | return metrics_dict 87 | -------------------------------------------------------------------------------- /nerfstudio/pipelines/__init__.py: 
-------------------------------------------------------------------------------- /nerfstudio/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/process_data/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/process_data/record3d_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """Helper functions for processing record3d data.""" 16 | 17 | import json 18 | from pathlib import Path 19 | from typing import List 20 | 21 | import numpy as np 22 | from rich.console import Console 23 | from scipy.spatial.transform import Rotation 24 | 25 | from nerfstudio.process_data.process_data_utils import CAMERA_MODELS 26 | from nerfstudio.utils import io 27 | 28 | CONSOLE = Console(width=120) 29 | 30 | 31 | def record3d_to_json(images_paths: List[Path], metadata_path: Path, output_dir: Path, indices: np.ndarray) -> int: 32 | """Converts Record3D's metadata and image paths to a JSON file. 33 | 34 | Args: 35 | images_paths: list of image paths. 36 | metadata_path: Path to the Record3D metadata JSON file. 37 | output_dir: Path to the output directory. 38 | indices: Indices to sample the metadata_path. Should be the same length as images_paths. 39 | 40 | Returns: 41 | The number of registered images.
42 | """ 43 | 44 | assert len(images_paths) == len(indices) 45 | 46 | metadata_dict = io.load_from_json(metadata_path) 47 | 48 | poses_data = np.array(metadata_dict["poses"]) # (N, 3, 4) 49 | camera_to_worlds = np.concatenate( 50 | [Rotation.from_quat(poses_data[:, :4]).as_matrix(), poses_data[:, 4:, None]], 51 | axis=-1, 52 | ).astype(np.float32) 53 | camera_to_worlds = camera_to_worlds[indices] 54 | 55 | homogeneous_coord = np.zeros_like(camera_to_worlds[..., :1, :]) 56 | homogeneous_coord[..., :, 3] = 1 57 | camera_to_worlds = np.concatenate([camera_to_worlds, homogeneous_coord], -2) 58 | 59 | frames = [] 60 | for i, im_path in enumerate(images_paths): 61 | c2w = camera_to_worlds[i] 62 | frame = { 63 | "file_path": im_path.as_posix(), 64 | "transform_matrix": c2w.tolist(), 65 | } 66 | frames.append(frame) 67 | 68 | # Camera intrinsics 69 | K = np.array(metadata_dict["K"]).reshape((3, 3)).T 70 | focal_length = K[0, 0] 71 | 72 | H = metadata_dict["h"] 73 | W = metadata_dict["w"] 74 | 75 | # TODO(akristoffersen): The metadata dict comes with principle points, 76 | # but caused errors in image coord indexing. Should update once that is fixed. 77 | cx, cy = W / 2, H / 2 78 | 79 | out = { 80 | "fl_x": focal_length, 81 | "fl_y": focal_length, 82 | "cx": cx, 83 | "cy": cy, 84 | "w": W, 85 | "h": H, 86 | "camera_model": CAMERA_MODELS["perspective"].name, 87 | } 88 | 89 | out["frames"] = frames 90 | 91 | with open(output_dir / "transforms.json", "w", encoding="utf-8") as f: 92 | json.dump(out, f, indent=4) 93 | 94 | return len(frames) 95 | -------------------------------------------------------------------------------- /nerfstudio/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/nerfstudio/py.typed -------------------------------------------------------------------------------- /nerfstudio/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/utils/colors.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
/nerfstudio/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/nerfstudio/py.typed -------------------------------------------------------------------------------- /nerfstudio/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/utils/colors.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """Common Colors""" 16 | from typing import Union 17 | 18 | import torch 19 | from torchtyping import TensorType 20 | 21 | WHITE = torch.tensor([1.0, 1.0, 1.0]) 22 | BLACK = torch.tensor([0.0, 0.0, 0.0]) 23 | RED = torch.tensor([1.0, 0.0, 0.0]) 24 | GREEN = torch.tensor([0.0, 1.0, 0.0]) 25 | BLUE = torch.tensor([0.0, 0.0, 1.0]) 26 | 27 | COLORS_DICT = { 28 | "white": WHITE, 29 | "black": BLACK, 30 | "red": RED, 31 | "green": GREEN, 32 | "blue": BLUE, 33 | } 34 | 35 | 36 | def get_color(color: Union[str, list]) -> TensorType[3]: 37 | """ 38 | Args: 39 | color (Union[str, list]): Color as a string or an RGB list 40 | 41 | Returns: 42 | TensorType[3]: Parsed color 43 | """ 44 | if isinstance(color, str): 45 | color = color.lower() 46 | if color not in COLORS_DICT: 47 | raise ValueError(f"{color} is not a valid preset color") 48 | return COLORS_DICT[color] 49 | if isinstance(color, list): 50 | if len(color) != 3: 51 | raise ValueError(f"Color should be 3 values (RGB) instead got {color}") 52 | return torch.tensor(color) 53 | 54 | raise ValueError(f"Color should be an RGB list or string, instead got {type(color)}") 55 | -------------------------------------------------------------------------------- /nerfstudio/utils/comms.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """functionality to handle multiprocessing syncing and communicating""" 16 | import torch.distributed as dist 17 | 18 | LOCAL_PROCESS_GROUP = None 19 | 20 | 21 | def is_dist_avail_and_initialized() -> bool: 22 | """Returns True if distributed is available and initialized.""" 23 | return dist.is_available() and dist.is_initialized() 24 | 25 | 26 | def get_world_size() -> int: 27 | """Get the total number of processes (typically one per GPU).""" 28 | if not is_dist_avail_and_initialized(): 29 | return 1 30 | return dist.get_world_size() 31 | 32 | 33 | def get_rank() -> int: 34 | """Get the global rank of the current process.""" 35 | if not is_dist_avail_and_initialized(): 36 | return 0 37 | return dist.get_rank() 38 | 39 | 40 | def get_local_rank() -> int: 41 | """The rank of the current process within the local (per-machine) process group.""" 42 | if not is_dist_avail_and_initialized(): 43 | return 0 44 | assert ( 45 | LOCAL_PROCESS_GROUP is not None 46 | ), "Local process group is not created! Please use launch() to spawn processes!" 47 | return dist.get_rank(group=LOCAL_PROCESS_GROUP) 48 | 49 | 50 | def get_local_size() -> int: 51 | """ 52 | The size of the per-machine process group, 53 | i.e. the number of processes per machine.
54 | """ 55 | if not is_dist_avail_and_initialized(): 56 | return 1 57 | return dist.get_world_size(group=LOCAL_PROCESS_GROUP) 58 | 59 | 60 | def is_main_process() -> bool: 61 | """check to see if you are currently on the main process""" 62 | return get_rank() == 0 63 | 64 | 65 | def synchronize(): 66 | """ 67 | Helper function to synchronize (barrier) among all processes when 68 | using distributed training 69 | """ 70 | if dist.get_world_size() == 1: 71 | return 72 | if dist.get_backend() == dist.Backend.NCCL: 73 | # This argument is needed to avoid warnings. 74 | # It's valid only for NCCL backend. 75 | dist.barrier(device_ids=[get_local_rank()]) 76 | else: 77 | dist.barrier() 78 | -------------------------------------------------------------------------------- /nerfstudio/utils/decorators.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Decorator definitions 17 | """ 18 | from typing import Callable, List 19 | 20 | from nerfstudio.utils import comms 21 | 22 | 23 | def decorate_all(decorators: List[Callable]) -> Callable: 24 | """A decorator to decorate all member functions of a class 25 | 26 | Args: 27 | decorators: list of decorators to add to all functions in the class 28 | """ 29 | 30 | def decorate(cls): 31 | for attr in cls.__dict__: 32 | if callable(getattr(cls, attr)) and attr != "__init__": 33 | for decorator in decorators: 34 | setattr(cls, attr, decorator(getattr(cls, attr))) 35 | return cls 36 | 37 | return decorate 38 | 39 | 40 | def check_profiler_enabled(func: Callable) -> Callable: 41 | """Decorator: check if profiler is enabled""" 42 | 43 | def wrapper(self, *args, **kwargs): 44 | ret = None 45 | if self.config.enable_profiler: 46 | ret = func(self, *args, **kwargs) 47 | return ret 48 | 49 | return wrapper 50 | 51 | 52 | def check_viewer_enabled(func: Callable) -> Callable: 53 | """Decorator: check if viewer is enabled and only run on main process""" 54 | 55 | def wrapper(self, *args, **kwargs): 56 | ret = None 57 | if self.config.is_viewer_enabled() and comms.is_main_process(): 58 | ret = func(self, *args, **kwargs) 59 | return ret 60 | 61 | return wrapper 62 | 63 | 64 | def check_eval_enabled(func: Callable) -> Callable: 65 | """Decorator: check if evaluation step is enabled""" 66 | 67 | def wrapper(self, *args, **kwargs): 68 | ret = None 69 | if self.config.is_wandb_enabled() or self.config.is_tensorboard_enabled(): 70 | ret = func(self, *args, **kwargs) 71 | return ret 72 | 73 | return wrapper 74 | 75 | 76 | def check_main_thread(func: Callable) -> Callable: 77 | """Decorator: check if you are on main thread""" 78 | 79 | def wrapper(*args, **kwargs): 80 | ret = None 81 | if comms.is_main_process(): 82 | ret = func(*args, **kwargs) 83 | return ret 84 | 85 | return wrapper 86 | -------------------------------------------------------------------------------- 
/nerfstudio/utils/images.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Defines an image that can be batched with the default nerfstudio collate fn, even if the images 17 | aren't of the same height and width. 18 | """ 19 | 20 | from typing import List 21 | 22 | import torch 23 | 24 | 25 | # pylint: disable=too-few-public-methods 26 | class BasicImages: 27 | """This is a very primitive struct for holding images, especially for when these images 28 | are of different heights / widths. 29 | 30 | The purpose of this is to have a special struct wrapping around a list so that the 31 | nerfstudio_collate fn and other parts of the code recognise this as a struct to leave alone 32 | instead of reshaping or concatenating into a single tensor (since this will likely be used 33 | for cases where we have images of different sizes and shapes). 34 | 35 | This only has one batch dimension and will likely be replaced down the line with some 36 | TensorDataclass alternative that supports arbitrary batches. 37 | """ 38 | 39 | def __init__(self, images: List): 40 | assert isinstance(images, List) 41 | assert not images or isinstance( 42 | images[0], torch.Tensor 43 | ), f"Input should be a list of tensors, not {type(images[0]) if isinstance(images, List) else type(images)}" 44 | self.images = images 45 | 46 | def to(self, device): 47 | """Move the images to the given device.""" 48 | assert isinstance(device, torch.device) 49 | return BasicImages([image.to(device) for image in self.images]) 50 | -------------------------------------------------------------------------------- /nerfstudio/utils/install_checks.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """Helpers for checking if programs are installed""" 16 | 17 | import shutil 18 | import sys 19 | 20 | from rich.console import Console 21 | 22 | CONSOLE = Console(width=120) 23 | 24 | 25 | def check_ffmpeg_installed(): 26 | """Checks if ffmpeg is installed.""" 27 | ffmpeg_path = shutil.which("ffmpeg") 28 | if ffmpeg_path is None: 29 | CONSOLE.print("[bold red]Could not find ffmpeg. 
Please install ffmpeg.") 30 | print("See https://ffmpeg.org/download.html for installation instructions.") 31 | print("ffmpeg is only necessary if using videos as input.") 32 | sys.exit(1) 33 | 34 | 35 | def check_colmap_installed(): 36 | """Checks if colmap is installed.""" 37 | colmap_path = shutil.which("colmap") 38 | if colmap_path is None: 39 | CONSOLE.print("[bold red]Could not find COLMAP. Please install COLMAP.") 40 | print("See https://colmap.github.io/install.html for installation instructions.") 41 | sys.exit(1) 42 | -------------------------------------------------------------------------------- /nerfstudio/utils/io.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Input/output utils. 17 | """ 18 | 19 | import json 20 | from pathlib import Path 21 | 22 | 23 | def load_from_json(filename: Path): 24 | """Load a dictionary from a JSON filename. 25 | 26 | Args: 27 | filename: The filename to load from. 28 | """ 29 | assert filename.suffix == ".json" 30 | with open(filename, encoding="UTF-8") as file: 31 | return json.load(file) 32 | 33 | 34 | def write_to_json(filename: Path, content: dict): 35 | """Write data to a JSON file. 36 | 37 | Args: 38 | filename: The filename to write to. 39 | content: The dictionary data to write. 40 | """ 41 | assert filename.suffix == ".json" 42 | with open(filename, "w", encoding="UTF-8") as file: 43 | json.dump(content, file) 44 | -------------------------------------------------------------------------------- /nerfstudio/utils/poses.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Common 3D pose methods 17 | """ 18 | 19 | import torch 20 | from torchtyping import TensorType 21 | 22 | 23 | def to4x4(pose: TensorType[..., 3, 4]) -> TensorType[..., 4, 4]: 24 | """Convert 3x4 pose matrices to a 4x4 with the addition of a homogeneous coordinate. 25 | 26 | Args: 27 | pose: Camera pose without homogenous coordinate. 28 | 29 | Returns: 30 | Camera poses with additional homogenous coordinate added. 
31 | """ 32 | constants = torch.zeros_like(pose[..., :1, :], device=pose.device) 33 | constants[..., :, 3] = 1 34 | return torch.cat([pose, constants], dim=-2) 35 | 36 | 37 | def inverse(pose: TensorType[..., 3, 4]) -> TensorType[..., 3, 4]: 38 | """Invert provided pose matrix. 39 | 40 | Args: 41 | pose: Camera pose without homogenous coordinate. 42 | 43 | Returns: 44 | Inverse of pose. 45 | """ 46 | R = pose[..., :3, :3] 47 | t = pose[..., :3, 3:] 48 | R_inverse = R.transpose(-2, -1) # pylint: disable=invalid-name 49 | t_inverse = -R_inverse.matmul(t) 50 | return torch.cat([R_inverse, t_inverse], dim=-1) 51 | 52 | 53 | def multiply(pose_a: TensorType[..., 3, 4], pose_b: TensorType[..., 3, 4]) -> TensorType[..., 3, 4]: 54 | """Multiply two pose matrices, A @ B. 55 | 56 | Args: 57 | pose_a: Left pose matrix, usually a transformation applied to the right. 58 | pose_b: Right pose matrix, usually a camera pose that will be tranformed by pose_a. 59 | 60 | Returns: 61 | Camera pose matrix where pose_a was applied to pose_b. 62 | """ 63 | R1, t1 = pose_a[..., :3, :3], pose_a[..., :3, 3:] 64 | R2, t2 = pose_b[..., :3, :3], pose_b[..., :3, 3:] 65 | R = R1.matmul(R2) 66 | t = t1 + R1.matmul(t2) 67 | return torch.cat([R, t], dim=-1) 68 | 69 | 70 | def normalize(poses: TensorType[..., 3, 4]) -> TensorType[..., 3, 4]: 71 | """Normalize the XYZs of poses to fit within a unit cube ([-1, 1]). Note: This operation is not in-place. 72 | 73 | Args: 74 | poses: A collection of poses to be normalized. 75 | 76 | Returns; 77 | Normalized collection of poses. 78 | """ 79 | pose_copy = torch.clone(poses) 80 | pose_copy[..., :3, 3] /= torch.max(torch.abs(poses[..., :3, 3])) 81 | 82 | return pose_copy 83 | -------------------------------------------------------------------------------- /nerfstudio/utils/printing.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """A collection of common strings and print statements used throughout the codebase.""" 16 | 17 | from math import floor, log 18 | 19 | from rich.console import Console 20 | 21 | CONSOLE = Console(width=120) 22 | 23 | 24 | def print_tcnn_speed_warning(method_name: str): 25 | """Prints a warning about the speed of the TCNN.""" 26 | CONSOLE.line() 27 | CONSOLE.print(f"[bold yellow]WARNING: Using a slow implementation of {method_name}. 
") 28 | CONSOLE.print( 29 | "[bold yellow]:person_running: :person_running: " 30 | + "Install tcnn for speedups :person_running: :person_running:" 31 | ) 32 | CONSOLE.print("[yellow]pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch") 33 | CONSOLE.line() 34 | 35 | 36 | def human_format(num): 37 | """Format a number in a more human readable way 38 | 39 | Args: 40 | num: number to format 41 | """ 42 | units = ["", "K", "M", "B", "T", "P"] 43 | k = 1000.0 44 | magnitude = int(floor(log(num, k))) 45 | return f"{(num / k**magnitude):.2f} {units[magnitude]}" 46 | -------------------------------------------------------------------------------- /nerfstudio/utils/rich_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """Additional rich ui components""" 16 | 17 | from contextlib import nullcontext 18 | from typing import Optional 19 | 20 | from rich.console import Console 21 | from rich.progress import ( 22 | BarColumn, 23 | Progress, 24 | ProgressColumn, 25 | TaskProgressColumn, 26 | TextColumn, 27 | TimeRemainingColumn, 28 | ) 29 | from rich.text import Text 30 | 31 | CONSOLE = Console(width=120) 32 | 33 | 34 | class ItersPerSecColumn(ProgressColumn): 35 | """Renders the iterations per second for a progress bar.""" 36 | 37 | def __init__(self, suffix="it/s") -> None: 38 | super().__init__() 39 | self.suffix = suffix 40 | 41 | def render(self, task: "Task") -> Text: 42 | """Show data transfer speed.""" 43 | speed = task.finished_speed or task.speed 44 | if speed is None: 45 | return Text("?", style="progress.data.speed") 46 | return Text(f"{speed:.2f} {self.suffix}", style="progress.data.speed") 47 | 48 | 49 | def status(msg: str, spinner: str = "bouncingBall", verbose: bool = False): 50 | """A context manager that does nothing is verbose is True. Otherwise it hides logs under a message. 51 | 52 | Args: 53 | msg: The message to log. 54 | spinner: The spinner to use. 55 | verbose: If True, print all logs, else hide them. 56 | """ 57 | if verbose: 58 | return nullcontext() 59 | return CONSOLE.status(msg, spinner=spinner) 60 | 61 | 62 | def get_progress(description: str, suffix: Optional[str] = None): 63 | """Helper function to return a rich Progress object.""" 64 | progress_list = [TextColumn(description), BarColumn(), TaskProgressColumn(show_speed=True)] 65 | progress_list += [ItersPerSecColumn(suffix=suffix)] if suffix else [] 66 | progress_list += [TimeRemainingColumn(elapsed_when_finished=True, compact=True)] 67 | progress = Progress(*progress_list) 68 | return progress 69 | -------------------------------------------------------------------------------- /nerfstudio/utils/scripts.py: -------------------------------------------------------------------------------- 1 | """Helpers for running script commands.""" 2 | # Copyright 2022 The Nerfstudio Team. 
All rights reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | import subprocess 17 | import sys 18 | from typing import Optional 19 | 20 | from rich.console import Console 21 | 22 | CONSOLE = Console(width=120) 23 | 24 | 25 | def run_command(cmd: str, verbose=False) -> Optional[str]: 26 | """Runs a command and returns the output. 27 | 28 | Args: 29 | cmd: Command to run. 30 | verbose: If True, logs the output of the command. 31 | Returns: 32 | The captured output of the command, or None if the output was not captured. 33 | """ 34 | out = subprocess.run(cmd, capture_output=not verbose, shell=True, check=False) 35 | if out.returncode != 0: 36 | CONSOLE.rule("[bold red] :skull: :skull: :skull: ERROR :skull: :skull: :skull: ", style="red") 37 | CONSOLE.print(f"[bold red]Error running command: {cmd}") 38 | CONSOLE.rule(style="red") 39 | CONSOLE.print(out.stderr.decode("utf-8")) 40 | sys.exit(1) 41 | if out.stdout is not None: 42 | return out.stdout.decode("utf-8") 43 | return None 44 | -------------------------------------------------------------------------------- /nerfstudio/viewer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
14 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/.env.development: -------------------------------------------------------------------------------- 1 | BROWSER=none 2 | FAST_REFRESH=false 3 | HOST=0.0.0.0 4 | PORT=4000 5 | ESLINT_NO_DEV_ERRORS=true -------------------------------------------------------------------------------- /nerfstudio/viewer/app/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "env": { 3 | "browser": true, 4 | "es2021": true 5 | }, 6 | "extends": [ 7 | "eslint:recommended", 8 | "plugin:react/recommended", 9 | "airbnb", 10 | "prettier" 11 | ], 12 | "parser": "@typescript-eslint/parser", 13 | "parserOptions": { 14 | "ecmaFeatures": { 15 | "jsx": true 16 | }, 17 | "ecmaVersion": "latest", 18 | "sourceType": "module" 19 | }, 20 | "plugins": ["react", "@typescript-eslint", "unused-imports"], 21 | "rules": { 22 | "arrow-body-style": "off", 23 | "camelcase": "off", 24 | "import/prefer-default-export": "off", 25 | "no-alert": "off", 26 | "no-console": "off", 27 | "prefer-destructuring": "off", 28 | "react/destructuring-assignment": "off", 29 | "react/prop-types": 0, 30 | "unused-imports/no-unused-imports-ts": 2 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .vscode/ 3 | node_modules/ 4 | build/ 5 | .DS_Store 6 | *.tgz 7 | my-app* 8 | template/src/__tests__/__snapshots__/ 9 | lerna-debug.log 10 | npm-debug.log* 11 | yarn-debug.log* 12 | yarn-error.log* 13 | /.changelog 14 | .npm/ -------------------------------------------------------------------------------- /nerfstudio/viewer/app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "viewer", 3 | "homepage": ".", 4 | "version": "23-03-9-0", 5 | "private": true, 6 | "dependencies": { 7 | "@emotion/react": "^11.10.4", 8 | "@emotion/styled": "^11.10.4", 9 | "@mui/icons-material": "^5.10.3", 10 | "@mui/lab": "^5.0.0-alpha.98", 11 | "@mui/material": "^5.10.3", 12 | "@mui/system": "^5.10.6", 13 | "@mui/x-date-pickers": "^5.0.0", 14 | "@reduxjs/toolkit": "^1.8.3", 15 | "@testing-library/jest-dom": "^5.16.4", 16 | "@testing-library/react": "^13.3.0", 17 | "@testing-library/user-event": "^14.2.0", 18 | "camera-controls": "^1.37.2", 19 | "classnames": "^2.3.1", 20 | "dat.gui": "^0.7.9", 21 | "dayjs": "^1.11.5", 22 | "eslint-config-prettier": "^8.5.0", 23 | "eslint-plugin-unused-imports": "^2.0.0", 24 | "leva": "^0.9.29", 25 | "meshline": "^2.0.4", 26 | "msgpack-lite": "^0.1.26", 27 | "prop-types": "^15.8.1", 28 | "re-resizable": "^6.9.9", 29 | "react": "^18.1.0", 30 | "react-dom": "^18.1.0", 31 | "react-icons": "^4.4.0", 32 | "react-pro-sidebar": "^0.7.1", 33 | "react-redux": "^8.0.2", 34 | "redux": "^4.2.0", 35 | "sass": "^1.54.8", 36 | "socket.io-client": "^4.5.1", 37 | "three": "^0.142.0", 38 | "three-wtm": "^1.0", 39 | "websocket": "^1.0.34", 40 | "wwobjloader2": "^4.0" 41 | }, 42 | "scripts": { 43 | "start": "react-scripts start", 44 | "build": "react-scripts build", 45 | "test": "react-scripts test", 46 | "eject": "react-scripts eject", 47 | "electron": "electron .", 48 | "lint": "eslint --ext .js,.jsx .", 49 | "lint:fix": "eslint --fix --ext .js,.jsx ." 
50 | }, 51 | "eslintConfig": { 52 | "extends": "react-app" 53 | }, 54 | "browserslist": { 55 | "production": [ 56 | ">0.2%", 57 | "not dead", 58 | "not op_mini all" 59 | ], 60 | "development": [ 61 | "last 1 chrome version", 62 | "last 1 firefox version", 63 | "last 1 safari version" 64 | ] 65 | }, 66 | "main": "public/electron.js", 67 | "author": "", 68 | "license": "ISC", 69 | "description": "", 70 | "devDependencies": { 71 | "concurrently": "^7.2.1", 72 | "eslint": "^8.2.0", 73 | "eslint-config-airbnb": "19.0.4", 74 | "eslint-plugin-import": "^2.25.3", 75 | "eslint-plugin-jsx-a11y": "^6.5.1", 76 | "eslint-plugin-react": "^7.28.0", 77 | "eslint-plugin-react-hooks": "^4.3.0", 78 | "prettier": "2.7.1", 79 | "react-scripts": "^5.0.1", 80 | "typescript": "^4.7.3", 81 | "wait-on": "^6.0.1" 82 | }, 83 | "resolutions": { 84 | "nth-check": "^2.0.1" 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/public/electron.js: -------------------------------------------------------------------------------- 1 | const path = require('path'); 2 | 3 | const { app, BrowserWindow } = require('electron'); 4 | const isDev = require('electron-is-dev'); 5 | 6 | function createWindow() { 7 | // Create the browser window. 8 | const win = new BrowserWindow({ 9 | width: 800, 10 | height: 600, 11 | webPreferences: { 12 | nodeIntegration: true, 13 | }, 14 | }); 15 | 16 | // and load the index.html of the app. 17 | // win.loadFile("index.html"); 18 | win.loadURL( 19 | isDev 20 | ? 'http://localhost:3000' 21 | : `file://${path.join(__dirname, '../build/index.html')}`, 22 | ); 23 | // Open the DevTools. 24 | if (isDev) { 25 | win.webContents.openDevTools({ mode: 'detach' }); 26 | } 27 | } 28 | 29 | // This method will be called when Electron has finished 30 | // initialization and is ready to create browser windows. 31 | // Some APIs can only be used after this event occurs. 32 | app.whenReady().then(createWindow); 33 | 34 | // Quit when all windows are closed, except on macOS. There, it's common 35 | // for applications and their menu bar to stay active until the user quits 36 | // explicitly with Cmd + Q. 37 | app.on('window-all-closed', () => { 38 | if (process.platform !== 'darwin') { 39 | app.quit(); 40 | } 41 | }); 42 | 43 | app.on('activate', () => { 44 | if (BrowserWindow.getAllWindows().length === 0) { 45 | createWindow(); 46 | } 47 | }); 48 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/public/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/nerfstudio/viewer/app/public/favicon.png -------------------------------------------------------------------------------- /nerfstudio/viewer/app/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 9 | 18 | 19 | 20 | 21 | 22 | 26 | 27 | 31 | 32 | 41 | nerfstudio 42 | 47 | 48 | 53 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 |
67 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "nerfstudio viewer", 3 | "name": "Interactive NeRF viewer", 4 | "start_url": ".", 5 | "display": "standalone", 6 | "theme_color": "#000000", 7 | "background_color": "#ffffff" 8 | } 9 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/public/robots.txt: -------------------------------------------------------------------------------- 1 | # https://www.robotstxt.org/robotstxt.html 2 | User-agent: * 3 | Disallow: 4 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/requirements.txt: -------------------------------------------------------------------------------- 1 | tyro>=0.3.22 2 | sshconf==0.2.5 3 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/App.jsx: -------------------------------------------------------------------------------- 1 | import { CssBaseline, ThemeProvider } from '@mui/material'; 2 | import React from 'react'; 3 | import { 4 | SceneTreeWebSocketListener, 5 | get_scene_tree, 6 | } from './modules/Scene/Scene'; 7 | 8 | import Banner from './modules/Banner'; 9 | import { BasicTabs } from './modules/SidePanel/SidePanel'; 10 | import ViewerWindow from './modules/ViewerWindow/ViewerWindow'; 11 | import { appTheme } from './themes/theme.ts'; 12 | 13 | export default function App() { 14 | // The scene tree won't rerender but it will listen to changes 15 | // from the redux store and draw three.js objects. 16 | // In particular, it listens to changes to 'sceneState' coming over the websocket. 17 | const sceneTree = get_scene_tree(); 18 | 19 | return ( 20 | 21 | 22 |
23 | {/* Listens for websocket 'write' messages and updates the redux store. */} 24 | 25 | {/* The banner at the top of the page. */} 26 | 27 |
28 | {/* Order matters here. The viewer window must be rendered first. */} 29 | 30 |
31 | 32 |
33 |
34 |
35 |
36 | ); 37 | } 38 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/index.jsx: -------------------------------------------------------------------------------- 1 | import './index.scss'; 2 | import React from 'react'; 3 | import ReactDOM from 'react-dom'; 4 | import { Provider } from 'react-redux'; 5 | import App from './App'; 6 | import WebSocketProvider from './modules/WebSocket/WebSocket'; 7 | import store from './store'; 8 | 9 | const root = ReactDOM.createRoot(document.getElementById('root')); 10 | root.render( 11 | 12 | 13 | 14 | 15 | , 16 | ); 17 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/Banner/Banner.jsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { useDispatch } from 'react-redux'; 3 | 4 | import Button from '@mui/material/Button'; 5 | import GitHubIcon from '@mui/icons-material/GitHub'; 6 | import DescriptionRoundedIcon from '@mui/icons-material/DescriptionRounded'; 7 | import LandingModal from '../LandingModal'; 8 | import ViewportControlsModal from '../ViewportControlsModal'; 9 | 10 | function getParam(param_name) { 11 | // https://stackoverflow.com/questions/831030/how-to-get-get-request-parameters-in-javascript 12 | const params = new RegExp( 13 | `[?&]${encodeURIComponent(param_name)}=([^&]*)`, 14 | ).exec(window.location.href); 15 | if (params === null) { 16 | return undefined; 17 | } 18 | return decodeURIComponent(params[1]); 19 | } 20 | 21 | export default function Banner() { 22 | const dispatch = useDispatch(); 23 | 24 | let open_modal = true; 25 | 26 | // possibly set the websocket url 27 | const websocket_url_from_argument = getParam('websocket_url'); 28 | if (websocket_url_from_argument !== undefined) { 29 | open_modal = false; 30 | dispatch({ 31 | type: 'write', 32 | path: 'websocketState/websocket_url', 33 | data: websocket_url_from_argument, 34 | }); 35 | } 36 | 37 | return ( 38 |
39 | 40 | 50 | 60 | 61 | 62 |
63 | The favicon. 68 |
69 |
70 | ); 71 | } 72 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/Banner/index.jsx: -------------------------------------------------------------------------------- 1 | import Banner from './Banner'; 2 | 3 | export default Banner; 4 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/ConfigPanel/ConfigPanelSlice.js: -------------------------------------------------------------------------------- 1 | // The function below is called a selector and allows us to select a value from 2 | // the state. Selectors can also be defined inline where they're used instead of 3 | // in the slice file. For example: `useSelector((state) => state.counter.value)` 4 | export const selectTrainingState = (state) => 5 | state.shared.rendering.training_state; 6 | export const selectOutputOptions = (state) => state.shared.output_options; 7 | export const selectColormapOptions = (state) => state.shared.colormap_options; 8 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/LandingModal/index.jsx: -------------------------------------------------------------------------------- 1 | import LandingModel from './LandingModal'; 2 | 3 | export default LandingModel; 4 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/LogPanel/LogPanel.jsx: -------------------------------------------------------------------------------- 1 | import { useContext, useEffect } from 'react'; 2 | import { useDispatch, useSelector } from 'react-redux'; 3 | 4 | import { WebSocketContext } from '../WebSocket/WebSocket'; 5 | 6 | const msgpack = require('msgpack-lite'); 7 | 8 | export function LogPanel() { 9 | const websocket = useContext(WebSocketContext).socket; 10 | const dispatch = useDispatch(); 11 | const gpu_oom_error_msg = 'GPU out of memory'; 12 | const resolved_msg = 'resolved'; 13 | let local_error = resolved_msg; 14 | // connection status indicators 15 | 16 | const set_max_train_util = () => { 17 | if (websocket.readyState === WebSocket.OPEN) { 18 | dispatch({ 19 | type: 'write', 20 | path: 'renderingState/targetTrainUtil', 21 | data: 0.9, 22 | }); 23 | const cmd = 'write'; 24 | const path = 'renderingState/targetTrainUtil'; 25 | const data = { 26 | type: cmd, 27 | path, 28 | data: 0.9, 29 | }; 30 | const message = msgpack.encode(data); 31 | websocket.send(message); 32 | } 33 | }; 34 | 35 | const set_small_resolution = () => { 36 | if (websocket.readyState === WebSocket.OPEN) { 37 | dispatch({ 38 | type: 'write', 39 | path: 'renderingState/maxResolution', 40 | data: 512, 41 | }); 42 | const cmd = 'write'; 43 | const path = 'renderingState/maxResolution'; 44 | const data = { 45 | type: cmd, 46 | path, 47 | data: 512, 48 | }; 49 | const message = msgpack.encode(data); 50 | websocket.send(message); 51 | } 52 | }; 53 | 54 | const set_log_message = () => { 55 | if (websocket.readyState === WebSocket.OPEN) { 56 | dispatch({ 57 | type: 'write', 58 | path: 'renderingState/log_errors', 59 | data: resolved_msg, 60 | }); 61 | const cmd = 'write'; 62 | const path = 'renderingState/log_errors'; 63 | const data = { 64 | type: cmd, 65 | path, 66 | data: resolved_msg, 67 | }; 68 | const message = msgpack.encode(data); 69 | websocket.send(message); 70 | } 71 | }; 72 | 73 | const check_error = useSelector((state) => { 74 | local_error = state.renderingState.log_errors; 75 | if 
(local_error.includes(gpu_oom_error_msg)) { 76 | console.log(local_error); 77 | set_log_message(); 78 | set_small_resolution(); 79 | set_max_train_util(); 80 | } 81 | }); 82 | 83 | useEffect(() => {}, [check_error, local_error]); 84 | 85 | return null; 86 | } 87 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/RenderModal/RenderModal.jsx: -------------------------------------------------------------------------------- 1 | /* eslint-disable react/jsx-props-no-spreading */ 2 | import * as React from 'react'; 3 | 4 | import { Box, Button, Modal, TextField, Typography } from '@mui/material'; 5 | import { useSelector } from 'react-redux'; 6 | import ContentCopyRoundedIcon from '@mui/icons-material/ContentCopyRounded'; 7 | 8 | interface RenderModalProps { 9 | open: object; 10 | setOpen: object; 11 | } 12 | 13 | export default function RenderModal(props: RenderModalProps) { 14 | const open = props.open; 15 | const setOpen = props.setOpen; 16 | 17 | // redux store state 18 | const config_base_dir = useSelector( 19 | (state) => state.renderingState.config_base_dir, 20 | ); 21 | 22 | // react state 23 | const [filename, setFilename] = React.useState('render_output'); 24 | 25 | const handleClose = () => setOpen(false); 26 | 27 | // Copy the text inside the text field 28 | const config_filename = `${config_base_dir}/config.yml`; 29 | const camera_path_filename = `${config_base_dir}/camera_path.json`; 30 | const cmd = `ns-render --load-config ${config_filename} --traj filename --camera-path-filename ${camera_path_filename} --output-path renders/${filename}.mp4`; 31 | 32 | const text_intro = `To render a full resolution video, run the following command in a terminal.`; 33 | 34 | const handleCopy = () => { 35 | navigator.clipboard.writeText(cmd); 36 | }; 37 | 38 | return ( 39 |
40 | 46 | 47 | 52 |
53 |

Rendering

54 |

55 | {text_intro} 56 |
57 | The video will be saved to{' '} 58 | 59 | ./renders/{filename}.mp4 60 | 61 | . 62 |

63 | { 70 | setFilename(e.target.value); 71 | }} 72 | value={filename} 73 | variant="standard" 74 | /> 75 |
{cmd}
76 |
77 | 86 |
87 |
88 |
89 |
90 |
91 |
92 | ); 93 | } 94 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/RenderModal/index.jsx: -------------------------------------------------------------------------------- 1 | import RenderModal from './RenderModal'; 2 | 3 | export default RenderModal; 4 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/RenderWindow/RenderWindow.jsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react'; 2 | import { useSelector } from 'react-redux'; 3 | 4 | export default function RenderWindow() { 5 | const render_img = useSelector((state) => state.render_img); 6 | 7 | return ( 8 |
9 | Render window 10 |
11 | ); 12 | } -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/SidePanel/CameraPanel/curve.js: -------------------------------------------------------------------------------- 1 | // Code for creating a curve from a set of points 2 | 3 | import * as THREE from 'three'; 4 | 5 | function get_catmull_rom_curve(list_of_3d_vectors, is_cycle, smoothness_value) { 6 | // TODO: add some hyperparameters to this function 7 | const curve = new THREE.CatmullRomCurve3( 8 | list_of_3d_vectors, 9 | is_cycle, 10 | // 'centripetal' 11 | 'catmullrom', 12 | smoothness_value, 13 | ); 14 | return curve; 15 | } 16 | 17 | export function get_curve_object_from_cameras( 18 | cameras, 19 | is_cycle, 20 | smoothness_value, 21 | ) { 22 | if (cameras.length === 0) { 23 | return null; 24 | } 25 | // interpolate positions, lookat directions, and ups 26 | // similar to 27 | // https://github.com/google-research/multinerf/blob/1c8b1c552133cdb2de1c1f3c871b2813f6662265/internal/camera_utils.py#L281 28 | 29 | const positions = []; 30 | const lookats = []; 31 | const ups = []; 32 | const fovs = []; 33 | 34 | for (let i = 0; i < cameras.length; i += 1) { 35 | const camera = cameras[i]; 36 | 37 | const up = new THREE.Vector3(0, 1, 0); // y is up in local space 38 | const lookat = new THREE.Vector3(0, 0, 1); // z is forward in local space 39 | 40 | up.applyQuaternion(camera.quaternion); 41 | lookat.applyQuaternion(camera.quaternion); 42 | 43 | positions.push(camera.position); 44 | ups.push(up); 45 | lookats.push(lookat); 46 | // Reuse catmullromcurve3 for 1d values. TODO fix this 47 | fovs.push(new THREE.Vector3(0, 0, camera.fov)); 48 | } 49 | 50 | let curve_positions = null; 51 | let curve_lookats = null; 52 | let curve_ups = null; 53 | let curve_fovs = null; 54 | 55 | curve_positions = get_catmull_rom_curve(positions, is_cycle, smoothness_value); 56 | curve_lookats = get_catmull_rom_curve(lookats, is_cycle, smoothness_value); 57 | curve_ups = get_catmull_rom_curve(ups, is_cycle, smoothness_value); 58 | curve_fovs = get_catmull_rom_curve(fovs, is_cycle, smoothness_value / 10); 59 | 60 | const curve_object = { 61 | curve_positions, 62 | curve_lookats, 63 | curve_ups, 64 | curve_fovs, 65 | }; 66 | return curve_object; 67 | } 68 | 69 | export function get_transform_matrix(position, lookat, up) { 70 | // normalize the vectors 71 | lookat.normalize(); 72 | // make up orthogonal to lookat 73 | const up_proj = lookat.clone().multiplyScalar(up.dot(lookat)); 74 | up.sub(up_proj); 75 | up.normalize(); 76 | 77 | // create a copy of the vector up 78 | const up_copy = up.clone(); 79 | const cross = up_copy.cross(lookat); 80 | cross.normalize(); 81 | 82 | // create the camera transform matrix 83 | const mat = new THREE.Matrix4(); 84 | mat.set( 85 | cross.x, 86 | up.x, 87 | lookat.x, 88 | position.x, 89 | cross.y, 90 | up.y, 91 | lookat.y, 92 | position.y, 93 | cross.z, 94 | up.z, 95 | lookat.z, 96 | position.z, 97 | 0, 98 | 0, 99 | 0, 100 | 1, 101 | ); 102 | return mat; 103 | } 104 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/SidePanel/CameraPanel/index.jsx: -------------------------------------------------------------------------------- 1 | import CameraPanel from './CameraPanel'; 2 | 3 | export default CameraPanel; 4 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/SidePanel/ExportPanel/index.jsx: 
-------------------------------------------------------------------------------- 1 | import ExportPanel from './ExportPanel'; 2 | 3 | export default ExportPanel; 4 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/SidePanel/ScenePanel/index.jsx: -------------------------------------------------------------------------------- 1 | import ScenePanel from './ScenePanel'; 2 | 3 | export default ScenePanel; 4 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/SidePanel/StatusPanel/index.jsx: -------------------------------------------------------------------------------- 1 | import StatusPanel from './StatusPanel'; 2 | 3 | export default StatusPanel; 4 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/ViewerWindow/ViewerWindowSlice.js: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/nerfstudio/viewer/app/src/modules/ViewerWindow/ViewerWindowSlice.js -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/ViewportControlsModal/ViewportControlsModal.jsx: -------------------------------------------------------------------------------- 1 | /* eslint-disable react/jsx-props-no-spreading */ 2 | import * as React from 'react'; 3 | 4 | import { Box, Button, Modal } from '@mui/material'; 5 | import KeyboardIcon from '@mui/icons-material/Keyboard'; 6 | 7 | export default function ControlsModal() { 8 | const [open, setOpen] = React.useState(false); 9 | const handleOpen = () => setOpen(true); 10 | const handleClose = () => setOpen(false); 11 | 12 | return ( 13 |
14 | 23 | 29 | 30 |
31 | The favicon. 36 | Controls diagram 41 |
42 |
43 |
44 |
45 | ); 46 | } 47 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/ViewportControlsModal/index.jsx: -------------------------------------------------------------------------------- 1 | import ViewportControlsModal from './ViewportControlsModal'; 2 | 3 | export default ViewportControlsModal; 4 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/WebSocket/WebSocket.jsx: -------------------------------------------------------------------------------- 1 | // Much of this code comes from or is inspired by: 2 | // https://www.pluralsight.com/guides/using-web-sockets-in-your-reactredux-app 3 | 4 | import React, { createContext, useEffect } from 'react'; 5 | import { useDispatch, useSelector } from 'react-redux'; 6 | 7 | import PropTypes from 'prop-types'; 8 | 9 | const WebSocketContext = createContext(null); 10 | 11 | export { WebSocketContext }; 12 | 13 | export default function WebSocketContextFunction({ children }) { 14 | const dispatch = useDispatch(); 15 | let ws = null; 16 | let socket = null; 17 | 18 | // this code will rerender anytime the websocket url changes 19 | const websocket_url = useSelector( 20 | (state) => state.websocketState.websocket_url, 21 | ); 22 | 23 | const connect = () => { 24 | // of the form wss://ip_address:port 25 | console.log(websocket_url); 26 | try { 27 | socket = new WebSocket(websocket_url); 28 | } catch (error) { 29 | socket = new WebSocket('ws://localhost:7007'); 30 | } 31 | socket.binaryType = 'arraybuffer'; 32 | socket.onopen = () => { 33 | dispatch({ 34 | type: 'write', 35 | path: 'websocketState/isConnected', 36 | data: true, 37 | }); 38 | }; 39 | 40 | socket.onclose = () => { 41 | // when closed, the websocket will try to reconnect every second 42 | dispatch({ 43 | type: 'write', 44 | path: 'websocketState/isConnected', 45 | data: false, 46 | }); 47 | }; 48 | 49 | socket.onerror = (err) => { 50 | console.error( 51 | 'Socket encountered error: ', 52 | err.message, 53 | 'Closing socket', 54 | ); 55 | socket.close(); 56 | }; 57 | return socket; 58 | }; 59 | 60 | useEffect(() => { 61 | // cleanup function to close the websocket on rerender 62 | return () => { 63 | if (socket !== null) { 64 | socket.close(); 65 | } 66 | }; 67 | }, [websocket_url]); 68 | 69 | connect(); 70 | ws = { 71 | socket, 72 | }; 73 | 74 | return ( 75 | {children} 76 | ); 77 | } 78 | 79 | WebSocketContextFunction.propTypes = { 80 | children: PropTypes.node.isRequired, 81 | }; 82 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/WebSocketUrlField.jsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react'; 2 | 3 | import { TextField, Link } from '@mui/material'; 4 | import { useDispatch, useSelector } from 'react-redux'; 5 | 6 | export default function WebSocketUrlField() { 7 | const dispatch = useDispatch(); 8 | 9 | // websocket url 10 | const websocket_url = useSelector( 11 | (state) => state.websocketState.websocket_url, 12 | ); 13 | const websocket_url_onchange = (event) => { 14 | const value = event.target.value; 15 | dispatch({ 16 | type: 'write', 17 | path: 'websocketState/websocket_url', 18 | data: value, 19 | }); 20 | }; 21 | 22 | const testWebSocket = (url) => { 23 | try { 24 | // eslint-disable-next-line no-new 25 | new WebSocket(url); 26 | return false; 27 | } catch (error) { 28 | return true; 29 | } 30 | }; 31 | 32 
-------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/modules/WebSocketUrlField.jsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react'; 2 | 3 | import { TextField, Link } from '@mui/material'; 4 | import { useDispatch, useSelector } from 'react-redux'; 5 | 6 | export default function WebSocketUrlField() { 7 | const dispatch = useDispatch(); 8 | 9 | // websocket url 10 | const websocket_url = useSelector( 11 | (state) => state.websocketState.websocket_url, 12 | ); 13 | const websocket_url_onchange = (event) => { 14 | const value = event.target.value; 15 | dispatch({ 16 | type: 'write', 17 | path: 'websocketState/websocket_url', 18 | data: value, 19 | }); 20 | }; 21 | 22 | const testWebSocket = (url) => { 23 | try { 24 | // eslint-disable-next-line no-new 25 | new WebSocket(url); 26 | return false; // the url parses as a valid websocket address 27 | } catch (error) { 28 | return true; // the constructor threw, so the url is malformed 29 | } 30 | }; 31 | 32 | return ( 33 | <div> 34 | <TextField 35 | label="WebSocket URL" 36 | variant="outlined" 37 | size="small" 38 | value={websocket_url} 39 | error={testWebSocket(websocket_url)} 40 | onChange={websocket_url_onchange} 41 | /> 42 | <Link 43 | href={`https://viewer.nerf.studio/?websocket_url=${websocket_url}`} 44 | > 45 | viewer.nerf.studio?websocket_url={websocket_url} 46 | </Link> 47 | </div>
48 | ); 49 | } 50 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/reducer.js: -------------------------------------------------------------------------------- 1 | import { split_path } from './utils'; 2 | 3 | const initialState = { 4 | // the websocket connection state 5 | websocketState: { 6 | isConnected: false, 7 | websocket_url: 'ws://localhost:7007', 8 | }, 9 | // for sending actual commands to the client 10 | camera_path_payload: null, 11 | render_img: null, // The rendered images 12 | // the rendering state 13 | renderingState: { 14 | // cameras 15 | camera_choice: 'Main Camera', // the camera being used to render the scene 16 | 17 | // camera path information 18 | config_base_dir: 'config_base_dir', // the base directory of the config file 19 | render_height: 1080, 20 | render_width: 1920, 21 | field_of_view: 50, 22 | 23 | isTraining: true, 24 | output_options: ['rgb'], // populated by the possible Graph outputs 25 | output_choice: 'rgb', // the selected output 26 | colormap_options: ['default'], // populated by the output choice 27 | colormap_choice: 'default', // the selected colormap 28 | maxResolution: 1024, 29 | targetTrainUtil: 0.9, 30 | eval_res: '?', 31 | train_eta: 'Paused', 32 | vis_train_ratio: 'Paused', 33 | log_errors: '', 34 | renderTime: 0.0, 35 | 36 | // export options 37 | clipping_enabled: true, 38 | clipping_center: [0.0, 0.0, 0.0], 39 | clipping_box_scale: [2.0, 2.0, 2.0], 40 | }, 41 | // the scene state 42 | sceneState: { 43 | sceneBox: null, 44 | cameras: null, 45 | }, 46 | }; 47 | 48 | function setData(newState, state, path, data) { 49 | if (path.length === 1 && path[0] === 'colormap_options') { // writing new colormap options invalidates the current choice 50 | newState.colormap_choice = 'default'; // eslint-disable-line no-param-reassign 51 | } 52 | if (path.length === 1) { 53 | newState[path[0]] = data; // eslint-disable-line no-param-reassign 54 | } else { 55 | newState[path[0]] = { ...state[path[0]] }; // eslint-disable-line no-param-reassign 56 | setData(newState[path[0]], state[path[0]], path.slice(1), data); 57 | } 58 | } 59 | 60 | // Use the initialState as a default value 61 | // eslint-disable-next-line default-param-last 62 | export default function rootReducer(state = initialState, action) { 63 | // The reducer normally looks at the action type field to decide what happens 64 | 65 | switch (action.type) { 66 | case 'write': { 67 | const path = split_path(action.path); // convert string with "/"s to a list 68 | const data = action.data; 69 | const newState = { ...state }; 70 | setData(newState, state, path, data); 71 | return newState; 72 | } 73 | default: 74 | // If this reducer doesn't recognize the action type, or doesn't 75 | // care about this specific action, return the existing state unchanged 76 | return state; 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/setupTests.js: -------------------------------------------------------------------------------- 1 | // jest-dom adds custom jest matchers for asserting on DOM nodes.
2 | // allows you to do things like: 3 | // expect(element).toHaveTextContent(/react/i) 4 | // learn more: https://github.com/testing-library/jest-dom 5 | import '@testing-library/jest-dom/extend-expect'; 6 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/store.js: -------------------------------------------------------------------------------- 1 | import { configureStore } from '@reduxjs/toolkit'; 2 | import rootReducer from './reducer'; 3 | 4 | export default configureStore({ reducer: rootReducer }); 5 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/subscriber.js: -------------------------------------------------------------------------------- 1 | import { useContext } from 'react'; 2 | import { ReactReduxContext } from 'react-redux'; 3 | 4 | export function subscribe_to_changes(selector_fn, fn) { 5 | // selector_fn: returns a value from the redux state 6 | // fn: callback invoked as fn(previous, current) 7 | // whenever the selected value changes 8 | const { store } = useContext(ReactReduxContext); 9 | 10 | let current; 11 | const handleChange = () => { 12 | const previous = current; 13 | current = selector_fn(store.getState()); 14 | if (previous !== current) { 15 | fn(previous, current); 16 | } 17 | }; 18 | store.subscribe(handleChange); 19 | } 20 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/themes/leva_theme.json: -------------------------------------------------------------------------------- 1 | { 2 | "colors": { 3 | "elevation1": "#292d39", 4 | "elevation2": "#222831", 5 | "elevation3": "#393E46", 6 | "accent1": "#ffc640", 7 | "accent2": "#FFD369", 8 | "accent3": "#ffd65c", 9 | "highlight1": "#d1d4db", 10 | "highlight2": "#EEEEEE", 11 | "highlight3": "#222831", 12 | "disabled": "#595959", 13 | "vivid1": "#ffcc00" 14 | }, 15 | "radii": { 16 | "xs": "2px", 17 | "sm": "4px", 18 | "lg": "10px" 19 | }, 20 | "space": { 21 | "sm": "6px", 22 | "md": "5px", 23 | "rowGap": "6px", 24 | "colGap": "7px" 25 | }, 26 | "fontSizes": { 27 | "root": "11px" 28 | }, 29 | "sizes": { 30 | "rootWidth": "310px", 31 | "controlWidth": "170px", 32 | "scrubberWidth": "8px", 33 | "scrubberHeight": "16px", 34 | "rowHeight": "24px", 35 | "folderHeight": "20px", 36 | "checkboxSize": "16px", 37 | "joystickWidth": "100px", 38 | "joystickHeight": "100px", 39 | "colorPickerWidth": "160px", 40 | "colorPickerHeight": "100px", 41 | "monitorHeight": "60px", 42 | "titleBarHeight": "39px" 43 | }, 44 | "borderWidths": { 45 | "root": "0px", 46 | "input": "1px", 47 | "focus": "1px", 48 | "hover": "1px", 49 | "active": "1px", 50 | "folder": "1px" 51 | }, 52 | "fontWeights": { 53 | "label": "normal", 54 | "folder": "normal", 55 | "button": "normal" 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /nerfstudio/viewer/app/src/utils.js: -------------------------------------------------------------------------------- 1 | export function split_path(path_str) { 2 | return path_str.split('/').filter((x) => x.length > 0); 3 | } 4 | -------------------------------------------------------------------------------- /nerfstudio/viewer/server/README.md: -------------------------------------------------------------------------------- 1 | # Python Kernel and Client Viewer App communication 2 | 3 | > The purpose of this document is to explain how to communicate from Python with the Client Viewer app. We will eventually move this into the Read the Docs documentation. 4 | 5 | - Python Kernel (nerfstudio code) 6 | - Bridge Server 7 | - Client Viewer App 8 | 9 | We have two types of components whose state we want to keep updated in both locations. 10 | 11 | - Widgets 12 | - The widgets are used to keep track of the values of the UI controls. 13 | - SceneNode 14 | - The scene nodes are used to represent the three.js objects. The properties relevant to these objects are the following: `"object", "transform", "properties"`. 15 | 16 | # Checklist 17 | 18 | - [ ] Currently using request-reply (REQ, REP with zmq). I.e., Python Kernel -> Bridge Server <-> Client Viewer App. We want a way to update the Python Kernel when the Bridge Server is updated. This requires some form of binding with callbacks: when the Bridge Server is updated, we want to update the bound Python variable. We can take inspiration from [ipywidgets](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20Basics.html); a sketch of the pattern follows below. 19 | 20 | - [ ] 21 |
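A minimal sketch of the binding pattern the checklist item above is after, in the spirit of ipywidgets traits. All names here (`BoundValue`, `observe`) are illustrative, not existing nerfstudio APIs:

```python
class BoundValue:
    """An observable value: registered callbacks fire whenever the value is written."""

    def __init__(self, value=None):
        self._value = value
        self._callbacks = []

    def observe(self, callback):
        """Register a callback invoked as callback(old, new) on every write."""
        self._callbacks.append(callback)

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, new):
        old, self._value = self._value, new
        for callback in self._callbacks:
            callback(old, new)


# The bridge server would write `field_of_view.value` when the client sends an
# update, and the Python kernel reacts through the registered callback.
field_of_view = BoundValue(50)
field_of_view.observe(lambda old, new: print(f"fov changed: {old} -> {new}"))
field_of_view.value = 70  # prints: fov changed: 50 -> 70
```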
-------------------------------------------------------------------------------- /nerfstudio/viewer/server/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /nerfstudio/viewer/server/path.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
14 | 15 | """Path class 16 | """ 17 | 18 | 19 | from typing import Tuple 20 | 21 | UNICODE = str 22 | 23 | 24 | class Path: 25 | """Path class 26 | 27 | Args: 28 | entries: component parts of the path 29 | """ 30 | 31 | __slots__ = ["entries"] 32 | 33 | def __init__(self, entries: Tuple = tuple()): 34 | self.entries = entries 35 | 36 | def append(self, other: str) -> "Path": 37 | """Method that appends new component(s) and returns a new Path 38 | 39 | Args: 40 | other: slash-separated component(s) to append; an empty component (e.g. from a leading "/") resets the path 41 | """ 42 | new_path = self.entries 43 | for element in other.split("/"): 44 | if len(element) == 0: 45 | new_path = tuple() # an absolute component resets the accumulated path 46 | else: 47 | new_path = new_path + (element,) 48 | return Path(new_path) 49 | 50 | def lower(self): 51 | """Convert path object to serializable format""" 52 | return UNICODE("/" + "/".join(self.entries)) 53 | 54 | def __hash__(self): 55 | return hash(self.entries) 56 | 57 | def __eq__(self, other): 58 | return self.entries == other.entries 59 | -------------------------------------------------------------------------------- /nerfstudio/viewer/server/state/node.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | For tree logic code. 17 | """ 18 | 19 | from collections import defaultdict 20 | from typing import Callable 21 | 22 | 23 | class Node(defaultdict): 24 | """ 25 | The base class Node. 26 | """ 27 | 28 | def __init__(self, *args, **kwargs): 29 | super().__init__(*args, **kwargs) 30 | 31 | 32 | def get_tree(node_class: Callable) -> Node: 33 | """ 34 | Get a tree from a node class. 35 | This allows one to do tree["path"]["to"]["node"] 36 | and it will return a new node if it doesn't exist 37 | or the current node if it does. 38 | """ 39 | assert isinstance(node_class(), Node) 40 | tree = lambda: node_class(tree) # each missing key creates a fresh node whose children autovivify the same way 41 | return tree() 42 | 43 | 44 | def find_node(tree, path): # returns the node at the given path; missing nodes are created by the defaultdict 45 | if len(path) == 0: 46 | return tree 47 | else: 48 | return find_node(tree[path[0]], path[1:]) 49 | 50 | 51 | def set_node_value(tree, path, value): # sets the .data attribute of the node at the given path 52 | if len(path) == 0: 53 | tree.data = value 54 | else: 55 | set_node_value(tree[path[0]], path[1:], value) 56 | 57 | 58 | def walk(path, tree): 59 | """Walk the entire tree, yielding (path, node) pairs 60 | Args: 61 | path: the path prefix accumulated so far; tree: the root of the tree to start the search 62 | """ 63 | yield path, tree 64 | for k, v in tree.items(): 65 | yield from walk(path + "/" + k, v) 66 |
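A short usage sketch of these helpers (values are illustrative only; it uses the StateNode subclass from state_node.py below, which supplies the data attribute that set_node_value writes):

from nerfstudio.viewer.server.state.node import find_node, get_tree, set_node_value, walk
from nerfstudio.viewer.server.state.state_node import StateNode

tree = get_tree(StateNode)
set_node_value(tree, ["renderingState", "output_choice"], "rgb")  # intermediate nodes are created on the fly
assert find_node(tree, ["renderingState", "output_choice"]).data == "rgb"
for node_path, _node in walk("", tree):
    print(node_path)  # prints "", then "/renderingState", then "/renderingState/output_choice"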
-------------------------------------------------------------------------------- /nerfstudio/viewer/server/state/state_node.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from typing import List 16 | 17 | from nerfstudio.viewer.server.state.node import Node 18 | 19 | 20 | class StateNode(Node): 21 | """Node that holds a hierarchy of state nodes""" 22 | 23 | __slots__ = ["data", "path"] 24 | 25 | def __init__(self, *args, **kwargs): 26 | super().__init__(*args, **kwargs) 27 | self.path = None 28 | self.data = None 29 | -------------------------------------------------------------------------------- /scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/scripts/__init__.py -------------------------------------------------------------------------------- /scripts/benchmarking/launch_eval_blender.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | helpFunction_launch_eval() 4 | { 5 | echo "Usage: $0 -m <method_name> -o <output_dir> -t <timestamp> [-s] [<gpu_list>]" 6 | echo -e "\t-m <method_name>: name of the method to benchmark (e.g. nerfacto, instant-ngp)" 7 | echo -e "\t-o <output_dir>: base directory where all the benchmarks are stored (e.g. outputs/)" 8 | echo -e "\t-t <timestamp>: if using launch_train_blender.sh, the timestamp will be of format %Y-%m-%d_%H%M%S" 9 | echo -e "\t-s: launch a single evaluation job per gpu." 10 | echo -e "\t<gpu_list> [OPTIONAL]: list of space-separated gpu numbers to launch eval on (e.g. 0 2 4 5)" 11 | exit 1 # Exit program after printing help 12 | } 13 | 14 | single=false 15 | while getopts "m:o:t:s" opt; do 16 | case "$opt" in 17 | m ) method_name="$OPTARG" ;; 18 | o ) output_dir="$OPTARG" ;; 19 | t ) timestamp="$OPTARG" ;; 20 | s ) single=true ;; 21 | ? ) helpFunction_launch_eval ;; 22 | esac 23 | done 24 | 25 | if [ -z "$method_name" ]; then 26 | echo "Missing method name" 27 | helpFunction_launch_eval 28 | fi 29 | 30 | if [ -z "$output_dir" ]; then 31 | echo "Missing output directory location" 32 | helpFunction_launch_eval 33 | fi 34 | 35 | if [ -z "$timestamp" ]; then 36 | echo "Missing timestamp specification" 37 | helpFunction_launch_eval 38 | fi 39 | 40 | shift $((OPTIND-1)) 41 | 42 | # Deal with GPUs. If passed in, use those. 43 | GPU_IDX=("$@") 44 | if [ -z "${GPU_IDX[0]+x}" ]; then 45 | echo "no gpus set... finding available gpus" 46 | # Find available devices 47 | num_device=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) 48 | START=0 49 | END=${num_device}-1 50 | GPU_IDX=() 51 | 52 | for (( id=START; id<=END; id++ )); do 53 | free_mem=$(nvidia-smi --query-gpu=memory.free --format=csv -i $id | grep -Eo '[0-9]+') 54 | if [[ $free_mem -gt 10000 ]]; then 55 | GPU_IDX+=( "$id" ) 56 | fi 57 | done 58 | fi 59 | echo "available gpus...
${GPU_IDX[*]}" 60 | 61 | DATASETS=("mic" "ficus" "chair" "hotdog" "materials" "drums" "ship" "lego") 62 | idx=0 63 | len=${#GPU_IDX[@]} 64 | GPU_PID=() 65 | # kill all the background jobs if terminated: 66 | trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT 67 | 68 | for dataset in "${DATASETS[@]}"; do 69 | if "$single" && [ -n "${GPU_PID[$idx]+x}" ]; then 70 | wait "${GPU_PID[$idx]}" 71 | fi 72 | export CUDA_VISIBLE_DEVICES=${GPU_IDX[$idx]} 73 | config_path="${output_dir}/blender_${dataset}_${timestamp::-7}/${method_name}/${timestamp}/config.yml" 74 | ns-eval --load-config="${config_path}" \ 75 | --output-path="${output_dir}/${method_name}/blender_${dataset}_${timestamp}.json" & GPU_PID[$idx]=$! 76 | echo "Launched ${config_path} on gpu ${GPU_IDX[$idx]}" 77 | 78 | # update gpu 79 | ((idx=(idx+1)%len)) 80 | done 81 | wait 82 | echo "Done." 83 | -------------------------------------------------------------------------------- /scripts/completions/.gitignore: -------------------------------------------------------------------------------- 1 | bash/ 2 | zsh/ 3 | -------------------------------------------------------------------------------- /scripts/completions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/scripts/completions/__init__.py -------------------------------------------------------------------------------- /scripts/completions/setup.bash: -------------------------------------------------------------------------------- 1 | # nerfstudio completions for bash. 2 | # 3 | # This should generally be installed automatically by `configure.py`. 4 | 5 | completions_dir="$(dirname "$BASH_SOURCE")"/bash 6 | 7 | if [ ! -d "${completions_dir}" ]; then 8 | echo "$0: Completions are missing!" 9 | echo "Please generate them with nerfstudio/scripts/completions/generate.py!" 10 | return 1 11 | fi 12 | 13 | # Source each completion script. 14 | for completion_path in ${completions_dir}/* 15 | do 16 | source $completion_path 17 | done 18 | -------------------------------------------------------------------------------- /scripts/completions/setup.zsh: -------------------------------------------------------------------------------- 1 | # nerfstudio completions for zsh. 2 | # 3 | # This should generally be installed automatically by `configure.py`. 4 | 5 | completions_dir="${0:a:h}"/zsh 6 | 7 | if [ ! -d "${completions_dir}" ]; then 8 | echo "$0: Completions are missing!" 9 | echo "Please generate them with nerfstudio/scripts/completions/generate.py!" 10 | return 1 11 | fi 12 | 13 | # Manually load and define each completion. 14 | # 15 | # Adding the completions directory to our fpath and re-initializing would work 16 | # as well: 17 | # fpath+=${completions_dir} 18 | # autoload -Uz compinit; compinit 19 | # But would be several orders of magnitude slower. 
20 | for completion_path in ${completions_dir}/* 21 | do 22 | # /some/path/to/_our_completion_py => _our_completion_py 23 | completion_name=${completion_path##*/} 24 | if [[ $completion_name == *_py ]]; then 25 | # _our_completion_py => our_completion.py 26 | script_name="${completion_name:1:-3}.py" 27 | else 28 | # _entry-point => entry-point 29 | script_name="${completion_name:1}" 30 | fi 31 | 32 | autoload -Uz $completion_path 33 | compdef $completion_name $script_name 34 | done 35 | -------------------------------------------------------------------------------- /scripts/docs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/scripts/docs/__init__.py -------------------------------------------------------------------------------- /scripts/docs/add_nb_tags.py: -------------------------------------------------------------------------------- 1 | """Helper that adds tags to notebooks based on cell comments.""" 2 | 3 | import sys 4 | from glob import glob 5 | 6 | import nbformat as nbf 7 | import tyro 8 | from rich.console import Console 9 | 10 | CONSOLE = Console(width=120) 11 | 12 | 13 | def main(check: bool = False): 14 | """Add tags to notebooks based on cell comments. 15 | 16 | In notebook cells, you can add the following tags to the notebook by adding a comment: 17 | "# HIDDEN" - This cell will be hidden from the notebook. 18 | "# OUTPUT_ONLY" - This cell will only show the output. 19 | "# COLLAPSED" - Hide the code and include a button to show the code. 20 | 21 | Args: 22 | check: if True, only report incorrect metadata and do not modify the notebooks. 23 | """ 24 | # Collect a list of all notebooks in the content folder 25 | notebooks = glob("./docs/**/*.ipynb", recursive=True) 26 | 27 | # Text to look for in adding tags 28 | text_search_dict = { 29 | "# HIDDEN": "remove-cell", # Remove the whole cell 30 | "# OUTPUT_ONLY": "remove-input", # Remove only the input 31 | "# COLLAPSED": "hide-input", # Hide the input w/ a button to show 32 | } 33 | 34 | # Search through each notebook and look for the text, add a tag if necessary 35 | any_missing = False 36 | for ipath in notebooks: 37 | ntbk = nbf.read(ipath, nbf.NO_CONVERT) 38 | 39 | incorrect_metadata = False 40 | for cell in ntbk.cells: 41 | cell_tags = cell.get("metadata", {}).get("tags", []) 42 | found_keys = [] 43 | found_tags = [] 44 | for key, val in text_search_dict.items(): 45 | if key in cell.source: 46 | found_keys.append(key) 47 | found_tags.append(val) 48 | 49 | if len(found_keys) > 1: 50 | CONSOLE.print(f"[bold yellow]Found multiple tags {found_keys} for {ipath}") 51 | sys.exit(1) 52 | 53 | if len(cell_tags) != len(found_tags): 54 | incorrect_metadata = True 55 | elif len(cell_tags) == 1 and len(found_keys) == 1: 56 | if found_tags[0] != cell_tags[0]: 57 | incorrect_metadata = True 58 | 59 | cell["metadata"]["tags"] = found_tags 60 | if incorrect_metadata: 61 | if check: 62 | CONSOLE.print( 63 | f"[bold yellow]{ipath} has incorrect metadata. Call `python scripts/docs/add_nb_tags.py` to add it."
64 | ) 65 | any_missing = True 66 | else: 67 | print(f"Adding metadata to {ipath}") 68 | nbf.write(ntbk, ipath) 69 | 70 | if not any_missing: 71 | CONSOLE.print("[green]All notebooks have correct metadata.") 72 | 73 | if check and any_missing: 74 | sys.exit(1) 75 | 76 | 77 | if __name__ == "__main__": 78 | tyro.extras.set_accent_color("bright_yellow") 79 | tyro.cli(main) 80 | -------------------------------------------------------------------------------- /scripts/docs/build_docs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Build the documentation locally.""" 3 | import subprocess 4 | import sys 5 | 6 | import tyro 7 | from rich.console import Console 8 | from rich.style import Style 9 | 10 | CONSOLE = Console(width=120) 11 | 12 | LOCAL_TESTS = ["Run license checks", "Run Black", "Python Pylint", "Test with pytest"] 13 | 14 | 15 | def run_command(command: str) -> None: 16 | """Run a command and exit if it fails 17 | 18 | Args: 19 | command: command to run 20 | """ 21 | ret_code = subprocess.call(command, shell=True) 22 | if ret_code != 0: 23 | CONSOLE.print(f"[bold red]Error: `{command}` failed. Exiting...") 24 | sys.exit(1) 25 | 26 | 27 | def main(clean_cache: bool = False): 28 | """Build the documentation locally. 29 | 30 | Args: 31 | clean_cache: whether to clean the cache before building docs. 32 | """ 33 | 34 | CONSOLE.print("[green]Adding notebook documentation metadata") 35 | run_command("python scripts/docs/add_nb_tags.py") 36 | 37 | # Add checks for building documentation 38 | CONSOLE.print("[green]Building Documentation") 39 | if clean_cache: 40 | run_command("cd docs/; make clean; make html SPHINXOPTS='-W;'") 41 | else: 42 | run_command("cd docs/; make html SPHINXOPTS='-W;'") 43 | 44 | CONSOLE.line() 45 | CONSOLE.rule(characters="=", style=Style(color="green")) 46 | CONSOLE.print("[bold green]Done") 47 | CONSOLE.rule(characters="=", style=Style(color="green")) 48 | 49 | 50 | if __name__ == "__main__": 51 | tyro.extras.set_accent_color("bright_yellow") 52 | tyro.cli(main) 53 | -------------------------------------------------------------------------------- /scripts/downloads/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/scripts/downloads/__init__.py -------------------------------------------------------------------------------- /scripts/eval.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | eval.py 4 | """ 5 | from __future__ import annotations 6 | 7 | import json 8 | from dataclasses import dataclass 9 | from pathlib import Path 10 | 11 | import cv2 12 | import numpy as np 13 | import torch 14 | import tyro 15 | from rich.console import Console 16 | 17 | from nerfstudio.utils.eval_utils import eval_setup 18 | 19 | CONSOLE = Console(width=120) 20 | 21 | # speedup for when input size to model doesn't change (much) 22 | torch.backends.cudnn.benchmark = True # type: ignore 23 | torch.set_float32_matmul_precision("high") 24 | 25 | 26 | @dataclass 27 | class ComputePSNR: 28 | """Load a checkpoint, compute some PSNR metrics, and save it to a JSON file.""" 29 | 30 | # Path to config YAML file. 31 | load_config: Path 32 | # Name of the output file. 33 | output_path: Path = Path("output.json") 34 | # Name of the output images dir.
35 | output_images_path: Path = Path("output_images/") 36 | 37 | def main(self) -> None: 38 | """Main function.""" 39 | config, pipeline, checkpoint_path = eval_setup(self.load_config) 40 | assert self.output_path.suffix == ".json" 41 | metrics_dict, images_dict_list = pipeline.get_average_eval_image_metrics() 42 | self.output_path.parent.mkdir(parents=True, exist_ok=True) 43 | self.output_images_path.mkdir(parents=True, exist_ok=True) 44 | 45 | # Get the output and define the names to save to 46 | benchmark_info = { 47 | "experiment_name": config.experiment_name, 48 | "method_name": config.method_name, 49 | "checkpoint": str(checkpoint_path), 50 | "results": metrics_dict, 51 | } 52 | # Save output to output file 53 | self.output_path.write_text(json.dumps(benchmark_info, indent=2), "utf8") 54 | CONSOLE.print(f"Saved results to: {self.output_path}") 55 | 56 | for idx, images_dict in enumerate(images_dict_list): 57 | for k, v in images_dict.items(): 58 | cv2.imwrite( 59 | str(self.output_images_path / Path(f"{k}_{idx}.png")), 60 | (v.cpu().numpy() * 255.0).astype(np.uint8)[..., ::-1], 61 | ) 62 | CONSOLE.print(f"Saved rendering results to: {self.output_images_path}") 63 | 64 | 65 | def entrypoint(): 66 | """Entrypoint for use with pyproject scripts.""" 67 | tyro.extras.set_accent_color("bright_yellow") 68 | tyro.cli(ComputePSNR).main() 69 | 70 | 71 | if __name__ == "__main__": 72 | entrypoint() 73 | 74 | # For sphinx docs 75 | get_parser_fn = lambda: tyro.extras.get_parser(ComputePSNR) # noqa 76 | -------------------------------------------------------------------------------- /scripts/github/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/scripts/github/__init__.py -------------------------------------------------------------------------------- /scripts/licensing/copyright.txt: -------------------------------------------------------------------------------- 1 | # Copyright 2022 The Nerfstudio Team. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | -------------------------------------------------------------------------------- /scripts/licensing/license_headers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | VALID_ARGS=$(getopt -o c --long check -- "$@") 4 | 5 | eval set -- "$VALID_ARGS" 6 | check=false 7 | while [ : ]; do 8 | case "$1" in 9 | -c | --check) 10 | check=true 11 | shift 12 | ;; 13 | --) shift; 14 | break 15 | ;; 16 | esac 17 | done 18 | 19 | check_failed=false 20 | added_headers=false 21 | for i in $(find nerfstudio/ -name '*.py'); 22 | do 23 | if ! grep -q Copyright $i 24 | then 25 | if [ "$check" = true ]; 26 | then 27 | echo "$i missing copyright header" 28 | check_failed=true 29 | else 30 | cat scripts/licensing/copyright.txt $i >$i.new && mv $i.new $i 31 | echo "Adding license header to $i." 
32 | fi 33 | added_headers=true 34 | fi 35 | done 36 | 37 | if [ "$check_failed" = true ]; 38 | then 39 | echo "Run './scripts/licensing/license_headers.sh' to add missing headers." 40 | exit 1 41 | fi 42 | 43 | if [ "$added_headers" = false ]; 44 | then 45 | echo "No missing license headers found." 46 | fi 47 | 48 | exit 0 -------------------------------------------------------------------------------- /scripts/render.json: -------------------------------------------------------------------------------- 1 | { 2 | "background_color" : [ 1.0, 1.0, 1.0 ], 3 | "class_name" : "RenderOption", 4 | "default_mesh_color" : [ 0.69999999999999996, 0.69999999999999996, 0.69999999999999996 ], 5 | "image_max_depth" : 3000, 6 | "image_stretch_option" : 1, 7 | "interpolation_option" : 0, 8 | "light0_color" : [ 0.69999999999999996, 0.69999999999999996, 0.69999999999999996 ], 9 | "light0_diffuse_power" : 0.66000000000000014, 10 | "light0_position" : [ 0.0, 0.0, 2.0 ], 11 | "light0_specular_power" : 0.20000000000000001, 12 | "light0_specular_shininess" : 10.0, 13 | "light1_color" : [ 0.69999999999999996, 0.69999999999999996, 0.69999999999999996 ], 14 | "light1_diffuse_power" : 0.66000000000000014, 15 | "light1_position" : [ 0.0, 0.0, -2.0 ], 16 | "light1_specular_power" : 0.20000000000000001, 17 | "light1_specular_shininess" : 10.0, 18 | "light2_color" : [ 0.69999999999999996, 0.69999999999999996, 0.69999999999999996 ], 19 | "light2_diffuse_power" : 0.36000000000000015, 20 | "light2_position" : [ 2.0, 2.0, 0.0 ], 21 | "light2_specular_power" : 1.0000000000000001e-17, 22 | "light2_specular_shininess" : 10.0, 23 | "light3_color" : [ 0.69999999999999996, 0.69999999999999996, 0.69999999999999996 ], 24 | "light3_diffuse_power" : 0.10000000000000014, 25 | "light3_position" : [ -2.0, -2.0, 0.0 ], 26 | "light3_specular_power" : 9.9999999999999998e-17, 27 | "light3_specular_shininess" : 0.10000000000000001, 28 | "light_ambient_color" : [ 0.29999999999999999, 0.29999999999999999, 0.29999999999999999 ], 29 | "light_on" : true, 30 | "line_width" : 1.0, 31 | "mesh_color_option" : 1, 32 | "mesh_shade_option" : 10, 33 | "mesh_show_back_face" : false, 34 | "mesh_show_wireframe" : false, 35 | "point_color_option" : 0, 36 | "point_show_normal" : false, 37 | "point_size" : 5.0, 38 | "show_coordinate_frame" : false, 39 | "version_major" : 1, 40 | "version_minor" : 0 41 | } -------------------------------------------------------------------------------- /scripts/texture.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script to texture an existing mesh file. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | from dataclasses import dataclass 8 | from pathlib import Path 9 | from typing import Optional 10 | import torch 11 | import torchvision 12 | 13 | import tyro 14 | from rich.console import Console 15 | from typing_extensions import Literal 16 | 17 | from nerfstudio.exporter import texture_utils 18 | from nerfstudio.exporter.exporter_utils import get_mesh_from_filename 19 | from nerfstudio.utils.eval_utils import eval_setup 20 | 21 | CONSOLE = Console(width=120) 22 | 23 | 24 | @dataclass 25 | class TextureMesh: 26 | """ 27 | Export a textured mesh with color computed from the NeRF.
28 | """ 29 | 30 | load_config: Path 31 | """Path to the config YAML file.""" 32 | output_dir: Path 33 | """Path to the output directory.""" 34 | input_mesh_filename: Path 35 | """Mesh filename to texture.""" 36 | px_per_uv_triangle: int = 4 37 | """Number of pixels per UV square.""" 38 | unwrap_method: Literal["xatlas", "custom"] = "xatlas" 39 | """The method to use for unwrapping the mesh.""" 40 | num_pixels_per_side: int = 2048 41 | """If using xatlas for unwrapping, the pixels per side of the texture image.""" 42 | target_num_faces: Optional[int] = 50000 43 | """Target number of faces for the mesh to texture.""" 44 | 45 | def main(self) -> None: 46 | """Export textured mesh""" 47 | # pylint: disable=too-many-statements 48 | 49 | if not self.output_dir.exists(): 50 | self.output_dir.mkdir(parents=True) 51 | 52 | # load the Mesh 53 | mesh = get_mesh_from_filename(str(self.input_mesh_filename), target_num_faces=self.target_num_faces) 54 | 55 | # load the Pipeline 56 | _, pipeline, _ = eval_setup(self.load_config, test_mode="inference") 57 | 58 | # texture the mesh with NeRF and export to a mesh.obj file 59 | # and a material and texture file 60 | texture_utils.export_textured_mesh( 61 | mesh, 62 | pipeline, 63 | px_per_uv_triangle=self.px_per_uv_triangle, 64 | output_dir=self.output_dir, 65 | unwrap_method=self.unwrap_method, 66 | num_pixels_per_side=self.num_pixels_per_side, 67 | ) 68 | 69 | 70 | def entrypoint(): 71 | """Entrypoint for use with pyproject scripts.""" 72 | tyro.extras.set_accent_color("bright_yellow") 73 | tyro.cli(tyro.conf.FlagConversionOff[TextureMesh]).main() 74 | 75 | 76 | if __name__ == "__main__": 77 | entrypoint() 78 | 79 | # For sphinx docs 80 | get_parser_fn = lambda: tyro.extras.get_parser(TextureMesh) # noqa 81 | -------------------------------------------------------------------------------- /scripts/viewer/view_dataset.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | view_dataset.py 4 | """ 5 | 6 | from rich.console import Console 7 | 8 | CONSOLE = Console(width=120) 9 | import time 10 | from datetime import timedelta 11 | from pathlib import Path 12 | 13 | import torch 14 | import tyro 15 | 16 | from nerfstudio.configs.base_config import ViewerConfig 17 | from nerfstudio.data.datamanagers import AnnotatedDataParserUnion 18 | from nerfstudio.data.datasets.base_dataset import InputDataset 19 | from nerfstudio.viewer.server import viewer_utils 20 | 21 | DEFAULT_TIMEOUT = timedelta(minutes=30) 22 | 23 | # speedup for when input size to model doesn't change (much) 24 | torch.backends.cudnn.benchmark = True # type: ignore 25 | 26 | 27 | def main( 28 | dataparser: AnnotatedDataParserUnion, 29 | viewer: ViewerConfig, 30 | log_base_dir: Path = Path("/tmp/nerfstudio_viewer_logs"), 31 | ) -> None: 32 | """Main function.""" 33 | viewer_state = viewer_utils.ViewerState( 34 | viewer, 35 | log_filename=log_base_dir / viewer.relative_log_filename, 36 | ) 37 | dataset = InputDataset(dataparser.setup().get_dataparser_outputs(split="train")) 38 | viewer_state.init_scene(dataset=dataset, start_train=False) 39 | CONSOLE.log("Please refresh and load page at: %s", viewer_state.viewer_url) 40 | time.sleep(30) # allowing time to refresh page 41 | 42 | 43 | if __name__ == "__main__": 44 | tyro.extras.set_accent_color("bright_yellow") 45 | tyro.cli(main) 46 | -------------------------------------------------------------------------------- /setup.cfg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/setup.cfg -------------------------------------------------------------------------------- /tests/cameras/test_rays.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the ray classes. 3 | """ 4 | 5 | import pytest 6 | import torch 7 | 8 | from nerfstudio.cameras.rays import Frustums 9 | 10 | 11 | def test_frustum_get_position(): 12 | """Test position calculation""" 13 | 14 | origin = torch.Tensor([0, 1, 2])[None, ...] 15 | direction = torch.Tensor([0, 1, 0])[None, ...] 16 | frustum_start = torch.Tensor([2])[None, ...] 17 | frustum_end = torch.Tensor([3])[None, ...] 18 | 19 | target_position = torch.Tensor([0, 3.5, 2])[None, ...] 20 | 21 | frustum = Frustums( 22 | origins=origin, 23 | directions=direction, 24 | starts=frustum_start, 25 | ends=frustum_end, 26 | pixel_area=torch.ones((1, 1)), 27 | ) 28 | 29 | positions = frustum.get_positions() 30 | assert positions == pytest.approx(target_position, abs=1e-6) 31 | 32 | 33 | def test_frustum_get_gaussian_blob(): 34 | """Test gaussian blob calculation""" 35 | 36 | frustum = Frustums( 37 | origins=torch.ones((5, 3)), 38 | directions=torch.ones((5, 3)), 39 | starts=torch.ones((5, 1)), 40 | ends=torch.ones((5, 1)), 41 | pixel_area=torch.ones((5, 1)), 42 | ) 43 | 44 | gaussian_blob = frustum.get_gaussian_blob() 45 | assert gaussian_blob.mean.shape == (5, 3) 46 | assert gaussian_blob.cov.shape == (5, 3, 3) 47 | 48 | 49 | def test_frustum_apply_masks(): 50 | """Test masking frustum""" 51 | frustum = Frustums( 52 | origins=torch.ones((5, 3)), 53 | directions=torch.ones((5, 3)), 54 | starts=torch.ones((5, 1)), 55 | ends=torch.ones((5, 1)), 56 | pixel_area=torch.ones((5, 1)), 57 | ) 58 | 59 | mask = torch.tensor([False, True, False, True, True], dtype=torch.bool) 60 | frustum = frustum[mask] 61 | 62 | assert frustum.origins.shape == (3, 3) 63 | assert frustum.directions.shape == (3, 3) 64 | assert frustum.starts.shape == (3, 1) 65 | assert frustum.ends.shape == (3, 1) 66 | assert frustum.pixel_area.shape == (3, 1) 67 | 68 | 69 | def test_get_mock_frustum(): 70 | """Test creation of mock frustum""" 71 | Frustums.get_mock_frustum() 72 | 73 | 74 | if __name__ == "__main__": 75 | test_frustum_get_gaussian_blob() 76 | -------------------------------------------------------------------------------- /tests/data/lego_test/train/r_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/tests/data/lego_test/train/r_0.png -------------------------------------------------------------------------------- /tests/data/lego_test/transforms_train.json: -------------------------------------------------------------------------------- 1 | { 2 | "camera_angle_x": 0.6911112070083618, 3 | "frames": [ 4 | { 5 | "file_path": "./train/r_0", 6 | "rotation": 0.012566370614359171, 7 | "transform_matrix": [ 8 | [ 9 | -0.9999021887779236, 10 | 0.004192245192825794, 11 | -0.013345719315111637, 12 | -0.05379832163453102 13 | ], 14 | [ 15 | -0.013988681137561798, 16 | -0.2996590733528137, 17 | 0.95394366979599, 18 | 3.845470428466797 19 | ], 20 | [ 21 | -4.656612873077393e-10, 22 | 0.9540371894836426, 23 | 0.29968830943107605, 24 | 1.2080823183059692 25 | ], 26 | [ 27 | 0.0, 28 | 0.0, 29 | 0.0, 30 | 1.0 31 | ] 32 | ] 33 | } 34 
| ] 35 | } -------------------------------------------------------------------------------- /tests/data/lego_test/transforms_val.json: -------------------------------------------------------------------------------- 1 | { 2 | "camera_angle_x": 0.6911112070083618, 3 | "frames": [ 4 | { 5 | "file_path": "./val/r_0", 6 | "rotation": 0.012566370614359171, 7 | "transform_matrix": [ 8 | [ 9 | -0.963964581489563, 10 | -0.2611401677131653, 11 | 0.0507759265601635, 12 | 0.2046843022108078 13 | ], 14 | [ 15 | 0.26603081822395325, 16 | -0.9462433457374573, 17 | 0.18398693203926086, 18 | 0.7416750192642212 19 | ], 20 | [ 21 | 7.450580596923828e-09, 22 | 0.1908649355173111, 23 | 0.9816163182258606, 24 | 3.957021951675415 25 | ], 26 | [ 27 | 0.0, 28 | 0.0, 29 | 0.0, 30 | 1.0 31 | ] 32 | ] 33 | } 34 | ] 35 | } -------------------------------------------------------------------------------- /tests/data/lego_test/val/r_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/sdfstudio/370902a10dbef08cb3fe4391bd3ed1e227b5c165/tests/data/lego_test/val/r_0.png -------------------------------------------------------------------------------- /tests/field_components/test_embedding.py: -------------------------------------------------------------------------------- 1 | """ 2 | Embedding tests 3 | """ 4 | from nerfstudio.field_components.embedding import Embedding 5 | 6 | 7 | def test_indexing(): 8 | """Test embedding indexing""" 9 | in_dim = 100 10 | out_dim = 64 11 | 12 | embedding = Embedding(in_dim, out_dim) 13 | assert embedding 14 | # TODO 15 | -------------------------------------------------------------------------------- /tests/field_components/test_field_outputs.py: -------------------------------------------------------------------------------- 1 | """ 2 | Field output tests 3 | """ 4 | import pytest 5 | import torch 6 | from torch import nn 7 | 8 | from nerfstudio.field_components.field_heads import ( 9 | DensityFieldHead, 10 | FieldHead, 11 | FieldHeadNames, 12 | RGBFieldHead, 13 | SHFieldHead, 14 | ) 15 | 16 | 17 | def test_field_output(): 18 | """Test field head output""" 19 | in_dim = 6 20 | out_dim = 4 21 | activation = nn.ReLU() 22 | render_head = FieldHead(in_dim=in_dim, out_dim=out_dim, field_head_name=FieldHeadNames.RGB, activation=activation) 23 | assert render_head.get_out_dim() == out_dim 24 | 25 | x = torch.ones((9, in_dim)) 26 | render_head(x) 27 | 28 | # Test in_dim not provided at construction 29 | render_head = FieldHead(out_dim=out_dim, field_head_name=FieldHeadNames.RGB, activation=activation) 30 | with pytest.raises(SystemError): 31 | render_head(x) 32 | render_head.set_in_dim(in_dim) 33 | render_head(x) 34 | 35 | 36 | def test_density_output(): 37 | """Test density output""" 38 | in_dim = 6 39 | density_head = DensityFieldHead(in_dim) 40 | assert density_head.get_out_dim() == 1 41 | 42 | x = torch.ones((9, in_dim)) 43 | density_head(x) 44 | 45 | 46 | def test_rgb_output(): 47 | """Test rgb output""" 48 | in_dim = 6 49 | rgb_head = RGBFieldHead(in_dim) 50 | assert rgb_head.get_out_dim() == 3 51 | 52 | x = torch.ones((9, in_dim)) 53 | rgb_head(x) 54 | 55 | 56 | def test_sh_output(): 57 | """Test sh output""" 58 | in_dim = 6 59 | levels = 4 60 | channels = 3 61 | rgb_head = SHFieldHead(in_dim, levels=levels, channels=channels) 62 | assert rgb_head.get_out_dim() == channels * levels**2 63 | 64 | x = torch.ones((9, in_dim)) 65 | rgb_head(x) 66 | 67 | 68 | if __name__ == "__main__": 69 | test_field_output() 70 |
test_density_output() 71 | test_rgb_output() 72 | test_sh_output() 73 | -------------------------------------------------------------------------------- /tests/field_components/test_fields.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the fields 3 | """ 4 | import torch 5 | 6 | from nerfstudio.cameras.rays import Frustums, RaySamples 7 | from nerfstudio.fields.instant_ngp_field import TCNNInstantNGPField 8 | 9 | 10 | def test_tcnn_instant_ngp_field(): 11 | """Test the tiny-cuda-nn field""" 12 | # pylint: disable=import-outside-toplevel 13 | # pylint: disable=unused-import 14 | try: 15 | import tinycudann as tcnn 16 | except ImportError as e: 17 | # tinycudann module doesn't exist 18 | print(e) 19 | return 20 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 21 | aabb_scale = 1.0 22 | aabb = torch.tensor( 23 | [[-aabb_scale, -aabb_scale, -aabb_scale], [aabb_scale, aabb_scale, aabb_scale]], dtype=torch.float32 24 | ).to(device) 25 | field = TCNNInstantNGPField(aabb) 26 | num_rays = 1024 27 | num_samples = 256 28 | positions = torch.rand((num_rays, num_samples, 3), dtype=torch.float32, device=device) 29 | directions = torch.rand_like(positions) 30 | frustums = Frustums( 31 | origins=positions, 32 | directions=directions, 33 | starts=torch.zeros((*directions.shape[:-1], 1), device=device), 34 | ends=torch.zeros((*directions.shape[:-1], 1), device=device), 35 | pixel_area=torch.ones((*directions.shape[:-1], 1), device=device), 36 | ) 37 | ray_samples = RaySamples(frustums=frustums) 38 | field.forward(ray_samples) 39 | 40 | 41 | if __name__ == "__main__": 42 | test_tcnn_instant_ngp_field() 43 | -------------------------------------------------------------------------------- /tests/field_components/test_mlp.py: -------------------------------------------------------------------------------- 1 | """ 2 | MLP Test 3 | """ 4 | import torch 5 | from torch import nn 6 | 7 | from nerfstudio.field_components import MLP 8 | 9 | 10 | def test_mlp(): 11 | """Test mlp""" 12 | in_dim = 6 13 | out_dim = 10 14 | num_layers = 2 15 | layer_width = 32 16 | out_activation = nn.ReLU() 17 | mlp = MLP( 18 | in_dim=in_dim, out_dim=out_dim, num_layers=num_layers, layer_width=layer_width, out_activation=out_activation 19 | ) 20 | assert mlp.get_out_dim() == out_dim 21 | 22 | x = torch.ones((9, in_dim)) 23 | 24 | mlp.build_nn_modules() 25 | y = mlp(x) 26 | 27 | assert y.shape[-1] == out_dim 28 | 29 | 30 | if __name__ == "__main__": 31 | test_mlp() 32 | -------------------------------------------------------------------------------- /tests/field_components/test_temporal_distortions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test if temporal distortions run properly 3 | """ 4 | import torch 5 | 6 | from nerfstudio.field_components.temporal_distortions import DNeRFDistortion 7 | 8 | 9 | def test_dnerf_distortion(): 10 | """Test dnerf distortion""" 11 | # pylint: disable=import-outside-toplevel 12 | # pylint: disable=unused-import 13 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 14 | distortion = DNeRFDistortion().to(device) 15 | 16 | num_rays = 1024 17 | num_samples = 256 18 | positions = torch.rand((num_rays, num_samples, 3), dtype=torch.float32, device=device) 19 | assert distortion.forward(positions, None) is None 20 | times = torch.rand_like(positions[..., :1]) 21 | assert distortion.forward(positions, times).shape == positions.shape 22 | 23 | 24 | if __name__ == 
"__main__": 25 | test_dnerf_distortion() 26 | -------------------------------------------------------------------------------- /tests/model_components/test_renderers.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test renderers 3 | """ 4 | import pytest 5 | import torch 6 | 7 | from nerfstudio.cameras.rays import Frustums, RaySamples 8 | from nerfstudio.model_components import renderers 9 | 10 | 11 | def test_rgb_renderer(): 12 | """Test RGB volumetric rendering""" 13 | num_samples = 10 14 | 15 | rgb_samples = torch.ones((3, num_samples, 3)) 16 | weights = torch.ones((3, num_samples, 1)) 17 | weights /= torch.sum(weights, dim=-2, keepdim=True) 18 | 19 | rgb_renderer = renderers.RGBRenderer() 20 | 21 | rgb = rgb_renderer(rgb=rgb_samples, weights=weights) 22 | assert torch.max(rgb) > 0.9 23 | 24 | rgb = rgb_renderer(rgb=rgb_samples * 0, weights=weights) 25 | assert torch.max(rgb) == pytest.approx(0, abs=1e-6) 26 | 27 | 28 | def test_sh_renderer(): 29 | """Test SH volumetric rendering""" 30 | 31 | levels = 4 32 | num_samples = 10 33 | 34 | sh = torch.ones((3, num_samples, 3 * levels**2)) 35 | weights = torch.ones((3, num_samples, 1)) 36 | weights /= torch.sum(weights, dim=-2, keepdim=True) 37 | directions = torch.zeros((3, num_samples, 3)) 38 | directions[..., 0] = 1 39 | 40 | sh_renderer = renderers.SHRenderer() 41 | 42 | rgb = sh_renderer(sh=sh, directions=directions, weights=weights) 43 | assert torch.max(rgb) > 0.9 44 | 45 | 46 | def test_acc_renderer(): 47 | """Test accumulation rendering""" 48 | 49 | num_samples = 10 50 | weights = torch.ones((3, num_samples, 1)) 51 | weights /= torch.sum(weights, dim=-2, keepdim=True) 52 | 53 | acc_renderer = renderers.AccumulationRenderer() 54 | 55 | accumulation = acc_renderer(weights=weights) 56 | assert torch.max(accumulation) > 0.9 57 | 58 | 59 | def test_depth_renderer(): 60 | """Test depth rendering""" 61 | 62 | num_samples = 10 63 | weights = torch.ones((num_samples, 1)) 64 | weights /= torch.sum(weights, dim=-2, keepdim=True) 65 | 66 | frustums = Frustums.get_mock_frustum() 67 | frustums.starts = torch.linspace(0, 100, num_samples)[..., None] 68 | frustums.ends = torch.linspace(1, 101, num_samples)[..., None] 69 | 70 | ray_samples = RaySamples( 71 | frustums=frustums, 72 | camera_indices=torch.ones((num_samples, 1)), 73 | deltas=torch.ones((num_samples, 1)), 74 | ) 75 | 76 | depth_renderer = renderers.DepthRenderer(method="median") 77 | depth = depth_renderer(weights=weights, ray_samples=ray_samples) 78 | assert torch.min(depth) > 0 79 | 80 | depth_renderer = renderers.DepthRenderer(method="expected") 81 | depth = depth_renderer(weights=weights, ray_samples=ray_samples) 82 | assert torch.min(depth) > 0 83 | 84 | 85 | if __name__ == "__main__": 86 | test_rgb_renderer() 87 | test_sh_renderer() 88 | test_acc_renderer() 89 | test_depth_renderer() 90 | -------------------------------------------------------------------------------- /tests/test_train.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=protected-access 2 | """ 3 | Default test to make sure train runs 4 | """ 5 | 6 | from __future__ import annotations 7 | 8 | from pathlib import Path 9 | 10 | import pytest 11 | 12 | from nerfstudio.configs.base_config import Config 13 | from nerfstudio.configs.method_configs import method_configs 14 | from nerfstudio.data.dataparsers.blender_dataparser import BlenderDataParserConfig 15 | from scripts.train import train_loop 16 | 17 | BLACKLIST = 
["base", "semantic-nerfw", "instant-ngp", "nerfacto", "phototourism"] 18 | 19 | 20 | def set_reduced_config(config: Config): 21 | """Reducing the config settings to speedup test""" 22 | config.machine.num_gpus = 0 23 | config.trainer.max_num_iterations = 2 24 | # reduce dataset factors; set dataset to test 25 | config.pipeline.datamanager.dataparser = BlenderDataParserConfig(data=Path("tests/data/lego_test")) 26 | config.pipeline.datamanager.train_num_images_to_sample_from = 1 27 | config.pipeline.datamanager.train_num_rays_per_batch = 4 28 | 29 | # use tensorboard logging instead of wandb 30 | config.vis = "tensorboard" 31 | config.logging.relative_log_dir = Path("/tmp/") 32 | 33 | # reduce model factors 34 | if hasattr(config.pipeline.model, "num_coarse_samples"): 35 | config.pipeline.model.num_coarse_samples = 4 36 | if hasattr(config.pipeline.model, "num_importance_samples"): 37 | config.pipeline.model.num_importance_samples = 4 38 | # remove viewer 39 | config.viewer.enable = False 40 | 41 | # model specific config settings 42 | if config.method_name == "instant-ngp": 43 | config.pipeline.model.field_implementation = "torch" 44 | 45 | return config 46 | 47 | 48 | @pytest.mark.filterwarnings("ignore::DeprecationWarning") 49 | def test_train(): 50 | """test run train script works properly""" 51 | all_config_names = method_configs.keys() 52 | for config_name in all_config_names: 53 | if config_name in BLACKLIST: 54 | print("skipping", config_name) 55 | continue 56 | print(f"testing run for: {config_name}") 57 | config = method_configs[config_name] 58 | config = set_reduced_config(config) 59 | 60 | train_loop(local_rank=0, world_size=0, config=config) 61 | 62 | 63 | if __name__ == "__main__": 64 | test_train() 65 | -------------------------------------------------------------------------------- /tests/utils/test_poses.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test pose utils 3 | """ 4 | 5 | import torch 6 | 7 | from nerfstudio.utils import poses 8 | 9 | 10 | def test_to4x4(): 11 | """Test addition of homogeneous coordinate to 3D pose.""" 12 | pose = torch.rand((10, 3, 4)) 13 | pose_4x4 = poses.to4x4(pose) 14 | 15 | assert pose_4x4.shape == (10, 4, 4) 16 | assert torch.equal( 17 | pose_4x4[:, 3, 3], 18 | torch.ones( 19 | 10, 20 | ), 21 | ) 22 | 23 | 24 | def test_multiply(): 25 | """Test 3D pose multiplication.""" 26 | pose = torch.tensor( 27 | [ 28 | [ 29 | [1.0, 0.0, 0.0, 1.0], 30 | [0.0, 1.0, 0.0, 2.0], 31 | [0.0, 0.0, 1.0, 3.0], 32 | ] 33 | ] 34 | ) 35 | translation_pose = poses.multiply(pose, pose) 36 | assert translation_pose.shape == pose.shape 37 | assert torch.equal(translation_pose[..., :, 3], torch.tensor([[2.0, 4.0, 6.0]])) 38 | 39 | pose_a = pose.clone() 40 | pose_a[:, :3, :3] = torch.tensor( 41 | [ 42 | [1.0, 0.0, 0.0], 43 | [0.0, 0.0, -1.0], 44 | [0.0, 1.0, 0.0], 45 | ] 46 | ) 47 | pose_b = pose.clone() 48 | pose_b[:, :3, :3] = torch.tensor( 49 | [ 50 | [0.0, -1.0, 0.0], 51 | [0.0, 0.0, -1.0], 52 | [1.0, 0.0, 0.0], 53 | ] 54 | ) 55 | 56 | translation_rotation_pose = poses.multiply(pose_a, pose_b) 57 | assert torch.allclose(translation_rotation_pose, (poses.to4x4(pose_a) @ poses.to4x4(pose_b))[:, :3, :4]) 58 | 59 | 60 | def test_inverse(): 61 | """Test 3D pose inversion.""" 62 | 63 | pose = torch.rand((10, 3, 4)) 64 | pose[:, :3, :3] = torch.tensor( 65 | [ 66 | [1.0, 0.0, 0.0], 67 | [0.0, 0.0, -1.0], 68 | [0.0, 1.0, 0.0], 69 | ] 70 | ) 71 | pose_inv = poses.inverse(pose) 72 | 73 | assert pose_inv.shape == pose.shape 74 | 
75 | unit_pose = torch.zeros_like(pose) 76 | unit_pose[:, :3, :3] = torch.eye(3) 77 | 78 | assert torch.allclose( 79 | poses.multiply(pose, pose_inv), 80 | unit_pose, 81 | ) 82 | 83 | 84 | def test_normalize(): 85 | """Test 3D pose normalization""" 86 | pose = torch.ones((2, 3, 4)) 87 | pose[:, :3, :3] = torch.eye(3) 88 | pose[0, :3, 3] = torch.tensor([2.0, 0.0, -2.0]) 89 | pose[1, :3, 3] = torch.tensor([1.0, 1.0, 1.0]) 90 | 91 | pose_scaled = poses.normalize(pose) 92 | 93 | assert pose_scaled.shape == pose.shape 94 | assert torch.max(pose_scaled[:, :3, 3]) <= 1.0 95 | 96 | assert torch.equal(pose_scaled[0, :3, 3], torch.tensor([1.0, 0.0, -1.0])) 97 | assert torch.equal(pose_scaled[1, :3, 3], torch.tensor([0.5, 0.5, 0.5])) 98 | -------------------------------------------------------------------------------- /tests/utils/test_visualization.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test colormaps 3 | """ 4 | import torch 5 | 6 | from nerfstudio.utils import colormaps 7 | 8 | 9 | def test_apply_colormap(): 10 | """Test adding a colormap to data""" 11 | data = torch.rand((10, 20, 1)) 12 | colored_data = colormaps.apply_colormap(data) 13 | 14 | assert colored_data.shape == (10, 20, 3) 15 | assert torch.min(colored_data) >= 0 16 | assert torch.max(colored_data) <= 1 17 | 18 | 19 | def test_apply_depth_colormap(): 20 | """Test adding a colormap to depth data""" 21 | data = torch.rand((10, 20, 1)) 22 | accum = torch.rand((10, 20, 1)) 23 | accum = accum / torch.max(accum) 24 | colored_data = colormaps.apply_depth_colormap(depth=data, accumulation=accum) 25 | 26 | assert colored_data.shape == (10, 20, 3) 27 | assert torch.min(colored_data) >= 0 28 | assert torch.max(colored_data) <= 1 29 | 30 | 31 | def test_apply_boolean_colormap(): 32 | """Test adding a colormap to boolean data""" 33 | data = torch.rand((10, 20, 1)) 34 | data = data > 0.5 35 | colored_data = colormaps.apply_boolean_colormap(data) 36 | 37 | assert colored_data.shape == (10, 20, 3) 38 | assert torch.min(colored_data) >= 0 39 | assert torch.max(colored_data) <= 1 40 | --------------------------------------------------------------------------------