├── .dockerignore ├── .gitignore ├── AUTHORS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── PULL_REQUEST_TEMPLATE.md ├── README.md ├── assets └── teaser.png ├── dockerfiles ├── cpu-sbmc.dockerfile └── cuda-sbmc.dockerfile ├── docs ├── .gitignore ├── Makefile ├── make.bat └── source │ ├── callbacks.rst │ ├── conf.py │ ├── datasets.rst │ ├── evaluation.rst │ ├── functions.rst │ ├── index.rst │ ├── interfaces.rst │ ├── losses.rst │ ├── models.rst │ ├── modules.rst │ ├── rendering.rst │ └── scene_generator.rst ├── halide_pytorch ├── .gitignore ├── README.rst ├── halide_pytorch │ ├── __init__.py │ ├── extension.py │ └── version.py └── setup.py ├── pbrt_patches ├── 2011_sen_rpf.diff ├── 2012_rousselle_nlm.diff ├── 2015_kalantari_lbf.diff ├── 2016_bitterli_nfor.diff └── sbmc_pbrt.diff ├── pyproject.toml ├── pytest.ini ├── sbmc ├── __init__.py ├── callbacks.py ├── datasets.py ├── evaluation.py ├── functions.py ├── interfaces.py ├── losses.py ├── models.py ├── modules.py ├── rendering.py ├── scene_generator │ ├── __init__.py │ ├── converters.py │ ├── generators.py │ ├── geometry.py │ ├── lights.py │ ├── materials.py │ ├── randomizers.py │ ├── scene.py │ ├── suncg.py │ ├── textures.py │ └── xforms.py └── version.py ├── scripts ├── check_docker_version.sh ├── compute_metrics.py ├── denoise.py ├── denoise_nfor.py ├── download.py ├── figures │ ├── README │ ├── _legacy_big_metrics.py │ ├── figures.mk │ ├── kernels.py │ └── scatter_vs_gather.py ├── generate_training_data.py ├── install_nvidia_docker.sh ├── profile │ ├── kernel_weighting.py │ └── scatter2gather.py ├── render_exr.py ├── render_samples.py ├── train.py └── visualize_dataset.py ├── setup.py ├── src ├── kernel_weighting.cpp └── scatter2gather.cpp └── tests ├── __init__.py ├── test_functions.py ├── test_losses.py └── test_modules.py /.dockerignore: -------------------------------------------------------------------------------- 1 | _extras 2 | data 3 | checkpoints 4 | dockerfiles 5 | build 6 | 
rendernet_pbrt 7 | output 8 | .git 9 | .cache 10 | dist 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | data 2 | *.pyc 3 | output 4 | .DS_Store 5 | !bin/data 6 | local_data 7 | local_output 8 | .gdb_history 9 | checkpoints 10 | hlMakefile 11 | oldMakefile 12 | trainMake 13 | sbmc/halide_ops.cpython-37m-x86_64-linux-gnu.so 14 | sbmc.egg-info/ 15 | build 16 | sbmc/halide_ops.cpython-37m-darwin.so 17 | dist 18 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Michaël Gharbi 2 | Tzu-Mao Li 3 | Miika Aittala 4 | Jaakko Lehtinen 5 | Frédo Durand 6 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Adobe Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | nationality, personal appearance, race, religion, or sexual identity and 10 | orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at Grp-opensourceoffice@adobe.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at [https://contributor-covenant.org/version/1/4][version] 72 | 73 | [homepage]: https://contributor-covenant.org 74 | [version]: https://contributor-covenant.org/version/1/4/ 75 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Thanks for choosing to contribute! 4 | 5 | The following are a set of guidelines to follow when contributing to this project. 6 | 7 | ## Code Of Conduct 8 | 9 | This project adheres to the Adobe [code of conduct](CODE_OF_CONDUCT.md). By participating, 10 | you are expected to uphold this code. Please report unacceptable behavior to 11 | [Grp-opensourceoffice@adobe.com](mailto:Grp-opensourceoffice@adobe.com). 12 | 13 | ## Have A Question? 14 | 15 | Start by filing an issue. The existing committers on this project work to reach 16 | consensus around project direction and issue solutions within issue threads 17 | (when appropriate). 
18 | 19 | ## Contributor License Agreement 20 | 21 | All third-party contributions to this project must be accompanied by a signed contributor 22 | license agreement. This gives Adobe permission to redistribute your contributions 23 | as part of the project. [Sign our CLA](https://opensource.adobe.com/cla.html). You 24 | only need to submit an Adobe CLA one time, so if you have submitted one previously, 25 | you are good to go! 26 | 27 | ## Code Reviews 28 | 29 | All submissions should come in the form of pull requests and need to be reviewed 30 | by project committers. Read [GitHub's pull request documentation](https://help.github.com/articles/about-pull-requests/) 31 | for more information on sending pull requests. 32 | 33 | Lastly, please follow the [pull request template](PULL_REQUEST_TEMPLATE.md) when 34 | submitting a pull request! 35 | 36 | ## From Contributor To Committer 37 | 38 | We love contributions from our community! If you'd like to go a step beyond contributor 39 | and become a committer with full write access and a say in the project, you must 40 | be invited to the project. The existing committers employ an internal nomination 41 | process that must reach lazy consensus (silence is approval) before invitations 42 | are issued. If you feel you are qualified and want to get more deeply involved, 43 | feel free to reach out to existing committers to have a conversation about that. 44 | 45 | ## Security Issues 46 | 47 | Security issues shouldn't be reported on this issue tracker. Instead, [file an issue to our security experts](https://helpx.adobe.com/security/alertus.html) 48 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # This Makefile defines rules to build and run a Docker environment for easier 2 | # reproducibility, as well as commands demonstrating how to use the code 3 | # (commands starting with `demo/`). 
4 | 5 | OUTPUT?=$(shell pwd)/output 6 | DATA?=$(shell pwd)/data 7 | PBRT?=_extras/rendernet_pbrt/src/bin/pbrt 8 | OBJ2PBRT?=_extras/rendernet_pbrt/src/bin/obj2pbrt 9 | 10 | # Data server 11 | REMOTE=https://data.csail.mit.edu/graphics/sbmc 12 | 13 | # Path to comparison code 14 | SEN2011?=_extras/comparisons/methods/2011_sen_rpf 15 | ROUSSELLE2012?=_extras/comparisons/methods/2012_rousselle_nlm 16 | KALANTARI2015?=_extras/comparisons/methods/2015_kalantari_lbf 17 | BITTERLI2016?=_extras/comparisons/methods/nfor_fromdocker 18 | 19 | # Checks whether docker version supports the --gpus option 20 | check_docker_version: 21 | ./scripts/check_docker_version.sh 22 | 23 | # Install the required extension for CUDA on Docker 24 | nvidia_docker: check_docker_version 25 | ./scripts/install_nvidia_docker.sh 26 | 27 | # To facilitate environment setup, build and use this dockerfile 28 | # !! Requires NVIDIA's docker extension !! 29 | docker_build: 30 | @docker build -f dockerfiles/cuda-sbmc.dockerfile -t sbmc_cuda . 31 | 32 | # To facilitate environment setup, build and use this dockerfile 33 | # !! Requires NVIDIA's docker extension !! 34 | docker_build_cpu: 35 | @docker build -f dockerfiles/cpu-sbmc.dockerfile -t sbmc_cpu . 36 | 37 | $(OUTPUT): 38 | mkdir -p $(OUTPUT) 39 | 40 | $(DATA): 41 | mkdir -p $(DATA) 42 | 43 | # This target launches a fully configured docker instance, 44 | # mounts $(OUTPUT) as read-write volume and $(DATA) as readonly for persistent I/O. 45 | # Once logged into the docker instance, you can run any of the `make demo/*` 46 | # commands. 
47 | docker_run: docker_build $(OUTPUT) $(DATA) 48 | @docker run --gpus all --name sbmc_cuda_app --rm \ 49 | -v $(OUTPUT):/sbmc_app/output \ 50 | -v $(DATA):/sbmc_app/data \ 51 | --ipc=host \ 52 | -p 2001:2001 \ 53 | -it sbmc_cuda 54 | 55 | docker_run_cpu: docker_build_cpu $(OUTPUT) $(DATA) 56 | @docker run --name sbmc_cpu_app --rm \ 57 | -v $(OUTPUT):/sbmc_app/output \ 58 | -v $(DATA):/sbmc_app/data \ 59 | --ipc=host \ 60 | -p 2002:2001 \ 61 | -it sbmc_cpu 62 | 63 | clean: 64 | rm -rf dist .pytest_cache sbmc.egg-info build sbmc/halide_ops.*.so 65 | 66 | test: 67 | pytest tests 68 | 69 | .PHONY: demo/render_bins demo/render_reference \ 70 | demo/visualize demo/denoiser demo/train demo/train_kpcn \ 71 | demo/render_samples server clean nvidia_docker demo/data 72 | 73 | # ----------------------------------------------------------------------------- 74 | 75 | # The rest of this Makefiles demonstrates how to use the SBMC API and entry 76 | # scripts for common tasks. 77 | 78 | demo/render_samples: $(OUTPUT)/demo/test_samples/0000_0000.bin 79 | 80 | # This demonstrates how we render .bin sample files for a test scene 81 | $(OUTPUT)/demo/test_samples/0000_0000.bin: demo_data 82 | @python scripts/render_samples.py $(PBRT) \ 83 | $(DATA)/demo/scenes/GITestSynthesizer_01/scene.pbrt \ 84 | $(OUTPUT)/demo/test_samples \ 85 | --tmp_dir $(OUTPUT)/tmp --height 128 --width 128 --spp 4 \ 86 | --gt_spp 1 87 | 88 | # This demonstrates how we render .bin sample files for a training dataset 89 | # using our random scene generator 90 | demo/generate_scenes: $(OUTPUT)/demo/training_scenes/filelist.txt 91 | 92 | $(OUTPUT)/demo/training_scenes/filelist.txt: demo_data 93 | @python scripts/generate_training_data.py $(PBRT) \ 94 | $(OBJ2PBRT) \ 95 | $(DATA)/demo/scenegen_assets $(OUTPUT)/demo/training_scenes --count 2 \ 96 | --spp 4 --gt_spp 4 --height 128 --width 128 97 | @cd $(OUTPUT)/demo/training_scenes && find . 
-name "*.bin" > filelist.txt 98 | 99 | # This shows how to use the visualization helper script to inspect the sample 100 | # .bin files 101 | demo/visualize: 102 | @python scripts/visualize_dataset.py $(OUTPUT)/demo/training_scenes \ 103 | $(OUTPUT)/demo/dataviz --spp 1 104 | 105 | # This demonstrates how to run pretrained models on .bin test scenes 106 | demo/denoise: demo/render_samples pretrained_models 107 | @python scripts/denoise.py \ 108 | --input $(OUTPUT)/demo/test_samples \ 109 | --output $(OUTPUT)/demo/ours_4spp.exr \ 110 | --spp 4 \ 111 | --checkpoint $(DATA)/pretrained_models/gharbi2019_sbmc 112 | @python scripts/denoise.py \ 113 | --input $(OUTPUT)/demo/test_samples \ 114 | --output $(OUTPUT)/demo/bako2017_4spp.exr \ 115 | --spp 4 \ 116 | --checkpoint $(DATA)/pretrained_models/bako2017_finetuned 117 | 118 | # This demonstrates how we render a .exr reference image for a test scene 119 | demo/render_reference: demo_data 120 | @python scripts/render_exr.py $(PBRT) \ 121 | $(DATA)/demo/scenes/GITestSynthesizer_01/scene.pbrt \ 122 | $(OUTPUT)/demo/comparisons/reference/GITestSynthesizer_01.exr \ 123 | --tmp_dir $(OUTPUT)/tmp --height 128 --width 128 --spp 4 124 | 125 | # This demonstrates how we render .exr images for the comparison denoisers 126 | # Rouselle2012 and Kalantari2015 require a GPU and are expected to fail on the 127 | # CPU-only docker image. 
128 | demo/comparisons: demo/render_samples pretrained_models demo_data 129 | @python scripts/denoise.py \ 130 | --input $(OUTPUT)/demo/test_samples \ 131 | --output $(OUTPUT)/demo/comparisons/2017_bako_kpcn_finetuned/GITestSynthesizer_01.exr \ 132 | --spp 4 \ 133 | --checkpoint $(DATA)/pretrained_models/bako2017_finetuned 134 | @python scripts/denoise.py \ 135 | --input $(OUTPUT)/demo/test_samples \ 136 | --output $(OUTPUT)/demo/comparisons/ours/GITestSynthesizer_01.exr \ 137 | --spp 4 \ 138 | --checkpoint $(DATA)/pretrained_models/gharbi2019_sbmc 139 | @python scripts/render_exr.py $(SEN2011)/bin/pbrt \ 140 | $(DATA)/demo/scenes/GITestSynthesizer_01/scene.pbrt \ 141 | $(OUTPUT)/demo/comparisons/2011_sen_rpf/GITestSynthesizer_01.exr \ 142 | --tmp_dir $(OUTPUT)/tmp --height 128 --width 128 --spp 4 143 | @python scripts/denoise_nfor.py $(BITTERLI2016)/build/denoiser \ 144 | $(OUTPUT)/demo/test_samples \ 145 | $(OUTPUT)/demo/comparisons/2016_bitterli_nfor/GITestSynthesizer_01.exr \ 146 | --tmp_dir $(OUTPUT)/tmp --spp 4 147 | @python scripts/render_exr.py $(ROUSSELLE2012)/bin/pbrt \ 148 | $(DATA)/demo/scenes/GITestSynthesizer_01/scene.pbrt \ 149 | $(OUTPUT)/demo/comparisons/2012_rousselle_nlm/GITestSynthesizer_01.exr \ 150 | --tmp_dir $(OUTPUT)/tmp --height 128 --width 128 --spp 4 151 | @python scripts/render_exr.py $(KALANTARI2015)/bin/pbrt \ 152 | $(DATA)/demo/scenes/GITestSynthesizer_01/scene.pbrt \ 153 | $(OUTPUT)/demo/comparisons/2015_kalantari_lbf/GITestSynthesizer_01.exr \ 154 | --tmp_dir $(OUTPUT)/tmp --height 128 --width 128 --spp 4 \ 155 | --kalantari2015_data $(KALANTARI2015)/pretrained/Weights.dat \ 156 | $(KALANTARI2015)/pretrained/FeatureNorm.dat 157 | 158 | # This demonstrates how to train a new model 159 | demo/train: demo/generate_scenes 160 | @python scripts/train.py \ 161 | --checkpoint_dir $(OUTPUT)/demo/training \ 162 | --data $(OUTPUT)/demo/training_scenes/filelist.txt \ 163 | --env sbmc_ours --port 2001 --bs 1 \ 164 | --spp 4 165 | 166 | # This 
demonstrates how to train a baseline model (from [Bako 2017]) 167 | demo/train_kpcn: demo/generate_scenes 168 | @python scripts/train.py \ 169 | --checkpoint_dir $(OUTPUT)/demo/training_kpcn \ 170 | --data $(OUTPUT)/demo/training_scenes/filelist.txt \ 171 | --constant_spp --env sbmc_kpcn --port 2001 --bs 1 \ 172 | --kpcn_mode \ 173 | --spp 4 174 | 175 | # Launches a Visdom server to monitor the training 176 | server: 177 | @python -m visdom.server -p 2001 & 178 | 179 | demo/eval: precomputed_renderings 180 | @python scripts/compute_metrics.py data/renderings/ref output/eval.csv \ 181 | --methods data/eval_methods.txt \ 182 | --scenes data/eval_scenes.txt 183 | 184 | # Download the data needed for the demo --------------------------------------- 185 | demo_data: $(DATA)/demo/scenes/GITestSynthesizer_01/scene.pbrt 186 | 187 | pretrained_models: $(DATA)/pretrained_models/gharbi2019_sbmc/final.pth 188 | 189 | test_scenes: $(DATA)/scenes/spaceship/scene.pbrt 190 | 191 | precomputed_renderings: $(DATA)/renderings/4spp/spaceship.exr 192 | 193 | $(DATA)/renderings/4spp/spaceship.exr: 194 | @echo "Downloading precomputed renderings from [Gharbi2019] (about 54 GB)" 195 | @python scripts/download.py $(REMOTE)/renderings.zip $(DATA)/renderings.zip 196 | cd $(DATA) && unzip renderings.zip 197 | rm $(DATA)/renderings.zip 198 | @python scripts/download.py $(REMOTE)/eval_methods.txt $(DATA)/eval_methods.txt 199 | @python scripts/download.py $(REMOTE)/eval_scenes.txt $(DATA)/eval_scenes.txt 200 | 201 | $(DATA)/scenes/spaceship/scene.pbrt: 202 | @echo "Downloading test scenes (about 3 GB)" 203 | @python scripts/download.py $(REMOTE)/scenes.zip $(DATA)/scenes.zip 204 | cd $(DATA) && unzip scenes.zip 205 | rm $(DATA)/scenes.zip 206 | 207 | $(DATA)/demo/scenes/GITestSynthesizer_01/scene.pbrt: 208 | @echo "Downloading demo data (about 30 MB)" 209 | @python scripts/download.py $(REMOTE)/demo.zip $(DATA)/demo.zip 210 | cd $(DATA) && unzip demo.zip 211 | rm $(DATA)/demo.zip 212 | 213 | 
$(DATA)/pretrained_models/gharbi2019_sbmc/final.pth: 214 | @echo "Downloading pretrained models (about 512 MB)" 215 | @python scripts/download.py $(REMOTE)/pretrained_models.zip $(DATA)/pretrained_models.zip 216 | cd $(DATA) && unzip pretrained_models.zip 217 | rm $(DATA)/pretrained_models.zip 218 | -------------------------------------------------------------------------------- /PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | Brief description of what this PR does, and why it is needed. 3 | 4 | ### Demo 5 | Optional. Screenshots, examples, etc. 6 | 7 | ### Notes 8 | Optional. Ancillary topics, caveats, alternative strategies that didn't work out, anything else. 9 | 10 | ## Testing Instructions 11 | * How to test this PR 12 | * Prefer bulleted description 13 | * Start after checking out this branch 14 | * Include any setup required, such as bundling scripts, restarting services, etc. 15 | * Include test case, and expected output 16 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 2 | 3 | ![teaser_image](assets/teaser.png) 4 | 5 | Michaël Gharbi (), Tzu-Mao Li, Miika Aittala, Jaakko 6 | Lehtinen, Frédo Durand 7 | 8 | Check out our [project page](http://groups.csail.mit.edu/graphics/rendernet/). 9 | 10 | 11 | ## Quick start 12 | 13 | The quickest way to get started is to run the code from a Docker image. Proceed 14 | as follows: 15 | 16 | 1. Download and [install Docker on your machine](https://docs.docker.com/install/linux/docker-ce/ubuntu/#install-docker-engine---community). 17 | 18 | 2. Allow `docker` to be executed without `sudo` 19 | 20 | 1. Add username to the `docker` group 21 | 22 | ```shell 23 | sudo usermod -aG docker ${USER} 24 | ``` 25 | 26 | 2. 
To apply the new group membership, log out of the server and back in, or type the following: 27 | ```shell 28 | su - ${USER} 29 | ``` 30 | 31 | 3. Confirm that your user is now added to the `docker` group by typing: 32 | ```shell 33 | id -nG 34 | ``` 35 | 36 | 2. To enable GPU acceleration in your Docker instance, install the NVidia 37 | container toolkit: . 38 | We provide a shortcut to install the latter: 39 | 40 | ```shell 41 | make nvidia_docker 42 | ``` 43 | 44 | 3. Once these prerequisites are installed, you can build a pre-configured Docker image 45 | and run it: 46 | 47 | ```shell 48 | make docker_build 49 | make docker_run 50 | ``` 51 | 52 | If all goes well, this will launch a shell on the Docker instance and you 53 | should not have to worry about configuring the Linux or Python environment. 54 | 55 | Alternatively, you can build a CPU-only version of the Docker image: 56 | 57 | ```shell 58 | make docker_build_cpu 59 | make docker_run_cpu 60 | ``` 61 | 62 | 4. (optional) From within the running Docker instance, run the package's tests: 63 | 64 | ```shell 65 | make test 66 | ``` 67 | 68 | 5. Again, within the Docker instance. Try a few demo commands, e.g. run a pretrained denoiser on a test input: 69 | 70 | ```shell 71 | make demo/denoise 72 | ``` 73 | 74 | This should download the pretrained models to `$(DATA)/pretrained_models`, 75 | some demo scenes to `$(DATA)/demo/scenes`, and render some noisy samples data 76 | to `$(OUTPUT)/demo/test_samples`. After that, our model will be run to produce a denoised output: 77 | `$(OUTPUT)/demo/ours_4spp.exr` (linear radiance) and `$(OUTPUT)/demo/ours_4spp.png` 78 | (clamped 8bit rendering). 79 | 80 | In the docker, `$(OUTPUT)` maps to `/sbmc_app/output` by default. Outside 81 | the docker this is mapped to the `output` subfolder of this repository, so 82 | that both data and output persist across runs. 83 | 84 | See below, or have a look at the `Makefile` for more `demo/*` commands you can try. 
85 | 86 | 87 | 88 | ## Docker-less installation and dependencies 89 | 90 | If you just intend to install our library, you can run: 91 | 92 | ```shell 93 | HALIDE_DISTRIB_DIR= python setup.py install 94 | ``` 95 | 96 | from the root of this repo. In any cases the docker file in `dockerfiles` 97 | should help you configure your runtime environment. 98 | 99 | We build on the following dependencies: 100 | 101 | - Halide: our splatting kernel operator is implemented in Halide 102 | . The `setup.py` script looks for the path to the Halide 103 | distribution root under the environment variable `HALIDE_DISTRIB_DIR`. If this variable 104 | is not defined, the script will prompt you whether to download the Halide 105 | locally. 106 | - Torch-Tools: we use the `ttools` library for PyTorch helpers and our training 107 | and evaluation scripts . This should get installed 108 | automatically when running `python setup.py install`. 109 | 110 | 111 | ## Demo 112 | 113 | We provide a patch to PBRTv2's commit #e6f6334f3c26ca29eba2b27af4e60fec9fdc7a8d 114 | in `pbrt_patches/sbmc_pbrt.diff`. This patch 115 | contains our modification to the renderer to save individual samples to disk. 116 | 117 | 118 | ### Render samples from a PBRTv2 test scene 119 | 120 | To render samples as `.bin` files from a `.pbrt` scene description, use the 121 | `scripts/render_samples.py` script. This script assumes the PBRT scene file 122 | contains only the scene description. It will create the appropriate header 123 | description for the camera, sampler, path-tracer, etc. For an example, try: 124 | 125 | ```shell 126 | make demo/render_samples 127 | ``` 128 | 129 | 130 | ### Generating new random scenes for training 131 | 132 | In the manuscript we described a scene generation procedure that used the 133 | SunCG dataset. Because of the legal issues that were later discovered with 134 | this dataset, we decided to no longer support this source of training scenes. 
135 | 136 | You can still use our custom, outdoor random scenes generator to generate 137 | training data, `scripts/generate_training_data.py`. For an example, run: 138 | 139 | ```shell 140 | make demo/generate_scenes 141 | ``` 142 | 143 | ### Visualizing the image content of .bin sample files. 144 | 145 | We provide a helper script to inspect the content of .bin sample files, 146 | `scripts/visualize_dataset.py`. For instance, to visualize the training data 147 | generated in the previous section, run: 148 | 149 | ```shell 150 | make demo/visualize 151 | ``` 152 | 153 | ### Run pretrained models 154 | 155 | To run a pre-trained model, use `scripts/denoise.py`. The command below runs 156 | our model and that of [Bako2017] on a test image: 157 | 158 | ```shell 159 | make demo/denoise 160 | ``` 161 | 162 | ### Comparisons to previous work 163 | 164 | In the dockerfile, we set up the code from several previous works to facilitate 165 | comparison. We provide our modifications to the original codebases as patch 166 | files in `pbrt_patches/`. The changes are mostly simple modifications to the C++ 167 | code so it compiles with gcc. 168 | 169 | The comparisons include: 170 | * [Sen2011] "On Filtering the Noise from the Random Parameters in Monte Carlo Rendering" 171 | * [Rousselle2012] "Adaptive Rendering with Non-Local Means Filtering" 172 | * [Kalantari2015] "A Machine Learning Approach for Filtering Monte Carlo Noise" 173 | * [Bitterli2016] "Nonlinearly Weighted First-order Regression for Denoising Monte Carlo Renderings" 174 | * [Bako2017] "Kernel-Predicting Convolutional Networks for Denoising Monte Carlo Renderings" 175 | 176 | To run the comparisons: 177 | 178 | ```shell 179 | make demo/render_reference 180 | make demo/comparisons 181 | ``` 182 | 183 | 184 | ### Training a new model 185 | 186 | To train your own model, you can use the 187 | script `scripts/train.py`. 
For instance, 188 | to train our model: 189 | 190 | ```shell 191 | make demo/train 192 | ``` 193 | 194 | Or to train that of Bako et al.: 195 | 196 | ```shell 197 | make demo/train_kpcn 198 | ``` 199 | 200 | Those scripts will also launch a Visdom server to enable you to monitor the 201 | training. In your web browser, to view the plots navigate to . 202 | 203 | 204 | ### Numerical evaluation 205 | 206 | The script `scripts/compute_metrics.py` can be used to 207 | evaluate a set of .exr renderings numerically. It will print out 208 | the averages and save the result to .csv files. 209 | 210 | For example, you can download the renderings we produced for our paper evaluation 211 | and compute the metrics by running: 212 | 213 | ```shell 214 | make demo/eval 215 | ``` 216 | 217 | 218 | ## Precomputed .exr results from our submission 219 | 220 | We provide the pre-rendered .exr results used in our Siggraph submission 221 | on-demand. To download them, run the command below. Please note this data is 222 | rather large (54 GB). 223 | 224 | ```shell 225 | make precomputed_renderings 226 | ``` 227 | 228 | 229 | ## Test scene for evaluation 230 | 231 | You can download the .pbrt scenes we used for evaluation by running: 232 | 233 | ```shell 234 | make test_scenes 235 | ``` 236 | 237 | This will only download the scene description and assets. The images (or 238 | samples) themselves still need to be rendered from this data, using the 239 | `scripts/render_exr.py` and `scripts/render_samples.py` scripts respectively. 
240 | 241 | 242 | ## Samples data: our .bin fileformat 243 | 244 | Some sample data used throughout the `demo` commands can be downloaded using: 245 | 246 | ```shell 247 | make demo_data 248 | ``` 249 | 250 | 251 | ## Pretrained models 252 | 253 | Download our pretrained models with the following command: 254 | 255 | ```shell 256 | make pretrained_models 257 | ``` 258 | -------------------------------------------------------------------------------- /assets/teaser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adobe/sbmc/cf02dfdfabec9ac35fd439880b194a5218dd4119/assets/teaser.png -------------------------------------------------------------------------------- /dockerfiles/cpu-sbmc.dockerfile: -------------------------------------------------------------------------------- 1 | # CPU-Only Docker configuration to run "Sample-Based Monte Carlo 2 | # denoising..." [Gharbi2016] 3 | # 4 | FROM ubuntu:16.04 5 | MAINTAINER Michael Gharbi 6 | 7 | # Download and update required packages 8 | RUN apt-get upgrade 9 | RUN apt-get update && apt-get install -y --no-install-recommends apt-utils 10 | RUN apt-get install -y curl 11 | RUN apt-get install -y \ 12 | build-essential \ 13 | vim \ 14 | git \ 15 | bash \ 16 | liblz4-dev \ 17 | libopenexr-dev \ 18 | bison \ 19 | libomp-dev \ 20 | cmake \ 21 | flex \ 22 | qt5-default \ 23 | libeigen3-dev \ 24 | wget \ 25 | unzip \ 26 | libncurses5-dev \ 27 | liblz4-dev 28 | 29 | # Change default shell 30 | SHELL ["/bin/bash", "-c"] 31 | 32 | # Create directories and copy data 33 | RUN mkdir -p /sbmc_app /sbmc_app/output /sbmc_app/data 34 | COPY pbrt_patches /sbmc_app/patches 35 | 36 | WORKDIR /sbmc_app 37 | 38 | # Download, patch and install PBRTv2 with our changes for data generation. 
39 | RUN git clone https://github.com/mmp/pbrt-v2 pbrt_tmp 40 | RUN cd pbrt_tmp && git checkout e6f6334f3c26ca29eba2b27af4e60fec9fdc7a8d 41 | RUN mv pbrt_tmp/src pbrt 42 | RUN rm -rf pbrt_tmp 43 | # The patch allows to save samples while path-tracing 44 | RUN patch -d pbrt -p1 -i /sbmc_app/patches/sbmc_pbrt.diff 45 | RUN cd pbrt && make -j 4 46 | 47 | # Install a few previous denoising works for comparison ----------------------- 48 | 49 | # [Sen2011] 50 | # "On Filtering the Noise from the Random Parameters in Monte Carlo Rendering" 51 | RUN (wget http://cvc.ucsb.edu/graphics/Papers/Sen2011_RPF/PaperData/RPF-v1.0.zip && \ 52 | unzip RPF-v1.0.zip && \ 53 | mv RPF-v1.0/pbrt-v2-rpf/src 2011_sen_rpf && \ 54 | rm -rf RPF-v1.0* && \ 55 | # Patch to fix compilation errors 56 | patch -d 2011_sen_rpf -p1 -i /sbmc_app/patches/2011_sen_rpf.diff && \ 57 | cd 2011_sen_rpf && make -j 4) || echo "Sen2011 could not be downloaded" 58 | 59 | # [Bitterli2016] 60 | RUN git clone https://github.com/tunabrain/tungsten.git 2016_bitterli_nfor 61 | RUN cd 2016_bitterli_nfor && git checkout 88ea02044dbaf20472a8173b6752460b50c096d8 && rm -rf .git 62 | RUN patch -d 2016_bitterli_nfor -p1 -i /sbmc_app/patches/2016_bitterli_nfor.diff 63 | RUN cd 2016_bitterli_nfor && mkdir build && cd build && cmake .. 
&& make -j 4 64 | # ----------------------------------------------------------------------------- 65 | 66 | 67 | # Install Halide 68 | RUN wget -O halide.tgz https://github.com/halide/Halide/releases/download/v8.0.0/halide-linux-64-gcc53-800-65c26cba6a3eca2d08a0bccf113ca28746012cc3.tgz 69 | RUN tar zvxf halide.tgz 70 | RUN rm -rf halide.tgz 71 | ENV HALIDE_DISTRIB_DIR /sbmc_app/halide 72 | 73 | 74 | # Python Environment ---------------------------------------------------------- 75 | RUN curl -o /sbmc_app/anaconda.sh -O \ 76 | https://repo.anaconda.com/miniconda/Miniconda3-py37_4.8.3-Linux-x86_64.sh && \ 77 | sha256sum /sbmc_app/anaconda.sh && \ 78 | chmod a+x /sbmc_app/anaconda.sh && \ 79 | /sbmc_app/anaconda.sh -b -p /sbmc_app/anaconda 80 | ENV PATH /sbmc_app/anaconda/bin:$PATH 81 | 82 | RUN source activate 83 | RUN conda install pytorch==1.2.0 torchvision==0.4.0 -c pytorch 84 | # RUN conda install pytorch torchvision cudatoolkit=9.2 -c pytorch 85 | RUN pip install --upgrade pip && pip install pytest 86 | # ----------------------------------------------------------------------------- 87 | 88 | # Set the environment variables so that the `demo/*` commands in the Makefile 89 | # point to the right directories. 90 | ENV OUTPUT /sbmc_app/output 91 | ENV DATA /sbmc_app/data 92 | ENV PBRT /sbmc_app/pbrt/bin/pbrt 93 | ENV OBJ2PBRT /sbmc_app/pbrt/bin/obj2pbrt 94 | ENV SEN2011 /sbmc_app/2011_sen_rpf 95 | ENV BITTERLI2016 /sbmc_app/2016_bitterli_nfor 96 | 97 | # Install our code 98 | COPY . /sbmc_app/sbmc 99 | RUN cd sbmc/halide_pytorch && python setup.py install 100 | RUN cd sbmc && python setup.py develop 101 | WORKDIR /sbmc_app/sbmc 102 | -------------------------------------------------------------------------------- /dockerfiles/cuda-sbmc.dockerfile: -------------------------------------------------------------------------------- 1 | # CUDA-capable Docker configuration to run "Sample-Based Monte Carlo 2 | # denoising..." 
[Gharbi2016] 3 | # 4 | # This dockerfile requires `nvidia-docker` check this url: 5 | # or install using 6 | # `make nvidia_docker` 7 | # FROM nvidia/cuda:9.0-devel 8 | FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu16.04 9 | # FROM nvidia/cuda:9.2-cudnn7-devel-ubuntu16.04 10 | MAINTAINER Michael Gharbi 11 | 12 | # Download and update required packages 13 | RUN apt-get update && apt-get install -y --no-install-recommends apt-utils 14 | RUN apt-get install -y \ 15 | build-essential \ 16 | vim \ 17 | git \ 18 | bash \ 19 | liblz4-dev \ 20 | libopenexr-dev \ 21 | curl \ 22 | bison \ 23 | libomp-dev \ 24 | cmake \ 25 | flex \ 26 | qt5-default \ 27 | libeigen3-dev \ 28 | wget \ 29 | unzip \ 30 | libncurses5-dev \ 31 | liblz4-dev 32 | 33 | # Change default shell 34 | SHELL ["/bin/bash", "-c"] 35 | 36 | # Create directories and copy data 37 | RUN mkdir -p /sbmc_app /sbmc_app/output /sbmc_app/data 38 | COPY pbrt_patches /sbmc_app/patches 39 | 40 | WORKDIR /sbmc_app 41 | 42 | # Download, patch and install PBRTv2 with our changes for data generation. 
43 | RUN git clone https://github.com/mmp/pbrt-v2 pbrt_tmp 44 | RUN cd pbrt_tmp && git checkout e6f6334f3c26ca29eba2b27af4e60fec9fdc7a8d 45 | RUN mv pbrt_tmp/src pbrt 46 | RUN rm -rf pbrt_tmp 47 | # The patch allows to save samples while path-tracing 48 | RUN patch -d pbrt -p1 -i /sbmc_app/patches/sbmc_pbrt.diff 49 | RUN cd pbrt && make -j 4 50 | 51 | # Install a few previous denoising works for comparison ----------------------- 52 | 53 | # [Sen2011] 54 | # "On Filtering the Noise from the Random Parameters in Monte Carlo Rendering" 55 | RUN (wget http://cvc.ucsb.edu/graphics/Papers/Sen2011_RPF/PaperData/RPF-v1.0.zip && \ 56 | unzip RPF-v1.0.zip && \ 57 | mv RPF-v1.0/pbrt-v2-rpf/src 2011_sen_rpf && \ 58 | rm -rf RPF-v1.0* && \ 59 | # Patch to fix compilation errors 60 | patch -d 2011_sen_rpf -p1 -i /sbmc_app/patches/2011_sen_rpf.diff && \ 61 | cd 2011_sen_rpf && make -j 4) || echo "Sen2011 could not be downloaded" 62 | 63 | # [Rousselle2012] :requires CUDA 64 | # "Adaptive Rendering with Non-Local Means Filtering" 65 | RUN (wget http://cgg-zwicker.inf.unibe.ch/downloads/nlm-code-data.zip && \ 66 | unzip nlm-code-data.zip && \ 67 | mv nlm-code+data/pbrt-v2-nlm/src 2012_rousselle_nlm && \ 68 | rm -rf nlm-code+data nlm-code-data.zip && \ 69 | # Patch to fix compilation errors 70 | patch -d 2012_rousselle_nlm -p1 -i /sbmc_app/patches/2012_rousselle_nlm.diff && \ 71 | cd 2012_rousselle_nlm && make -j 4 ) || echo "Rousselle2012 could not be downloaded" 72 | 73 | # [Kalantari2015]: requires CUDA. 
74 | # "A Machine Learning Approach for Filtering Monte Carlo Noise" 75 | RUN (wget http://cvc.ucsb.edu/graphics/Papers/SIGGRAPH2015_LBF/PaperData/SIGGRAPH15_LBF_v1.0.zip && \ 76 | unzip SIGGRAPH15_LBF_v1.0.zip && \ 77 | mv SIGGRAPH15_LBF_v1.0/pbrt-v2-lbf/src 2015_kalantari_lbf && \ 78 | mkdir 2015_kalantari_lbf/pretrained && \ 79 | mv SIGGRAPH15_LBF_v1.0/pbrt-v2-lbf/scenes/*.dat 2015_kalantari_lbf/pretrained && \ 80 | rm -rf SIGGRAPH15_LBF_v1.0* && \ 81 | patch -d 2015_kalantari_lbf -p1 -i /sbmc_app/patches/2015_kalantari_lbf.diff && \\ 82 | cd 2015_kalantari_lbf && make -j 4 ) || echo "Kalantari2015 could not be downloaded" 83 | 84 | # [Bitterli2016] 85 | RUN git clone https://github.com/tunabrain/tungsten.git 2016_bitterli_nfor 86 | RUN cd 2016_bitterli_nfor && git checkout 88ea02044dbaf20472a8173b6752460b50c096d8 && rm -rf .git 87 | RUN patch -d 2016_bitterli_nfor -p1 -i /sbmc_app/patches/2016_bitterli_nfor.diff 88 | RUN cd 2016_bitterli_nfor && mkdir build && cd build && cmake .. && make -j 4 89 | # ----------------------------------------------------------------------------- 90 | 91 | 92 | # Install Halide 93 | RUN wget -O halide.tgz https://github.com/halide/Halide/releases/download/v8.0.0/halide-linux-64-gcc53-800-65c26cba6a3eca2d08a0bccf113ca28746012cc3.tgz 94 | RUN tar zvxf halide.tgz 95 | RUN rm -rf halide.tgz 96 | ENV HALIDE_DISTRIB_DIR /sbmc_app/halide 97 | 98 | 99 | # Python Environment ---------------------------------------------------------- 100 | RUN curl -o /sbmc_app/anaconda.sh -O \ 101 | https://repo.anaconda.com/miniconda/Miniconda3-py37_4.8.3-Linux-x86_64.sh && \ 102 | sha256sum /sbmc_app/anaconda.sh && \ 103 | chmod a+x /sbmc_app/anaconda.sh && \ 104 | /sbmc_app/anaconda.sh -b -p /sbmc_app/anaconda 105 | ENV PATH /sbmc_app/anaconda/bin:$PATH 106 | 107 | RUN source activate 108 | RUN conda install pytorch==1.2.0 torchvision==0.4.0 cudatoolkit=10.0 -c pytorch 109 | # RUN conda install pytorch torchvision cudatoolkit=9.2 -c pytorch 110 | RUN 
pip install --upgrade pip && pip install pytest 111 | # ----------------------------------------------------------------------------- 112 | 113 | # Set the environment variables so that the `demo/*` commands in the Makefile 114 | # point to the right directories. 115 | ENV OUTPUT /sbmc_app/output 116 | ENV DATA /sbmc_app/data 117 | ENV PBRT /sbmc_app/pbrt/bin/pbrt 118 | ENV OBJ2PBRT /sbmc_app/pbrt/bin/obj2pbrt 119 | ENV SEN2011 /sbmc_app/2011_sen_rpf 120 | ENV ROUSSELLE2012 /sbmc_app/2012_rousselle_nlm 121 | ENV KALANTARI2015 /sbmc_app/2015_kalantari_lbf 122 | ENV BITTERLI2016 /sbmc_app/2016_bitterli_nfor 123 | 124 | # Install our code 125 | COPY . /sbmc_app/sbmc 126 | RUN cd sbmc/halide_pytorch && python setup.py install 127 | RUN cd sbmc && python setup.py develop 128 | WORKDIR /sbmc_app/sbmc 129 | 130 | # NOTE(mgharbi): 131 | # Command used to compute the patch: diff -uarN pbrt rendernet_pbrt > # sbmc_pbrt.diff 132 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/source/callbacks.rst: -------------------------------------------------------------------------------- 1 | Callbacks 2 | ========= 3 | 4 | .. automodule:: sbmc.callbacks 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. 
For a full 4 | # list see the documentation: 5 | # http://www.sphinx-doc.org/en/master/config 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | # import os 14 | # import sys 15 | # sys.path.insert(0, os.path.abspath('.')) 16 | 17 | 18 | # -- Project information ----------------------------------------------------- 19 | import os 20 | import sys 21 | dirname = os.path.dirname 22 | rootdir = dirname(dirname(dirname(os.path.abspath(__file__)))) 23 | sys.path.insert(0, rootdir) 24 | 25 | project = 'Sample-Based Monte Carlo Denoising using a Kernel-Splatting Network' 26 | copyright = '2019, Michael Gharbi' 27 | author = 'Michael Gharbi' 28 | 29 | # The full version, including alpha/beta/rc tags 30 | import re 31 | with open(os.path.join(rootdir, "sbmc/version.py")) as fid: 32 | try: 33 | __version__, = re.findall( '__version__ = "(.*)"', fid.read() ) 34 | except: 35 | raise ValueError("could not find version number") 36 | 37 | release = __version__ 38 | 39 | 40 | 41 | # -- General configuration --------------------------------------------------- 42 | autodoc_mock_imports = ["torch", "numpy", "pandas", 43 | "halide_ops", "pyexr", "skimage"] 44 | 45 | # Add any Sphinx extension module names here, as strings. They can be 46 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 47 | # ones. 48 | extensions = [ 49 | 'sphinx.ext.autodoc', 50 | 'sphinx.ext.napoleon', 51 | 'sphinx.ext.doctest', 52 | 'sphinx.ext.todo', 53 | 'sphinx.ext.coverage', 54 | 'sphinx.ext.mathjax', 55 | 'sphinx.ext.viewcode', 56 | ] 57 | 58 | # Add any paths that contain templates here, relative to this directory. 
59 | templates_path = ['_templates'] 60 | 61 | # List of patterns, relative to source directory, that match files and 62 | # directories to ignore when looking for source files. 63 | # This pattern also affects html_static_path and html_extra_path. 64 | exclude_patterns = [] 65 | 66 | 67 | # -- Options for HTML output ------------------------------------------------- 68 | 69 | # The theme to use for HTML and HTML Help pages. See the documentation for 70 | # a list of builtin themes. 71 | # 72 | html_theme = 'sphinx_rtd_theme' 73 | 74 | # Add any paths that contain custom static files (such as style sheets) here, 75 | # relative to this directory. They are copied after the builtin static files, 76 | # so a file named "default.css" will overwrite the builtin "default.css". 77 | html_static_path = ['_static'] 78 | -------------------------------------------------------------------------------- /docs/source/datasets.rst: -------------------------------------------------------------------------------- 1 | Datasets 2 | ========= 3 | 4 | .. automodule:: sbmc.datasets 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/source/evaluation.rst: -------------------------------------------------------------------------------- 1 | Evaluation 2 | ========== 3 | 4 | .. automodule:: sbmc.evaluation 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/source/functions.rst: -------------------------------------------------------------------------------- 1 | Functions 2 | ========= 3 | 4 | .. automodule:: sbmc.functions 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. Sample-Based Monte Carlo Denoising using a Kernel-Splatting Network documentation master file, created by 2 | sphinx-quickstart on Wed Sep 18 18:44:04 2019. 
3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Sample-Based Monte Carlo Denoising using a Kernel-Splatting Network's documentation! 7 | =============================================================================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | :caption: Contents: 12 | 13 | callbacks 14 | datasets 15 | evaluation 16 | functions 17 | interfaces 18 | losses 19 | models 20 | modules 21 | rendering 22 | scene_generator 23 | 24 | 25 | 26 | Indices and tables 27 | ================== 28 | 29 | * :ref:`genindex` 30 | * :ref:`modindex` 31 | * :ref:`search` 32 | -------------------------------------------------------------------------------- /docs/source/interfaces.rst: -------------------------------------------------------------------------------- 1 | Interfaces 2 | ========== 3 | 4 | .. automodule:: sbmc.interfaces 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/source/losses.rst: -------------------------------------------------------------------------------- 1 | Losses 2 | ====== 3 | 4 | .. automodule:: sbmc.losses 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/source/models.rst: -------------------------------------------------------------------------------- 1 | Models 2 | ====== 3 | 4 | .. automodule:: sbmc.models 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/source/modules.rst: -------------------------------------------------------------------------------- 1 | Modules 2 | ======= 3 | 4 | .. automodule:: sbmc.modules 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/source/rendering.rst: -------------------------------------------------------------------------------- 1 | Rendering 2 | ========= 3 | 4 | .. 
automodule:: sbmc.rendering 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/source/scene_generator.rst: -------------------------------------------------------------------------------- 1 | Scene Generator 2 | =============== 3 | 4 | Converters 5 | ---------- 6 | 7 | .. automodule:: sbmc.scene_generator.converters 8 | :members: 9 | 10 | Geometry 11 | -------- 12 | 13 | .. automodule:: sbmc.scene_generator.geometry 14 | :members: 15 | 16 | Materials 17 | --------- 18 | 19 | .. automodule:: sbmc.scene_generator.materials 20 | :members: 21 | 22 | Scene 23 | ----- 24 | 25 | .. automodule:: sbmc.scene_generator.scene 26 | :members: 27 | 28 | Textures 29 | -------- 30 | 31 | .. automodule:: sbmc.scene_generator.textures 32 | :members: 33 | 34 | Generators 35 | ---------- 36 | 37 | .. automodule:: sbmc.scene_generator.generators 38 | :members: 39 | 40 | Lights 41 | ------ 42 | 43 | .. automodule:: sbmc.scene_generator.lights 44 | :members: 45 | 46 | Randomizers 47 | ----------- 48 | 49 | .. automodule:: sbmc.scene_generator.randomizers 50 | :members: 51 | 52 | Transforms 53 | ---------- 54 | 55 | .. automodule:: sbmc.scene_generator.xforms 56 | :members: 57 | -------------------------------------------------------------------------------- /halide_pytorch/.gitignore: -------------------------------------------------------------------------------- 1 | halide_pytorch.egg-info 2 | __pycache__ 3 | -------------------------------------------------------------------------------- /halide_pytorch/README.rst: -------------------------------------------------------------------------------- 1 | # halide_pytorch 2 | A set of setuptools extension to allow building Halide ops as Pytorch extensions 3 | 4 | Hopefully this is more `pythonic` than writing a Makefile. 
5 | -------------------------------------------------------------------------------- /halide_pytorch/halide_pytorch/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__ 2 | 3 | from .extension import HalideOp, HalidePyTorchExtension, HalideBuildExtension 4 | -------------------------------------------------------------------------------- /halide_pytorch/halide_pytorch/extension.py: -------------------------------------------------------------------------------- 1 | import os 2 | import platform 3 | import subprocess 4 | import setuptools 5 | from torch.utils.cpp_extension import (BuildExtension, include_paths, library_paths) 6 | 7 | 8 | class HalideOp(object): 9 | def __init__(self, gen_source, name, function_name, cuda=False): 10 | self.gen_source = gen_source 11 | # gen_name 12 | self.name = name 13 | 14 | # function name 15 | self.function_name = function_name 16 | 17 | self.cuda = cuda 18 | 19 | # target 20 | # autoschedule yes/no 21 | # other options? 22 | 23 | def __repr__(self): 24 | return "HalideOp %s -> %s (%s)" % (self.name, self.function_name, self.gen_source) 25 | 26 | 27 | class HalidePyTorchExtension(setuptools.Extension): 28 | """A Halide extension for PyTorch. 29 | 30 | Args: 31 | halide_root(str): path to the root of the Halide distribution. 32 | name(str): name of the extension library. 33 | extra_sources(list of str): additional cpp source files to compile with the 34 | extension. 35 | generators(list of HalideOp): Halide PyTorch operators to compile and 36 | add to the extension: 37 | gen_cxx(str): name of the C++ compiler to use for the Halide generators. 38 | gen_cxxflags(list of str): C++ compiler flags for the Halide generators. 
39 | """ 40 | def __init__(self, halide_root, name, *args, generators=[], 41 | gen_cxx="g++", gen_cxxflags=None, extra_sources=[], **kwargs): 42 | sources = extra_sources 43 | cuda = False 44 | for g in generators: 45 | # Activate cuda in the wrapper whenever we have an op that requires it 46 | cuda = cuda or g.cuda 47 | 48 | print("CUDA?", cuda) 49 | 50 | compile_args = kwargs.get('extra_compile_args', []) 51 | compile_args += ["-std=c++11", "-g"] 52 | if platform.system() == "Darwin": # on osx libstdc++ causes trouble 53 | compile_args += ["-stdlib=libc++"] 54 | kwargs["extra_compile_args"] = compile_args 55 | 56 | include_dirs = kwargs.get('include_dirs', []) 57 | library_dirs = kwargs.get('library_dirs', []) 58 | libraries = kwargs.get('libraries', []) 59 | 60 | include_dirs += include_paths(cuda=cuda) 61 | include_dirs.append(os.path.join(halide_root, "include")) 62 | 63 | if cuda: 64 | libraries.append('cudart') 65 | libraries.append('cuda') 66 | 67 | if platform.system() == 'Windows': 68 | library_dirs += library_paths() 69 | kwargs['library_dirs'] = library_dirs 70 | 71 | libraries = kwargs.get('libraries', []) 72 | libraries.append('c10') 73 | if cuda: 74 | libraries.append('c10_cuda') 75 | libraries.append('torch') 76 | libraries.append('torch_python') 77 | libraries.append('_C') 78 | kwargs['libraries'] = libraries 79 | 80 | kwargs['language'] = 'c++' 81 | 82 | if cuda: 83 | library_dirs += library_paths(cuda=True) 84 | 85 | kwargs['include_dirs'] = include_dirs 86 | kwargs['library_dirs'] = library_dirs 87 | kwargs['libraries'] = libraries 88 | 89 | super(HalidePyTorchExtension, self).__init__(name, sources, *args, **kwargs) 90 | 91 | # Group generators by source file, so we compile those only once 92 | self.generators = {} 93 | self.cuda = cuda 94 | for g in generators: 95 | if not g.gen_source in self.generators.keys(): 96 | self.generators[g.gen_source] = [] 97 | self.generators[g.gen_source].append(g) 98 | 99 | self.gen_cxx = gen_cxx 100 | 
self.gen_cxxflags = self._get_gen_cxxflags(gen_cxxflags, halide_root) 101 | self.gen_ldflags = self._get_gen_ldflags(None) 102 | self.gen_hlsyslibs = self._get_hlsyslibs(None) 103 | self.gen_deps = self._get_gen_deps(None, halide_root) 104 | 105 | def __repr__(self): 106 | return "HalidePyTorchExtension" 107 | 108 | def _get_gen_cxxflags(self, flags, hl_distrib): 109 | if flags is None: 110 | flags =["-O3", "-std=c++11", 111 | "-I", os.path.join(hl_distrib, "include"), 112 | "-I", os.path.join(hl_distrib, "tools"), 113 | "-Wall", "-Werror", "-Wno-unused-function", 114 | "-Wcast-qual", "-Wignored-qualifiers", 115 | "-Wno-comment", "-Wsign-compare", 116 | "-Wno-unknown-warning-option", 117 | "-Wno-psabi"] 118 | return flags 119 | 120 | def _get_gen_ldflags(self, flags): 121 | if flags is None: 122 | flags = ["-ldl", "-lpthread", "-lz"] 123 | return flags 124 | 125 | def _get_hlsyslibs(self, flags): 126 | if flags is None: 127 | # TODO: load from distrib Make config 128 | if platform.system() == 'Darwin': 129 | flags = ["-lz", "-lxml2", "-lm"] 130 | else: # Linux 131 | flags = ["-lz", "-lrt", "-ldl", "-ltinfo", "-lpthread", "-lm"] 132 | return flags 133 | 134 | def _get_gen_deps(self, flags, hl_distrib): 135 | if platform.system() == 'Darwin': 136 | ext = ".dylib" 137 | else: 138 | ext = ".so" 139 | 140 | if flags is None: 141 | flags = [os.path.join(hl_distrib, "bin", "libHalide"+ext), 142 | # os.path.join(hl_distrib, "include", "Halide.h"), 143 | os.path.join(hl_distrib, "tools", "GenGen.cpp")] 144 | 145 | return flags 146 | 147 | 148 | class HalideBuildExtension(BuildExtension): 149 | def _generate_pybind_wrapper(self, path, headers, cuda): 150 | """Synthesizes a .cpp source file with a PyBind wrapper around the Halide 151 | ops. 152 | 153 | Args: 154 | path(str): full path for the synthesized .cpp file. 155 | headers(list of str): list of paths to the Halide PyTorch extension 156 | headers to include and wrap. 157 | cuda(bool): if True, include cuda headers. 
158 | """ 159 | s = "#include \"torch/extension.h\"\n\n" 160 | if cuda: 161 | s += "#define HL_PT_CUDA\n" 162 | s += "#include \"HalidePyTorchHelpers.h\"\n" 163 | for h in headers: 164 | s += "#include \"{}\"\n".format(os.path.splitext(h)[0]+".pytorch.h") 165 | if cuda: 166 | s += "#undef HL_PT_CUDA\n" 167 | 168 | s += "\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n" 169 | for h in headers: 170 | name = os.path.splitext(h)[0] 171 | s += " m.def(\"{}\", &{}_th_, \"PyTorch wrapper of the Halide pipeline {}\");\n".format( 172 | name, name, name) 173 | s += "}\n" 174 | with open(path, 'w') as fid: 175 | fid.write(s) 176 | 177 | def run(self): 178 | if platform.system() == 'Windows': 179 | raise RuntimeError("windows is not supported currently.") 180 | 181 | try: 182 | out = subprocess.check_output(['make', '--version']) 183 | except OSError: 184 | raise RuntimeError( 185 | "Make must be installed to build the following extensions: " + 186 | ", ".join(e.name for e in self.extensions)) 187 | 188 | super(HalideBuildExtension, self).run() 189 | 190 | 191 | def build_extensions(self): 192 | exts = self.extensions 193 | 194 | # Create temporary build directory 195 | build = self.build_temp 196 | os.makedirs(build, exist_ok=True) 197 | 198 | hl_libs = [] 199 | hl_headers = [] 200 | cuda = False 201 | for ext in exts: 202 | if isinstance(ext, HalidePyTorchExtension): 203 | cxx = [ext.gen_cxx] 204 | cxxflags = ext.gen_cxxflags 205 | ldflags = ext.gen_ldflags 206 | hlsyslibs = ext.gen_hlsyslibs 207 | gendeps = ext.gen_deps 208 | 209 | generators = ext.generators 210 | print("building Halide PyTorch extension with generators:", generators) 211 | 212 | # Activate cuda in the wrapper whenever we have an ext that requires it 213 | cuda = cuda or ext.cuda 214 | 215 | for g in generators: 216 | # TODO: test generator names are unique and key is a .cpp file 217 | generator_id = os.path.basename(os.path.splitext(g)[0]) 218 | generator_bin = os.path.join(build, "%s.generator" % 
generator_id) 219 | 220 | cmd = cxx + cxxflags + [g] + gendeps + ["-o", generator_bin] + ldflags + hlsyslibs 221 | 222 | print("building generator", generator_bin) 223 | subprocess.check_call(cmd) 224 | 225 | for gen in generators[g]: 226 | hl_lib = os.path.join(build) 227 | env = os.environ.copy() 228 | op_cuda = gen.cuda 229 | target = "target=host" 230 | if op_cuda: 231 | target += "-cuda-cuda_capability_61-user_context" 232 | # TODO: add linux version 233 | env["DYLD_LIBRARY_PATH"] = "../Halide/bin" 234 | cmd2 = [generator_bin, "-g", gen.name, "-f", 235 | gen.function_name, "-e", 236 | "static_library,h,pytorch_wrapper", 237 | "-o", hl_lib, target, 238 | "auto_schedule=False"] 239 | libname = os.path.join(hl_lib, gen.function_name+".a") 240 | header = gen.function_name+".h" 241 | hl_libs.append(libname) 242 | hl_headers.append(header) 243 | print("building halide operator %s" % (libname)) 244 | subprocess.check_call(cmd2, env=env) 245 | 246 | wrapper_path = os.path.join(build, "wrapper.cpp") 247 | self._generate_pybind_wrapper(wrapper_path, hl_headers, cuda) 248 | 249 | for ext in self.extensions: 250 | ext.extra_objects += hl_libs # add the static op libraries 251 | ext.sources.append(wrapper_path) # and the wrapper's source 252 | 253 | super(HalideBuildExtension, self).build_extensions() 254 | 255 | 256 | -------------------------------------------------------------------------------- /halide_pytorch/halide_pytorch/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.0.0" 2 | -------------------------------------------------------------------------------- /halide_pytorch/setup.py: -------------------------------------------------------------------------------- 1 | """Synthesizes the cpp wrapper code and builds dynamic Python extension.""" 2 | import re 3 | import setuptools 4 | 5 | with open('halide_pytorch/version.py') as fid: 6 | try: 7 | __version__, = re.findall( '__version__ = "(.*)"', 
fid.read() ) 8 | except: 9 | raise ValueError("could not find version number") 10 | 11 | with open("README.rst", "r") as fh: 12 | long_description = fh.read() 13 | 14 | packages = setuptools.find_packages(exclude=["tests"]) 15 | 16 | if __name__ == "__main__": 17 | # Build the Python extension module 18 | # packages = setuptools.find_packages() 19 | packages = ["halide_pytorch"] 20 | setuptools.setup( 21 | name="halide_pytorch", 22 | verbose=True, 23 | url="", 24 | author="Michaël Gharbi", 25 | author_email="mgharbi@adobe.com", 26 | version=__version__, 27 | packages=packages, 28 | license="MIT", 29 | classifiers=[ 30 | "Development Status :: 3 - Alpha", 31 | "Programming Language :: Python :: 3", 32 | "License :: OSI Approved :: MIT License", 33 | "Operating System :: MacOS :: MacOS X", 34 | "Operating System :: POSIX", 35 | ], 36 | ) 37 | -------------------------------------------------------------------------------- /pbrt_patches/sbmc_pbrt.diff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adobe/sbmc/cf02dfdfabec9ac35fd439880b194a5218dd4119/pbrt_patches/sbmc_pbrt.diff -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "wheel", "halide_pytorch"] 3 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | filterwarnings = 3 | ignore::DeprecationWarning 4 | -------------------------------------------------------------------------------- /sbmc/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko 
Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | from .datasets import * 20 | from .models import * 21 | from .interfaces import * 22 | from .callbacks import * 23 | from .version import __version__ 24 | -------------------------------------------------------------------------------- /sbmc/callbacks.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 
19 | """Callbacks used a train time.""" 20 | import torch as th 21 | 22 | import ttools 23 | from ttools.modules.image_operators import crop_like 24 | 25 | 26 | __all__ = ["DenoisingDisplayCallback"] 27 | 28 | 29 | class DenoisingDisplayCallback(ttools.ImageDisplayCallback): 30 | """A callback that periodically displays denoising results. 31 | 32 | Shows a single batch every few training steps, as well as another 33 | set of images during validation. 34 | 35 | See :class:`ttools.ImageDisplayCallback`'s documentation for more info. 36 | """ 37 | def caption(self, batch, fwd_result): 38 | spp = batch["spp"][0].item() 39 | return "vertically: %dspp, ours, target, difference" % spp 40 | 41 | def visualized_image(self, batch, fwd_result): 42 | lowspp = batch["low_spp"].detach() 43 | target = batch["target_image"].detach() 44 | output = fwd_result["radiance"].detach() 45 | 46 | # Make sure images have the same size 47 | lowspp = crop_like(lowspp, output) 48 | target = crop_like(target, output) 49 | 50 | # Assemble a display gallery 51 | diff = (output-target).abs() 52 | data = th.cat([lowspp, output, target, diff], -2) 53 | 54 | # Clip and tonemap 55 | data = th.clamp(data, 0) 56 | data /= 1 + data 57 | data = th.pow(data, 1.0/2.2) 58 | data = th.clamp(data, 0, 1) 59 | 60 | return data 61 | -------------------------------------------------------------------------------- /sbmc/evaluation.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 
"""Helpers to evaluate on the rendering results."""
import os

import numpy as np
import pyexr
import pandas as pd
# NOTE(review): `compare_ssim` was removed from `skimage.measure` in
# scikit-image 0.18; newer versions expose it as
# `skimage.metrics.structural_similarity` — pin the dependency or update.
from skimage.measure import compare_ssim as ssim

import ttools

LOG = ttools.get_logger(__name__)


def compute(ref_folder, output, methods, scenes, pad=21):
    """Compute metrics from .exr images and saves to disk.

    Args:
        ref_folder(str): path to the folder containing reference images.
        output(str): path to the .csv output file.
        methods(list of str): paths to the folders containing the .exr results,
            one for each method to evaluate. Accepts a list of methods or a
            .txt file listing the methods.
        scenes(str, list of str): name of the .exr scenes to include in the
            comparison. Accepts a list of filenames or a .txt file listing the
            filenames.
        pad(int): number of pixels to ignore on each side when computing
            the metrics.

    Raises:
        RuntimeError: if `output` does not end in ".csv".
        ValueError: if a reference image is all zeros.
    """
    # One row per (method, scene) pair; metric columns come from
    # METRIC_LABELS (defined at the bottom of this module).
    scores = pd.DataFrame(columns=["method", "scene", "spp", "valid"] +
                          list(METRIC_LABELS.keys()))

    # Both arguments accept either an explicit list or a single .txt listing.
    scenes = _parse_list_or_txt(scenes)
    methods = _parse_list_or_txt(methods)

    n_scenes = len(scenes)
    n_methods = len(methods)

    LOG.info("Evaluating %d scenes and %d methods", n_scenes, n_methods)

    filepaths = []

    # Fail early on a bad destination before doing any image I/O.
    if not os.path.splitext(output)[-1] == ".csv":
        mess = "Metric computation expects a .csv output path."
        LOG.error(mess)
        raise RuntimeError(mess)

    dirname = os.path.dirname(output)
    os.makedirs(dirname, exist_ok=True)

    for s_idx, scene in enumerate(scenes):
        sname = os.path.splitext(scene)[0]
        filepaths.append([])

        # Load data for the reference; keep RGB only (drop alpha/extra
        # channels if present).
        ref = pyexr.read(os.path.join(ref_folder, scene))[..., :3]
        if ref.sum() == 0:
            # An all-black reference means the render failed; abort.
            raise ValueError(
                "got an all zero image {}/{}".format(ref_folder, scene))
        if pad > 0:  # remove a side portion
            ref = ref[pad:-pad, pad:-pad, :]

        LOG.info("%s", sname)
        for m_idx, m in enumerate(methods):
            # The method folder name encodes the sample count,
            # e.g. "4spp_ours" or plain "8spp" for the noisy input.
            mname = os.path.split(m)[-1]
            mname, spp = _get_spp(mname)
            row = {"method": mname, "scene": sname, "spp": spp}

            LOG.info("  %s %d spp", mname, spp)

            # Load data for the current method
            path = os.path.abspath(os.path.join(m, scene))
            filepaths[s_idx].append(path)
            try:
                im = pyexr.read(path)[..., :3]
            except Exception as e:
                # A missing/corrupt result only invalidates this row;
                # metrics are filled with -1 sentinels.
                LOG.error(e)
                row["valid"] = False
                for k in METRIC_OPS:
                    row[k] = -1
                # NOTE(review): DataFrame.append is deprecated and removed
                # in pandas >= 2.0; pd.concat is the modern replacement.
                scores = scores.append(row, ignore_index=True)
                continue

            if pad > 0:  # remove a side portion
                im = im[pad:-pad, pad:-pad, :]
            if im.sum() == 0:
                # All-black method output: mark invalid rather than scoring
                # a meaningless comparison.
                LOG.warning("got an all zero image {}/{}, "
                            " invalidating the scene".format(m, scene))
                row["valid"] = False
                for k in METRIC_OPS:
                    row[k] = -1
                scores = scores.append(row, ignore_index=True)
                continue

            row["valid"] = True
            for k in METRIC_OPS:
                row[k] = METRIC_OPS[k](im, ref)
            scores = scores.append(row, ignore_index=True)
    scores.to_csv(output)
124 | 125 | Returns: 126 | (pandas.DataFrame): merged tables 127 | """ 128 | 129 | df = None 130 | for idx, path in enumerate(paths): 131 | _df = pd.read_csv(path, index_col=0) 132 | if idx == 0: 133 | df = _df 134 | else: 135 | df = df.append(_df, ignore_index=True) 136 | return df 137 | 138 | 139 | def stats(csv_files, output): 140 | df = _load_csvs(csv_files) 141 | df = _prune_invalid_scenes(df) 142 | 143 | n_tot = df.size 144 | n_invalid = df[df["valid"] == False].size 145 | 146 | # Keep only the valid entries 147 | df = df[df["valid"] == True] 148 | 149 | spps = df["spp"].unique() 150 | methods = df["method"].unique() 151 | 152 | LOG.info("Computing statistics for %d methods with %d distinct spp values", 153 | len(methods), len(spps)) 154 | 155 | mean_df = pd.DataFrame() 156 | std_df = pd.DataFrame() 157 | for spp in spps: 158 | current = df[df["spp"] == spp] 159 | for m in methods: 160 | mdata = current[current["method"] == m] 161 | LOG.info("Method %s, %d elements", m, mdata.size) 162 | row = dict(mdata.mean()) 163 | row["method"] = m 164 | row.pop("valid") 165 | mean_df = mean_df.append(row, ignore_index=True) 166 | 167 | row = dict(mdata.std()) 168 | row["method"] = m 169 | row.pop("valid") 170 | std_df = std_df.append(row, ignore_index=True) 171 | 172 | LOG.info("Averages:\n%s", mean_df) 173 | 174 | if n_invalid > 0: 175 | LOG.warning("%d invalid metrics (out of %d)", n_invalid, n_tot) 176 | 177 | # Save the means to disk 178 | mean_df.to_csv(output) 179 | 180 | return mean_df, std_df 181 | 182 | 183 | def _get_spp(method_name): 184 | """Extracts the number of samples-per-pixel from a method's directory name. 185 | 186 | Args: 187 | method_name(str): name of the method. 
188 | """ 189 | 190 | method_name = method_name.strip() 191 | s = method_name.split("spp_") 192 | if len(s) == 2: 193 | spp = int(s[0]) 194 | method_name = s[1] 195 | else: # no "spp_" prefix, maybe its "spp" 196 | s = method_name.split("spp") 197 | if len(s) != 2 or s[1] != '': 198 | raise ValueError("unexpected spp format for '%s'" % method_name) 199 | spp = int(s[0]) 200 | method_name = "input" 201 | return method_name, spp 202 | 203 | 204 | def _mse(im, ref): 205 | """Mean-squared error between images. 206 | 207 | Args: 208 | im(np.array): image to test. 209 | ref(np.array): reference for the comparison. 210 | 211 | Returns: 212 | (float) error value. 213 | """ 214 | return np.square(im-ref).mean() 215 | 216 | 217 | def _rmse(im, ref, eps=1e-4): 218 | """Relative Mean-squared error between images. 219 | 220 | Args: 221 | im(np.array): image to test. 222 | ref(np.array): reference for the comparison. 223 | eps(float): small number to prevent division by 0. 224 | 225 | Returns: 226 | (float) error value. 227 | """ 228 | diff = (np.square(im-ref) / (np.square(ref) + eps)) 229 | diff = np.ravel(diff) 230 | diff = diff[~np.isnan(diff)] 231 | return diff.mean() 232 | 233 | 234 | def _l1(im, ref): 235 | """Absolute error between images. 236 | 237 | Args: 238 | im(np.array): image to test. 239 | ref(np.array): reference for the comparison. 240 | 241 | Returns: 242 | (float) error value. 243 | """ 244 | return np.abs(im-ref).mean() 245 | 246 | 247 | def _rl1(im, ref, eps=1e-4): 248 | """Relative absolute error between images. 249 | 250 | Args: 251 | im(np.array): image to test. 252 | ref(np.array): reference for the comparison. 253 | eps(float): small number to prevent division by 0. 254 | 255 | Returns: 256 | (float) error value. 257 | """ 258 | return (np.abs(im-ref) / (np.abs(ref) + eps)).mean() 259 | 260 | 261 | def _ssim(im, ref): 262 | """Structural Similarity error (1-SSIM, or DSSIM). 263 | 264 | Args: 265 | im(np.array): image to test. 
266 | ref(np.array): reference for the comparison. 267 | 268 | Returns: 269 | (float) error value. 270 | """ 271 | return 1-ssim(im, ref, multichannel=True) 272 | 273 | 274 | def _parse_list_or_txt(_input): 275 | """Parses a list of string from a .txt file, or list of strings. 276 | 277 | Args: 278 | _input(str or list of str): if a string, interpreted as a path to a 279 | list.txt file containing the actual filenames, straight list 280 | otherwise. 281 | Returns: 282 | (list of str): list of filenames. 283 | """ 284 | if len(_input) == 1 and os.path.splitext(_input[0])[-1] == ".txt": 285 | print("loading from .txt") 286 | with open(_input[0]) as fid: 287 | ret = [] 288 | for line in fid.readlines(): 289 | ret.append(line.strip()) 290 | else: 291 | ret = _input 292 | return ret 293 | 294 | 295 | def _prune_invalid_scenes(scores): 296 | invalid = scores.loc[scores["valid"] == False] 297 | invalid = invalid["scene"].unique() 298 | if len(invalid) > 0: 299 | LOG.warning("%d invalid scenes %s", len(invalid), invalid) 300 | invalid_idx = scores.index[scores["scene"].isin(invalid)] 301 | scores = scores.drop(index=invalid_idx) 302 | return scores 303 | 304 | 305 | METRIC_LABELS = {"mse": "MSE", "rmse": "rMSE", "ssim": "DSSIM", "l1": r"$L_1$", 306 | "relative_l1": r"relative $L_1$"} 307 | 308 | 309 | METRIC_OPS = {"mse": _mse, "rmse": _rmse, "ssim": _ssim, "l1": _l1, 310 | "relative_l1": _rl1} 311 | -------------------------------------------------------------------------------- /sbmc/functions.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 
10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | """Autograd functional extensions that use Halide as backend.""" 20 | import torch as th # need to import PyTorch before the extension 21 | import ttools 22 | 23 | LOG = ttools.get_logger(__name__) 24 | try: # TODO(mgharbi): this is a hack so that Sphinx can compile the doc 25 | from . import halide_ops as ops 26 | except Exception as e: 27 | LOG.error("Halide extension not loaded!\n %s", e) 28 | 29 | 30 | def _is_cuda(*args): 31 | """Returns True is any of the argument is on a CUDA device, False 32 | otherwise.""" 33 | for arg in args: 34 | if arg.is_cuda: 35 | return True 36 | return False 37 | 38 | 39 | class Scatter2Gather(th.autograd.Function): 40 | """Converts (transposes) scatter kernels into gather kernels. 41 | 42 | Kernel weights at (x, y) for offset (dx, dy) (i.e. scatter[., dy, dx, y, 43 | x]) are put at gather[., -dy, -dx, y+dy, x+dx]. 44 | 45 | Args: 46 | data(th.Tensor)[bs, k_h, k_w, h, w]: scatter kernel weights. 47 | 48 | Returns: 49 | (th.Tensor)[bs, k_h, k_w, h, w]: gather kernel weights. 
50 | """ 51 | @staticmethod 52 | def forward(ctx, data): 53 | output = data.new() 54 | output.resize_as_(data) 55 | assert len(data.shape) == 5, "data should be 5d" 56 | if _is_cuda(data): 57 | ops.scatter2gather_cuda_float32(data, output) 58 | else: 59 | ops.scatter2gather_cpu_float32(data, output) 60 | return output 61 | 62 | @staticmethod 63 | def backward(ctx, d_output): 64 | d_data = d_output.new() 65 | d_data.resize_as_(d_output) 66 | _, kh, kw, _, _ = d_data.shape 67 | if _is_cuda(d_output): 68 | ops.scatter2gather_cuda_float32(d_output, d_data) 69 | else: 70 | ops.scatter2gather_cpu_float32(d_output, d_data) 71 | return d_data 72 | 73 | 74 | class KernelWeighting(th.autograd.Function): 75 | """Locally-weighted average of the input values using kernel weights. 76 | 77 | Args: 78 | data(th.Tensor)[bs, c, h, w]: input values to be locally averaged. 79 | weights(th.Tensor)[bs, k_h, k_w, h, w]: kernel weights. k_h, k_w are 80 | the kernel's dimensions. Channels are filtered independently. 81 | 82 | Returns: 83 | output(th.Tensor)[bs, c, h, w]: weighted average of data using weights. 84 | output[., c, y, x] = sum_{dx, dy} weights[., dy, dx, x, y]*data[., c, 85 | y+dy, x+dx]. 
86 | sum_w(th.Tensor)[bs, h, w]: sum of weights per pixel 87 | """ 88 | @staticmethod 89 | def forward(ctx, data, weights): 90 | bs, c, h, w = data.shape 91 | output = data.new() 92 | sum_w = data.new() 93 | output.resize_as_(data) 94 | sum_w.resize_(bs, h, w) 95 | if _is_cuda(data, weights): 96 | ops.kernel_weighting_cuda_float32(data, weights, output, sum_w) 97 | else: 98 | ops.kernel_weighting_cpu_float32(data, weights, output, sum_w) 99 | ctx.save_for_backward(data, weights, sum_w) 100 | return output, sum_w 101 | 102 | @staticmethod 103 | def backward(ctx, d_output, d_sum_w): 104 | data, weights, sum_w = ctx.saved_tensors 105 | d_data = data.new() 106 | d_weights = weights.new() 107 | d_data.resize_as_(data) 108 | d_weights.resize_as_(weights) 109 | if _is_cuda(d_output, d_sum_w): 110 | ops.kernel_weighting_grad_cuda_float32( 111 | data, weights, sum_w, d_output, d_sum_w, d_data, d_weights) 112 | else: 113 | ops.kernel_weighting_grad_cpu_float32( 114 | data, weights, sum_w, d_output, d_sum_w, d_data, d_weights) 115 | return d_data, d_weights 116 | -------------------------------------------------------------------------------- /sbmc/interfaces.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
class SampleBasedDenoiserInterface(ttools.ModelInterface):
    """Training interface for Sample-based Monte Carlo Denoising.

    Args:
        model(th.nn.Module): neural network that implements the denoising
            operation.
        lr(float): learning rate for training.
        cuda(bool): if True, run on CUDA device.
    """

    def __init__(self, model, lr=1e-4, cuda=False):
        super(SampleBasedDenoiserInterface, self).__init__()

        self.device = "cpu"
        self.model = model
        self.loss_fn = losses.TonemappedRelativeMSE()  # training objective
        self.rmse_fn = losses.RelativeMSE()  # monitoring metric only

        if cuda:
            LOG.debug("Using CUDA")
            self.device = "cuda"
            self.model.cuda()
            self.loss_fn.cuda()
            self.rmse_fn.cuda()

        self.optimizer = th.optim.Adam(self.model.parameters(), lr=lr)

    def forward(self, batch):
        """Runs a forward pass of the model.

        Args:
            batch(dict): tensor values are moved to `self.device` in place;
                non-tensor values are left untouched.

        Returns:
            output(dict): the model's output.
        """
        for k in batch:
            # isinstance (rather than an exact __class__ check) so Tensor
            # subclasses are moved to the device as well.
            if not isinstance(batch[k], th.Tensor):
                continue
            batch[k] = batch[k].to(self.device)
        output = self.model(batch)
        return output

    def backward(self, batch, fwd):
        """Runs one optimization step: loss, gradients, clipping, update.

        Args:
            batch(dict): current data batch; "target_image" is the reference.
            fwd(dict): forward output; "radiance" is the denoised image.

        Returns:
            dict: scalar "loss" and "rmse" values for logging.

        Raises:
            RuntimeError: if the loss is NaN or infinite (likely data
                outliers).
        """
        self.optimizer.zero_grad()

        out = fwd["radiance"]
        tgt = crop_like(batch["target_image"], out)  # make sure sizes match

        loss = self.loss_fn(out, tgt)
        loss.backward()

        # Couple checks to pick up on outliers in the data.
        if not np.isfinite(loss.data.item()):
            LOG.error("Loss is infinite, there might be outliers in the data.")
            raise RuntimeError("Infinite loss at train time.")

        if np.isnan(loss.data.item()):
            LOG.error("NaN in the loss, there might be outliers in the data.")
            raise RuntimeError("NaN loss at train time.")

        # Clip exploding gradients.
        clip = 1000
        actual = th.nn.utils.clip_grad_norm_(self.model.parameters(), clip)
        if actual > clip:
            LOG.info("Clipped gradients {} -> {}".format(clip, actual))

        self.optimizer.step()

        with th.no_grad():
            rmse = self.rmse_fn(out, tgt)

        return {"loss": loss.item(), "rmse": rmse.item()}

    def init_validation(self):
        """Initializes the data to measure and display during validation.

        Returns:
            dict: loss, rmse are scalars, n is the number of images evaluated.
        """
        return {"loss": 0.0, "rmse": 0.0, "n": 0}

    def update_validation(self, batch, fwd, running):
        """Updates running statistics for the validation.

        Args:
            batch(dict): current data batch.
            fwd(dict): forward output of the model.
            running(dict): running statistics from `init_validation` or a
                previous call to this method.

        Returns:
            dict: updated running means and image count.
        """
        with th.no_grad():
            out = fwd["radiance"]
            tgt = crop_like(batch["target_image"], out)
            loss = self.loss_fn(out, tgt).item()
            rmse = self.rmse_fn(out, tgt).item()

        # Make sure our statistics account for potentially varying batch size
        b = out.shape[0]

        # Incremental weighted mean: new = old + (b/n) * (batch_mean - old).
        # (The previous update, old - (1/n)*(old - b*loss), is only correct
        # for b == 1 and over-weights larger batches.)
        n = running["n"] + b
        new_loss = running["loss"] + (b / n) * (loss - running["loss"])
        new_rmse = running["rmse"] + (b / n) * (rmse - running["rmse"])

        return {"loss": new_loss, "rmse": new_rmse, "n": n}
Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | """Loss functions and metrics.""" 20 | import torch as th 21 | 22 | 23 | __all__ = ["RelativeMSE", "SMAPE", "TonemappedMSE", "TonemappedRelativeMSE"] 24 | 25 | 26 | class RelativeMSE(th.nn.Module): 27 | """Relative Mean-Squared Error. 28 | 29 | :math:`0.5 * \\frac{(x - y)^2}{y^2 + \epsilon}` 30 | 31 | Args: 32 | eps(float): small number to avoid division by 0. 33 | """ 34 | def __init__(self, eps=1e-2): 35 | super(RelativeMSE, self).__init__() 36 | self.eps = eps 37 | 38 | def forward(self, im, ref): 39 | """Evaluate the metric. 40 | 41 | Args: 42 | im(th.Tensor): image. 43 | ref(th.Tensor): reference. 44 | """ 45 | mse = th.pow(im-ref, 2) 46 | loss = mse/(th.pow(ref, 2) + self.eps) 47 | loss = 0.5*th.mean(loss) 48 | return loss 49 | 50 | 51 | class SMAPE(th.nn.Module): 52 | """Symmetric Mean Absolute error. 53 | 54 | :math:`\\frac{|x - y|} {|x| + |y| + \epsilon}` 55 | 56 | Args: 57 | eps(float): small number to avoid division by 0. 58 | """ 59 | 60 | def __init__(self, eps=1e-2): 61 | super(SMAPE, self).__init__() 62 | self.eps = eps 63 | 64 | def forward(self, im, ref): 65 | # NOTE: the denominator is used to scale the loss, but does not 66 | # contribute gradients, hence the '.detach()' call. 
67 | loss = (th.abs(im-ref) / ( 68 | self.eps + th.abs(im.detach()) + th.abs(ref.detach()))).mean() 69 | 70 | return loss 71 | 72 | 73 | class TonemappedMSE(th.nn.Module): 74 | """Mean-squared error on tonemaped images. 75 | 76 | Args: 77 | eps(float): small number to avoid division by 0. 78 | """ 79 | 80 | def __init__(self, eps=1e-2): 81 | super(TonemappedMSE, self).__init__() 82 | self.eps = eps # avoid division by zero 83 | 84 | def forward(self, im, ref): 85 | im = _tonemap(im) 86 | ref = _tonemap(ref) 87 | loss = th.pow(im-ref, 2) 88 | loss = 0.5*th.mean(loss) 89 | return loss 90 | 91 | 92 | class TonemappedRelativeMSE(th.nn.Module): 93 | """Relative mean-squared error on tonemaped images. 94 | 95 | Args: 96 | eps(float): small number to avoid division by 0. 97 | """ 98 | def __init__(self, eps=1e-2): 99 | super(TonemappedRelativeMSE, self).__init__() 100 | self.eps = eps # avoid division by zero 101 | 102 | def forward(self, im, ref): 103 | im = _tonemap(im) 104 | ref = _tonemap(ref) 105 | mse = th.pow(im-ref, 2) 106 | loss = mse/(th.pow(ref, 2) + self.eps) 107 | loss = 0.5*th.mean(loss) 108 | return loss 109 | 110 | 111 | def _tonemap(im): 112 | """Helper Reinhards tonemapper. 113 | 114 | Args: 115 | im(th.Tensor): image to tonemap. 116 | 117 | Returns: 118 | (th.Tensor) tonemaped image. 119 | """ 120 | im = th.clamp(im, min=0) 121 | return im / (1+im) 122 | -------------------------------------------------------------------------------- /sbmc/scene_generator/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 
10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | from .scene import * 20 | from .geometry import * 21 | from .lights import * 22 | from .xforms import * 23 | from .converters import * 24 | from .materials import * 25 | from .randomizers import * 26 | from .generators import * 27 | -------------------------------------------------------------------------------- /sbmc/scene_generator/converters.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 
19 | """Helpers for file format conversion.""" 20 | 21 | import os 22 | import re 23 | import uuid 24 | import subprocess 25 | 26 | import ttools 27 | 28 | __all__ = ["ObjConverter"] 29 | 30 | 31 | LOG = ttools.get_logger(__name__) 32 | 33 | 34 | OBJ_RE = re.compile(r'^# Name\s*"(?P.*)".*$') 35 | """Regular expression for objects parsing.""" 36 | 37 | 38 | MAT_RE = re.compile( 39 | r'.*"float roughness"\s\[(?P[^\]]*)\]\s.*"float index"' 40 | r'\s*\[(?P[^\]]*)\]\s.*"rgb opacity"\s*\[(?P[^\]]*)\].*') 41 | """Regular expression for materials parsing.""" 42 | 43 | 44 | class ObjConverter(): 45 | """A helper class used that converts .obj files to the .pbrt string format. 46 | 47 | Args: 48 | pbrt_converter(str): path to the `obj2pbrt` executable from PBRT. 49 | """ 50 | def __init__(self, pbrt_converter): 51 | self.pbrt_converter = pbrt_converter 52 | 53 | def __call__(self, fname, dst): 54 | """Convert a .obj geometry to the .pbrt format. 55 | 56 | Args: 57 | fname(str): path to the .obj file. 58 | dst(str): path to the .pbrt output. 59 | """ 60 | dirname = os.path.dirname(fname) 61 | basename = os.path.basename(fname) 62 | 63 | # OBJ companion material if any 64 | mtl_file = os.path.splitext(basename)[0] + ".mtl" 65 | 66 | _split_material_groups(fname, os.path.join(dst, basename)) 67 | 68 | pbrt_file = basename.replace(".obj", ".pbrt") 69 | 70 | cwd = os.getcwd() 71 | os.chdir(dst) 72 | 73 | if not os.path.exists(mtl_file): 74 | os.symlink(os.path.join(dirname, mtl_file), mtl_file) 75 | 76 | subprocess.check_output([self.pbrt_converter, basename, pbrt_file]) 77 | 78 | objects = _split_pbrt_objects(pbrt_file) 79 | 80 | os.remove(basename) 81 | os.remove(mtl_file) 82 | 83 | os.chdir(cwd) 84 | return objects 85 | 86 | 87 | def _split_pbrt_objects(in_f): 88 | """Given an input .pbrt object file, splits the objects by material groups. 89 | 90 | This functions creates new .pbrt files identified by a unique material id 91 | alongside the original .pbrt file. 
It returns the path to the .pbrt files 92 | generated, as well as the corresponding materials. 93 | 94 | Args: 95 | in_f(str): path to the input .pbrt geometry. 96 | 97 | Returns: 98 | objects(dict of ) 99 | """ 100 | object_idx = 0 101 | objects = [] 102 | 103 | with open(in_f) as fid: 104 | line = fid.readline() 105 | while line: # parse the .pbrt file 106 | matches = OBJ_RE.match(line) 107 | if matches: # We have a new object 108 | # Ignore header, comments, etc 109 | while (not line.startswith("Material") or 110 | line.startswith("Shape")): 111 | line = fid.readline() 112 | 113 | # Parse material definition if there is one 114 | if line.startswith("Material"): 115 | mat_ = OBJMaterial(line) 116 | else: 117 | LOG.error("Found no material.") 118 | mat_ = OBJMaterial("") 119 | 120 | # Seek to the shape definition 121 | while not line.startswith("Shape"): 122 | line = fid.readline() 123 | 124 | # Write a new geometry file for each material group 125 | new_f = os.path.splitext( 126 | in_f)[0] + "%s_object%04d.pbrt" % (mat_.id, object_idx) 127 | with open(new_f, 'w') as new_fid: 128 | # Define the material 129 | new_fid.write('AttributeBegin\n') 130 | new_fid.write('NamedMaterial "%s"\n' % mat_.id) 131 | 132 | # Write the geometry content 133 | while not line.strip() == "AttributeEnd": 134 | new_fid.write(line) 135 | line = fid.readline() 136 | 137 | # write the AttributeEnd command 138 | new_fid.write(line) 139 | 140 | object_idx += 1 141 | 142 | # Store the path to the geometry file and the material 143 | # definition 144 | obj_ = PBRTObject(new_f, mat_) 145 | 146 | # Save the object 147 | objects.append(obj_) 148 | 149 | line = fid.readline() 150 | 151 | return objects 152 | 153 | 154 | def _split_material_groups(src, dst): 155 | """Splits .obj material groups. 156 | 157 | This functions adds matgroup tags so we can split the geometry per material 158 | group. 159 | 160 | Args: 161 | src(str): path to source .obj file. 162 | dst(str): path to target .obj file. 
163 | """ 164 | matre = re.compile(r'^usemtl\s*(?P.*)\s*$') 165 | geomre = re.compile(r'^g\s*(?P.*)\s*$') 166 | 167 | basename = os.path.splitext(os.path.basename(src))[0] 168 | 169 | with open(dst, 'w') as dst_fid: 170 | last_geom = None # Beginning of the file: no geometry found yet 171 | with open(src) as src_fid: 172 | for line in src_fid.readlines(): 173 | geometry_match = geomre.match(line) 174 | 175 | if geometry_match: 176 | # We found a new geometry group, memory it 177 | last_geom = geometry_match.group("geo") 178 | else: 179 | material_match = matre.match(line) 180 | if material_match: 181 | matname = material_match.group("mat") 182 | 183 | if last_geom is None: 184 | # not geometry: global material 185 | name = "g %s@%s\n" % (basename, matname) 186 | else: 187 | # we had a geometry, apply material to id 188 | name = "g %s@%s\n" % (last_geom, matname) 189 | dst_fid.write(name) 190 | dst_fid.write(line) 191 | 192 | 193 | class OBJMaterial(): 194 | """Holds a material parsed from an OBJ description string. 195 | 196 | Args: 197 | line(str): string to parse. 
198 | """ 199 | def __init__(self, line): 200 | if line == "": # defalt material if there is nothing to be parsed 201 | self.roughness = 0.5 202 | self.index = 1.0 203 | self.opacity = 1.0 204 | else: 205 | mat = MAT_RE.match(line) 206 | if mat is None: 207 | LOG.error("Parser could not understand OBJ material '%s'", 208 | line) 209 | raise RuntimeError( 210 | "Parser could not understand OBJ material '%s'", line) 211 | self.roughness = float(mat.group("roughness")), 212 | self.index = float(mat.group("index")), 213 | self.opacity = min([float(c) for c in 214 | mat.group("opacity").split()]) 215 | self.id = str(uuid.uuid4()).replace("-", "_") 216 | 217 | 218 | class PBRTObject(): 219 | """ 220 | """ 221 | def __init__(self, path, material): 222 | self.path = path 223 | self.material = material 224 | -------------------------------------------------------------------------------- /sbmc/scene_generator/generators.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 
19 | """A collection of random scene generators.""" 20 | import os 21 | 22 | from bridson import poisson_disc_samples as pdisc 23 | import numpy as np 24 | 25 | import ttools 26 | 27 | from .scene import Camera 28 | from .converters import ObjConverter 29 | from . import geometry 30 | from . import randomizers 31 | from . import xforms 32 | 33 | 34 | __all__ = ["OutdoorSceneGenerator"] 35 | 36 | 37 | class SceneGenerator(): 38 | """Base class for the random scene generators. 39 | 40 | Args: 41 | envmaps(list of str): absolute paths to .pfm HDR images to use as 42 | envmaps. 43 | textures(list of str): absolute paths to .tga images used to texture 44 | scene objects. 45 | models(list of str): absolute paths to .obj files containing the 46 | geometry. 47 | pbrt_converter(str): path to the PBRTv2 executable that converts .obj 48 | to the .pbrt text format. 49 | """ 50 | def __init__(self, envmaps, textures, models, pbrt_converter): 51 | self._envmaps = envmaps 52 | self._textures = textures 53 | self._current_textures = [] 54 | self._models = models 55 | self._converter = ObjConverter(pbrt_converter) 56 | self._randomize_textures() 57 | self._log = ttools.get_logger(self.__class__.__name__) 58 | 59 | def __str__(self): 60 | return self.__class__.__name__ 61 | 62 | def _randomize_textures(self): 63 | """Shuffle the available textures. Stores them in 64 | `self._current_textures`. 
65 | """ 66 | if self._textures: 67 | self._current_textures = list( 68 | np.random.choice(self._textures, 69 | size=(min(30, len(self._textures)), ), 70 | replace=False)) 71 | else: 72 | self._current_textures = [] 73 | 74 | 75 | class OutdoorSceneGenerator(SceneGenerator): 76 | """Generates random outdoor scene with a envmap and a ground plane.""" 77 | def _sample_camera(self): 78 | r_cam = np.random.uniform(1.0, 2.5) 79 | theta_cam = np.random.uniform(0, 2*np.pi) 80 | z_cam = np.random.uniform(0.01, 0.1) 81 | cam_fov = np.random.uniform(15, 65) 82 | 83 | cam_up = np.random.uniform(size=(3,)) 84 | cam_pos = np.array([r_cam*np.cos(theta_cam), r_cam*np.sin(theta_cam), 85 | z_cam]) 86 | cam_target = np.random.uniform(0, 1, size=3) 87 | cam_target[2] = np.random.uniform(1., 2.)*z_cam 88 | 89 | cam_params = {"position": list(cam_pos), "target": list(cam_target), 90 | "up": list(cam_up), "fov": cam_fov} 91 | return cam_params 92 | 93 | def _obj_pos(self, cam): 94 | factor = 5 95 | 96 | # Camera's fulcrum 97 | cam_direction = np.array(cam["target"][:2]) - \ 98 | np.array(cam["position"][:2]) 99 | cam_direction /= np.linalg.norm(cam_direction) # normalized 100 | cam_halfangle = 1.1*cam["fov"]/180*np.pi # add 10% for geometry bounds 101 | c, s = np.cos(cam_halfangle), np.sin(cam_halfangle) 102 | rot = np.matrix([[c, -s], [s, c]]) 103 | u1 = factor*np.linalg.inv(rot).dot(cam_direction) 104 | u2 = factor*rot.dot(cam_direction) 105 | xform = np.vstack([u1, u2]).T 106 | 107 | radius = np.random.uniform(0.13, 0.28) 108 | scaled_radius = radius*factor 109 | 110 | # Place object centers 111 | xy = pdisc(width=1, height=1, r=radius/factor) 112 | np.random.shuffle(xy) # randomize order 113 | xx = [x_[0] for x_ in xy] 114 | yy = [x_[1] for x_ in xy] 115 | xy = np.vstack([xx, yy]) 116 | # transform coordinates to the fulcrum of the camera 117 | xy = xform.dot(xy) 118 | 119 | # project along camera direction, reject objects that are too close or 120 | # too far 121 | proj = 
np.ravel(cam_direction.dot(xy)) 122 | keep = np.logical_and(proj > 0.1*scaled_radius, proj < factor) 123 | xy = xy[:, keep] 124 | 125 | # keep max 50 objects 126 | nmax = 50 127 | if xy.shape[1] > nmax: 128 | xy = xy[:, :nmax] 129 | 130 | proj /= proj.max() 131 | 132 | # move origin at camera 133 | xy[0, :] += cam["position"][0] 134 | xy[1, :] += cam["position"][1] 135 | 136 | return xy, scaled_radius, proj 137 | 138 | def sample(self, scn, dst_dir, params=None): 139 | self._log.debug("Sampling new outdoor scene") 140 | self._randomize_textures() 141 | 142 | # Random camera 143 | do_dof = np.random.choice([True, False]) 144 | do_mblur = np.random.choice([True, False]) 145 | cam = self._sample_camera() 146 | 147 | if do_mblur: 148 | cam["shutterclose"] = 1.0 149 | 150 | if do_dof: 151 | aperture = _random_aperture() 152 | else: 153 | aperture = 0.0 154 | 155 | # Sample objects in the fulcrum of the camera 156 | self._log.debug("Sampling object positions") 157 | coords, radius, proj = self._obj_pos(cam) 158 | count = coords.shape[1] 159 | 160 | # Focus on one of the objects 161 | if count > 0: 162 | focus_at = np.random.randint(0, count) 163 | 164 | # Randomizes the number of possible object altitudes 165 | z_layers = np.random.poisson(0.5) + 1 166 | 167 | count_blurred = 0 # Counts the number of objects that have motion blur 168 | self._log.debug("Adding %d objects.", count) 169 | for o_idx in range(count): # Populate the scene 170 | 171 | # If motion blur is activated, maybe blur this object 172 | this_mblur = do_mblur and np.random.choice([True, False]) 173 | if this_mblur: 174 | count_blurred += 1 175 | 176 | # Sample a motion vector 177 | mvec_r = np.random.uniform(0.00, 2)*radius 178 | mvec_dir = np.random.uniform(size=(3,)) 179 | mvec_dir /= np.linalg.norm(mvec_dir) 180 | mvec = mvec_dir*mvec_r 181 | 182 | # Fetch a random object from the library 183 | dst = os.path.join(dst_dir, "geometry") 184 | mdl = np.random.choice(self._models) 185 | pbrt_objects = 
self._converter(mdl, dst) 186 | 187 | # Randomize the scale and position 188 | scl = radius*np.random.exponential(0.5)*np.ones((3,)) 189 | z_idx = np.random.randint(0, z_layers) 190 | altitude = np.random.normal(0.1, 0.2) 191 | position = [coords[0, o_idx], coords[1, o_idx], altitude] 192 | 193 | # Create a ground plane 194 | plane = geometry.Plane(20) 195 | xforms.rotate(plane, [0, 1, 0], 90) 196 | material = randomizers.random_material( 197 | id="floormat", textures_list=self._current_textures) 198 | plane.assign_material(material) 199 | scn.shapes.append(plane) 200 | scn.materials.append(material) 201 | 202 | # Compute the focus distance and update the camera paramters 203 | if do_dof and z_idx == 0 and o_idx == focus_at: 204 | dist = np.linalg.norm( 205 | np.array(cam["position"])-np.array(position)) 206 | if dist > 0: 207 | cam["focaldistance"] = dist 208 | cam["lensradius"] = aperture 209 | 210 | # Obj files may contain multiple pieces, add them all 211 | for obj in pbrt_objects: 212 | geom = geometry.ExternalGeometry(os.path.join("geometry", 213 | obj.path)) 214 | xforms.rotate(geom, np.random.uniform(size=(3,)), 215 | np.random.uniform(0, 360)) 216 | xforms.rotate(geom, np.random.uniform(size=(3,)), 217 | np.random.uniform(0, 360)) 218 | xforms.scale(geom, scl) 219 | xforms.translate(geom, position) 220 | 221 | # Get a material for this piece 222 | material = randomizers.random_material( 223 | id=obj.material.id, textures_list=self._current_textures) 224 | scn.materials.append(material) 225 | 226 | if this_mblur: 227 | xforms.translate(geom, mvec, target="end") 228 | 229 | scn.shapes.append(geom) 230 | 231 | self._log.debug("%s objects have motion blur", count_blurred) 232 | 233 | # Add an envmap 234 | env = randomizers.random_envmap(self._envmaps, nsamples=8) 235 | xforms.rotate(env, [0, 0, 1], np.random.uniform(0, 360)) 236 | scn.lights.append(env) 237 | 238 | # Attach the camera to the scene 239 | scn.camera = Camera(**cam) 240 | 241 | 
self._log.debug("Camera parameters %s. Motion blur? %s DoF? %s", 242 | scn.camera, do_mblur, do_dof) 243 | if do_mblur: 244 | if (scn.camera.shutteropen != 0.0 or 245 | scn.camera.shutterclose != 1.0): 246 | return False 247 | if do_dof: 248 | if (not scn.camera.lensradius > 0.0 or not 249 | scn.camera.focaldistance > 0.0): 250 | return False 251 | 252 | self._log.debug("Generated Outdoor scene") 253 | 254 | return True 255 | 256 | 257 | def _random_aperture(min_=0.001, max_=0.05): 258 | """Sample a camera aperture value, uniformly in log domain). 259 | Args: 260 | min_(float): smallest aperture. 261 | max_(float): largest aperture. 262 | """ 263 | log_aperture = np.random.uniform(np.log(min_), np.log(max_)) 264 | aperture = np.exp(log_aperture) 265 | return aperture 266 | -------------------------------------------------------------------------------- /sbmc/scene_generator/geometry.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 
19 | """Abstraction of geometric primitives to build PBRT scenes.""" 20 | 21 | import numpy as np 22 | 23 | __all__ = ["Sphere", "ExternalGeometry", "Cylinder", "Plane", "TriangleMesh"] 24 | 25 | 26 | class Shape(): 27 | """Base shape. 28 | 29 | Attributes: 30 | xform(): 31 | start_xform(): 32 | end_xform(): 33 | material(): 34 | """ 35 | def __init__(self): 36 | self.xform = None 37 | self.start_xform = None 38 | self.end_xform = None 39 | self.material = None 40 | 41 | def _prefix(self): 42 | """String to prepend the pbrt output with. 43 | 44 | Returns: 45 | (str): prefix string. 46 | """ 47 | s = "AttributeBegin\n" 48 | if self.material is not None: 49 | s += 'NamedMaterial "{}"\n'.format(self.material.id) 50 | if self.xform is not None: 51 | s += self.xform.pbrt() 52 | if self.start_xform is not None: 53 | s += "ActiveTransform StartTime\n" 54 | s += self.start_xform.pbrt() 55 | s += "ActiveTransform All\n" 56 | if self.end_xform is not None: 57 | s += "ActiveTransform EndTime\n" 58 | s += self.end_xform.pbrt() 59 | s += "ActiveTransform All\n" 60 | return s 61 | 62 | def _suffix(self): 63 | """String to append the pbrt output with. 64 | 65 | Returns: 66 | (str): suffix string. 67 | """ 68 | s = "AttributeEnd\n" 69 | return s 70 | 71 | def assign_material(self, mat): 72 | """Assigns a `sbmc.scene_generator.Material` to this object. 73 | 74 | Args: 75 | mat(sbmc.scene_generator.Material): material to attach. 76 | """ 77 | self.material = mat 78 | 79 | def pbrt(self): 80 | """Outputs PBRTv2 string representation. 81 | 82 | Returns: 83 | str: pbrt format. 84 | """ 85 | return "" 86 | 87 | 88 | class Sphere(Shape): 89 | """Sphere primitive. 90 | 91 | Args: 92 | radius(float): sphere radius. 93 | """ 94 | def __init__(self, radius=1): 95 | super(Sphere, self).__init__() 96 | self.radius = radius 97 | 98 | def pbrt(self): 99 | """Outputs PBRTv2 string representation. 100 | 101 | Returns: 102 | str: pbrt format. 
103 | """ 104 | out = super(Sphere, self)._prefix() 105 | out += 'Shape "sphere" "float radius" [{}]\n'.format(self.radius) 106 | out += super(Sphere, self)._suffix() 107 | return out 108 | 109 | 110 | class Cylinder(Shape): 111 | """Cylinder primitive. 112 | 113 | Args: 114 | radius(float): cylinder radius. 115 | height(float): cylinder height. 116 | """ 117 | def __init__(self, radius=1, height=1): 118 | super(Cylinder, self).__init__() 119 | self.radius = radius 120 | self.height = height 121 | 122 | def pbrt(self): 123 | s = super(Cylinder, self)._prefix() 124 | s += 'Shape "cylinder" "float radius" [{}]\n'.format(self.radius) 125 | s += '"float zmin" [{}] "float zmax" [{}]\n'.format( 126 | -self.height*0.5, 0.5*self.height) 127 | s += super(Cylinder, self)._suffix() 128 | return s 129 | 130 | 131 | class ExternalGeometry(Shape): 132 | """External .pbrt geometry that can be included in a scene. 133 | 134 | Args: 135 | path(str): path to the external geometry .pbrt file. 136 | """ 137 | def __init__(self, path): 138 | super(ExternalGeometry, self).__init__() 139 | self.path = path 140 | 141 | def pbrt(self): 142 | s = super(ExternalGeometry, self)._prefix() 143 | s += 'Include "{}"\n'.format(self.path) 144 | s += super(ExternalGeometry, self)._suffix() 145 | return s 146 | 147 | 148 | class TriangleMesh(Shape): 149 | """Generic triangle mesh. 150 | 151 | Args: 152 | indices(np.array[m, 3], int): vertex indices corresponding to 153 | triangles. 154 | points(np.array[n, 3], floats): vertex positions (xyz). 
155 | """ 156 | def __init__(self, indices, points): 157 | super(TriangleMesh, self).__init__() 158 | self.indices = indices 159 | self.points = points 160 | 161 | def pbrt(self): 162 | s = super(TriangleMesh, self)._prefix() 163 | idx = " ".join(map(str, list(self.indices.ravel()))) 164 | points = " ".join(map(str, list(self.points.ravel()))) 165 | s += 'Shape "trianglemesh" "integer indices" [{}]\n'.format(idx) 166 | s += '"point P" [{}]\n'.format(points) 167 | s += super(TriangleMesh, self)._suffix() 168 | return s 169 | 170 | 171 | class Plane(TriangleMesh): 172 | """A 1-square YZ-plane centered at the origin. 173 | Args: 174 | scale(float): scale factor 175 | """ 176 | 177 | def __init__(self, scale=1): 178 | pts = scale*np.array([ 179 | [0, -0.5, -0.5], 180 | [0, -0.5, 0.5], 181 | [0, 0.5, 0.5], 182 | [0, 0.5, -0.5] 183 | ]) 184 | idx = np.array([ 185 | [0, 1, 2], 186 | [2, 3, 0], 187 | ]) 188 | super(Plane, self).__init__(idx, pts) 189 | -------------------------------------------------------------------------------- /sbmc/scene_generator/lights.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 
19 | """Light sources to build PBRT scenes.""" 20 | 21 | __all__ = ["InifiniteLight", "PointLight", "AreaLight"] 22 | 23 | 24 | class Light(): 25 | """Base class for light sources.""" 26 | def __init__(self): 27 | self.xform = None 28 | 29 | def pbrt(self): 30 | """Outputs PBRTv2 string representation. 31 | 32 | Returns: 33 | str: pbrt format. 34 | """ 35 | return "LightSource" 36 | 37 | 38 | class PointLight(Light): 39 | """A point light source. 40 | 41 | Args: 42 | spectrum(None or list of float): RGB intensity of the light. 43 | Defaults to white. 44 | """ 45 | def __init__(self, spectrum=None): 46 | super(PointLight, self).__init__() 47 | if spectrum is None: 48 | spectrum = [1, 1, 1] 49 | self.spectrum = spectrum 50 | 51 | def pbrt(self): 52 | s = "AttributeBegin\n" 53 | if self.xform is not None: 54 | s += self.xform.pbrt() 55 | s += super(PointLight, self).pbrt() 56 | s += ' "point" "rgb I" [{} {} {}]\n'.format(*self.spectrum) 57 | s += "AttributeEnd\n" 58 | return s 59 | 60 | 61 | class InifiniteLight(Light): 62 | """Infinitely distant light source. Useful for envmaps. 63 | 64 | If `mapname` is provided, `spectrum` is overidden by the map's color. 65 | 66 | Args: 67 | spectrum(None or list of float): RGB intensity of the light. 68 | Defaults to white. 69 | mapname(None or str): environment map to use as light. 70 | nsamples(int): number of light samples per pixel. 
71 | """ 72 | def __init__(self, spectrum=None, mapname=None, nsamples=8): 73 | super(InifiniteLight, self).__init__() 74 | self.nsamples = nsamples 75 | self.mapname = None 76 | self.spectrum = None 77 | 78 | if mapname is None: 79 | if spectrum is None: 80 | self.spectrum = [1, 1, 1] 81 | else: 82 | self.spectrum = spectrum 83 | else: 84 | self.mapname = mapname 85 | 86 | def pbrt(self): 87 | s = "AttributeBegin\n" 88 | if self.xform is not None: 89 | s += self.xform.pbrt() 90 | s += super(InifiniteLight, self).pbrt() 91 | s += ' "infinite" ' 92 | if self.mapname is None: 93 | s += '"rgb L" [{} {} {}]'.format(*self.spectrum) 94 | else: 95 | s += '"string mapname" ["{}"]'.format(self.mapname) 96 | s += ' "integer nsamples" [{}]\n'.format(self.nsamples) 97 | s += "AttributeEnd\n" 98 | 99 | return s 100 | 101 | 102 | class AreaLight(Light): 103 | """Geometric area light. 104 | 105 | Args: 106 | geom(sbmc.scene_generator.Shape): shape of the light. 107 | spectrum(None or list of float): RGB intensity of the light. 108 | Defaults to white. 109 | nsamples(int): number of light samples per pixel. 
110 | """ 111 | def __init__(self, geom, spectrum=None, nsamples=8): 112 | super(AreaLight, self).__init__() 113 | self.nsamples = nsamples 114 | self.spectrum = None 115 | self.geom = geom 116 | 117 | if spectrum is None: 118 | self.spectrum = [1, 1, 1] 119 | else: 120 | self.spectrum = spectrum 121 | 122 | def pbrt(self): 123 | s = "AttributeBegin\n" 124 | if self.xform is not None: 125 | s += self.xform.pbrt() 126 | s += 'AreaLightSource "diffuse" ' 127 | s += '"rgb L" [{} {} {}]'.format(*self.spectrum) 128 | s += ' "integer nsamples" [{}]\n'.format(self.nsamples) 129 | s += self.geom.pbrt() 130 | s += "AttributeEnd\n" 131 | return s 132 | -------------------------------------------------------------------------------- /sbmc/scene_generator/randomizers.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | """Functions to generate random scene content.""" 20 | 21 | import copy 22 | import uuid 23 | import numpy as np 24 | from . import (lights, materials, textures) 25 | 26 | 27 | def random_texture(textures_list, gamma=1.0): 28 | """Generates a random texture from a list of texture images. 
29 | Args: 30 | textures_list(list of str): path to the texture images. 31 | gamma(float): gamma correction factor. 32 | """ 33 | texname = np.random.choice(textures_list) 34 | name = str(uuid.uuid4()) 35 | tex = textures.Imagemap(name, "spectrum", texname, gamma=gamma, 36 | udelta=np.random.uniform(0, 1), 37 | vdelta=np.random.uniform(0, 1), 38 | uscale=np.random.uniform(0.1, 20), 39 | vscale=np.random.uniform(0.1, 20)) 40 | return tex 41 | 42 | 43 | def random_envmap(maps, nsamples=8): 44 | """Creates a random environment map light from a list of .pfm images. 45 | 46 | Args: 47 | maps(list of str): paths to the hdr images, in .pfm format. 48 | """ 49 | m = np.random.choice(maps) 50 | light = lights.InifiniteLight(mapname=m, nsamples=nsamples) 51 | return light 52 | 53 | 54 | def random_diffuse_material(id=None, textures_list=None): 55 | """Random diffuse material. 56 | 57 | Args: 58 | id(str): unique identifier for the material. 59 | textures_list(list of str): path to the texture images. 60 | """ 61 | gamma = 1.0 62 | 63 | if textures_list is not None: 64 | tex = random_texture(textures_list, gamma=gamma) 65 | mat = materials.MatteMaterial( 66 | id=id, diffuse_texture=tex, bump_texture=None) 67 | else: 68 | diffuse = list(np.random.uniform(size=(3,))) 69 | mat = materials.MatteMaterial(id=id, diffuse=diffuse) 70 | return mat 71 | 72 | 73 | def random_uber_material(id=None, textures_list=None): 74 | """Random uber material. 75 | 76 | Args: 77 | id(str): unique identifier for the material. 78 | textures_list(list of str): path to the texture images. 
79 | """ 80 | gamma = 1.0 81 | 82 | if textures_list is not None: 83 | tex = random_texture(textures_list, gamma=gamma) 84 | glossy = np.random.uniform(0, 1) 85 | specular = np.random.uniform(0, 1) 86 | rough = np.random.uniform(0, 1) 87 | index = np.random.uniform(1, 1.5) 88 | mat = materials.UberMaterial(id=id, diffuse_texture=tex, 89 | bump_texture=None, 90 | glossy_reflection=glossy, 91 | specular_reflection=specular, 92 | roughness=rough, index=index) 93 | else: 94 | diffuse = list(np.random.uniform(size=(3,))) 95 | mat = materials.UberMaterial(id=id, diffuse=diffuse) 96 | return mat 97 | 98 | 99 | def random_mirror_material(id=None, textures_list=None): 100 | """Random mirror material. 101 | 102 | Args: 103 | id(str): unique identifier for the material. 104 | textures_list(list of str): path to the texture images. 105 | """ 106 | gamma = 1.0 107 | 108 | spec = list(np.random.uniform(size=(3,))) 109 | mat = materials.MirrorMaterial(id=id, specular=spec) 110 | 111 | return mat 112 | 113 | 114 | def random_glass_material(id=None, textures_list=None): 115 | """Random glass material. 116 | 117 | Args: 118 | id(str): unique identifier for the material. 119 | textures_list(list of str): path to the texture images. 120 | """ 121 | gamma = 1.0 122 | reflection = list(np.random.uniform(size=(3,))) 123 | transmission = list(np.random.uniform(size=(3,))) 124 | index = np.random.uniform(1.1, 1.8) 125 | mat = materials.GlassMaterial(id=id, reflection=reflection, 126 | transmission=transmission, bump_texture=None) 127 | return mat 128 | 129 | 130 | def random_metal_material(id=None, textures_list=None): 131 | """Random metal material. 132 | 133 | Args: 134 | id(str): unique identifier for the material. 135 | textures_list(list of str): path to the texture images. 
136 | """ 137 | gamma = 1.0 138 | eta = list(np.random.uniform(size=(3,))) 139 | k = list(np.random.uniform(1, 3, size=(3,))) 140 | rough = np.random.uniform(0, 1) 141 | mat = materials.MetalMaterial(id=id, eta=eta, k=k, 142 | roughness=rough, bump_texture=None) 143 | return mat 144 | 145 | 146 | def random_plastic_material(id=None, textures_list=None): 147 | """Random plastic material. 148 | 149 | Args: 150 | id(str): unique identifier for the material. 151 | textures_list(list of str): path to the texture images. 152 | """ 153 | gamma = 1.0 154 | 155 | specular = list(np.random.uniform(0, 1, size=(3,))) 156 | rough = np.random.uniform(0, 1) 157 | if textures_list is not None: 158 | tex = random_texture(textures_list, gamma=gamma) 159 | mat = materials.PlasticMaterial(id=id, diffuse_texture=tex, 160 | bump_texture=None, specular=specular, 161 | roughness=rough) 162 | else: 163 | diffuse = list(np.random.uniform(size=(3,))) 164 | mat = materials.PlasticMaterial(id=id, roughness=rough, 165 | diffuse=diffuse, bump_texture=None) 166 | return mat 167 | 168 | 169 | def random_substrate_material(id=None, textures_list=None): 170 | """Random substrate material. 171 | 172 | Args: 173 | id(str): unique identifier for the material. 174 | textures_list(list of str): path to the texture images. 
175 | """ 176 | gamma = 1.0 177 | 178 | specular = list(np.random.uniform(0, 1, size=(3,))) 179 | rough = np.random.uniform(0, 1) 180 | rough2 = np.random.uniform(0, 1) 181 | if textures_list is not None: 182 | tex = random_texture(textures_list, gamma=gamma) 183 | mat = materials.SubstrateMaterial(id=id, diffuse_texture=tex, 184 | bump_texture=None, specular=specular, 185 | uroughness=rough, vroughness=rough2) 186 | else: 187 | diffuse = np.random.uniform(size=(3,)) 188 | mat = materials.SubstrateMaterial( 189 | id=id, diffuse=diffuse, uroughness=rough, vroughness=rough2, 190 | bump_texture=None) 191 | return mat 192 | 193 | 194 | def random_material(id=None, textures_list=None): 195 | """Random material. 196 | 197 | With a bias towards simple diffuse materials. 198 | 199 | Args: 200 | id(str): unique identifier for the material. 201 | textures_list(list of str): path to the texture images. 202 | """ 203 | p = np.random.uniform() 204 | p_tex = np.random.uniform() 205 | 206 | if id is None: 207 | id = str(uuid.uuid4()) 208 | 209 | if p_tex < 0.01: # 1% chance uniform 210 | tex = None 211 | else: 212 | tex = textures_list 213 | 214 | if p < 0.05: 215 | return random_mirror_material(id, tex) 216 | if p < 0.1: 217 | return random_glass_material(id, tex) 218 | if p < 0.2: 219 | return random_metal_material(id, tex) 220 | if p < 0.4: 221 | return random_plastic_material(id, tex) 222 | if p < 0.5: 223 | return random_substrate_material(id, tex) 224 | if p < 0.8: 225 | return random_uber_material(id, tex) 226 | 227 | return random_diffuse_material(id, tex) 228 | 229 | 230 | def random_bump(ref_texture, textures_list): 231 | """Random bump map texture. 232 | 233 | The texture is randomly decorrelated from the reference with 50% 234 | probability. 235 | 236 | Args: 237 | ref_texture(sbmc.scene_generator.Texture): reference color texture. 238 | textures_list(list of str): path to the texture images. 
239 | """ 240 | if textures_list is None: 241 | return None 242 | do_bump = np.random.uniform() > 0.5 243 | if not do_bump: 244 | return None 245 | 246 | # randomly decorrelate bump texture 247 | if ref_texture is None or np.random.uniform() > 0.5: 248 | texname = np.random.choice(textures_list) 249 | 250 | name = str(uuid.uuid4()) 251 | bump_scale = np.random.uniform(0.01, 0.05) 252 | bump = textures.Imagemap( 253 | name, "float", 254 | texname, scale=bump_scale, gamma=1.0, trilinear=True) 255 | else: 256 | bump = copy.deepcopy(ref_texture) 257 | bump.type = "float" 258 | return bump 259 | -------------------------------------------------------------------------------- /sbmc/scene_generator/scene.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | """Core scene description classes for the PBRT scene generator.""" 20 | import numpy as np 21 | 22 | import re 23 | import copy 24 | import uuid 25 | import sys 26 | 27 | import ttools 28 | 29 | __all__ = ["Scene", "Renderer", "Camera"] 30 | 31 | 32 | LOG = ttools.get_logger(__name__) 33 | 34 | 35 | class Scene(): 36 | """PBRT Scene definition. 
37 | 38 | y axis is up. 39 | 40 | Args: 41 | renderer(None or Renderer) 42 | camera(None or Camera) 43 | 44 | Attributes: 45 | renderer(Renderer): PBRT renderer config. 46 | lights(list of Light): lights in the scene. 47 | textures(list of Texture): textures used in the scene. 48 | materials(list of Material): materials used in the scene. 49 | shapes(list of Shape): geometry of the scene. 50 | camera(Camera): camera for the rendering. 51 | shapes(list of Shape): scene geometry. 52 | """ 53 | 54 | def __init__(self, renderer=None, camera=None): 55 | self.renderer = renderer 56 | self.lights = [] 57 | self.textures = [] 58 | self.materials = [] 59 | self.shapes = [] 60 | self.camera = camera 61 | 62 | def __repr__(self): 63 | return "Scene with {} shapes, {} materials, {} textures, {}" \ 64 | " lights".format(len(self.shapes), len(self.materials), 65 | len(self.textures), len(self.lights)) 66 | 67 | def pbrt(self): 68 | """Encodes scene as valid PBRT scene description string. 69 | 70 | Args: 71 | renderer_header(bool): if True, writes the renderer parameters. 72 | """ 73 | out = "" 74 | if self.renderer is not None: 75 | out += self.renderer.pbrt() 76 | if self.camera is not None: 77 | out += self.camera.pbrt() 78 | out += "TransformTimes 0.0 1.0\n\n" 79 | out += "WorldBegin\n" 80 | for obj in self.lights + self.textures + self.materials + \ 81 | self.shapes: 82 | out += obj.pbrt() 83 | out += "WorldEnd\n" 84 | return out 85 | 86 | 87 | class Renderer(): 88 | """PBRT renderer definition. 
89 | 90 | Args: 91 | tile_size 92 | spp 93 | gt_spp 94 | path_depth 95 | pixel_filter 96 | height 97 | width 98 | random_crop_w 99 | random_crop_h 100 | integrator 101 | sampler 102 | """ 103 | def __init__(self, **kwargs): 104 | self.tile_size = kwargs.get("tile_size", 128) 105 | self.spp = kwargs.get("spp", 16) 106 | self.gt_spp = kwargs.get("gt_spp", None) 107 | self.path_depth = kwargs.get("path_depth", 5) 108 | self.pixel_filter = kwargs.get("pixel_filter", "box") 109 | self.height = kwargs.get("height", 512) 110 | self.width = kwargs.get("width", 1024) 111 | self.random_crop_w = kwargs.get("random_crop_w", None) 112 | self.random_crop_h = kwargs.get("random_crop_h", None) 113 | self.integrator = kwargs.get("integrator", "pathrendernet") 114 | self.sampler = kwargs.get("sampler", "lowdiscrepancy") 115 | self.filename = kwargs.get("filename", None) 116 | 117 | if self.integrator == "pathrendernet": 118 | if self.path_depth != 5: 119 | LOG.error("Sample-based rendering only supports path_depth=5") 120 | raise RuntimeError("Sample-based rendering only supports" 121 | " path_depth=5, got %d" % self.path_depth) 122 | if self.gt_spp is None: 123 | LOG.error("Sample-based rendering requires `gt_spp` to be set") 124 | raise RuntimeError("Sample-based rendering requires `gt_spp`" 125 | " to be set") 126 | 127 | def pbrt(self): 128 | """Output PBRT scene description string.""" 129 | 130 | out = 'SurfaceIntegrator "%s" "integer maxdepth" [%d]\n' % ( 131 | self.integrator, self.path_depth) 132 | 133 | # Sampler configuration 134 | if self.integrator == "pathrendernet": 135 | out += 'Renderer "rendernet" "integer tilesize" [{}] ' \ 136 | '"integer recordedsamples" [{}]\n'.format(self.tile_size, 137 | self.spp) 138 | out += 'Sampler "%s" "integer pixelsamples" [%d]\n' % ( 139 | self.sampler, self.gt_spp) # The ref has `gt_spp` samples 140 | else: 141 | out += 'Sampler "%s" "integer pixelsamples" [%d]\n' % ( 142 | self.sampler, self.spp) 143 | 144 | out += 'PixelFilter 
"%s"\n' % self.pixel_filter 145 | out += self._film() 146 | 147 | return out 148 | 149 | def _film(self): 150 | film = 'Film "image" "integer xresolution" [%d]' % self.width 151 | film += ' "integer yresolution" [%d]' % self.height 152 | if self.filename is not None: 153 | film += ' "string filename" "%s"\n\n' % self.filename 154 | 155 | # Handle crop requests 156 | if self.random_crop_w is not None: 157 | if self.random_crop_h is None: 158 | LOG.error("When requesting a crop, provide both x and y" 159 | " coordinates.") 160 | raise RuntimeError("Please provide both x/y crop.") 161 | 162 | height = self.height 163 | width = self.width 164 | 165 | # Make sure the crop is within bounds 166 | start_x = 0 167 | if self.random_crop_w < width: 168 | start_x = np.random.randint( 169 | 0, width - self.random_crop_w) * 1.0 / width 170 | 171 | start_y = 0 172 | if self.random_crop_h < height: 173 | start_y = np.random.randint( 174 | 0, height - self.random_crop_h) * 1.0 / height 175 | 176 | # PBRT expects crop coordinates in [0, 1] 177 | float_w = 1.0*self.random_crop_w / width 178 | float_h = 1.0*self.random_crop_h / height 179 | film += ' "float cropwindow" [{} {} {} {}]'.format( 180 | start_x, start_x + float_w, start_y, start_y + float_h) 181 | 182 | film += "\n" 183 | return film 184 | 185 | 186 | class Camera(): 187 | """PBRT camera. 
188 | 189 | Args: 190 | """ 191 | def __init__(self, **kwargs): 192 | self.xform = kwargs.get("xform", None) 193 | self.position = kwargs.get("position", [0, 0, 1]) 194 | self.target = kwargs.get("target", [0, 0, 0]) 195 | self.up = kwargs.get("up", [0, 1, 0]) 196 | self.type = kwargs.get("type", "perspective") 197 | self.fov = kwargs.get("fov", 35) 198 | self.shutteropen = kwargs.get("shutteropen", 0.0) 199 | self.shutterclose = kwargs.get("shutterclose", 0.0) 200 | self.lensradius = kwargs.get("lensradius", 0.0) 201 | self.focaldistance = kwargs.get("focaldistance", 0.0) 202 | 203 | def __repr__(self): 204 | out = "camera\n" 205 | out += " .fov = {}\n".format(self.fov) 206 | out += " .at ({:.1f} {:.1f} {:.1f})\n".format(*self.position) 207 | out += " .looking at ({:.1f} {:.1f} {:.1f})\n".format(*self.target) 208 | out += " .shutter ({:.1f} {:.1f})\n".format(self.shutteropen, 209 | self.shutterclose) 210 | out += " .focus distance {:.1f}\n".format(self.focaldistance) 211 | out += " .lens radius {:.10f}\n".format(self.lensradius) 212 | return out 213 | 214 | def pbrt(self): 215 | """PBRT string representation. 216 | 217 | Returns: 218 | s(str): PBRT formated string. 
219 | """ 220 | if self.xform is None: 221 | s = 'LookAt {} {} {} {} {} {} {} {} {}\n'.format( 222 | *(self.position + self.target + self.up)) 223 | else: 224 | s = self.xform.pbrt() 225 | s += ('Camera "{}" "float fov" [{}] "float shutteropen" [{}] ' 226 | '"float shutterclose" [{}] "float lensradius" [{}]' 227 | ' "float focaldistance" [{}]\n').format(self.type, self.fov, 228 | self.shutteropen, 229 | self.shutterclose, 230 | self.lensradius, 231 | self.focaldistance) 232 | return s 233 | -------------------------------------------------------------------------------- /sbmc/scene_generator/textures.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | """Texture classes for the PBRT scene generator.""" 20 | 21 | import ttools 22 | 23 | 24 | __all__ = ["Imagemap", "Checkerboard"] 25 | 26 | 27 | LOG = ttools.get_logger(__name__) 28 | 29 | 30 | class Texture(): 31 | """Base texture class. 32 | 33 | Args: 34 | id(str): a unique string identifier for this texture. 35 | type(str): a unique string identifier for this texture. 36 | uscale(float): scaling of the texture coordinate in u direction. 
37 | vscale(float): scaling of the texture coordinate in v direction. 38 | udelta(float): shift of the texture coordinate in u direction. 39 | vdelta(float): shift of the texture coordinate in v direction. 40 | """ 41 | def __init__(self, id, type, uscale=1.0, vscale=1.0, udelta=0.0, 42 | vdelta=0.0): 43 | if type not in ["spectrum", "float"]: 44 | LOG.error("texture type should be one of `spectrum`, `float`," 45 | " got %s", type) 46 | raise RuntimeError("Unknown texture type %s", type) 47 | self.id = id 48 | self.type = type 49 | self.uscale = uscale 50 | self.vscale = vscale 51 | self.udelta = udelta 52 | self.vdelta = vdelta 53 | 54 | def pbrt(self): 55 | """Outputs PBRTv2 string representation. 56 | 57 | Returns: 58 | str: pbrt format. 59 | """ 60 | return 'Texture "{}" "{}" '.format(self.id, self.type) 61 | 62 | def suffix(self): 63 | """Generic PBRT parameters shared by all textures. 64 | 65 | Returns: 66 | (str): the pbrt string suffix parameterizing the texture. 67 | """ 68 | return '"float uscale" [{}] "float vscale" [{}] "float udelta"'\ 69 | ' [{}] "float vdelta" [{}]'.format(self.uscale, self.vscale, 70 | self.udelta, self.vdelta) 71 | 72 | 73 | class Imagemap(Texture): 74 | """Texture based on a bitmap image. 75 | 76 | Args: 77 | id(str): a unique string identifier for this texture. 78 | type(str): a unique string identifier for this texture. 79 | filename(str): path to the texture (PBRT2 supports .tga and .bmp only). 80 | scale(float): scale factor to apply to the bitmap. 81 | gamma(float): gamma correction to apply to the bitmap. 82 | trilinear(bool): if True, uses trilinear filtering. 83 | uscale(float): scaling of the texture coordinate in u direction. 84 | vscale(float): scaling of the texture coordinate in v direction. 85 | udelta(float): shift of the texture coordinate in u direction. 86 | vdelta(float): shift of the texture coordinate in v direction. 
87 | """ 88 | def __init__(self, id, type, filename, scale=1.0, gamma=1.0, 89 | trilinear=False, uscale=1.0, vscale=1.0, udelta=1.0, 90 | vdelta=1.0): 91 | super(Imagemap, self).__init__(id, type, uscale=uscale, vscale=vscale, 92 | udelta=udelta, vdelta=vdelta) 93 | self.filename = filename 94 | self.gamma = gamma 95 | self.scale = scale 96 | self.trilinear = "true" if trilinear else "false" 97 | 98 | def pbrt(self): 99 | s = super(Imagemap, self).pbrt() 100 | s += ' "imagemap" "string filename" ["{}"] "float gamma" [{}] ' \ 101 | '"float scale" [{}] "bool trilinear" ["{}"]\n'.format( 102 | self.filename, self.gamma, self.scale, self.trilinear) 103 | s += super(Imagemap, self).suffix() 104 | return s 105 | 106 | 107 | class Checkerboard(Texture): 108 | """A simple parametric checkerboard texture. 109 | 110 | Args: 111 | id(str): a unique string identifier for this texture. 112 | type(str): a unique string identifier for this texture. 113 | uscale(float): scaling of the texture coordinate in u direction. 114 | vscale(float): scaling of the texture coordinate in v direction. 115 | udelta(float): shift of the texture coordinate in u direction. 116 | vdelta(float): shift of the texture coordinate in v direction. 117 | tex1(list of 3 floats or None): first color of the checker. 118 | tex2(list of 3 floats or None): second color of the checker. 
119 | """ 120 | def __init__(self, id, type, uscale=1.0, vscale=1.0, udelta=1.0, 121 | vdelta=1.0, tex1=None, tex2=None): 122 | super(Checkerboard, self).__init__(id, type, uscale=uscale, 123 | vscale=vscale, udelta=udelta, 124 | vdelta=vdelta) 125 | if tex1 is not None: 126 | self.tex1 = tex1 127 | else: 128 | self.tex1 = [0.9, 0.9, 0.9] 129 | if tex2 is not None: 130 | self.tex2 = tex2 131 | else: 132 | self.tex2 = [0.2, 0.2, 0.2] 133 | 134 | def pbrt(self): 135 | s = super(Checkerboard, self).pbrt() 136 | s += ' "checkerboard" "rgb tex1" [{} {} {}] "rgb tex2" ' \ 137 | '[{} {} {}]\n'.format(*self.tex1, *self.tex2) 138 | s += super(Checkerboard, self).suffix() 139 | return s 140 | -------------------------------------------------------------------------------- /sbmc/scene_generator/xforms.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | """Collection of geometric transforms.""" 20 | 21 | __all__ = ["translate", "rotate", "scale", "transform", "look_at"] 22 | 23 | 24 | def translate(obj, t, target="default"): 25 | """Applies a translation to an object. 
26 | 27 | Args: 28 | obj(sbmc.scene_generator.Shape): object to transform. 29 | t(list of 3 floats): x,y,z transform parameters. 30 | target(str): transformation channel to apply the transform to. Should 31 | be one of: 32 | - `default` (the default transform) 33 | - `start` (the transform at the opening of the camera shutter) 34 | - `end` (the transform at the closing of the camera shutter) 35 | 36 | This allows to animate the object for motion blur computation. 37 | """ 38 | _push_xform(obj, Translation(t), target=target) 39 | 40 | 41 | def rotate(obj, axis, angle, target="default"): 42 | """Applies a rotation to an object. 43 | 44 | Args: 45 | obj(sbmc.scene_generator.Shape): object to transform. 46 | axis(list of 3 floats): x,y,z of the rotation axis. 47 | angle(float): rotation angle in degrees. 48 | target(str): transformation channel to apply the transform to. 49 | 50 | Should be one of: 51 | - `default` (the default transform) 52 | - `start` (the transform at the opening of the camera shutter) 53 | - `end` (the transform at the closing of the camera shutter) 54 | 55 | This allows to animate the object for motion blur computation. 56 | """ 57 | _push_xform(obj, Rotation(axis, angle), target=target) 58 | 59 | 60 | def scale(obj, mag, target="default"): 61 | """Applies a rotation to an object. 62 | 63 | Args: 64 | obj(sbmc.scene_generator.Shape): object to transform. 65 | mag(list of 3 floats): x,y,z scaling factor. 66 | target(str): transformation channel to apply the transform to. 67 | 68 | Should be one of: 69 | - `default` (the default transform) 70 | - `start` (the transform at the opening of the camera shutter) 71 | - `end` (the transform at the closing of the camera shutter) 72 | 73 | This allows to animate the object for motion blur computation. 74 | """ 75 | _push_xform(obj, Scale(mag), target=target) 76 | 77 | 78 | def transform(obj, coeffs, target="default"): 79 | """Applies a generic 4x4 transform matrix. 
80 | 81 | Args: 82 | obj(sbmc.scene_generator.Shape): object to transform. 83 | coeffs(list of 16 floats): coefficients of the 4x4 matrix in row-major 84 | ordering. 85 | target(str): transformation channel to apply the transform to. 86 | 87 | Should be one of: 88 | - `default` (the default transform) 89 | - `start` (the transform at the opening of the camera shutter) 90 | - `end` (the transform at the closing of the camera shutter) 91 | 92 | This allows to animate the object for motion blur computation. 93 | """ 94 | _push_xform(obj, Mat44(coeffs), target=target) 95 | 96 | 97 | def look_at(obj, src, dst, up, target="default"): 98 | """Applies a look at transform. 99 | 100 | Args: 101 | obj(sbmc.scene_generator.Shape): object to transform. 102 | src(list of 3 floats): x,y,z center of the `eye` of the look at 103 | transform. 104 | dst(list of 3 floats): x,y,z position of the point looked at. 105 | up(list of 3 floats): x,y,z coordinate of the up vector. 106 | target(str): transformation channel to apply the transform to. 107 | 108 | Should be one of: 109 | - `default` (the default transform) 110 | - `start` (the transform at the opening of the camera shutter) 111 | - `end` (the transform at the closing of the camera shutter) 112 | 113 | This allows to animate the object for motion blur computation. 114 | """ 115 | _push_xform(obj, LookAt(src, dst, up), target=target) 116 | 117 | 118 | class Transform(): 119 | """Abstract transform made by composition of elementary transforms. 120 | 121 | Attributes: 122 | xforms(list of transforms): transforms to compose from back to front. 123 | """ 124 | def __init__(self): 125 | self.xforms = [] 126 | 127 | def push(self, xf): 128 | """Add a transform to the back of the chain. 129 | 130 | Args: 131 | xf(transform): transform to append. 132 | """ 133 | self.xforms.append(xf) 134 | 135 | def pbrt(self): 136 | """Outputs PBRTv2 string representation. 137 | 138 | Returns: 139 | str: pbrt format. 
140 | """ 141 | s = "" 142 | for xf in reversed(self.xforms): 143 | s += xf.pbrt() 144 | return s 145 | 146 | 147 | class Translation(): 148 | """Translation transform. 149 | 150 | Args: 151 | t(list of 3 floats): x,y,z transform parameters. 152 | """ 153 | def __init__(self, t): 154 | self.tx = t[0] 155 | self.ty = t[1] 156 | self.tz = t[2] 157 | 158 | def pbrt(self): 159 | s = "Translate {} {} {}\n".format(self.tx, self.ty, self.tz) 160 | return s 161 | 162 | 163 | class LookAt(): 164 | def __init__(self, src, dst, up): 165 | self.ex = src[0] 166 | self.ey = src[1] 167 | self.ez = src[2] 168 | self.lx = dst[0] 169 | self.ly = dst[1] 170 | self.lz = dst[2] 171 | self.ux = up[0] 172 | self.uy = up[1] 173 | self.uz = up[2] 174 | 175 | def pbrt(self): 176 | s = "LookAt {} {} {} {} {} {} {} {} {}\n".format( 177 | self.ex, self.ey, self.ez, 178 | self.lx, self.ly, self.lz, 179 | self.ux, self.uy, self.uz) 180 | return s 181 | 182 | 183 | class Scale(): 184 | """Scaling transform. 185 | 186 | Args: 187 | s(list of 3 floats): x,y,z scaling factor. 188 | """ 189 | def __init__(self, s): 190 | self.sx = s[0] 191 | self.sy = s[1] 192 | self.sz = s[2] 193 | 194 | def pbrt(self): 195 | s = "Scale {} {} {}\n".format(self.sx, self.sy, self.sz) 196 | return s 197 | 198 | 199 | class Rotation(): 200 | """Rotation transform. 201 | 202 | Args: 203 | axis(list of 3 floats): x,y,z of the rotation axis. 204 | angle(float): rotation angle in degrees. 205 | """ 206 | def __init__(self, axis, angle): 207 | self.x = axis[0] 208 | self.y = axis[1] 209 | self.z = axis[2] 210 | self.angle = angle 211 | 212 | def pbrt(self): 213 | s = "Rotate {} {} {} {}\n".format(self.angle, self.x, self.y, self.z) 214 | return s 215 | 216 | 217 | class Mat44(): 218 | """Generic 4x4 transform matrix. 219 | 220 | Args: 221 | coeffs(list of 16 floats): coefficients of the 4x4 matrix in row-major 222 | ordering. 
223 | """ 224 | def __init__(self, coeffs): 225 | assert len(coeffs) == 16 226 | self.coeffs = coeffs 227 | 228 | def pbrt(self): 229 | s = ("Transform [" + " {}"*16 + "]\n").format(*self.coeffs) 230 | return s 231 | 232 | 233 | def __init_xform(obj, target="default"): 234 | """Initializes the transform stack of an object. 235 | Args: 236 | obj(sbmc.scene_generator.Shape): object to transform. 237 | target(str): transformation channel to apply the transform to. 238 | Should be one of: 239 | - `default` (the default transform) 240 | - `start` (the transform at the opening of the camera shutter) 241 | - `end` (the transform at the closing of the camera shutter) 242 | This allows to animate the object for motion blur computation. 243 | """ 244 | if target == "default": 245 | if obj.xform is None: 246 | obj.xform = Transform() 247 | elif target == "start": 248 | if obj.start_xform is None: 249 | obj.start_xform = Transform() 250 | elif target == "end": 251 | if obj.end_xform is None: 252 | obj.end_xform = Transform() 253 | else: 254 | raise ValueError("unknown xform target {}".format(target)) 255 | 256 | 257 | def _push_xform(obj, t, target="default"): 258 | """Adds a transform to an object's transform stack. 259 | Args: 260 | obj(sbmc.scene_generator.Shape): object to transform. 261 | t(transform): the transform to add. 262 | target(str): transformation channel to apply the transform to. 263 | Should be one of: 264 | - `default` (the default transform) 265 | - `start` (the transform at the opening of the camera shutter) 266 | - `end` (the transform at the closing of the camera shutter) 267 | This allows to animate the object for motion blur computation. 
268 | """ 269 | __init_xform(obj, target=target) 270 | if target == "default": 271 | obj.xform.push(t) 272 | elif target == "start": 273 | obj.start_xform.push(t) 274 | elif target == "end": 275 | obj.end_xform.push(t) 276 | else: 277 | raise ValueError("unknown xform target %s, should be one of " 278 | "`default`, `start`, `end`" % target) 279 | -------------------------------------------------------------------------------- /sbmc/version.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 
__version__ = "0.0.1"
# ----------------------------------------------------------------------------
# scripts/check_docker_version.sh (shell script, reproduced as comments)
# ----------------------------------------------------------------------------
# #!/usr/bin/env bash
#
# DOCKER_MINIMUM_REQUIRED_VERSION=19.03
#
# # Copied from https://stackoverflow.com/a/24067243
# function version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
#
# current_docker_version=`docker version --format '{{.Server.Version}}'`
# if version_gt $DOCKER_MINIMUM_REQUIRED_VERSION $current_docker_version; then
#     echo "Docker version >=${DOCKER_MINIMUM_REQUIRED_VERSION} required for for the --gpus option."
# fi
# ----------------------------------------------------------------------------
# scripts/compute_metrics.py
# ----------------------------------------------------------------------------
# #!/usr/bin/env python
# encoding: utf-8
# Sample-based Monte Carlo Denoising using a Kernel-Splatting Network
# Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand
# Siggraph 2019
#
# Copyright (c) 2019 Michaël Gharbi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute metrics from a set of .exr images."""
import os
import argparse

from sbmc import evaluation


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("ref", help="path to the root of the reference .exr"
                        " files")
    parser.add_argument("output", help="path to store the stats")
    parser.add_argument("--methods", nargs="+", help="list of methods to"
                        " compare, their folders are expected to sit next to"
                        " 'ref/'")
    parser.add_argument("--scenes", nargs="+", help="list of scenes to"
                        " evaluate")
    parser.add_argument("--pad", type=int, default=21, help="how many pixels"
                        " to remove on each side")
    args = parser.parse_args()

    evaluation.compute(args.ref, args.output, args.methods, args.scenes,
                       pad=args.pad)

    # Derive the stats filename from the metrics output path.
    fname = os.path.splitext(args.output)[0]
    evaluation.stats([args.output], fname + "_stats.csv")
# ----------------------------------------------------------------------------
# scripts/denoise.py
# ----------------------------------------------------------------------------
# #!/usr/bin/env python
# encoding: utf-8
# Sample-based Monte Carlo Denoising using a Kernel-Splatting Network
# Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand
# Siggraph 2019
#
# Copyright (c) 2019 Michaël Gharbi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Denoise an image using a previously trained model."""

import os
import argparse
import shutil
import tempfile
import time
import pyexr

import torch as th
import numpy as np
from torch.utils.data import DataLoader
import skimage.io as skio

import ttools

import sbmc


LOG = ttools.get_logger(__name__)


def _pad(batch, out_, kpcn_mode):
    """Zero-pads the denoised output back to the input spatial size.

    The network's output is spatially smaller than its input, so we pad
    symmetrically to recover the reference width/height.

    Args:
        batch(dict): input batch; provides the reference spatial size.
        out_(th.Tensor): network output to pad.
        kpcn_mode(bool): if True, read the reference size from the KPCN
            input buffer instead of the sample features.

    Returns:
        th.Tensor: the padded output.
    """
    if kpcn_mode:
        pad_h = (batch["kpcn_diffuse_in"].shape[-2] - out_.shape[-2]) // 2
        pad_w = (batch["kpcn_diffuse_in"].shape[-1] - out_.shape[-1]) // 2
    else:
        pad_h = (batch["features"].shape[-2] - out_.shape[-2]) // 2
        pad_w = (batch["features"].shape[-1] - out_.shape[-1]) // 2
    # NOTE: the unused local `pad = max(pad_h, pad_w)` was removed; the pad
    # amounts below are applied per-axis.
    out_ = th.nn.functional.pad(out_, (pad_w, pad_w, pad_h, pad_h))
    return out_


def _split_tiles(batch, max_sz=1024, pad=256):
    """Splits a full-image batch into overlapping spatial tiles.

    Args:
        batch(dict): batch of tensors with spatial trailing dims [..., h, w].
        max_sz(int): maximum tile height/width in pixels.
        pad(int): overlap between neighboring tiles, in pixels, so denoised
            tiles can be cropped and stitched without seams.

    Returns:
        list of (tile, start_y, end_y, start_x, end_x, tilepad) tuples where
        `tilepad` = (pad_top, pad_bottom, pad_left, pad_right) is the overlap
        to crop from the denoised tile before stitching.
    """
    h, w = batch["low_spp"].shape[-2:]
    # Spatial tensors are cropped per-tile; global tensors pass through.
    keys = ["radiance", "features", "kpcn_diffuse_in", "kpcn_specular_in",
            "kpcn_diffuse_buffer", "kpcn_specular_buffer", "kpcn_albedo"]
    unchanged = ["global_features"]
    if h <= max_sz and w <= max_sz:  # no tiling
        tilepad = (0, 0, 0, 0)
        return [(batch, 0, h, 0, w, tilepad)]

    ret = []
    for start_y in range(0, h, max_sz-2*pad):
        pad_y = pad
        pad_y2 = pad
        if start_y == 0:
            pad_y = 0
        end_y = start_y + max_sz
        if end_y > h:
            end_y = h
            pad_y2 = 0
        for start_x in range(0, w, max_sz-2*pad):
            pad_x = pad
            pad_x2 = pad
            end_x = start_x + max_sz
            if start_x == 0:
                pad_x = 0
            if end_x > w:
                end_x = w
                pad_x2 = 0
            b_ = {}
            for k in unchanged:
                if k not in batch.keys():
                    continue
                # BUGFIX: this loop previously had no body besides the
                # `continue`, so global (non-spatial) tensors such as
                # `global_features` were never forwarded to the tiles.
                b_[k] = batch[k]
            for k in keys:
                if k not in batch.keys():
                    continue
                b_[k] = batch[k][..., start_y:end_y, start_x:end_x]
            tilepad = (pad_y, pad_y2, pad_x, pad_x2)
            ret.append((b_, start_y+pad_y, end_y-pad_y2,
                        start_x+pad_x, end_x-pad_x2, tilepad))
    return ret


def main(args):
    """Denoises one folder of .bin sample files with a trained model.

    Args:
        args(Namespace): parsed command-line arguments, see the parser below.

    Raises:
        ValueError: if `args.input` does not exist.
    """
    start = time.time()
    if not os.path.exists(args.input):
        raise ValueError("input {} does not exist".format(args.input))

    # Symlink the input into a scratch dir so the dataset sees the expected
    # one-scene folder layout.
    data_root = os.path.abspath(args.input)
    name = os.path.basename(data_root)
    tmpdir = tempfile.mkdtemp()
    os.symlink(data_root, os.path.join(tmpdir, name))

    LOG.info("Loading model {}".format(args.checkpoint))
    meta_params = ttools.Checkpointer.load_meta(args.checkpoint)

    LOG.info("Setting up dataloader")
    data_params = meta_params["data_params"]
    if args.spp:
        data_params["spp"] = args.spp
    data = sbmc.FullImagesDataset(tmpdir, **data_params)
    dataloader = DataLoader(data, batch_size=1, shuffle=False, num_workers=0)

    LOG.info("Denoising input with {} spp".format(data_params["spp"]))

    kpcn_mode = meta_params["kpcn_mode"]
    if kpcn_mode:
        LOG.info("Using [Bako2017] denoiser.")
        model = sbmc.KPCN(data.num_features)
    else:
        model = sbmc.Multisteps(data.num_features, data.num_global_features)

    model.train(False)
    device = "cpu"
    cuda = th.cuda.is_available()
    if cuda:
        LOG.info("Using CUDA")
        model.cuda()
        device = "cuda"

    checkpointer = ttools.Checkpointer(args.checkpoint, model, None)
    extras, meta = checkpointer.load_latest()
    LOG.info("Loading latest checkpoint {}".format(
        "failed" if meta is None else "success"))

    elapsed = (time.time() - start) * 1000
    LOG.info("setup time {:.1f} ms".format(elapsed))

    LOG.info("starting the denoiser")
    for scene_id, batch in enumerate(dataloader):
        for k in batch.keys():
            batch[k] = batch[k].to(device)
        scene = os.path.basename(data.scenes[scene_id])

        LOG.info(" scene {}".format(scene))
        tile_sz = args.tile_size
        tile_pad = args.tile_pad
        batch_parts = _split_tiles(batch, max_sz=tile_sz, pad=tile_pad)
        out_radiance = th.zeros_like(batch["low_spp"])

        if cuda:
            th.cuda.synchronize()
        start = time.time()
        for part, start_y, end_y, start_x, end_x, pad_ in batch_parts:
            with th.no_grad():
                out_ = model(part)
            out_ = _pad(part, out_["radiance"], kpcn_mode)
            # Crop the overlap before stitching the tile into the canvas.
            out_ = out_[..., pad_[0]:out_.shape[-2] -
                        pad_[1], pad_[2]:out_.shape[-1]-pad_[3]]
            out_radiance[..., start_y:end_y, start_x:end_x] = out_
        if cuda:
            th.cuda.synchronize()
        elapsed = (time.time() - start)*1000
        LOG.info(" denoising time {:.1f} ms".format(elapsed))

        out_radiance = out_radiance[0, ...].cpu().numpy().transpose([1, 2, 0])
        outdir = os.path.dirname(args.output)
        os.makedirs(outdir, exist_ok=True)
        pyexr.write(args.output, out_radiance)

        # Also save a tonemapped .png preview next to the .exr.
        png = args.output.replace(".exr", ".png")
        skio.imsave(png, (np.clip(out_radiance, 0, 1)*255).astype(np.uint8))
    shutil.rmtree(tmpdir)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, required=True,
                        help="folder containing the sample .bin files.")
    parser.add_argument('--checkpoint', type=str, required=True,
                        help="folder containing the model checkpoint.")
    parser.add_argument('--output', type=str, required=True,
                        help="output destination.")
    parser.add_argument('--spp', type=int,
                        help="number of samples to use as input.")
    # BUGFIX: these two flags were parsed without `type=int`, so passing
    # them on the command line yielded strings, breaking the tile
    # arithmetic in `_split_tiles`.
    parser.add_argument("--tile_size", type=int, default=1024,
                        help="We process in tiles to limit GPU memory usage."
                        " This is the tile size.")
    parser.add_argument("--tile_pad", type=int, default=256,
                        help="We process in tiles to limit GPU memory usage."
                        " This is the padding around tiles, for overlapping"
                        " tiles.")
    args = parser.parse_args()
    ttools.set_logger(True)
    main(args)
# ----------------------------------------------------------------------------
# scripts/denoise_nfor.py
# ----------------------------------------------------------------------------
# #!/usr/bin/env python
# encoding: utf-8
# Sample-based Monte Carlo Denoising using a Kernel-Splatting Network
# Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand
# Siggraph 2019
#
# Copyright (c) 2019 Michaël Gharbi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
20 | """Denoise rendered .bin samples using [Bitterli2016] NFOR.""" 21 | import argparse 22 | import os 23 | import subprocess 24 | import shutil 25 | import tempfile 26 | 27 | import numpy as np 28 | import pyexr 29 | 30 | import ttools 31 | 32 | import sbmc 33 | 34 | LOG = ttools.get_logger(__name__) 35 | 36 | 37 | def bins2exr(data_dir, output, spp): 38 | batch_size = 1 39 | data = sbmc.FullImagesDataset( 40 | data_dir, mode=sbmc.TilesDataset.RAW_MODE, spp=spp) 41 | 42 | if len(data.scenes) != 1: 43 | LOG.error("Expected a single scene, got %d", len(data.scenes)) 44 | raise RuntimeError("Invalid number of scenes in .bin folder.") 45 | 46 | im = data[0] 47 | 48 | os.makedirs(output, exist_ok=True) 49 | 50 | exr_data = {} 51 | half_spp = spp // 2 52 | 53 | # Keys to features 54 | labels = data.tiles_dset.labels 55 | albedo_k = labels.index("albedo_first_r") 56 | diffuse_k = labels.index("diffuse_r") 57 | specular_k = labels.index("specular_r") 58 | normal_k = labels.index("normal_first_x") 59 | depth_k = labels.index("depth_first") 60 | visibility_k = labels.index("visibility") 61 | 62 | df = im['features'] 63 | 64 | diffuse_ = df[:, diffuse_k:diffuse_k+3].transpose([2, 3, 1, 0]) 65 | specular_ = df[:, specular_k:specular_k+3].transpose([2, 3, 1, 0]) 66 | specular_[specular_ < 0.0] = 0 67 | diffuse_[diffuse_ < 0.0] = 0 68 | radiance_ = diffuse_ + specular_ 69 | radiance_[radiance_ < 0.0] = 0 70 | exr_data['color'] = radiance_.mean(-1) 71 | exr_data['colorA'] = radiance_[..., :half_spp].mean(-1) 72 | exr_data['colorB'] = radiance_[..., half_spp:].mean(-1) 73 | # MC estimate's variance = 1/n * var 74 | exr_data['colorVariance'] = radiance_.var(-1) / spp 75 | exr_data['diffuse'] = diffuse_.mean(-1) 76 | exr_data['diffuseA'] = diffuse_[..., :half_spp].mean(-1) 77 | exr_data['diffuseB'] = diffuse_[..., half_spp:].mean(-1) 78 | # MC estimate's variance = 1/n * var 79 | exr_data['diffuseVariance'] = diffuse_.var(-1) / spp 80 | exr_data['specular'] = specular_.mean(-1) 81 
| exr_data['specularA'] = specular_[..., :half_spp].mean(-1) 82 | exr_data['specularB'] = specular_[..., half_spp:].mean(-1) 83 | # MC estimate's variance = 1/n * var 84 | exr_data['specularVariance'] = specular_.var(-1) / spp 85 | 86 | albedo_ = df[:, albedo_k:albedo_k+3].transpose([2, 3, 1, 0]) 87 | exr_data['albedo'] = albedo_.mean(-1) 88 | exr_data['albedoA'] = albedo_[..., :half_spp].mean(-1) 89 | exr_data['albedoB'] = albedo_[..., half_spp:].mean(-1) 90 | exr_data['albedoVariance'] = albedo_.var(-1) / spp 91 | 92 | normal_ = df[:, normal_k:normal_k+3].transpose([2, 3, 1, 0]) 93 | exr_data['normal'] = normal_.mean(-1) 94 | exr_data['normalA'] = normal_[..., :half_spp].mean(-1) 95 | exr_data['normalB'] = normal_[..., half_spp:].mean(-1) 96 | exr_data['normalVariance'] = normal_.var(-1) / spp 97 | 98 | depth_ = df[:, depth_k:depth_k+1].transpose([2, 3, 1, 0]) 99 | depth_ = np.tile(depth_, [1, 1, 3, 1]) 100 | scene_radius = im["scene_radius"] 101 | depth_[depth_ < 0.0] = 0 102 | depth_ /= scene_radius 103 | exr_data['depth'] = depth_.mean(-1) 104 | exr_data['depthA'] = depth_[..., :half_spp].mean(-1) 105 | exr_data['depthB'] = depth_[..., half_spp:].mean(-1) 106 | exr_data['depthVariance'] = depth_.var(-1) / spp 107 | 108 | for k in exr_data.keys(): 109 | LOG.debug(" %s %.2f --- %.2f (mean = %.2f)", k, exr_data[k].min(), 110 | exr_data[k].max(), exr_data[k].mean()) 111 | pyexr.write(os.path.join(output, k + ".exr"), exr_data[k]) 112 | 113 | 114 | def main(args): 115 | ttools.set_logger(args.verbose) 116 | args.denoiser_exe = os.path.abspath(args.denoiser_exe) 117 | 118 | # Create a working directory for the temporary output 119 | LOG.debug("Saving temporary data to: %s.", args.tmp_dir) 120 | os.makedirs(args.tmp_dir, exist_ok=True) 121 | os.chdir(args.tmp_dir) 122 | 123 | scene_name = os.path.basename(args.scene) 124 | 125 | # Link folder so that the Dataset class gets the expected folder structure, 126 | # with one scene 127 | bins_folder = 
os.path.join(args.tmp_dir, "bins") 128 | os.makedirs(bins_folder, exist_ok=True) 129 | try: 130 | os.symlink(os.path.abspath(args.scene), 131 | os.path.join(bins_folder, scene_name)) 132 | except FileExistsError as e: 133 | LOG.warning("scene path in %s already exists.", bins_folder) 134 | 135 | # Convert .bin to .exr in the format expected by NFOR 136 | exr_folder = os.path.join(args.tmp_dir, "exr_channels") 137 | bins2exr(bins_folder, exr_folder, args.spp) 138 | 139 | LOG.info("Denoising %s", args.scene) 140 | if args.verbose: 141 | stderr = None 142 | else: 143 | stderr = subprocess.DEVNULL 144 | os.makedirs(args.output, exist_ok=True) 145 | subprocess.call([args.denoiser_exe, exr_folder+"/", args.output], 146 | stderr=stderr) 147 | shutil.rmtree(args.tmp_dir) 148 | 149 | 150 | if __name__ == "__main__": 151 | parser = argparse.ArgumentParser() 152 | parser.add_argument( 153 | "denoiser_exe", help="path to Tungsten's `denoiser` executable.") 154 | parser.add_argument( 155 | "scene", help="path to the folder containing the scene .bin files.") 156 | parser.add_argument("output", help="output path.") 157 | parser.add_argument("--tmp_dir", default=tempfile.mkdtemp(), 158 | help="temporary working directory.") 159 | parser.add_argument("--spp", type=int, default=8, 160 | help="number of samples to save.") 161 | parser.add_argument("--verbose", dest="verbose", action="store_true", 162 | default=False, help="Use verbose log messages.") 163 | main(parser.parse_args()) 164 | -------------------------------------------------------------------------------- /scripts/download.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 4 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 5 | # Siggraph 2019 6 | # 7 | # Copyright (c) 2019 Michaël Gharbi 8 | # 9 | # Licensed under the Apache License, Version 2.0 
(the "License"); 10 | # you may not use this file except in compliance with the License. 11 | # You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 20 | """Download the .pbrt description and assets for our test scenes.""" 21 | import argparse 22 | import os 23 | import wget 24 | 25 | if __name__ == "__main__": 26 | parser = argparse.ArgumentParser() 27 | parser.add_argument("src", help="url of the file to download.") 28 | parser.add_argument("dst", help="path to the output") 29 | args = parser.parse_args() 30 | args.dst = os.path.abspath(args.dst) 31 | 32 | os.makedirs(os.path.dirname(args.dst), exist_ok=True) 33 | if os.path.exists(args.dst): 34 | print(args.dst, "already exists, skipping download.") 35 | else: 36 | print("Downloading", args.src, "to", args.dst) 37 | wget.download(args.src, args.dst) 38 | -------------------------------------------------------------------------------- /scripts/figures/README: -------------------------------------------------------------------------------- 1 | This code is no longer maintained, but forms the basis of what we used to 2 | generate some of the figures. 
3 | -------------------------------------------------------------------------------- /scripts/figures/figures.mk: -------------------------------------------------------------------------------- 1 | scatter_v_gather_figure: 2 | rm -rf local_output/figures/scatter_v_gather/*.png 3 | python bin/figures/scatter_v_gather.py local_data/test_images/color.jpg \ 4 | --output local_output/figures/scatter_v_gather --outliers --spp 1 5 | 6 | scatter_v_gather_quick: 7 | rm -rf local_output/figures/scatter_v_gather/*.png 8 | python bin/figures/scatter_v_gather.py local_data/test_images/color.jpg \ 9 | --output local_output/figures/scatter_v_gather_quick --outliers \ 10 | --spp 2 --nsteps 10 11 | -------------------------------------------------------------------------------- /scripts/figures/kernels.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import logging 4 | 5 | import numpy as np 6 | import torch as th 7 | from torch.utils.data import DataLoader 8 | from torchvision import transforms 9 | 10 | import ttools 11 | from ttools.modules.image_operators import crop_like 12 | 13 | import rendernet.dataset as dset 14 | import rendernet.modules.preprocessors as pre 15 | import rendernet.modules.models as models 16 | import rendernet.interfaces as interfaces 17 | import rendernet.callbacks as cb 18 | import rendernet.viz as viz 19 | from sbmc.utils import make_variable 20 | 21 | import skimage.io as skio 22 | 23 | log = logging.getLogger("rendernet") 24 | 25 | def main(args): 26 | log.info("Loading model {}".format(args.checkpoint)) 27 | meta_params = ttools.Checkpointer.load_meta(args.checkpoint) 28 | 29 | spp = meta_params["spp"] 30 | use_p = meta_params["use_p"] 31 | use_ld = meta_params["use_ld"] 32 | use_bt = meta_params["use_bt"] 33 | # use_coc = meta_params["use_coc"] 34 | 35 | mode = "sample" 36 | if "DisneyPreprocessor" == meta_params["preprocessor"]: 37 | mode = "disney_pixel" 38 | elif 
"SampleDisneyPreprocessor" == meta_params["preprocessor"]: 39 | mode = "disney_sample" 40 | 41 | log.info("Rendering at {} spp".format(spp)) 42 | 43 | log.info("Setting up dataloader, p:{} bt:{} ld:{}".format(use_p, use_bt, use_ld)) 44 | data = dset.FullImageDataset(args.data, dset.RenderDataset, spp=spp, use_p=use_p, use_ld=use_ld, use_bt=use_bt) 45 | preprocessor = pre.get(meta_params["preprocessor"])(data) 46 | xforms = transforms.Compose([dset.ToTensor(), preprocessor]) 47 | data.transform = xforms 48 | dataloader = DataLoader(data, batch_size=1, 49 | shuffle=False, num_workers=0, 50 | pin_memory=True) 51 | 52 | model = models.get(preprocessor, meta_params["model_params"]) 53 | model.cuda() 54 | model.train(False) 55 | 56 | checkpointer = ttools.Checkpointer(args.checkpoint, model, None) 57 | extras, meta = checkpointer.load_latest() 58 | log.info("Loading latest checkpoint {}".format("failed" if meta is None else "success")) 59 | 60 | for scene_id, batch in enumerate(dataloader): 61 | batch_v = make_variable(batch, cuda=True) 62 | with th.no_grad(): 63 | klist = [] 64 | out_ = model(batch_v, kernel_list=klist) 65 | lowspp = batch["radiance"] 66 | target = batch["target_image"] 67 | out = out_["radiance"] 68 | 69 | cx = 70 70 | cy = 20 71 | c = 128 72 | 73 | target = crop_like(target, out) 74 | lowspp = crop_like(lowspp.squeeze(), out) 75 | lowspp = lowspp[..., cy:cy+c, cx:cx+c] 76 | 77 | lowspp = lowspp.permute(1, 2, 0, 3) 78 | chan, h, w, s = lowspp.shape 79 | lowspp = lowspp.contiguous().view(chan, h, w*s) 80 | 81 | sum_r = [] 82 | sum_w = [] 83 | max_w = [] 84 | maxi = crop_like(klist[-1]["max_w"].unsqueeze(1), out) 85 | kernels = [] 86 | updated_kernels = [] 87 | for k in klist: 88 | kernels.append(th.exp(crop_like(k["kernels"], out)-maxi)) 89 | updated_kernels.append(th.exp(crop_like(k["updated_kernels"], out)-maxi)) 90 | 91 | out = out[..., cy:cy+c, cx:cx+c] 92 | target = target[..., cy:cy+c, cx:cx+c] 93 | updated_kernels = [k[..., cy:cy+c, cx:cx+c] for 
k in updated_kernels] 94 | kernels = [k[..., cy:cy+c, cx:cx+c] for k in kernels] 95 | 96 | u_kernels_im = viz.kernels2im(kernels) 97 | kmean = u_kernels_im.mean(0) 98 | kvar = u_kernels_im.std(0) 99 | 100 | n, h, w = u_kernels_im.shape 101 | u_kernels_im = u_kernels_im.permute(1, 0, 2).contiguous().view(h, w*n) 102 | 103 | fname = os.path.join(args.output, "lowspp.png") 104 | save(fname, lowspp) 105 | fname = os.path.join(args.output, "target.png") 106 | save(fname, target) 107 | fname = os.path.join(args.output, "output.png") 108 | save(fname, out) 109 | fname = os.path.join(args.output, "kernels_gather.png") 110 | save(fname, u_kernels_im) 111 | fname = os.path.join(args.output, "kernels_variance.png") 112 | print(kvar.max()) 113 | save(fname, kvar) 114 | import ipdb; ipdb.set_trace() 115 | break 116 | 117 | def save(fname, im): 118 | os.makedirs(os.path.dirname(fname), exist_ok=True) 119 | im = im.squeeze().cpu() 120 | if len(im.shape) >= 3: 121 | im = im.permute(1, 2, 0) 122 | im = th.clamp(im, 0, 1).numpy() 123 | skio.imsave(fname, im) 124 | 125 | if __name__ == "__main__": 126 | parser = argparse.ArgumentParser() 127 | parser.add_argument("--checkpoint", required=True) 128 | parser.add_argument("--data", required=True) 129 | parser.add_argument("--output", required=True) 130 | args = parser.parse_args() 131 | ttools.set_logger(True) 132 | main(args) 133 | -------------------------------------------------------------------------------- /scripts/install_nvidia_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Sample-based Monte Carlo Denoising using a Kernel-Splatting Network 3 | # Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand 4 | # Siggraph 2019 5 | # 6 | # Copyright (c) 2019 Michaël Gharbi 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 
# Install the NVIDIA container toolkit so Docker containers can use the GPU.
echo Installing nvidia-docker
# Build the distribution id expected by NVIDIA's package repo,
# e.g. "ubuntu18.04", from /etc/os-release.
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
echo ${distribution}
# Register NVIDIA's signing key and apt repository for this distribution.
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
# Install the toolkit and restart the Docker daemon so it picks it up.
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
sudo systemctl restart docker
"""Profiles the kernel weighting extension."""
import argparse
import torch as th
from torch.autograd import profiler


import sbmc.functions as funcs


def profile_kernel_weighting(gpu):
    """Benchmark a forward+backward pass of the `KernelWeighting` op.

    Args:
        gpu(bool): when True, run the CUDA implementation.
    """
    print("Profiling `KernelWeighting`")
    # Synthetic problem size: batch, channels, spatial extent, kernel size.
    batch = 4
    channels = 3
    height = 128
    width = 128

    ksize = 21

    data = 2*th.ones(batch, channels, height, width, requires_grad=True)
    weights = th.ones(batch, ksize, ksize, height, width, requires_grad=True)

    if not gpu:
        print("Running on CPU")
    else:
        print("Running on GPU")
        data = data.cuda()
        weights = weights.cuda()

    # Warm-up iterations so lazy initialization does not pollute the profile.
    print("Burning 5 iterations")
    for _ in range(5):
        o, s = funcs.KernelWeighting.apply(data, weights)
        if gpu:
            th.cuda.synchronize()

    # Profile one forward pass plus the backward through the mean loss.
    print("Profiling")
    with profiler.profile(use_cuda=gpu) as prof:
        o, s = funcs.KernelWeighting.apply(data, weights)
        loss = o.mean()
        loss.backward()
    print(prof)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu", dest="gpu", action="store_true", default=False,
                        help="if True, run GPU implementation")
    args = parser.parse_args()
    profile_kernel_weighting(args.gpu)
"""Profiles the scatter2gather extension."""
import argparse
import torch as th
from torch.autograd import profiler


import sbmc.functions as funcs


def profile_scatter2gather(gpu):
    """Benchmark a forward+backward pass of the `Scatter2Gather` op.

    Args:
        gpu(bool): when True, run the CUDA implementation.
    """
    print("Profiling `Scatter2Gather`")
    # Synthetic problem size: batch, channels, spatial extent, kernel size.
    batch = 4
    channels = 3
    height = 128
    width = 128
    ksize = 1

    weights = th.ones(batch, ksize, ksize, height, width, requires_grad=True)

    if not gpu:
        print("Running on CPU")
    else:
        print("Running on GPU")
        weights = weights.cuda()

    # Warm-up iterations so lazy initialization does not pollute the profile.
    print("Burning 5 iterations")
    for _ in range(5):
        w2 = funcs.Scatter2Gather.apply(weights)
        if gpu:
            th.cuda.synchronize()

    # Profile one forward pass plus the backward through the mean loss.
    print("Profiling")
    with profiler.profile(use_cuda=gpu) as prof:
        for _ in range(1):
            w2 = funcs.Scatter2Gather.apply(weights)
            loss = w2.mean()
            loss.backward()
    print(prof)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu", dest="gpu", action="store_true", default=False,
                        help="if True, run GPU implementation")
    args = parser.parse_args()
    profile_scatter2gather(args.gpu)
"""Renders a .pbrt scene to a .exr image."""
from sbmc import rendering


if __name__ == "__main__":
    # Build the CLI parser, then hand the parsed arguments to the renderer.
    arg_parser = rendering.PBRTRenderingParser(
        description="Render a .pbrt scene file using the default pathtracer"
        " in PBRTv2.")
    cli_args = arg_parser.parse_args()
    renderer = rendering.PBRTRenderer(cli_args)
    renderer.render()
"""Renders a .pbrt scene and saves the per-pixel sample data to disk.

Samples are written as .bin files; their layout is described in
sbmc/datasets.py and in pbrt/core/samplerecord.h of our pbrt extension.
"""
from sbmc import rendering


if __name__ == "__main__":
    # Parse the CLI arguments, then run the sample-dumping renderer.
    arg_parser = rendering.SamplesRenderingParser(
        description="Render a .pbrt scene file using our modified pathtracer"
        " in PBRTv2, and saves samples to disk as .bin files.")
    cli_args = arg_parser.parse_args()
    rendering.PBRTSamplesRenderer(cli_args).render()
"""Train a model."""
import numpy as np
import torch as th
from torch.utils.data import DataLoader

import ttools

import sbmc


LOG = ttools.get_logger(__name__)


def main(args):
    """Set up data, model, callbacks and run the training loop.

    Args:
        args: parsed command-line namespace (see the argument definitions in
            the `__main__` block below and in `ttools.BasicArgumentParser`).

    Raises:
        RuntimeError: if randomized spp is requested with a batch size != 1.
    """
    # Fix seed for reproducibility.
    np.random.seed(0)
    th.manual_seed(0)

    # Parameterization of the dataset (shared between train/val)
    data_args = dict(spp=args.spp, mode=sbmc.TilesDataset.KPCN_MODE if
                     args.kpcn_mode else sbmc.TilesDataset.SBMC_MODE,
                     load_coords=args.load_coords,
                     load_gbuffer=args.load_gbuffer, load_p=args.load_p,
                     load_ld=args.load_ld, load_bt=args.load_bt)

    if args.randomize_spp:
        if args.bs != 1:
            # Fixed: the two implicitly-concatenated literals used to render
            # as "...only valid forbatch_size=1" (missing space).
            LOG.error("Training with randomized spp is only valid for "
                      "batch_size=1, got %d", args.bs)
            raise RuntimeError("Incorrect batch size")
        data = sbmc.MultiSampleCountDataset(
            args.data, **data_args)
        LOG.info("Training with randomized sample count in [%d, %d]" % (
            2, args.spp))
    else:
        data = sbmc.TilesDataset(args.data, **data_args)
        LOG.info("Training with a single sample count: %dspp" % args.spp)

    if args.kpcn_mode:
        LOG.info("Model: pixel-based comparison from [Bako2017]")
        model = sbmc.KPCN(data.num_features, ksize=args.ksize)
        model_params = dict(ksize=args.ksize)
    else:
        LOG.info("Model: sample-based [Gharbi2019]")
        model = sbmc.Multisteps(data.num_features, data.num_global_features,
                                ksize=args.ksize, splat=not args.gather,
                                pixel=args.pixel)
        model_params = dict(ksize=args.ksize, gather=args.gather,
                            pixel=args.pixel)

    dataloader = DataLoader(
        data, batch_size=args.bs, num_workers=args.num_worker_threads,
        shuffle=True)

    # Validation set uses a constant spp
    val_dataloader = None
    if args.val_data is not None:
        LOG.info("Validation set with %dspp" % args.spp)
        val_data = sbmc.TilesDataset(args.val_data, **data_args)
        val_dataloader = DataLoader(
            val_data, batch_size=args.bs, num_workers=1, shuffle=False)
    else:
        LOG.info("No validation set provided")

    # Metadata saved alongside checkpoints so inference can rebuild the model.
    meta = dict(model_params=model_params, kpcn_mode=args.kpcn_mode,
                data_params=data_args)

    LOG.info("Model configuration: {}".format(model_params))

    checkpointer = ttools.Checkpointer(args.checkpoint_dir, model, meta=meta)

    interface = sbmc.SampleBasedDenoiserInterface(
        model, lr=args.lr, cuda=args.cuda)

    # Resume from the latest checkpoint if one exists.
    extras, meta = checkpointer.load_latest()

    trainer = ttools.Trainer(interface)

    # Hook-up some callbacks to the training loop
    log_keys = ["loss", "rmse"]
    trainer.add_callback(ttools.callbacks.ProgressBarCallback(log_keys))
    trainer.add_callback(ttools.callbacks.CheckpointingCallback(checkpointer))
    trainer.add_callback(ttools.callbacks.VisdomLoggingCallback(log_keys,
                                                                env=args.env,
                                                                port=args.port,
                                                                log=True,
                                                                frequency=100))
    trainer.add_callback(sbmc.DenoisingDisplayCallback(env=args.env,
                                                       port=args.port,
                                                       win="images"))

    # Launch the training
    LOG.info("Training started, 'Ctrl+C' to abort.")
    trainer.train(dataloader, num_epochs=args.num_epochs,
                  val_dataloader=val_dataloader)


if __name__ == "__main__":
    parser = ttools.BasicArgumentParser()
    parser.add_argument(
        '--spp', type=int, default=8, help="Max number of samples per pixel.")

    # Model parameters
    parser.add_argument(
        '--kpcn_mode', dest="kpcn_mode", action="store_true", default=False,
        help="if True, use the model from [Bako2017]: useful for comparison.")
    parser.add_argument(
        '--gather', dest="gather", action="store_true", default=False,
        help="if True, use gather kernels instead of splat.")
    parser.add_argument(
        '--pixel', dest="pixel", action="store_true", default=False,
        help="if True, use per-pixel model instead of samples.")
    parser.add_argument(
        '--ksize', type=int, default=21, help="Size of the kernels")

    # Data configuration
    parser.add_argument('--constant_spp', dest="randomize_spp",
                        action="store_false", default=True)

    parser.add_argument('--dont_use_coords', dest="load_coords",
                        action="store_false", default=True)
    parser.add_argument('--dont_use_gbuffer', dest="load_gbuffer",
                        action="store_false", default=True)
    parser.add_argument('--dont_use_p', dest="load_p",
                        action="store_false", default=True)
    parser.add_argument('--dont_use_ld', dest="load_ld",
                        action="store_false", default=True)
    parser.add_argument('--dont_use_bt', dest="load_bt",
                        action="store_false", default=True)

    args = parser.parse_args()
    ttools.set_logger(args.debug)
    main(args)
"""Visualize the content of a dataset of .bin sample files."""
import argparse
import os
import numpy as np
from torch.utils.data import DataLoader
import ttools
import skimage.io as skio

from sbmc.datasets import FullImagesDataset


LOG = ttools.get_logger(__name__)


def _save(output, imname, imdata, dtype=np.uint8):
    """Write tensor `imdata` as an image named `imname` under `output`."""
    os.makedirs(output, exist_ok=True)
    path = os.path.join(output, imname)
    skio.imsave(path, ttools.utils.tensor2image(imdata, dtype=dtype))


def main(args):
    """Dump reference images (and optionally per-feature channels) as PNGs.

    Args:
        args: namespace with `data_dir`, `output`, `spp` and `dump_features`.
    """
    data = FullImagesDataset(args.data_dir, spp=args.spp)

    dataloader = DataLoader(data, batch_size=1, shuffle=False, num_workers=4)

    LOG.info("Visualizing dataset with %d spp (gt_spp = %d)",
             data.spp, data.gt_sample_count)
    for idx, sample in enumerate(dataloader):
        LOG.info("Processing data sample %d", idx)
        im = sample["target_image"]
        LOG.info(" target radiance: %.2f -- %.2f", im.min().item(), im.max().item())
        _save(args.output, "%04d_reference.png" % idx, im)

        if not args.dump_features:
            continue

        # Walk the feature channels; `i` indexes the channel where label `k`
        # starts in the `features` tensor.
        for i, k in enumerate(data.labels):
            if k in ["diffuse_r", "specular_r", "albedo_r"]:
                # RGB triplet starting at channel i; drop the "_r" suffix.
                k = k.split("_r")[0]
                im = sample["features"][:, :, i:i+3, ...]
            elif k in ["depth", "depth_first", "visibility", "hasHit"]:
                # Scalar feature: single channel.
                im = sample["features"][:, :, i:i+1, ...]
            elif k in ["normal_x", "normal_first_x"]:
                # Normal vector: remap [-1, 1] to [0, 1] for display.
                # NOTE(review): `+=`/`*=` operate in place on a view of
                # sample["features"], so this mutates the batch tensor —
                # confirm later labels never re-read these channels.
                im = sample["features"][:, :, i:i+3, ...]
                im += 1.0
                im *= 0.5
            else:
                continue

            im = im.mean(1)  # average samples
            LOG.info(" %s (feature): %.2f -- %.2f", k, im.min().item(), im.max().item())
            os.makedirs(args.output, exist_ok=True)
            # Features are saved at 16-bit depth for extra precision.
            _save(args.output, "%04d_%s.png" % (idx, k), im, dtype=np.uint16)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('data_dir', help="folder containing the scenes"
                        " to visualize")
    parser.add_argument('output', help="folder where the visualization will"
                        " be saved.")
    parser.add_argument('--spp', type=int)
    parser.add_argument('--dump_features', dest="dump_features",
                        action="store_true", default=False)
    args = parser.parse_args()

    ttools.set_logger()

    main(args)
"""Synthesizes the cpp wrapper code and builds dynamic Python extension."""
import os
import platform
import re
import setuptools
import subprocess

import torch as th


def main():
    """Configure and run the setuptools build for the `sbmc` package.

    Requires the `halide_pytorch` module and the HALIDE_DISTRIB_DIR
    environment variable; aborts with a message when either is missing.
    CUDA generator variants are added only when an `nvcc` compiler is found.
    """
    dirname = os.path.dirname(os.path.abspath(__file__))

    try:
        import halide_pytorch as hlpt
    except ModuleNotFoundError:
        print("Required module `halide pytorch` not found. "
              "You can install the module using the following command:\n\n"
              "\t`pip install halide_pytorch`\n\n"
              "Aborting setup.")
        return

    # Parse the package version out of sbmc/version.py.
    with open(os.path.join(dirname, "sbmc", "version.py")) as fid:
        try:
            # Tuple unpacking raises ValueError unless there is exactly one
            # match; the previous bare `except:` also swallowed unrelated
            # exceptions (including KeyboardInterrupt).
            __version__, = re.findall('__version__ = "(.*)"', fid.read())
        except ValueError:
            raise ValueError("could not find version number")

    halide_root = os.getenv("HALIDE_DISTRIB_DIR")
    if not halide_root:
        # TODO: try local download
        print("The HALIDE_DISTRIB_DIR environment variable is not set, and"
              " Halide was not found in the default `vendor` path, would you"
              " like to download a pre-compiled distribution?")
        ret = input("yes/no?")
        if ret in ["y", "yes"]:
            # NOTE(review): the automatic download is not implemented yet
            # (see TODO above), so the "yes" branch currently aborts too.
            pass
            return
        else:
            print("Not downloading Halide, installation aborted.")
            return

    if platform.system() == "Windows":
        raise RuntimeError("Windows platform not supported.")

    # CPU Halide extensions
    generators = [
        hlpt.HalideOp("src/scatter2gather.cpp", "scatter2gather",
                      "scatter2gather_cpu_float32"),
        hlpt.HalideOp("src/kernel_weighting.cpp", "kernel_weighting",
                      "kernel_weighting_cpu_float32"),
        hlpt.HalideOp("src/kernel_weighting.cpp", "kernel_weighting_grad",
                      "kernel_weighting_grad_cpu_float32"),
    ]

    try:
        subprocess.check_output(["nvcc", "--version"])
        print("Found an nvcc compiler: compiling the CUDA Halide extensions.")
        generators += [
            hlpt.HalideOp("src/scatter2gather.cpp", "scatter2gather",
                          "scatter2gather_cuda_float32", cuda=True),
            hlpt.HalideOp("src/kernel_weighting.cpp", "kernel_weighting",
                          "kernel_weighting_cuda_float32", cuda=True),
            hlpt.HalideOp("src/kernel_weighting.cpp", "kernel_weighting_grad",
                          "kernel_weighting_grad_cuda_float32", cuda=True),
        ]
    except (FileNotFoundError, subprocess.CalledProcessError):
        # Also degrade gracefully when nvcc exists but exits non-zero,
        # instead of crashing the whole setup.
        print("CUDA not found: not compiling the CUDA Halide extensions.")

    ext_name = "sbmc.halide_ops"
    extension = hlpt.HalidePyTorchExtension(
        halide_root, ext_name, generators=generators)

    # Build the Python extension module
    packages = ["sbmc", "sbmc.scene_generator"]
    setuptools.setup(
        name="sbmc",
        verbose=True,
        url="",
        author_email="mgharbi@adobe.com",
        author="Michaël Gharbi",
        version=__version__,
        packages=packages,
        ext_modules=[extension],
        install_requires=["torch-tools==0.0.36", "bridson", "pandas", "pyexr",
                          "scikit-image", "lz4", "wget", "torch==1.2.0"],
        cmdclass=dict(build_ext=hlpt.HalideBuildExtension),
    )


if __name__ == "__main__":
    main()
// NOTE(review): the `#include <...>` header names and every `<...>`
// template-argument list appear to have been stripped from this copy of the
// file (an HTML-extraction artifact). All code tokens are kept exactly as
// found; only comments were added. Verify against the original source.
#include 
#include 
#include 


using namespace Halide;

// Shared pipeline variables: pixel coords (x, y), kernel tap offsets
// (dx, dy), input/output channel indices (ci, c), batch index (n).
Var x("x"), y("y"), dx("dx"), dy("dy"), ci("ci"), c("c"), n("n");

// Forward pass: apply per-pixel kernels `weights` to `data`, producing the
// weighted sum `output` and the per-pixel sum of weights `sum_w` (used by
// the caller for normalization). Returns the internal Funcs that the
// generator needs to schedule.
template 
std::map kernel_weighting(
    const InputBuffer &data,
    const InputBuffer &weights,
    const OutputBuffer &output,
    const OutputBuffer &sum_w)
{
    // Zero-pad both inputs so kernel taps can safely read outside the image.
    Func f_data("f_data");
    f_data(x, y, ci, n) = Halide::BoundaryConditions::constant_exterior(
        data, 0.0f)(x, y, ci, n);
    Func f_weights("f_weights");
    f_weights(x, y, dx, dy, n) = Halide::BoundaryConditions::constant_exterior(
        weights, 0.0f)(x, y, dx, dy, n);

    Expr kw = weights.dim(2).extent();
    Expr kh = weights.dim(3).extent();
    Expr channels = data.dim(2).extent();

    // Reduction over all kernel taps.
    RDom r_kernel(0, kw, 0, kh);
    Expr w = f_weights(x, y, r_kernel.x, r_kernel.y, n);

    // Append a constant-1 channel so the same reduction that sums weighted
    // data also accumulates the plain sum of weights in channel `channels`.
    // NOTE(review): the Func debug name "homegeneous" is a typo, kept as-is
    // since it only affects debugging output.
    Func homogeneous("homegeneous");
    homogeneous(x, y, c, n) = select(c < channels, f_data(x, y, c, n), 1.0f);

    // Accumulate the kernel-weighted neighborhood, centered on (x, y).
    Func summed("summed");
    summed(x, y, c, n) = 0.0f;
    summed(x, y, c, n) += w * homogeneous(x + r_kernel.x - (kw-1)/2,
                                          y + r_kernel.y - (kh-1)/2, c, n);

    output(x, y, c, n) = summed(x, y, c, n);
    // The extra homogeneous channel holds the accumulated weight total.
    sum_w(x, y, n) = summed(x, y, channels, n);

    std::map func_map;

    // Expose the reduction Func so the generator can schedule it.
    func_map["summed"] = summed;

    return func_map;
}


// Backward pass: given upstream gradients d_output and d_sum_w, compute the
// gradients with respect to the input data and the kernel weights.
template 
std::map kernel_weighting_grad(
    const InputBuffer &data,
    const InputBuffer &weights,
    const InputBuffer &sum_w,
    const InputBuffer &d_output,
    const InputBuffer &d_sum_w,
    const OutputBuffer &d_data,
    const OutputBuffer &d_weights)
{
    // Zero-padded views of the inputs, as in the forward pass.
    Func f_data("f_data");
    f_data(x, y, ci, n) = Halide::BoundaryConditions::constant_exterior(
        data, 0.0f)(x, y, ci, n);
    Func f_d_output("f_d_output");
    f_d_output(x, y, c, n) = Halide::BoundaryConditions::constant_exterior(
        d_output, 0.0f)(x, y, c, n);
    Func f_weights("f_weights");
    f_weights(x, y, dx, dy, n) = Halide::BoundaryConditions::constant_exterior(
        weights, 0.0f)(x, y, dx, dy, n);

    Expr kw = weights.dim(2).extent();
    Expr kh = weights.dim(3).extent();
    Expr channels = data.dim(2).extent();

    RDom r_kernel(0, kw, 0, kh);

    // Mirror the forward stencil: read the weight that, in the forward
    // pass, scattered a neighbor's contribution onto this pixel.
    Expr w = f_weights(x + r_kernel.x - (kw-1)/2,
                       y + r_kernel.y - (kh-1)/2,
                       kw - 1 - r_kernel.x,
                       kh - 1 - r_kernel.y, n);

    // out = sum { data * w }
    // dL / ddata = sum {dL/dout * dout / ddata } (= sum {dL/dout * w})
    // + sum {dL/dsumw * dsumw / ddata} (=0)
    Func d_data_tmp("d_data_tmp");
    d_data_tmp(x, y, c, n) = 0.0f;
    d_data_tmp(x, y, c, n) += w * f_d_output(
        x + r_kernel.x - (kw-1)/2, y + r_kernel.y - (kh-1)/2, c, n);
    d_data(x, y, c, n) = d_data_tmp(x, y, c, n);

    // sumw = sum { w }
    // dL / dwj = sum { dL/dout * dout / dwj } (=sum{dL/dout * dataj})
    // + sum { dL/dsumw * dsumw / dwj } (=sum{dL/dsumw * wj})
    // Expr w2 = f_weights(x, y, dx, dy, n);
    Func d_weights_tmp("d_weights_tmp");
    // Reduce over data channels when accumulating the weight gradient.
    RDom rchan(0, data.dim(2).extent());
    d_weights_tmp(x, y, dx, dy, n) = d_sum_w(x, y, n);
    d_weights_tmp(x, y, dx, dy, n) +=
        f_data( x + dx - (kw-1)/2, y + dy - (kh-1)/2, rchan, n)
        * f_d_output(x, y, rchan, n);
    d_weights(x, y, dx, dy, n) = d_weights_tmp(x, y, dx, dy, n);

    std::map func_map;
    // func_map["d_data_tmp"] = d_data_tmp;
    // func_map["d_weights_tmp"] = d_weights_tmp;

    return func_map;
}


namespace sbmc {

// Ahead-of-time generator for the forward kernel-weighting operator.
// Chooses a GPU tiling schedule when the target has a GPU feature, and a
// parallel + vectorized CPU schedule otherwise.
class KernelWeightingForwardGenerator : public Generator {
public:
    Input> data{"data", 4};
    Input> weights{"weights", 5};
    Output> output{"output", 4};
    Output> sum_w{"sum_w", 3};

    void generate() {
        std::map funcs = kernel_weighting(
            data, weights, output, sum_w);

        Var tx("tx"), ty("ty"), tz("tz"),
            xy("xy"), cn("cn"), allvars("allvars");
        int ts = 16;  // GPU tile side for the reduction stage


        if(get_target().has_gpu_feature()) {
            // Flatten all output dimensions into one var and tile it into
            // 1024-wide GPU blocks; GuardWithIf handles ragged edges.
            output
                .fuse(x, y, xy)
                .fuse(c, n, cn)
                .fuse(xy, cn, allvars)
                .gpu_tile(allvars, tx, 1024, TailStrategy::GuardWithIf)
                ;

            sum_w
                .fuse(x, y, xy)
                .fuse(xy, n, allvars)
                .gpu_tile(allvars, tx, 1024, TailStrategy::GuardWithIf)
                ;

            // Schedule both the init and the update of the reduction.
            funcs["summed"]
                .compute_root()
                .gpu_tile(x, y, tx, ty, ts, ts, TailStrategy::GuardWithIf)
                .update()
                .gpu_tile(x, y, tx, ty, ts, ts, TailStrategy::GuardWithIf)
                ;
        } else {
            // CPU: parallelize over fused outer dims, vectorize along x.
            output
                .compute_root()
                .fuse(c, n, cn)
                .fuse(y, cn, allvars)
                .parallel(allvars, 8)
                .vectorize(x, 8)
                ;

            sum_w
                .compute_root()
                .fuse(y, n, allvars)
                .parallel(allvars, 8)
                .vectorize(x, 8)
                ;

            funcs["summed"]
                .compute_root()
                .parallel(y, 8)
                .vectorize(x, 8)
                .update()
                .parallel(y, 8)
                .vectorize(x, 8)
                ;
        }
    }

};

// Ahead-of-time generator for the gradient of the kernel-weighting operator.
class KernelWeightingGradGenerator : public Generator {
public:
    Input> data{"data", 4};
    Input> weights{"weights", 5};
    Input> sum_w{"sum_w", 3};
    Input> d_output{"d_output", 4};
    Input> d_sum_w{"d_sum_w", 3};

    Output> d_data{"d_data", 4};
    Output> d_weights{"d_weights", 5};

    void generate() {
        std::map funcs = kernel_weighting_grad(
            data, weights, sum_w, d_output, d_sum_w, d_data, d_weights);


        Var tx("tx"), ty("ty"), tz("tz"), dxdy("dxdy"),
            xy("xy"), cn("cn"), allvars("allvars");

        if(get_target().has_gpu_feature()) {
            // GPU: straightforward 32x32 spatial tiling of both outputs.
            d_data
                .gpu_tile(x, y, tx, ty, 32, 32, TailStrategy::GuardWithIf)
                ;
            d_weights
                .gpu_tile(x, y, tx, ty, 32, 32, TailStrategy::GuardWithIf)
                ;
        } else {
            // CPU: parallelize fused outer dims, vectorize along x.
            d_data
                .compute_root()
                .fuse(c, n, cn)
                .fuse(y, cn, allvars)
                .parallel(allvars, 8)
                .vectorize(x, 8)
                ;

            d_weights
                .compute_root()
                .fuse(dx, dy, dxdy)
                .fuse(y, dxdy, allvars)
                .fuse(allvars, n, allvars)
                .parallel(allvars, 8)
                .vectorize(x, 8)
                ;
        }
    }
};


} // end namespace sbmc

HALIDE_REGISTER_GENERATOR(
    sbmc::KernelWeightingForwardGenerator, kernel_weighting)

HALIDE_REGISTER_GENERATOR(
    sbmc::KernelWeightingGradGenerator, kernel_weighting_grad)
18 | #include 19 | #include 20 | #include 21 | 22 | using namespace Halide; 23 | 24 | Var x("x"), y("y"), 25 | dx("dx"), dy("dy"), 26 | ci("ci"), c("c"), n("n"); 27 | 28 | template 29 | std::map scatter2gather( 30 | const InputBuffer &weights, 31 | const OutputBuffer &output) 32 | { 33 | Func f_weights("f_weights"); 34 | f_weights(x, y, dx, dy, n) = Halide::BoundaryConditions::constant_exterior( 35 | weights, 0.0f)(x, y, dx, dy, n); 36 | 37 | Expr kw = weights.dim(2).extent(); 38 | Expr kh = weights.dim(3).extent(); 39 | 40 | Expr ddx = dx - (kw-1)/2; 41 | Expr ddy = dy - (kh-1)/2; 42 | 43 | output(x, y, dx, dy, n) = f_weights( 44 | x + ddx, 45 | y + ddy, 46 | kw-1 - dx, 47 | kh-1 - dy, n); 48 | 49 | std::map func_map; 50 | 51 | return func_map; 52 | } 53 | 54 | namespace sbmc { 55 | 56 | /** 57 | * Converts sample-centered kernels into pixel-centered kernels. 58 | */ 59 | class Scatter2GatherGenerator : public Generator { 60 | public: 61 | Input> weights{"weights", 5}; 62 | Output> output{"output", 5}; 63 | 64 | void generate() { 65 | std::map funcs = scatter2gather( 66 | weights, output); 67 | 68 | Var tx("tx"), ty("ty"), tz("tz"), dxdy("dxdy"), 69 | xy("xy"), cn("cn"), allvars("allvars"); 70 | 71 | if(get_target().has_gpu_feature()) { 72 | output 73 | .compute_root() 74 | // .gpu_tile(x, y, tx, tx, 32, 32) 75 | .fuse(x, y, xy) 76 | .fuse(dx, dy, dxdy) 77 | .fuse(xy, dxdy, allvars) 78 | .fuse(allvars, n, allvars) 79 | .gpu_tile(allvars, tx, 1024) 80 | ; 81 | } else { 82 | output 83 | .compute_root() 84 | .fuse(dx, dy, dxdy) 85 | .fuse(y, dxdy, allvars) 86 | .fuse(allvars, n, allvars) 87 | .parallel(allvars, 8) 88 | .vectorize(x, 8) 89 | ; 90 | } 91 | } 92 | 93 | }; 94 | 95 | } // end namespace sbmc 96 | 97 | HALIDE_REGISTER_GENERATOR(sbmc::Scatter2GatherGenerator, scatter2gather) 98 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 
# encoding: utf-8
"""Tests for Halide extension functions."""
import unittest
import warnings

import torch as th
from torch.autograd import gradcheck

import ttools

import sbmc.functions as funcs

LOG = ttools.get_logger(__name__)


class TestKernelWeighting(unittest.TestCase):
    """Checks the forward and backward passes of the KernelWeighting op."""

    def test_forward_cpu(self):
        self._forward(False)

    def test_forward_cuda(self):
        if not th.cuda.is_available():
            LOG.info("CUDA not available skipping.")
            return
        self._forward(True)

    def test_backward_cpu(self):
        self._backward(False)

    def test_backward_cuda(self):
        if not th.cuda.is_available():
            LOG.info("CUDA not available skipping.")
            return
        self._backward(True)

    def test_kernel_weighting_grad_cpu(self):
        self._kernel_weighting_grad(False)

    def test_kernel_weighting_grad_cuda(self):
        if not th.cuda.is_available():
            return
        self._kernel_weighting_grad(True)

    def _forward(self, gpu):
        """Splats a single impulse and checks where each kernel tap lands."""
        bs = 4
        c = 5
        h = 16
        w = 16

        ksize = 5

        # BUGFIX: the `gpu` flag was previously ignored, so test_forward_cuda
        # silently re-ran the CPU path.
        device = "cuda" if gpu else "cpu"
        data = th.zeros(bs, c, h, w, device=device)
        weights = th.zeros(bs, ksize, ksize, h, w, device=device)

        for idx in range(1, 2):
            data.zero_()
            y, x = h//2, w//2
            data[idx, 0, y, x] = 1.4
            data[idx, 1, y, x] = 2.4
            data[idx, 2, y, x] = 3.4
            for dy in range(-(ksize//2), ksize//2 + 1):
                for dx in range(-(ksize//2), ksize//2 + 1):
                    weights.zero_()
                    kx = ksize // 2 + dx
                    ky = ksize // 2 + dy
                    # A weight at tap (ky, kx) of pixel (y-dy, x-dx) gathers
                    # from (y, x), where the impulse lives.
                    weights[idx, ky, kx, y-dy, x-dx] = 0.5
                    o, s = funcs.KernelWeighting.apply(data, weights)
                    self.assertAlmostEqual(o[idx, 0, y-dy, x-dx].item(),
                                           1.4*0.5)
                    self.assertAlmostEqual(o[idx, 1, y-dy, x-dx].item(),
                                           2.4*0.5)
                    self.assertAlmostEqual(o[idx, 2, y-dy, x-dx].item(),
                                           3.4*0.5)
                    self.assertAlmostEqual(s[idx, y-dy, x-dx].item(), 0.5)

    def _backward(self, gpu):
        """Checks the handwritten gradients with a single impulse."""
        bs = 3
        chans = 5
        h = 16
        w = 16

        x = w // 2
        y = h // 2
        device = "cuda" if gpu else "cpu"

        for ksize in [3, 5, 7]:
            for b in range(bs):
                for c in range(chans):
                    # Forward
                    data = th.ones(bs, chans, h, w, device=device,
                                   requires_grad=True)
                    data.data.fill_(7.0)
                    weights = th.ones(bs, ksize, ksize, h, w, device=device,
                                      requires_grad=True)
                    o, s = funcs.KernelWeighting.apply(data, weights)

                    # Backward: inject one gradient impulse. Tensors are laid
                    # out (batch, channel, row, col), so the spatial index is
                    # (y, x). BUGFIX: the original indexed [b, c, x, y], which
                    # only passed because x == y == 8 here.
                    o_grad = th.zeros_like(o)
                    o_grad[b, c, y, x] = 1.1
                    o.backward(o_grad)

                    for dy in range(-(ksize//2), ksize//2 + 1):
                        for dx in range(-(ksize//2), ksize//2 + 1):
                            self.assertAlmostEqual(
                                data.grad[b, c, y+dy, x+dx].item(), 1.1)
                            # Zero the checked entry so the global check below
                            # sees only unexpected leftovers.
                            data.grad[b, c, y+dy, x+dx] = 0.0
                    self.assertAlmostEqual(data.grad.abs().max().item(), 0.0)
                    self.assertAlmostEqual(
                        weights.grad[b, ksize//2 + dy, ksize//2 + dx,
                                     y, x].item(),
                        7.0*1.1, places=3)

    def _kernel_weighting_grad(self, gpu):
        """Finite-difference gradient check for both differentiable inputs."""
        bs = 2
        c = 3
        h = 16
        w = 16

        ksize = 3

        data = 2*th.randn(bs, c, h, w, requires_grad=True)
        weights = th.randn(bs, ksize, ksize, h, w, requires_grad=False)

        if gpu:
            data = data.cuda()
            weights = weights.cuda()

        o, s = funcs.KernelWeighting.apply(data, weights)

        with warnings.catch_warnings():
            # Inputs are float, the gradient checker wants double inputs and
            # will issue a warning.
            warnings.filterwarnings(
                "ignore", message="At least one of the inputs that requires "
                "gradient is not of double precision")
            gradcheck(
                funcs.KernelWeighting.apply,
                (data, weights),
                eps=1e-4, atol=5e-2, rtol=5e-4,
            )

            # Same check with the differentiable input swapped. Kept inside
            # the warnings filter (the original second call was outside it
            # and re-emitted the precision warning).
            data = 2*th.randn(bs, c, h, w, requires_grad=False)
            weights = th.randn(bs, ksize, ksize, h, w, requires_grad=True)
            if gpu:
                data = data.cuda()
                weights = weights.cuda()

            gradcheck(
                funcs.KernelWeighting.apply,
                (data, weights),
                eps=1e-4, atol=5e-2, rtol=5e-4,
            )


class TestScatter2Gather(unittest.TestCase):
    """Checks the scatter->gather kernel conversion."""

    def test_scatter2gather_cpu(self):
        self._scatter2gather(False)

    def test_scatter2gather_cuda(self):
        if not th.cuda.is_available():
            return
        self._scatter2gather(True)

    def test_scatter2gather_grad_cpu(self):
        self._scatter2gather_grad(False)

    def test_scatter2gather_grad_cuda(self):
        if not th.cuda.is_available():
            return
        self._scatter2gather_grad(True)

    def _scatter2gather(self, gpu):
        """A scatter weight at (y, x) must appear at the mirrored gather tap
        of the pixel it splats onto."""
        bs = 4
        h = 32
        w = 32

        # BUGFIX: honor the `gpu` flag (it was previously unused, so the CUDA
        # test re-ran the CPU path).
        device = "cuda" if gpu else "cpu"

        for ksize in [3, 5, 7, 9]:
            scatter = th.zeros(bs, ksize, ksize, h, w, device=device)
            for idx in range(bs):
                for y in range(h//2-ksize//2, h//2+ksize//2+1):
                    for x in range(w//2-ksize//2, w//2+ksize//2+1):
                        for ky in range(ksize):
                            for kx in range(ksize):
                                scatter.zero_()
                                dx = kx - ksize // 2
                                dy = ky - ksize // 2
                                kx2 = ksize-1 - kx
                                ky2 = ksize-1 - ky
                                scatter[idx, ky, kx, y, x] = 0.5
                                gather = funcs.Scatter2Gather.apply(scatter)
                                self.assertAlmostEqual(
                                    gather[idx, ky2, kx2, y+dy, x+dx].item(),
                                    0.5)

    def _scatter2gather_grad(self, gpu):
        """Finite-difference gradient check for Scatter2Gather."""
        bs = 2
        c = 2
        h = 32
        w = 32
        ksize = 3

        weights = th.randn(bs, ksize, ksize, h, w, requires_grad=True)
        if gpu:
            weights = weights.cuda()
weights.cuda() 197 | 198 | with warnings.catch_warnings(): 199 | # Inputs are float, the gradient checker wants double inputs and 200 | # will issue a warning. 201 | warnings.filterwarnings( 202 | "ignore", message="At least one of the inputs that requires " 203 | "gradient is not of double precision") 204 | gradcheck( 205 | funcs.Scatter2Gather.apply, 206 | (weights, ), 207 | eps=1e-4, atol=5e-2, rtol=5e-4, 208 | ) 209 | 210 | 211 | if __name__ == "__main__": 212 | unittest.main() 213 | -------------------------------------------------------------------------------- /tests/test_losses.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """Tests for loss functions.""" 3 | import unittest 4 | import warnings 5 | 6 | import torch as th 7 | import numpy as np 8 | 9 | import ttools 10 | 11 | import sbmc.losses as losses 12 | 13 | 14 | LOG = ttools.get_logger(__name__) 15 | 16 | 17 | class TestRelativeMSE(unittest.TestCase): 18 | def setUp(self): 19 | self.eps = 1e-2 20 | self.loss_fn = losses.RelativeMSE(eps=self.eps) 21 | 22 | def test_basic(self): 23 | for sz in [[1, 3, 4, 5]]: 24 | im = th.zeros(*sz) 25 | ref = th.zeros(*sz) 26 | 27 | loss = self.loss_fn(im, ref).item() 28 | self.assertAlmostEqual(loss, 0.0) 29 | 30 | val = 0.34 31 | val2 = 0.7 32 | n = np.array(sz).prod() 33 | target = ((val-val2)**2 / (val**2 + self.eps)) * 0.5 / n 34 | 35 | for dx in range(n): 36 | ref.zero_() 37 | im.zero_() 38 | ref.view(-1)[dx] = val 39 | im.view(-1)[dx] = val2 40 | loss = self.loss_fn(im, ref).item() 41 | self.assertAlmostEqual(loss, target, places=4) 42 | 43 | 44 | class TestSMAPE(unittest.TestCase): 45 | def setUp(self): 46 | self.eps = 1e-2 47 | self.loss_fn = losses.SMAPE(eps=self.eps) 48 | 49 | def test_basic(self): 50 | for sz in [[1, 3, 4, 5]]: 51 | im = th.zeros(*sz) 52 | ref = th.zeros(*sz) 53 | 54 | loss = self.loss_fn(im, ref).item() 55 | self.assertAlmostEqual(loss, 0.0) 56 | 57 | val = 0.34 58 | val2 = 
0.7 59 | n = np.array(sz).prod() 60 | target = ((val2-val) / (val + val2 + self.eps)) / n 61 | 62 | for dx in range(n): 63 | ref.zero_() 64 | im.zero_() 65 | ref.view(-1)[dx] = val 66 | im.view(-1)[dx] = val2 67 | loss = self.loss_fn(im, ref).item() 68 | self.assertAlmostEqual(loss, target, places=4) 69 | 70 | 71 | class TestTonemappedMSE(unittest.TestCase): 72 | def setUp(self): 73 | self.eps = 1e-2 74 | self.loss_fn = losses.TonemappedMSE(eps=self.eps) 75 | 76 | def test_basic(self): 77 | for sz in [[1, 3, 4, 5]]: 78 | im = th.zeros(*sz) 79 | ref = th.zeros(*sz) 80 | 81 | loss = self.loss_fn(im, ref).item() 82 | self.assertAlmostEqual(loss, 0.0) 83 | 84 | val = 0.34 85 | val2 = 0.7 86 | n = np.array(sz).prod() 87 | # tonemap 88 | val_t = val / (1 + val) 89 | val2_t = val2 / (1 + val2) 90 | target = (val_t-val2_t)**2 * 0.5 / n 91 | 92 | for dx in range(n): 93 | ref.zero_() 94 | im.zero_() 95 | ref.view(-1)[dx] = val 96 | im.view(-1)[dx] = val2 97 | loss = self.loss_fn(im, ref).item() 98 | self.assertAlmostEqual(loss, target, places=4) 99 | 100 | 101 | class TestTonemappedRelativeMSE(unittest.TestCase): 102 | def setUp(self): 103 | self.eps = 1e-2 104 | self.loss_fn = losses.TonemappedRelativeMSE(eps=self.eps) 105 | 106 | def test_basic(self): 107 | for sz in [[1, 3, 4, 5]]: 108 | im = th.zeros(*sz) 109 | ref = th.zeros(*sz) 110 | 111 | loss = self.loss_fn(im, ref).item() 112 | self.assertAlmostEqual(loss, 0.0) 113 | 114 | val = 0.34 115 | val2 = 0.7 116 | n = np.array(sz).prod() 117 | # tonemap 118 | val_t = val / (1 + val) 119 | val2_t = val2 / (1 + val2) 120 | target = ((val_t-val2_t)**2 / (val_t**2 + self.eps)) * 0.5 / n 121 | 122 | for dx in range(n): 123 | ref.zero_() 124 | im.zero_() 125 | ref.view(-1)[dx] = val 126 | im.view(-1)[dx] = val2 127 | loss = self.loss_fn(im, ref).item() 128 | self.assertAlmostEqual(loss, target, places=4) 129 | -------------------------------------------------------------------------------- /tests/test_modules.py: 
# ---------------------------------------------------------------------------
# tests/test_modules.py
# ---------------------------------------------------------------------------
# encoding: utf-8
"""Tests for helper PyTorch modules."""
import unittest
import warnings

import torch as th
import numpy as np

import ttools

import sbmc.modules as modules


LOG = ttools.get_logger(__name__)


class TestConvChain(unittest.TestCase):
    def test_basic(self):
        """Validates argument checking and layer structure of ConvChain."""
        # Invalid constructor arguments must be rejected early.
        self.assertRaises(ValueError, modules.ConvChain, 3, 3, depth=0)
        self.assertRaises(ValueError, modules.ConvChain, 3, 3, depth=-1)
        self.assertRaises(ValueError, modules.ConvChain, 3, 3,
                          output_type="randomstring")
        self.assertRaises(ValueError, modules.ConvChain, 3, 3,
                          activation="randomstring")
        self.assertRaises(ValueError, modules.ConvChain, 3, 3, normalize=True,
                          normalization_type="randomstring")

        for nrm in [False, True]:
            net = modules.ConvChain(3, 3, depth=3, width=32, normalize=nrm)
            # With normalization on, a BatchNorm layer sits between the conv
            # and the ReLU, shifting the child index by one.
            idx = 1 if nrm else 0
            self.assertIsInstance(net.layer_0, modules.ConvChain._ConvBNRelu)
            self.assertIsInstance(net.layer_1, modules.ConvChain._ConvBNRelu)
            self.assertIsInstance(net.prediction, th.nn.Conv2d)

            l0 = list(net.layer_0.layer.children())
            self.assertIsInstance(l0[0], th.nn.Conv2d)
            self.assertIsInstance(l0[1+idx], th.nn.ReLU)
            self.assertEqual(l0[0].kernel_size, (3, 3))
            self.assertEqual(l0[0].stride, (1, 1))
            self.assertEqual(l0[0].in_channels, 3)
            self.assertEqual(l0[0].out_channels, 32)

            l1 = list(net.layer_1.layer.children())
            self.assertIsInstance(l1[0], th.nn.Conv2d)
            self.assertIsInstance(l1[1+idx], th.nn.ReLU)
            self.assertEqual(l1[0].kernel_size, (3, 3))
            self.assertEqual(l1[0].stride, (1, 1))
            self.assertEqual(l1[0].in_channels, 32)
            self.assertEqual(l1[0].out_channels, 32)

            self.assertEqual(net.prediction.in_channels, 32)
            self.assertEqual(net.prediction.out_channels, 3)
            self.assertEqual(net.prediction.kernel_size, (3, 3))
            self.assertEqual(net.prediction.stride, (1, 1))

            # check batch norm
            if nrm:
                self.assertIsInstance(l0[1], th.nn.BatchNorm2d)
                self.assertIsInstance(l1[1], th.nn.BatchNorm2d)


class TestKernelApply(unittest.TestCase):
    def setUp(self):
        self.bs = 4
        self.c = 5
        self.h = 16
        self.w = 16
        self.ksize = 3

        self.data = th.zeros(self.bs, self.c, self.h, self.w)
        self.weights = th.zeros(self.bs, self.ksize*self.ksize, self.h,
                                self.w)

    def test_basic(self):
        """An all-ones kernel over a single impulse: splat spreads the value
        over the footprint, gather keeps it at the center."""
        y = self.h // 2
        x = self.w // 2
        cc = 0
        k = self.ksize

        val = 1.43
        self.data[0, cc, y, x] = val
        self.weights[0, :, y, x] = 1.0

        for splat in [True, False]:
            func = modules.KernelApply(softmax=False, splat=splat)
            output, sum_w = func(self.data, self.weights)

            self.assertAlmostEqual(output[0, cc, y, x].item(), val, places=4)

            if splat:
                # The impulse is splatted over the whole kernel footprint,
                # each location receiving a unit weight.
                for dy in range(-(k//2), k//2+1):
                    for dx in range(-(k//2), k//2+1):
                        self.assertAlmostEqual(
                            output[0, cc, y+dy, x+dx].item(), val, places=4)
                        self.assertAlmostEqual(
                            sum_w[0, 0, y+dy, x+dx].item(), 1, places=4)
            else:
                # Gather: all k*k unit weights accumulate at the center.
                self.assertAlmostEqual(sum_w[0, 0, y, x].item(), k*k, places=4)


class TestProgressiveKernelApply(unittest.TestCase):
    def setUp(self):
        self.bs = 4
        self.c = 5
        self.h = 16
        self.w = 16
        self.ksize = 3

        self.data = th.zeros(self.bs, self.c, self.h, self.w)
        self.weights = th.zeros(self.bs, self.ksize*self.ksize, self.h,
                                self.w)

    def test_init(self):
        """First call (no running state) must behave like a plain apply."""
        y = self.h // 2
        x = self.w // 2
        cc = 0
        k = self.ksize

        val = 1.43
        self.data[0, cc, y, x] = val
        self.weights[0, :, y, x] = 1.0

        for splat in [True, False]:
            func = modules.ProgressiveKernelApply(splat=splat)
            # Passing None for the running buffers initializes them.
            output, sum_w, max_w = func(self.data, self.weights,
                                        None, None, None)

            self.assertAlmostEqual(output[0, cc, y, x].item(), val, places=4)

            if splat:
                for dy in range(-(k//2), k//2+1):
                    for dx in range(-(k//2), k//2+1):
                        self.assertAlmostEqual(
                            output[0, cc, y+dy, x+dx].item(), val, places=4)
            else:
                self.assertAlmostEqual(sum_w[0, 0, y, x].item(), k*k, places=4)


if __name__ == "__main__":
    unittest.main()