├── LICENSE
├── README.md
├── experiments.py
├── figures
├── __pycache__
│ └── figutils.cpython-39.pyc
├── ajar
│ ├── figure.ipynb
│ └── generate_data.py
├── debiasing
│ ├── figure.ipynb
│ └── generate_data.py
├── figutils.py
├── gradient
│ ├── figure.ipynb
│ └── generate_data.py
├── landscapes
│ ├── figure.ipynb
│ └── generate_data.py
├── steady_state
│ ├── figure.ipynb
│ └── generate_data.py
├── stream
│ ├── figure.ipynb
│ └── generate_data.py
├── teaser
│ ├── figure.ipynb
│ ├── generate_data.py
│ ├── teaser-dark.png
│ ├── teaser-light.png
│ └── test.png
├── volumes
│ ├── figure.ipynb
│ ├── generate_data.py
│ └── render_dust.py
└── weights
│ ├── figure.ipynb
│ └── generate_data.py
├── largesteps.py
├── optimize.py
├── plugins
├── __init__.py
├── __pycache__
│ ├── __init__.cpython-39.pyc
│ ├── cv_integrator.cpython-39.pyc
│ ├── metaintegrator.cpython-39.pyc
│ ├── twostatebsdf.cpython-39.pyc
│ ├── twostatemedium.cpython-39.pyc
│ ├── twostatepath.cpython-39.pyc
│ ├── twostatevolpath.cpython-39.pyc
│ ├── volpathsimple.cpython-39.pyc
│ └── welford.cpython-39.pyc
├── cv_integrator.py
├── metaintegrator.py
├── twostatebsdf.py
├── twostatemedium.py
├── twostatepath.py
├── twostatevolpath.py
├── volpathsimple.py
└── welford.py
├── run_experiment.py
├── tutorial.ipynb
├── utils.py
└── vgg.py
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2023 Baptiste Nicolet, All rights reserved.
2 |
3 | Redistribution and use in source and binary forms, with or without
4 | modification, are permitted provided that the following conditions are met:
5 |
6 | 1. Redistributions of source code must retain the above copyright notice, this
7 | list of conditions and the following disclaimer.
8 |
9 | 2. Redistributions in binary form must reproduce the above copyright notice,
10 | this list of conditions and the following disclaimer in the documentation
11 | and/or other materials provided with the distribution.
12 |
13 | 3. Neither the name of the copyright holder nor the names of its contributors
14 | may be used to endorse or promote products derived from this software
15 | without specific prior written permission.
16 |
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
21 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
24 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 |
28 | You are under no obligation whatsoever to provide any bug fixes, patches, or
29 | upgrades to the features, functionality or performance of the source code
30 | ("Enhancements") to anyone; however, if you choose to make your Enhancements
31 | available either publicly, or directly to the author of this software, without
32 | imposing a separate written license agreement for such Enhancements, then you
33 | hereby grant the following license: a non-exclusive, royalty-free perpetual
34 | license to install, use, modify, prepare derivative works, incorporate into
35 | other computer software, distribute, and sublicense such enhancements or
36 | derivative works thereof, in binary and source code form.
37 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 | ACM Transactions on Graphics (Proceedings of SIGGRAPH), July 2023.
14 |
15 | Baptiste Nicolet
16 | ·
17 | Fabrice Rousselle
18 | ·
19 | Jan Novák
20 | ·
21 | Alexander Keller
22 | ·
23 | Wenzel Jakob
24 | ·
25 | Thomas Müller
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 | Table of Contents
44 |
45 | - Overview
46 | - Citation
47 | - Getting started
48 | - Running an optimization
49 | - Reproducing figures
50 | - Using control variates
51 | - Acknowledgements
52 |
53 |
54 |
55 | Overview
56 | --------
57 |
58 | This repository contains code examples to reproduce the results from the article:
59 |
60 | > Baptiste Nicolet and Fabrice Rousselle and Jan Novák and Alexander Keller and Wenzel Jakob and Thomas Müller, 2023.
61 | > Recursive Control Variates for Inverse Rendering.
62 | > In Transactions on Graphics (Proceedings of SIGGRAPH) 42(4).
63 |
64 | It uses the [Mitsuba 3](https://github.com/mitsuba-renderer/mitsuba3) differentiable renderer.
65 |
66 |
67 | Citation
68 | --------
69 |
70 | This code is released under the [BSD 3-Clause License](LICENSE). Additionally, if you are using this code in academic research, please cite our paper using the following BibTeX entry:
71 |
72 | ```bibtex
73 | @article{Nicolet2023Recursive,
74 | author = {Baptiste Nicolet and Fabrice Rousselle and Jan Novák and Alexander Keller and Wenzel Jakob and Thomas Müller},
75 | title = {Recursive Control Variates for Inverse Rendering},
76 | journal = {Transactions on Graphics (Proceedings of SIGGRAPH)},
77 | volume = {42},
78 | number = {4},
79 | year = {2023},
80 | month = aug,
81 | doi = {10.1145/3592139}
82 | }
83 | ```
84 |
85 |
86 | Getting started
87 | ---------------
88 |
89 | This code was tested on Ubuntu 22.04 with an NVIDIA RTX 4090 GPU.
90 | NVIDIA driver version 525.60.13 was used with CUDA 12.0.
91 |
92 | Mitsuba 3 was compiled with Clang++ 11.0.0 and the provided scripts were run with Python 3.9.12.
93 | The `cuda_ad_rgb` Mitsuba variant was selected, although the `llvm_ad_rgb` variant is also compatible in principle.
94 |
95 | This implementation relies on modifications to the Mitsuba source code, which are available on the `unbiased-volume-opt` branch of the `mitsuba3` repository.
96 | **Please make sure to checkout the correct branch** as follows.
97 | Note the `--recursive` and `--branch` flags:
98 |
99 | ```bash
100 | # Cloning Mitsuba 3 and this repository
101 | git clone --recursive https://github.com/mitsuba-renderer/mitsuba3 --branch unbiased-volume-opt
102 | git clone --recursive https://github.com/rgl-epfl/recursive_control_variates
103 |
104 | # Building Mitsuba 3, including the project-specific modifications
105 | cd mitsuba3
106 | mkdir build && cd build
107 | cmake -GNinja ..
108 | ninja
109 | ```
110 |
111 | The `cuda_ad_rgb` and `llvm_ad_rgb` variants should be included by default.
112 | Please see the [Mitsuba 3 documentation](https://mitsuba.readthedocs.io/en/latest/#) for complete instructions on building and using the system.
113 |
114 | The scene data must be downloaded and unzipped at the root of the project folder:
115 |
116 | ```bash
117 | cd recursive_control_variates
118 | wget https://rgl.s3.eu-central-1.amazonaws.com/media/papers/Nicolet2023Recursive.zip
119 | unzip Nicolet2023Recursive.zip
120 | rm Nicolet2023Recursive.zip
121 | ls scenes
122 | # The available scenes should now be listed (one directory per scene)
123 | ```
124 |
125 | Running an optimization
126 | -----------------------
127 |
128 | Navigate to this project's directory and make sure that the Mitsuba 3 libraries built in the previous step are made available in your current session using `setpath.sh`:
129 |
130 | ```bash
131 | cd recursive_control_variates
132 | source ../mitsuba3/build/setpath.sh
133 | # The following should execute without error and without output
134 | # (use the variant 'llvm_ad_rgb' if your system does not support the CUDA backend):
135 | python3 -c "import mitsuba as mi; mi.set_variant('cuda_ad_rgb')"
136 | ```
137 |
138 | From here, the script `run_experiment.py` can be used to run inverse rendering examples using different methods:
139 |
140 | ```bash
141 | python3 run_experiment.py teapot --method cv_ps
142 | ```
143 |
144 | The script expects a scene name (`teapot` in the example above). The scene
145 | configurations are defined in `experiments.py`. You can also specify with which method you want to run the optimisation:
146 | - `baseline`: Optimisation with standard differentiable rendering integrators.
147 | - `cv_pss`: Optimisation using our control variates, with the 'primary sample space' implementation (see the paper for an explanation).
148 | - `cv_ps`: Optimisation using our control variates, with the 'path space' implementation.
149 |
150 | If no method is specified, both `baseline` and `cv_ps` optimisations will be run.
151 |
152 | Reproducing figures
153 | -------------------
154 |
155 | We provide scripts to reproduce the figures from the paper. These are located in
156 | the `figures` subfolder. Each figure has its own subfolder, with a
157 | `generate_data.py`, that will run the relevant experiments needed to generate
158 | the figure. Then, one can run the `figure.ipynb` notebook to generate the figure.
159 |
160 |
161 | Using control variates
162 | ---------------------------------------------
163 |
164 | In [`tutorial.ipynb`](tutorial.ipynb), we show how our control variates can be easily integrated
165 | into an optimisation. One merely needs to use the `TwoState` adapter BSDF (resp.
166 | medium) for the surface (resp. medium) being optimised, and use the `twostate`
167 | variant of the `prb` (resp. `prbvolpath`) integrator.
168 |
169 |
170 | Acknowledgements
171 | ----------------
172 | This README template was created by [Miguel Crespo](https://github.com/mcrescas/viltrum-mitsuba/blob/457a7ffbbc8b8b5ba9c40d6017b5d08f0f41a886/README.md), and its structure inspired by [Merlin Nimier-David](https://github.com/rgl-epfl/unbiased-inverse-volume-rendering/blob/master/README.md).
173 | Many figure generating utility functions were written by [Delio Vicini](https://dvicini.github.io).
174 |
175 | The `volpathsimple` integrator was implemented by [Merlin Nimier-David](https://github.com/rgl-epfl/unbiased-inverse-volume-rendering).
176 |
177 | Volumes, environment maps and 3D models were generously provided by [JangaFX](https://jangafx.com/software/embergen/download/free-vdb-animations/), [PolyHaven](https://polyhaven.com/hdris), [Benedikt Bitterli](https://benedikt-bitterli.me/resources/) and [vajrablue](https://blendswap.com/blend/28458).
178 |
--------------------------------------------------------------------------------
/experiments.py:
--------------------------------------------------------------------------------
1 | import mitsuba as mi
2 | mi.set_variant('cuda_ad_rgb')
3 | import drjit as dr
4 |
5 | import numpy as np
6 |
7 | import os
8 | import sys
9 | import argparse
10 |
11 | import plugins
12 | from optimize import run_opt
13 | from largesteps import *
14 |
15 | from mitsuba.scalar_rgb import Transform4f as T
16 |
17 | SCENES_DIR = os.path.join(os.path.dirname(__file__), "scenes")
18 | OUTPUT_DIR = os.path.join(os.path.dirname(__file__), "output")
19 |
def scene_janga(method, output_dir):
    """Build the optimization configuration for the 'janga' smoke scene.

    A heterogeneous smoke volume inside a unit cube is observed by a batch of
    8 perspective sensors placed on a circle around it, lit by an environment
    map. The returned dict is the parameter bundle consumed by ``run_opt``
    (see ``optimize.py``).

    Args:
        method: Optimization method name; ``'cv_ps'`` enables the two-state
            medium adapter and the ``twostateprbvolpath`` integrator.
        output_dir: Directory where results are written.

    Returns:
        dict: Scene, optimized variables and hyper-parameters for ``run_opt``.
    """
    sensor_count = 8
    # Visualisation resolution
    final_res = (720*sensor_count, 720)
    # Training resolution
    resx = 256
    resy = 256

    # All per-view sensors are grouped into one 'batch' sensor so a single
    # render produces every view side by side (hence width = resx * count).
    batch_sensor = {
        'type': 'batch',
        'film': {
            'type': 'hdrfilm',
            'width': resx*sensor_count, 'height': resy,
        },
        'sampler': {
            'type': 'independent',
            'sample_count': 1
        }
    }

    # Non-uniform scale of the bounding cube; cameras target its center.
    scale = [1.0, 1.2, 1.5]
    target = [0.5 * s for s in scale]
    d = 2  # camera distance from the vertical axis through the target
    h = 0  # camera height
    for i in range(sensor_count):
        # Evenly spaced azimuth angles; +0.1 offsets the first camera slightly.
        theta = 2*np.pi / sensor_count * i + 0.1
        batch_sensor[f"sensor_{i:02d}"] = {
            'type': 'perspective',
            'fov': 45,
            'to_world': T.look_at(target=target, origin=[target[0]+d*np.cos(theta), h, target[2]+d*np.sin(theta)], up=[0, 1, 0]),
        }

    # Ground-truth medium used to render the reference images.
    medium = {
        'type': 'heterogeneous',
        'sigma_t': {
            'type': 'gridvolume',
            'filename': os.path.join(SCENES_DIR, 'janga-smoke/volumes/janga-smoke-264-136-136.vol'),
            'to_world': T.scale(scale),
            'use_grid_bbox': False,
            'accel': False
        },
        'albedo': {
            'type': 'gridvolume',
            'filename': os.path.join(SCENES_DIR, 'janga-smoke/volumes/albedo-noise-256-128-128.vol'),
            'to_world': T.scale(scale),
        },
        'scale': 20.0,

        'sample_emitters': True,
        'has_spectral_extinction': False,
        'majorant_resolution_factor': 0
    }

    # The volume is bounded by a null-BSDF cube; light enters freely.
    scene_dict = {
        'type': 'scene',
        'object': {
            'type': 'obj',
            'filename': os.path.join(SCENES_DIR, 'common/meshes/cube_unit.obj'),
            'bsdf': {'type': 'null'},
            'to_world': T.scale(scale),
        },
        'envmap': {
            'type': 'envmap',
            'filename': os.path.join(SCENES_DIR, 'common/textures/alps_field_4k.exr'),
            'scale': 0.5,
        },
        'sensor': batch_sensor
    }

    if method == 'cv_ps':
        # Path-space control variates need the two-state medium adapter that
        # holds the 'old', 'new' and 'incoming' states (all start identical).
        scene_dict['object']['interior'] = {
            'type': 'twostatemedium',
            'old': medium,
            'new': medium,
            'incoming': medium
        }
    else:
        scene_dict['object']['interior'] = medium

    # Initial optimization grids are coarse (16^3); the 'upsample' schedule
    # below presumably refines them during the run — confirm in optimize.py.
    v_res = 16
    scene = mi.load_dict(scene_dict)
    params = {
        'scene': scene,
        'variables': {
            'object.interior_medium.sigma_t.data': {
                'init': dr.full(mi.TensorXf, 0.04, (v_res, v_res, v_res, 1)),
                'clamp': (0.0, 250.0),
            },
            'object.interior_medium.albedo.data': {
                'init': dr.full(mi.TensorXf, 0.6, (v_res, v_res, v_res, 3)),
                'clamp': (0.0, 1.0),
                'lr_factor': 2.0
            },
        },
        'use_majorant_supergrid': True,
        'recomp_freq': 50,
        'upsample': [0.04, 0.16, 0.36, 0.64],
        'schedule': [0.75, 0.85, 0.95],
        'save': True,
        'benchmark': True,
        'n_steps': 2000,
        'method': method,
        'lr': 5e-3,
        'spp': 4,
        'spp_grad': 4,
        'spp_ref': 128,
        'ref_passes': 16,
        'spp_inf': 64,
        'final_res': final_res,
        'output_dir': output_dir,
        'integrator': mi.load_dict({'type': 'twostateprbvolpath' if method == 'cv_ps' else 'prbvolpath', 'max_depth': 64, 'rr_depth': 64}),
        'adjoint_integrator': mi.load_dict({'type': 'volpathsimple', 'max_depth': 64, 'rr_depth': 64})
    }
    return params
134 |
def scene_dust_devil(method, output_dir):
    """Build the optimization configuration for the 'dust_devil' tornado scene.

    A heterogeneous dust volume with a Henyey-Greenstein phase function is
    observed by 8 perspective sensors on a circle, lit by an environment map.
    The returned dict is the parameter bundle consumed by ``run_opt``.

    Args:
        method: Optimization method name; ``'cv_ps'`` enables the two-state
            medium adapter and the ``twostateprbvolpath`` integrator.
        output_dir: Directory where results are written.

    Returns:
        dict: Scene, optimized variables and hyper-parameters for ``run_opt``.
    """
    v_res = 16  # initial resolution of the optimized grids
    sensor_count = 8
    # Visualisation resolution (portrait aspect for the tall tornado)
    final_res = (720*sensor_count, 1280)
    # Training resolution
    resx = 256
    resy = 456

    # All views rendered at once through a single 'batch' sensor.
    batch_sensor = {
        'type': 'batch',
        'film': {
            'type': 'hdrfilm',
            'width': resx*sensor_count, 'height': resy,
        },
        'sampler': {
            'type': 'independent',
            'sample_count': 1
        }
    }

    target = [0.5, 0.5, 0.5]  # center of the unit cube
    d = 1    # camera distance from the vertical axis
    h = 0.5  # camera height
    for i in range(sensor_count):
        theta = 2*np.pi / sensor_count * i
        batch_sensor[f"sensor_{i:02d}"] = {
            'type': 'perspective',
            'fov': 45,
            'to_world': T.look_at(target=target, origin=[target[0]+d*np.cos(theta), h, target[2]+d*np.sin(theta)], up=[0, 1, 0]),
        }

    # Ground-truth medium; strongly forward-scattering (g = 0.877) dust.
    medium = {
        'type': 'heterogeneous',
        'sigma_t': {
            'type': 'gridvolume',
            'filename': os.path.join(SCENES_DIR, 'dust-devil/volumes/embergen_dust_devil_tornado_a_50-256-256-256.vol'),
            'use_grid_bbox': False,
            'accel': False
        },
        'albedo': {
            'type': 'gridvolume',
            'filename': os.path.join(SCENES_DIR, 'dust-devil/volumes/albedo-constant-sand-256-256-256.vol'),
            'use_grid_bbox': False,
            'accel': False
        },
        'phase': {
            'type': 'hg',
            'g': 0.877
        },
        'scale': 100.0,

        'sample_emitters': True,
        'has_spectral_extinction': False,
        'majorant_resolution_factor': 0
    }
    # Volume bounded by a null-BSDF unit cube, lit by an environment map.
    scene_dict = {
        'type': 'scene',
        'object': {
            'type': 'obj',
            'filename': os.path.join(SCENES_DIR, 'common/meshes/cube_unit.obj'),
            'bsdf': {'type': 'null'},
        },
        'envmap': {
            'type': 'envmap',
            'filename': os.path.join(SCENES_DIR, 'common/textures/kloofendal_38d_partly_cloudy_4k.exr'),
            'scale': 1.0,
        },
        'sensor': batch_sensor
    }

    if method == 'cv_ps':
        # Two-state adapter required by the path-space control variates.
        scene_dict['object']['interior'] = {
            'type': 'twostatemedium',
            'old': medium,
            'new': medium,
            'incoming': medium
        }
    else:
        scene_dict['object']['interior'] = medium

    scene = mi.load_dict(scene_dict)
    params = {
        'scene': scene,
        'variables': {
            'object.interior_medium.sigma_t.data': {
                'init': dr.full(mi.TensorXf, 0.04, (v_res, v_res, v_res, 1)),
                'clamp': (0.0, 250.0),
            },
            'object.interior_medium.albedo.data': {
                'init': dr.full(mi.TensorXf, 0.6, (v_res, v_res, v_res, 3)),
                'clamp': (0.0, 1.0),
                'lr_factor': 2.0
            },
        },
        'use_majorant_supergrid': True,
        'recomp_freq': 50,
        'upsample': [0.04, 0.16, 0.36, 0.64],
        'schedule': [0.75, 0.85, 0.95],
        'save': True,
        'benchmark': True,
        'n_steps': 2000,
        'method': method,
        'lr': 5e-4,
        'spp': 16,
        'spp_grad': 16,
        'spp_ref': 128,
        'ref_passes': 16,
        'spp_inf': 64,
        'final_res': final_res,
        'output_dir': output_dir,
        'integrator': mi.load_dict({'type': 'twostateprbvolpath' if method == 'cv_ps' else 'prbvolpath', 'max_depth': 64, 'rr_depth': 64}),
        'adjoint_integrator': mi.load_dict({'type': 'volpathsimple', 'max_depth': 64, 'rr_depth': 64})

    }

    return params
250 |
def scene_rover(method, output_dir):
    """Build the optimization configuration for the 'rover' scene.

    A sand-like heterogeneous volume is optimized inside a scaled/translated
    cube, observed by 10 perspective sensors. Unlike the other volume scenes,
    the reference images come from a *separate* XML scene
    (``rover/rover-ref.xml``) and are rendered here on first use, then cached
    on disk next to ``output_dir``.

    Args:
        method: Optimization method name; ``'cv_ps'`` enables the two-state
            medium adapter and the ``twostateprbvolpath`` integrator.
        output_dir: Directory where results are written.

    Returns:
        dict: Scene, optimized variables and hyper-parameters for ``run_opt``.
    """
    sensor_count = 10
    final_res = (720*sensor_count), 720  # visualisation resolution
    resx = 256  # training resolution
    resy = 256
    v_res = 16  # initial resolution of the optimized grids

    batch_sensor = {
        'type': 'batch',
        'film': {
            'type': 'hdrfilm',
            'width': resx*sensor_count, 'height': resy,
        },
        'sampler': {
            'type': 'independent',
            'sample_count': 1
        }
    }

    scale = [6.5, 7.5, 10]  # cube scale
    tr = [-3, -1, -5]       # cube translation
    target = [-0.25, 2.75, 0]
    d = 12    # camera distance from the vertical axis
    h = 10.0  # camera height
    for i in range(sensor_count):
        theta = 2*np.pi / sensor_count * i + 0.1
        batch_sensor[f"sensor_{i:02d}"] = {
            'type': 'perspective',
            'fov': 45,
            'to_world': T.look_at(target=target, origin=[target[0]+d*np.cos(theta), h, target[2]+d*np.sin(theta)], up=[0, 1, 0]),
        }

    # The reference images for this scene are rendered at the end of this
    # function (cached on disk), not loaded from precomputed data.

    to_world = T.translate(tr).scale(scale)
    medium = {
        'type': 'heterogeneous',
        'sigma_t': {
            'type': 'gridvolume',
            'filename': os.path.join(SCENES_DIR, 'common/volumes/sigma_t-constant-sand-256-256-256.vol'),
            'to_world': to_world,
            'use_grid_bbox': False,
            'accel': False
        },
        'albedo': {
            'type': 'gridvolume',
            'filename': os.path.join(SCENES_DIR, 'common/volumes/albedo-constant-sand-256-256-256.vol'),
            'to_world': to_world,
            'use_grid_bbox': False,
            'accel': False
        },
        'scale': 1.0,

        'sample_emitters': True,
        'has_spectral_extinction': False,
        'majorant_resolution_factor': 8
    }

    scene_dict = {
        'type': 'scene',
        'object': {
            'type': 'obj',
            'filename': os.path.join(SCENES_DIR, 'common/meshes/cube_unit.obj'),
            'bsdf': {'type': 'null'},
            'to_world': to_world,
        },
        'envmap': {
            'type': 'envmap',
            'filename': os.path.join(SCENES_DIR, 'common/textures/clarens_night_02_4k.exr'),
        },
        'sensor': batch_sensor
    }

    if method == 'cv_ps':
        # Two-state adapter required by the path-space control variates.
        scene_dict['object']['interior'] = {
            'type': 'twostatemedium',
            'old': medium,
            'new': medium,
            'incoming': medium
        }
    else:
        scene_dict['object']['interior'] = medium


    params = {
        'scene': mi.load_dict(scene_dict),
        'variables': {
            'object.interior_medium.sigma_t.data': {
                'init': dr.full(mi.TensorXf, 0.04, (v_res, v_res, v_res, 1)),
                'clamp': (0.0, 250.0),
            },
            'object.interior_medium.albedo.data': {
                'init': dr.full(mi.TensorXf, 0.6, (v_res, v_res, v_res, 3)),
                'clamp': (0.0, 1.0),
                'lr_factor': 2.0
            },
        },
        'upsample': [0.04, 0.16, 0.36, 0.64],
        'schedule': [0.75, 0.85, 0.95],
        'save': True,
        'benchmark': True,
        'n_steps': 2000,
        'recomp_freq': 50,
        'method': method,
        'lr': 1e-2,
        'spp': 8,
        'spp_grad': 8,
        'spp_ref': 128,
        'ref_passes': 16,
        'spp_inf': 64,
        'final_res': final_res,
        'output_dir': output_dir,
        'integrator': mi.load_dict({'type': 'twostateprbvolpath' if method == 'cv_ps' else 'prbvolpath', 'max_depth': 64, 'rr_depth': 64}),
        'adjoint_integrator': mi.load_dict({'type': 'volpathsimple', 'max_depth': 64, 'rr_depth': 64})

    }

    # Render and cache the reference images (training res + display res) if
    # they are not on disk yet. Each is averaged over 'ref_passes' renders
    # with distinct seeds to reduce Monte-Carlo noise.
    ref_name = os.path.join(os.path.dirname(output_dir), "img_ref.exr")
    if not os.path.exists(ref_name):
        sensor = mi.load_dict(batch_sensor)
        # NOTE(review): scene_dict['envmap']['filename'] is already an
        # absolute path, so the outer os.path.join(SCENES_DIR, ...) is a
        # no-op (os.path.join discards SCENES_DIR) — harmless but redundant.
        scene = mi.load_file(os.path.join(SCENES_DIR, "rover", "rover-ref.xml"), envmap_filename=os.path.join(SCENES_DIR, scene_dict['envmap']['filename']))
        img_ref = mi.TensorXf(0.0)
        ref_passes = params['ref_passes']
        spp_ref = params['spp_ref']
        from tqdm import trange
        for j in trange(ref_passes):
            img_ref += mi.render(scene, seed = 17843 + j, sensor=sensor, integrator=params['integrator'], spp=spp_ref) / ref_passes
        mi.Bitmap(img_ref).write_async(ref_name)
        sensor_params = mi.traverse(sensor)

        # Re-render at the final visualisation resolution for display.
        sensor_params['film.size'] = final_res
        sensor_params.update()
        img = mi.TensorXf(0.0)
        for j in trange(ref_passes):
            img += mi.render(scene, seed = 17843 + j, sensor=sensor, integrator=params['integrator'], spp=spp_ref) / ref_passes
        mi.Bitmap(img).write_async(os.path.join(os.path.dirname(output_dir), "img_ref_display.exr"))

        # Free the reference scene/sensor before the optimization starts.
        del scene
        del sensor

    return params
392 |
def scene_bunnies(method, output_dir):
    """Configuration for the 'bunnies' scene (loaded from XML).

    Three meshes are optimized: a medium extinction, a BSDF roughness and a
    diffuse reflectance. Returns the parameter bundle consumed by ``run_opt``.
    """
    # The 'cv_ps' method needs the scene variant with two-state adapters.
    xml_name = "scene_twostates.xml" if method == 'cv_ps' else "scene.xml"
    scene = mi.load_file(os.path.join(SCENES_DIR, "bunnies", xml_name), resx=1280, resy=720)

    # Optimized parameters with their initial values and clamping ranges.
    variables = {
        'PLYMesh_1.interior_medium.sigma_t.value.value': {
            'init': 1.0,
            'clamp': (1e-4, 10.0),
        },
        'PLYMesh_2.bsdf.alpha.value': {
            'init': 0.5,
            'clamp': (1e-4, 1.0),
        },
        'PLYMesh_3.bsdf.reflectance.value': {
            'init': [0.5, 0.5, 0.5],
            'clamp': (1e-4, 1.0),
        },
    }

    integrator_variant = 'twostateprbvolpath' if method == 'cv_ps' else 'prbvolpath'
    return {
        'scene': scene,
        'variables': variables,
        'save': True,
        'n_steps': 300,
        'recomp_freq': 1,
        'method': method,
        'lr': 5e-2,
        'spp': 1,
        'spp_grad': 16,
        'spp_ref': 4096,
        'spp_inf': 1024,
        'output_dir': output_dir,
        'integrator': mi.load_dict({'type': integrator_variant, 'max_depth': 64, 'rr_depth': 64}),
    }
425 |
def scene_ajar(method, output_dir):
    """Configuration for the 'veach-ajar' scene (loaded from XML).

    Optimizes a textured reflectance (with a large-steps Cholesky
    preconditioner) plus the tea medium's albedo and extinction. Returns the
    parameter bundle consumed by ``run_opt``.
    """
    # 'cv_ps' uses the scene variant wrapped with two-state adapters.
    xml_name = "scene_twostates.xml" if method == 'cv_ps' else "scene.xml"
    scene = mi.load_file(os.path.join(SCENES_DIR, "veach-ajar", xml_name), resx=1280, resy=720)

    # Smoothness weight of the large-steps preconditioner.
    lambda_ = 29.0
    texture_init = dr.full(mi.TensorXf, 0.1, (256, 512, 3))
    variables = {
        'LandscapeBSDF.reflectance.data': {
            'init': texture_init,
            'clamp': (1e-4, 1.0),
            'largesteps': CholeskySolver(dr.full(mi.TensorXf, 0.1, (256, 512, 3)), lambda_),
        },
        'tea.albedo.value.value': {
            'init': [0.1, 0.1, 0.1],
            'clamp': (1e-4, 1.0),
        },
        'tea.sigma_t.value.value': {
            'init': 0.1,
            'clamp': (1e-4, 10.0),
            'lr_factor': 2.0,
        },
    }

    integrator_variant = 'twostateprbvolpath' if method == 'cv_ps' else 'prbvolpath'
    return {
        'scene': scene,
        'variables': variables,
        'save': True,
        'schedule': [0.75, 0.85, 0.95],
        'n_steps': 500,
        'recomp_freq': 5,
        'method': method,
        'denoise': False,
        'lr': 2e-2,
        'spp': 16,
        'spp_grad': 16,
        'spp_ref': 4096,
        'ref_passes': 16,
        'spp_inf': 4096,
        'output_dir': output_dir,
        'integrator': mi.load_dict({'type': integrator_variant, 'max_depth': 64}),
    }
462 |
def scene_ajar_bias(method, output_dir):
    """'veach-ajar' configuration with bias measurement enabled at step 255."""
    config = scene_ajar(method, output_dir)
    config['bias_steps'] = [255]
    return config
467 |
def scene_dragon(method, output_dir):
    """Configuration for the 'dragon' scene (loaded from XML).

    A single scalar is optimized: the extinction of the dragon's interior
    medium. Returns the parameter bundle consumed by ``run_opt``.
    """
    # 'cv_ps' uses the scene variant wrapped with two-state adapters.
    xml_name = "scene_twostates.xml" if method == 'cv_ps' else "scene.xml"
    scene = mi.load_file(os.path.join(SCENES_DIR, "dragon", xml_name), resx=500, resy=300)

    integrator_variant = 'twostateprbvolpath' if method == 'cv_ps' else 'prbvolpath'
    params = {
        'scene': scene,
        'variables': {
            'PLYMesh_2.interior_medium.sigma_t.value.value': {
                'init': 0.1,
                'clamp': (1e-4, 10.0),
            },
        },
        'save': True,
        'n_steps': 750,
        'method': method,
        'lr': 5e-2,
        'spp': 1,
        'spp_grad': 16,
        'spp_ref': 2048,
        'spp_inf': 256,
        'output_dir': output_dir,
        'integrator': mi.load_dict({'type': integrator_variant, 'max_depth': 64, 'rr_depth': 64}),
    }

    return params
491 |
def scene_teapot(method, output_dir):
    """Configuration for the 'teapot-full' scene (loaded from XML).

    Optimizes the tea medium's extinction and albedo. Returns the parameter
    bundle consumed by ``run_opt``.

    Args:
        method: Optimization method name; ``'cv_ps'`` selects the two-state
            scene variant and the ``twostateprbvolpath`` integrator.
        output_dir: Directory where results are written.

    Returns:
        dict: Scene, optimized variables and hyper-parameters for ``run_opt``.
    """
    # Fix: the two-state variant was referenced as "scene_twostates" without
    # the .xml extension, unlike every other scene (bunnies, ajar, dragon),
    # which would make mi.load_file fail for method == 'cv_ps'.
    scene = mi.load_file(os.path.join(SCENES_DIR, "teapot-full", "scene_twostates.xml" if method == 'cv_ps' else "scene.xml"), resx=500, resy=300)
    params = {
        'scene': scene,
        'variables': {
            'tea.sigma_t.value.value': {
                'init': 0.1,
                'clamp': (1e-4, 1.0),
            },
            'tea.albedo.value.value': {
                'init': [0.2, 0.2, 0.2],
                'clamp': (1e-4, 1.0),
            }
        },
        'save': True,
        'n_steps': 250,
        'method': method,
        'lr': 1e-2,
        'spp': 1,
        'spp_grad': 16,
        'spp_ref': 8192,
        'spp_inf': 64,
        'output_dir': output_dir,
        'integrator': mi.load_dict({'type': 'twostateprbvolpath' if method == 'cv_ps' else 'prbvolpath', 'max_depth': 64, 'rr_depth': 64})
    }

    return params
519 |
def scene_cornell_vgg(method, output_dir):
    """Configuration for the Cornell-box scene optimized with a VGG loss.

    The back wall's textured diffuse reflectance is optimized against a
    concrete-texture reference. Returns the parameter bundle consumed by
    ``run_opt``.
    """
    scene_dict = mi.cornell_box()
    # Square 512x512 film for training.
    film = scene_dict['sensor']['film']
    film['width'] = 512
    film['height'] = 512

    # Ground-truth BSDF of the back wall: a concrete texture, flipped in UV.
    back_bsdf = {
        'type': 'diffuse',
        'reflectance': {
            'type': 'bitmap',
            'filename': os.path.join(SCENES_DIR, 'concrete.exr'),
            'to_uv': mi.ScalarTransform4f.rotate([1,0,0], 180),
        },
    }

    if method == 'cv_ps':
        # Path-space control variates need the two-state BSDF adapter.
        scene_dict['back']['bsdf'] = {
            'type': 'twostate',
            'old': back_bsdf,
            'new': back_bsdf,
            'incoming': back_bsdf,
        }
    else:
        scene_dict['back']['bsdf'] = back_bsdf

    integrator_variant = 'twostateprb' if method == 'cv_ps' else 'prb'
    return {
        'scene': mi.load_dict(scene_dict),
        'variables': {
            'back.bsdf.reflectance.data': {
                'init': dr.full(mi.TensorXf, 0.5, (512, 512, 3)),
                'clamp': (1e-3, 1.0),
            },
        },
        'save': True,
        'n_steps': 500,
        'final_res': (1280, 1280),
        'loss': 'VGG',
        'method': method,
        'lr': 2e-2,
        'spp': 1,
        'spp_grad': 16,
        'ref_passes': 8,
        'spp_ref': 1024,
        'spp_inf': 128,
        'output_dir': output_dir,
        'integrator': mi.load_dict({'type': integrator_variant, 'max_depth': 64, 'rr_depth': 64}),
    }
566 |
def scene_kitchen(method, output_dir):
    """Configuration for the 'kitchen' scene (loaded from XML).

    No variables are optimized ('variables' is empty) and only one step is
    run; this configuration only renders the scene through ``run_opt``.
    """
    scene = mi.load_file(os.path.join(SCENES_DIR, "kitchen", "scene.xml"))
    integrator_variant = 'twostateprb' if method == 'cv_ps' else 'prb'
    return {
        'scene': scene,
        'variables': {},
        'save': True,
        'n_steps': 1,
        'method': method,
        'denoise': False,
        'lr': 2e-2,
        'spp': 1,
        'spp_grad': 16,
        'spp_ref': 4096,
        'ref_passes': 4,
        'output_dir': output_dir,
        'integrator': mi.load_dict({'type': integrator_variant, 'max_depth': 64, 'rr_depth': 64}),
    }
584 |
# Every scene configuration function defined above is exposed on the command
# line under its name minus the 'scene_' prefix.
AVAILABLE_SCENES = [n[len('scene_'):] for n in globals() if n.startswith('scene_')]

if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Run a Mitsuba experiment')
    parser.add_argument('scene', type=str, choices=AVAILABLE_SCENES, help='Name of the scene to use')
    parser.add_argument('method', type=str, help='Name of the optimization method to use')
    # Optional per-scene hyper-parameter overrides (None = keep scene default).
    overrides = [
        ('n_steps', int, 'Number of optimization steps'),
        ('lr', float, 'Learning rate'),
        ('spp', int, 'Samples per pixel for the primal rendering'),
        ('spp_grad', int, 'Samples per pixel for the adjoint rendering'),
        ('beta1', float, 'β₁ parameter for statistics'),
        ('beta2', float, 'β₂ parameter for statistics'),
    ]
    for opt_name, opt_type, opt_help in overrides:
        parser.add_argument(f'--{opt_name}', type=opt_type, help=opt_help)

    args = parser.parse_args()

    output_dir = os.path.join(os.path.dirname(__file__), "output", args.scene, args.method)
    # Dispatch to the matching scene_* configuration function.
    params = globals()[f"scene_{args.scene}"](args.method, output_dir)

    # Command-line values take precedence over the per-scene defaults.
    for opt_name, _, _ in overrides:
        value = getattr(args, opt_name)
        if value is not None:
            params[opt_name] = value

    result_dict = run_opt(params)

    np.savez(os.path.join(output_dir, "result.npz"), **result_dict)
613 |
--------------------------------------------------------------------------------
/figures/__pycache__/figutils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rgl-epfl/recursive_control_variates/be19f1be3f054f9497eeff97530e1827ad0d8d56/figures/__pycache__/figutils.cpython-39.pyc
--------------------------------------------------------------------------------
/figures/ajar/figure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import sys\n",
10 | "sys.path.append(\"..\")\n",
11 | "from figutils import *"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": null,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "def l1(x, y):\n",
21 | " return dr.mean(dr.abs(x-y))[0]\n",
22 | "def l2(x, y):\n",
23 | " return dr.mean(dr.sqr(x-y))[0]"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "\n",
33 | "losses = [\"l1\", \"l2\"]\n",
34 | "loss_names = [r\"$\\mathcal{L}^1$\", r\"$\\mathcal{L}^2$\"]\n",
35 | "methods = [\"baseline\", \"cv_ps\"]\n",
36 | "method_names = [\"Baseline\", \"Ours\"]\n",
37 | "use_denoising = [False, True]\n",
38 | "scene_name = \"ajar\"\n",
39 | "img_ref = mi.TensorXf(mi.Bitmap(os.path.join(OUTPUT_DIR, f\"{scene_name}_l1\", \"img_ref.exr\")))\n",
40 | "imgs = []\n",
41 | "final_losses = []\n",
42 | "for i, loss in enumerate(losses):\n",
43 | " imgs.append([])\n",
44 | " final_losses.append([])\n",
45 | " for j, method in enumerate(methods):\n",
46 | " imgs[i].append([])\n",
47 | " final_losses[i].append([])\n",
48 | " for denoised in use_denoising:\n",
49 | " base_dir = os.path.join(OUTPUT_DIR, f\"{scene_name}_{loss}{'_denoised' if denoised else ''}\")\n",
50 | " img = mi.TensorXf(mi.Bitmap(os.path.join(base_dir, method, \"img_final.exr\")))\n",
51 | " imgs[i][j].append(img)\n",
52 | " # final_losses[i].append(np.load(os.path.join(base_dir, method, \"result.npz\"))[\"loss\"][-1])\n",
53 | " # final_losses[i].append(dr.mean(dr.sqr(imgs[i][-1] - img_ref))[0])\n",
54 | " final_losses[i][j].append((l1(img, img_ref), l2(img, img_ref)))\n",
55 | "\n"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "metadata": {},
62 | "outputs": [],
63 | "source": [
64 | "sns.set_style('white')"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": null,
70 | "metadata": {},
71 | "outputs": [],
72 | "source": [
73 | "n_cols = 10\n",
74 | "n_rows = 2\n",
75 | "\n",
76 | "# crop1 = [2*h//3, h//5+s//10]\n",
77 | "crop1 = [0.4, 0.22, 0.1]\n",
78 | "# crop2 = [8*h//7, 2*h//3 + s//10]\n",
79 | "# crop2 = [0.5, 0.52, 0.2]\n",
80 | "crop2 = [0.5, 0.52, 0.12]\n",
81 | "crops = [crop1, crop2]\n",
82 | "crop_colors = [\"r\", \"g\"]\n",
83 | "\n",
84 | "import matplotlib.gridspec as gridspec\n",
85 | "import matplotlib.patches as patches\n",
86 | "\n",
87 | "\n",
88 | "h,w,_ = img_ref.shape\n",
89 | "img_r = w/h\n",
90 | "inset_r = 1.0\n",
91 | "inner_wspace = 0.05\n",
92 | "inner_hspace = inner_wspace*inset_r\n",
93 | "\n",
94 | "insets_r = gridspec_aspect(2, 4, 1, 1, wspace=inner_wspace, hspace=inner_hspace)\n",
95 | "ref_r = gridspec_aspect(2, 1, 1, 1, hspace=inner_hspace)\n",
96 | "\n",
97 | "outer_wspace = 0.05\n",
98 | "width_ratios = [img_r, insets_r, insets_r, ref_r]\n",
99 | "outer_aspect = gridspec_aspect(1, 4, width_ratios, 1, wspace=outer_wspace)\n",
100 | "\n",
101 | "\n",
102 | "fig = plt.figure(1, figsize=(PAGE_WIDTH, PAGE_WIDTH / outer_aspect))\n",
103 | "# outer = fig.add_gridspec(1, 4, width_ratios=[w/total_width, 2*h/total_width, 2*h/total_width, h/2/total_width], wspace=outer_wspace)\n",
104 | "outer = fig.add_gridspec(1, 4, width_ratios=width_ratios, wspace=outer_wspace)\n",
105 | "gs_ref = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=outer[0], wspace=outer_wspace)\n",
106 | "gs_l1_inset = gridspec.GridSpecFromSubplotSpec(n_rows, 4, subplot_spec=outer[1], wspace=inner_wspace, hspace=inner_hspace)\n",
107 | "gs_l2_inset = gridspec.GridSpecFromSubplotSpec(n_rows, 4, subplot_spec=outer[2], wspace=inner_wspace, hspace=inner_hspace)\n",
108 | "gs_ref_inset = gridspec.GridSpecFromSubplotSpec(n_rows, 1, subplot_spec=outer[3], hspace=inner_hspace)\n",
109 | "inners = [gs_l1_inset, gs_l2_inset]\n",
110 | "\n",
111 | "ax_ref = fig.add_subplot(gs_ref[:, 0])\n",
112 | "ax_ref.imshow(mi.util.convert_to_bitmap(img_ref), interpolation='none')\n",
113 | "ax_ref.set_xlabel(r'$\\mathcal{L}^1$:'+'\\n'+r'$\\mathcal{L}^2$:', y=-0.25, fontsize=DEFAULT_FONTSIZE, multialignment='right', loc='right')\n",
114 | "disable_ticks(ax_ref)\n",
115 | "# disable_border(ax_ref)\n",
116 | "\n",
117 | "for l, ((rx, ry, s), color) in enumerate(zip(crops, crop_colors)):\n",
118 | " ax = fig.add_subplot(gs_ref_inset[l, 0])\n",
119 | " left = int(rx*w)\n",
120 | " size = int(s*w)\n",
121 | " top = int(ry*h)\n",
122 | " ax.imshow(mi.util.convert_to_bitmap(img_ref[top:top+size, left:left+size]), interpolation='none')\n",
123 | " disable_ticks(ax)\n",
124 | " plt.setp(ax.spines.values(), color=color)\n",
125 | " if l == 0:\n",
126 | " # disable_border(ax)\n",
127 | " ax.set_title(\"Reference\", y=1.025)\n",
128 | "\n",
129 | " rect = patches.Rectangle((left, top), size, size, linewidth=0.5, edgecolor=color, facecolor='none')\n",
130 | " ax_ref.add_patch(rect)\n",
131 | " rect = patches.Rectangle((0, 0), size-1, size-1, linewidth=1.0, edgecolor=color, facecolor='none')\n",
132 | " ax.add_patch(rect)\n",
133 | "\n",
134 | "for i, (loss_name, gs) in enumerate(zip(loss_names, inners)):\n",
135 | " for j, method_name in enumerate(method_names):\n",
136 | " for k, denoised in enumerate(use_denoising):\n",
137 | " for l, ((rx, ry, s), color) in enumerate(zip(crops, crop_colors)):\n",
138 | " ax = fig.add_subplot(gs[l, j*2+k])\n",
139 | " left = int(rx*w)\n",
140 | " right = left + int(s*w)\n",
141 | " top = int(ry*h)\n",
142 | " bottom = top + int(s*w)\n",
143 | " ax.imshow(mi.util.convert_to_bitmap(imgs[i][j][k][top:bottom, left:right]), interpolation='none')\n",
144 | " # ax.imshow(mi.util.convert_to_bitmap(imgs[i][j*2+k][ry:ry+s, rx:rx+s]))\n",
145 | " # plt.plot(np.arange(20))\n",
146 | " disable_ticks(ax)\n",
147 | " # disable_border(ax)\n",
148 | " # plt.setp(ax.spines.values(), color=color, linewidth=1.0)\n",
149 | " size = right-left-1\n",
150 | " rect = patches.Rectangle((0, 0), size, size, linewidth=1.0, edgecolor=color, facecolor='none')\n",
151 | " # ax.add_patch(rect)\n",
152 | " if l == 0:\n",
153 | " ax.set_title('+ Denoising' if denoised else method_name, y=1.025)\n",
154 | " else:\n",
155 | " ax.set_xlabel(f\"{final_losses[i][j][k][0]:.2e}\\n{final_losses[i][j][k][1]:.2e}\", y=-0.25, fontsize=DEFAULT_FONTSIZE)\n",
156 | " # disable_border(ax)\n",
157 | "\n",
158 | " # Ghost axes for the labels (https://stackoverflow.com/a/69117807)\n",
159 | " ax_label = fig.add_subplot(gs[:])\n",
160 | " ax_label.axis('off')\n",
161 | " ax_label.set_title(loss_name, y=1.18)\n",
162 | " rect = patches.Rectangle((.1, 1.2), 0.8, 0.0, linewidth=0.5, edgecolor='black', facecolor='none', clip_on=False)\n",
163 | " ax_label.add_patch(rect)\n",
164 | "\n",
165 | "ax_ref = fig.add_subplot(gs_ref[:])\n",
166 | "ax_ref.axis('off')\n",
167 | "ax_ref.set_title('Reference', y=1.025)\n",
168 | "save_fig(\"ajar\", pad_inches=0.02)\n"
169 | ]
170 | },
171 | {
172 | "cell_type": "code",
173 | "execution_count": null,
174 | "metadata": {},
175 | "outputs": [],
176 | "source": []
177 | }
178 | ],
179 | "metadata": {
180 | "kernelspec": {
181 | "display_name": "Python 3.9.12 ('graphics')",
182 | "language": "python",
183 | "name": "python3"
184 | },
185 | "language_info": {
186 | "codemirror_mode": {
187 | "name": "ipython",
188 | "version": 3
189 | },
190 | "file_extension": ".py",
191 | "mimetype": "text/x-python",
192 | "name": "python",
193 | "nbconvert_exporter": "python",
194 | "pygments_lexer": "ipython3",
195 | "version": "3.9.12"
196 | },
197 | "orig_nbformat": 4,
198 | "vscode": {
199 | "interpreter": {
200 | "hash": "83642eaf50c97d4e19d0a23d915e5d4e870af428ff693683146158fe3feeea5a"
201 | }
202 | }
203 | },
204 | "nbformat": 4,
205 | "nbformat_minor": 2
206 | }
207 |
--------------------------------------------------------------------------------
/figures/ajar/generate_data.py:
--------------------------------------------------------------------------------
"""Generate the optimization results for the 'ajar' figure.

Runs run_experiment.py four times: L1 and L2 losses, each with and without
denoising. Previously each invocation rebuilt the runner path and repeated the
full command list; factored into a single helper so the four runs differ only
in their arguments.
"""
import subprocess
import os
from pathlib import Path

scene_name = "ajar"

# Path to the top-level driver script (two directories above this file).
_RUNNER = os.path.join(Path(__file__).parents[2], "run_experiment.py")


def _run(extra_args, output_suffix):
    """Launch one optimization run as a blocking subprocess.

    `extra_args` are inserted between the scene name and the --output flag,
    preserving the original argument order exactly.
    """
    subprocess.call(["python", _RUNNER, scene_name, *extra_args,
                     "--output", f"{scene_name}_{output_suffix}"])


# L1
_run(["--lr", "0.02"], "l1")
_run(["--lr", "0.02", "--denoise"], "l1_denoised")

# L2
_run(["--loss", "L2", "--lr", "0.01"], "l2")
_run(["--loss", "L2", "--lr", "0.01", "--denoise"], "l2_denoised")
13 |
--------------------------------------------------------------------------------
/figures/debiasing/figure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import sys\n",
10 | "sys.path.append(\"..\")\n",
11 | "from figutils import *\n",
12 | "from string import ascii_lowercase\n",
13 | "import matplotlib.patheffects as pe"
14 | ]
15 | },
16 | {
17 | "cell_type": "code",
18 | "execution_count": null,
19 | "metadata": {},
20 | "outputs": [],
21 | "source": [
22 | "output_dir = os.path.join(OUTPUT_DIR, os.path.basename(os.getcwd()))"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": null,
28 | "metadata": {},
29 | "outputs": [],
30 | "source": [
31 | "methods = [\"baseline\", \"debiased\"]\n",
32 | "method_names = [\"Naive\", \"Debiased\"]\n",
33 | "losses = []\n",
34 | "imgs_final = []\n",
35 | "imgs = []\n",
36 | "\n",
37 | "ref_img = mi.Bitmap(os.path.join(output_dir, \"img_ref.exr\"))\n",
38 | "for method in methods:\n",
39 | " losses.append(np.load(os.path.join(output_dir, method, \"loss_hist.npz\")))\n",
40 | " imgs.append(mi.TensorXf(mi.Bitmap(os.path.join(output_dir, method, \"img\", \"0249.exr\"))))\n",
41 | " imgs_final.append(mi.TensorXf(mi.Bitmap(os.path.join(output_dir, method, \"img_inf\", \"0249.exr\"))))\n"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": null,
47 | "metadata": {},
48 | "outputs": [],
49 | "source": [
50 | "n_cols = 3\n",
51 | "n_rows = 2\n",
52 | "base_size = 3\n",
53 | "h,w,_ = imgs[0].shape\n",
54 | "\n",
55 | "wspace = 0.02\n",
56 | "hspace = 0.02 * w/h\n",
57 | "aspect = gridspec_aspect(n_rows, n_cols, w, h, wspace=wspace, hspace=hspace)\n",
58 | "\n",
59 | "sns.set_style('white')\n",
60 | "fig = plt.figure(1, figsize=(TEXT_WIDTH, TEXT_WIDTH / aspect))\n",
61 | "gs = fig.add_gridspec(n_rows, n_cols, wspace=wspace, hspace=hspace)\n",
62 | "\n",
63 | "for i, method in enumerate(method_names):\n",
64 | " ax = fig.add_subplot(gs[0, i])\n",
65 | " ax.imshow(mi.util.convert_to_bitmap(imgs[i]), interpolation='none')\n",
66 | " std = dr.sqrt(dr.mean(dr.sqr(imgs[i] - imgs_final[i])))[0]\n",
67 | " text = f\"$\\sigma={std:.3f}$\"\n",
68 | " ax.text(0.99*w, 0.99*h, text, ha=\"right\", va=\"bottom\", color=\"white\", fontsize=DEFAULT_FONTSIZE, path_effects=[pe.withStroke(linewidth=1, foreground=\"black\")])\n",
69 | " if i == 0:\n",
70 | " ax.set_ylabel(\"Primal\")\n",
71 | " disable_ticks(ax)\n",
72 | "\n",
73 | " ax = fig.add_subplot(gs[1, i])\n",
74 | " ax.imshow(mi.util.convert_to_bitmap(imgs_final[i]), interpolation='none')\n",
75 | " if i == 0:\n",
76 | " ax.set_ylabel(\"Re-rendered\")\n",
77 | " disable_ticks(ax)\n",
78 | " ax.set_title(rf\"\\textbf{{({ascii_lowercase[i]})}} {method}\", y=-0.3)\n",
79 | "\n",
80 | "ax = fig.add_subplot(gs[1, 2])\n",
81 | "ax.imshow(mi.util.convert_to_bitmap(ref_img), interpolation='none')\n",
82 | "disable_ticks(ax)\n",
83 | "ax.set_title(r\"\\textbf{(c)} Target\", y=-0.3)\n",
84 | "\n",
85 | "save_fig('debiasing_v2')"
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "execution_count": null,
91 | "metadata": {},
92 | "outputs": [],
93 | "source": []
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": null,
98 | "metadata": {},
99 | "outputs": [],
100 | "source": []
101 | }
102 | ],
103 | "metadata": {
104 | "kernelspec": {
105 | "display_name": "Python 3.9.12 ('graphics')",
106 | "language": "python",
107 | "name": "python3"
108 | },
109 | "language_info": {
110 | "codemirror_mode": {
111 | "name": "ipython",
112 | "version": 3
113 | },
114 | "file_extension": ".py",
115 | "mimetype": "text/x-python",
116 | "name": "python",
117 | "nbconvert_exporter": "python",
118 | "pygments_lexer": "ipython3",
119 | "version": "3.9.12"
120 | },
121 | "orig_nbformat": 4,
122 | "vscode": {
123 | "interpreter": {
124 | "hash": "83642eaf50c97d4e19d0a23d915e5d4e870af428ff693683146158fe3feeea5a"
125 | }
126 | }
127 | },
128 | "nbformat": 4,
129 | "nbformat_minor": 2
130 | }
131 |
--------------------------------------------------------------------------------
/figures/debiasing/generate_data.py:
--------------------------------------------------------------------------------
import mitsuba as mi
mi.set_variant('cuda_ad_rgb')
import drjit as dr
from tqdm import trange

import numpy as np

import os
import sys
from pathlib import Path
import argparse

# Two levels up from this file (figures/debiasing/ -> repository checkout).
code_dir = str(Path(__file__).parents[2])
# NOTE(review): this appends the PARENT of code_dir, yet `experiments` appears
# to live in code_dir itself — verify which directory is intended.
sys.path.append(os.path.dirname(code_dir))
from experiments import scene_dragon

# Results go to <code_dir>/output/<this directory's name> (i.e. "debiasing").
output_dir = os.path.join(code_dir, "output", os.path.basename(os.path.dirname(__file__)))
if not os.path.isdir(output_dir):
    os.makedirs(output_dir)

# Optimization configuration: scene, integrator, spp settings and variables.
opt_config = scene_dragon(output_dir, "baseline")

n_steps = 250  # gradient steps per run
n_runs = 64    # renderings used to estimate the expected (biased) gradient
spp = opt_config['spp']
spp_grad = opt_config['spp_grad']
spp_ref = opt_config['spp_ref']
integrator = opt_config['integrator']
scene = opt_config['scene']

seed_offset = n_steps  # NOTE(review): unused below; seeds are computed inline.
params = mi.traverse(scene)

# High-spp reference image that the optimization targets.
img_ref = mi.render(scene, params, integrator=integrator, spp=spp_ref)
mi.Bitmap(img_ref).write(os.path.join(output_dir, "img_ref.exr"))
36 |
def save_image(img, output_dir, img_name, it):
    """Asynchronously write `img` to <output_dir>/<img_name>/<it:04d>.exr.

    The target directory is created on demand. Uses exist_ok=True instead of
    the previous isdir-then-makedirs sequence, which could race if two
    processes wrote to the same directory.
    """
    filename = os.path.join(output_dir, img_name, f"{it:04d}.exr")
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    # write_async returns immediately; the optimization loop is not blocked on disk I/O.
    mi.Bitmap(img).write_async(filename)
42 |
# Run the optimization twice: with the debiased L1 gradient ("debiased") and
# with the naive single-sample gradient ("baseline").
for j, debias in enumerate([True, False]):
    loss_hist = np.zeros(n_steps)
    save_dir = os.path.join(output_dir, "debiased" if debias else "baseline")
    opt = mi.ad.Adam(lr=5e-2)

    # Initialize the parameters
    for key, param in opt_config['variables'].items():
        init_state = param['init']
        if 'sigma_t' in key:
            opt[key] = mi.Float(init_state)
            params[key] = opt[key]
        else:
            params[key] = init_state
        # Add the parameter to the optimizer
        opt[key] = params[key]

    for i in trange(n_steps):
        params.update(opt)

        # Primal rendering
        img = mi.render(scene, params, integrator=integrator, seed=i, spp=spp, spp_grad=spp_grad)
        # Image-space gradient of the L1 loss for the noisy rendering; biased
        # because sign() is applied to a noisy estimate of the image.
        grad_biased = dr.sign(img - img_ref)

        if debias:
            # Compute bias term
            with dr.suspend_grad():
                # L1 gradient of a (nearly) noise-free rendering.
                img_inf = mi.render(scene, params, integrator=integrator, seed=1457892+i, spp=spp_ref)
                grad_inf = dr.sign(img_inf - img_ref)

            # Monte-Carlo estimate of the *expected* biased gradient over
            # n_runs independent low-spp renderings.
            # NOTE(review): this loop sits outside dr.suspend_grad(); the extra
            # renders are never backpropagated, but confirm no AD state is
            # recorded unintentionally.
            grad_exp = 0
            for run in range(n_runs):
                x = mi.render(scene, params, seed=n_steps+i*n_runs+run, spp=spp)
                grad_exp += dr.sign(x - img_ref) / n_runs

            # Debiased gradient: biased estimate corrected by the difference
            # between the noise-free gradient and the expected biased one.
            grad = grad_biased + grad_inf - grad_exp
        else:
            grad = dr.detach(grad_biased)

        # Backpropagate the pixel-wise gradient (normalized by pixel count)
        # into the differentiable scene parameters.
        dr.backward(img * grad / dr.prod(img.shape))

        with dr.suspend_grad():
            # The debiased branch already rendered a high-spp image this step.
            if not debias:
                img_inf = mi.render(scene, params, integrator=integrator, seed=i+1, spp=spp_ref)

            save_image(img_inf, save_dir, "img_inf", i)
            save_image(img, save_dir, "img", i)

            # Track the L1 error of the high-spp re-rendering vs. the reference.
            loss_hist[i] = dr.mean(dr.abs(img_inf - img_ref))[0]

        opt.step()

        # Clamp parameters back into their valid range after the step.
        for key, param in opt_config['variables'].items():
            opt[key] = dr.clamp(dr.detach(opt[key]), param['clamp'][0], param['clamp'][1])

    np.savez(os.path.join(save_dir, "loss_hist.npz"), loss_hist=loss_hist)
98 |
--------------------------------------------------------------------------------
/figures/figutils.py:
--------------------------------------------------------------------------------
1 | """Bundles together some common imports, functions and settings for figure generation / plotting"""
2 |
3 | import os
4 | import subprocess
5 | from os.path import join
6 | from pathlib import Path
7 |
8 | import drjit as dr
9 | import mitsuba as mi
10 |
11 | import matplotlib
12 | import matplotlib.pyplot as plt
13 | import matplotlib.gridspec as gridspec
14 | import matplotlib.patheffects as path_effects
15 |
16 | import seaborn as sns
17 | import numpy as np
18 | import json
19 |
# Prefer the CUDA AD variant; fall back to scalar RGB if it is unavailable.
mi.set_variant('cuda_ad_rgb', 'scalar_rgb')

_SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): parents[3] resolves three directories above this file, while
# sibling generate_data.py scripts reach their output via parents[2] from one
# level deeper — verify this points at the intended 'output' directory.
OUTPUT_DIR = join(Path(__file__).parents[3], 'output')
FIG_OUTPUT_DIR = join(_SCRIPT_DIR, 'output')
del _SCRIPT_DIR

# Figure widths in inches, matching the target document's column/page layout.
TEXT_WIDTH = 5.90666
# TODO: get the proper value
PAGE_WIDTH = 2*TEXT_WIDTH

DEFAULT_FONTSIZE = 10 # Font size used by captions
DEFAULT_FONTSIZE_SMALL = 8

# Shared matplotlib rcParams for all figures: LaTeX text rendering with
# Libertine fonts and thin, print-friendly axes/ticks/lines.
MPL_STYLE = {
    "text.usetex": True,
    "text.color": 'black',
    "font.size": DEFAULT_FONTSIZE,
    "axes.titlesize": DEFAULT_FONTSIZE,
    "axes.labelsize": DEFAULT_FONTSIZE_SMALL,
    "xtick.labelsize": DEFAULT_FONTSIZE_SMALL - 2,
    "ytick.labelsize": DEFAULT_FONTSIZE_SMALL - 2,
    "legend.fontsize": DEFAULT_FONTSIZE_SMALL,
    "figure.titlesize": DEFAULT_FONTSIZE,
    "text.latex.preamble": r"""\usepackage{libertine}
\usepackage[libertine]{newtxmath}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{bm}
\usepackage{bbm}""",
    # Embed TrueType (type 42) fonts in PDF/PS output.
    "pdf.fonttype": 42,
    "ps.fonttype": 42,
    'axes.edgecolor': 'black',
    'axes.linewidth': 0.4,
    'xtick.major.size': 0.5,
    'xtick.major.width': 0.5,
    'xtick.minor.size': 0.25,
    'xtick.minor.width': 0.5,

    'ytick.major.size': 0.5,
    'ytick.major.width': 0.5,
    'ytick.minor.size': 0.25,
    'ytick.minor.width': 0.5,

    'lines.linewidth': 0.75,
    'patch.linewidth': 0.5,

    'grid.linewidth': 0.5,

    # Place axes titles below the plot (used as captions).
    'axes.titley': -0.18,

    'figure.dpi': 120, # Controls the display size of the figure in the notebook (we overwrite this when saving)
}
73 |
74 |
def __init():
    """Install the shared plotting style; runs once at import time."""
    # Override any style changes by VSCode
    matplotlib.style.use('default')
    matplotlib.rcParams.update(MPL_STYLE)
    # sns.set() installs seaborn's own rcParams, so our style must be
    # re-applied afterwards to take precedence.
    sns.set()
    matplotlib.rcParams.update(MPL_STYLE)

__init()
83 |
84 |
def save_fig(fig_name, fig_sub_dir='', dpi=300, pad_inches=0.005, bbox_inches='tight', compress=True):
    """Save the current matplotlib figure as a PDF, optionally compressed with Ghostscript.

    :param fig_name: base name of the figure; also used as the output sub-directory.
    :param fig_sub_dir: optional extra directory level under FIG_OUTPUT_DIR.
    :param compress: if True, save an uncompressed intermediate '*_uc.pdf' and
        run Ghostscript to produce the final '<fig_name>.pdf'.
    :return: path of the final (compressed if requested) PDF.
    """
    if fig_sub_dir == '':
        output_dir = os.path.join(FIG_OUTPUT_DIR, fig_name)
    else:
        output_dir = os.path.join(FIG_OUTPUT_DIR, fig_sub_dir, fig_name)
    os.makedirs(output_dir, exist_ok=True)
    fn = join(output_dir, fig_name + '.pdf')
    orig_fn = fn
    if compress:
        # Save to an uncompressed intermediate; Ghostscript writes the final file.
        fn = fn.replace('.pdf', '_uc.pdf')
    plt.savefig(fn, format='pdf', dpi=dpi, bbox_inches=bbox_inches, pad_inches=pad_inches)
    if compress:
        # Fixed: the gray-image flag was '-DownsampleGrayImages=false', which
        # Ghostscript parses as defining 'ownsampleGrayImages' ('-D' is a
        # synonym of '-d'), silently leaving gray downsampling enabled.
        # Also use an argument list (shell=False) so paths containing spaces
        # or shell metacharacters are passed through safely.
        gs_args = [
            "gs", "-o", orig_fn, "-dQUIET", "-f", "-dNOPAUSE", "-dBATCH",
            "-sDEVICE=pdfwrite", "-dPDFSETTINGS=/prepress", "-dCompatibilityLevel=1.6",
            "-dDownsampleColorImages=false", "-dDownsampleGrayImages=false", fn,
        ]
        subprocess.call(gs_args)
    return orig_fn
102 |
def gridspec_aspect(n_rows, n_cols, w, h, wspace=0, hspace=0):
    """Return the overall width/height aspect ratio of a gridspec layout.

    :param w: cell width — a scalar (uniform columns) or a list/tuple of
        per-column widths.
    :param h: cell height — a scalar (uniform rows) or a list/tuple of
        per-row heights.
    :param wspace, hspace: spacing between cells, as a fraction of the average
        cell extent (matplotlib convention).
    """
    if isinstance(w, (int, float)):
        Ws = n_cols * w
    elif isinstance(w, (list, tuple)):
        Ws = sum(w)

    # Fixed: previously only `int` was accepted for h, so a float height
    # left `Hs` undefined and raised NameError (w already accepted floats).
    if isinstance(h, (int, float)):
        Hs = n_rows * h
    elif isinstance(h, (list, tuple)):
        Hs = sum(h)

    w_spacing = wspace * Ws / n_cols
    h_spacing = hspace * Hs / n_rows

    return (Ws + (n_cols - 1) * w_spacing) / (Hs + (n_rows - 1) * h_spacing)
118 |
def disable_ticks(ax):
    """Strip all tick marks and tick labels from both axes of `ax`."""
    for axis in (ax.axes.get_xaxis(), ax.axes.get_yaxis()):
        axis.set_ticklabels([])
        axis.set_ticks([])
124 |
125 |
def disable_border(ax):
    """Hide all four spines (the frame) of `ax`."""
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_visible(False)
131 |
132 |
def set_spine_width(ax, w):
    """Set the line width of every spine of `ax` to `w`."""
    for spine in ax.spines.values():
        spine.set_linewidth(w)
136 |
137 |
def set_aspect(ax, aspect):
    """Force the on-screen aspect ratio of `ax` to `aspect`, independent of its data limits."""
    x_min, x_max = ax.get_xlim()
    y_min, y_max = ax.get_ylim()
    # Scale the data-space ratio so the rendered axes box has the requested shape.
    data_ratio = abs((x_max - x_min) / (y_min - y_max))
    ax.set_aspect(data_ratio * aspect)
142 |
143 |
def apply_color_map(data, cmap='coolwarm', vmin=None, vmax=None):
    """Map scalar `data` through a named matplotlib colormap and return RGB values.

    `vmin`/`vmax` default to the data minimum/maximum; the alpha channel of the
    colormap output is dropped.
    """
    from matplotlib import cm

    values = np.array(data)
    lo = np.min(values) if vmin is None else vmin
    hi = np.max(values) if vmax is None else vmax
    colormap = getattr(cm, cmap)
    normalized = plt.Normalize(lo, hi)(values)
    # Keep RGB only, discard alpha.
    return colormap(normalized)[..., :3]
153 |
154 |
def merge_pdfs(fn1, fn2, out_fn):
    """Merges two PDF files"""
    from PyPDF2 import PdfReader, PdfWriter
    # Overlay the first page of fn2 onto the first page of fn1.
    base_page = PdfReader(fn1).pages[0]
    overlay_page = PdfReader(fn2).pages[0]
    base_page.merge_page(overlay_page)
    writer = PdfWriter()
    writer.add_page(base_page)
    with open(out_fn, 'wb') as fp:
        writer.write(fp)
167 |
168 |
def repeat_img(img, times):
    """Upscale `img` by an integer factor using pixel replication (nearest-neighbour)."""
    enlarged = np.repeat(img, times, axis=0)
    return np.repeat(enlarged, times, axis=1)
171 |
172 |
def time_to_string(duration):
    """Format a duration in seconds as e.g. '1d 2h 3m 4s', omitting zero leading units.

    The seconds component is always present, so 0 formats as '0s'.
    """
    total = round(duration)
    minutes, seconds = divmod(total, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    parts = []
    if days > 0:
        parts.append(f'{days}d')
    if hours > 0:
        parts.append(f'{hours}h')
    if minutes > 0:
        parts.append(f'{minutes}m')
    parts.append(f'{seconds}s')
    return ' '.join(parts)
187 |
188 |
def read_img(fn, exposure=0, tonemap=True, background_color=None,
             handle_inexistant_file=False):
    """Load an image file into a numpy array, optionally tonemapping to sRGB.

    :param fn: path of the image file (anything mi.Bitmap can read, e.g. EXR).
    :param exposure: exposure compensation in stops; pixels are scaled by 2**exposure.
    :param tonemap: if True, return gamma-corrected float RGB clipped to [0, 1];
        otherwise return the raw pixel data unchanged.
    :param background_color: if given (and tonemapping), composite the image
        over this RGB color using its alpha channel.
    :param handle_inexistant_file: if True, return a uniform grey placeholder
        instead of failing when `fn` does not exist.
    """
    if handle_inexistant_file and not os.path.isfile(fn):
        # Grey 256x256 placeholder so figure layout code can proceed.
        return np.ones((256, 256, 3)) * 0.3
    bmp = mi.Bitmap(fn)
    if tonemap:
        if background_color is not None:
            img = np.array(bmp.convert(mi.Bitmap.PixelFormat.RGBA, mi.Struct.Type.Float32, False))
            background_color = np.array(background_color).ravel()[None, None, :]
            # img = img[:, :, :3] * img[..., -1][..., None] + (1.0 - img[..., -1][..., None]) * background_color
            # NOTE(review): unlike the classic over-operator (commented line
            # above), the foreground is NOT multiplied by alpha here —
            # presumably the input has premultiplied alpha; confirm against
            # the renderer's output convention.
            img = img[:, :, :3] + (1.0 - img[..., -1][..., None]) * background_color
        else:
            img = np.array(bmp.convert(mi.Bitmap.PixelFormat.RGB, mi.Struct.Type.Float32, False))
        img = img * 2 ** exposure

        # Final convert with srgb_gamma=True performs the tonemapping.
        return np.clip(np.array(mi.Bitmap(img).convert(mi.Bitmap.PixelFormat.RGB, mi.Struct.Type.Float32, True)), 0, 1)
    else:
        return np.array(bmp)
207 |
208 |
def tonemap(img):
    """Convert `img` to gamma-corrected float RGB, clipped to the displayable [0, 1] range."""
    srgb = mi.Bitmap(img).convert(mi.Bitmap.PixelFormat.RGB, mi.Struct.Type.Float32, True)
    return np.clip(np.array(srgb), 0, 1)
211 |
212 |
def identify_axes(ax_dict, fontsize=48):
    """
    Helper to identify the Axes in the examples below.

    Draws the label in a large font in the center of the Axes.

    Parameters
    ----------
    ax_dict : dict[str, Axes]
        Mapping between the title / label and the Axes.
    fontsize : int, optional
        How big the label should be.
    """
    text_kwargs = dict(ha="center", va="center", fontsize=fontsize, color="darkgrey")
    for label, axis in ax_dict.items():
        axis.text(0.5, 0.5, label, transform=axis.transAxes, **text_kwargs)
229 |
--------------------------------------------------------------------------------
/figures/gradient/figure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import sys\n",
10 | "sys.path.append(\"..\")\n",
11 | "from figutils import *"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": null,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "scene_name = 'gradients'\n",
21 | "output_dir = os.path.join(OUTPUT_DIR, scene_name)\n",
22 | "it = 20"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": null,
28 | "metadata": {},
29 | "outputs": [],
30 | "source": [
31 | "methods = [\"gt\", \"baseline\", 'cv_ps']\n",
32 | "method_names = [r\"\\textbf{(a)} Ground Truth\", r\"\\textbf{(b)} Baseline\", r\"\\textbf{(c)} Ours\"]\n",
33 | "grads = [\"l1\", \"l2\"]\n",
34 | "grad_names = [r\"$\\mathcal{L}^1$\", r\"$\\mathcal{L}^2$\"]\n",
35 | "gradients = []\n",
36 | "imgs = []\n",
37 | "scales = [0, 0]\n",
38 | "channel = 1\n",
39 | "\n",
40 | "imgs.append(mi.TensorXf(mi.Bitmap(os.path.join(output_dir, \"cv_ps\", \"img_final.exr\"))))\n",
41 | "imgs.append(mi.TensorXf(mi.Bitmap(os.path.join(output_dir, \"cv_ps\", \"img_F\", f\"{it//5:04d}.exr\"))))\n",
42 | "imgs.append(mi.TensorXf(mi.Bitmap(os.path.join(output_dir, \"cv_ps\", \"img\", f\"{it//5:04d}.exr\"))))\n",
43 | "ref = mi.TensorXf(mi.Bitmap(os.path.join(output_dir, \"img_ref.exr\")))\n",
44 | "\n",
45 | "h,w,_ = imgs[0].shape\n",
46 | "N = w*h\n",
47 | "\n",
48 | "gradients.append([dr.sign(img - ref) for img in imgs])\n",
49 | "gradients.append([(img - ref) for img in imgs])\n",
50 | "\n",
51 | "for i, method in enumerate(methods):\n",
52 | " for j, grad in enumerate(grads):\n",
53 | " scales[j] = dr.maximum(dr.max(dr.abs(gradients[j][i])), scales[j])[0]\n",
54 | "\n",
55 | "scales = [1.0, scales[1]/200]"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "metadata": {},
62 | "outputs": [],
63 | "source": [
64 | "sns.set_style('white')"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": null,
70 | "metadata": {},
71 | "outputs": [],
72 | "source": [
73 | "n_cols = 3\n",
74 | "n_rows = 3\n",
75 | "\n",
76 | "crop1 = [0.4, 0.23, 0.1]\n",
77 | "crop2 = [0.5, 0.52, 0.12]\n",
78 | "crops = [crop1, crop2]\n",
79 | "crop_colors = [\"r\", \"g\"]\n",
80 | "\n",
81 | "outer_wspace = 0.05\n",
82 | "outer_hspace = 0.03\n",
83 | "\n",
84 | "inset_hspace = 0.03\n",
85 | "inset_wspace = 0.03\n",
86 | "\n",
87 | "main_aspect = w/h\n",
88 | "insets_aspect = gridspec_aspect(2, 2, 1, 1, wspace=inset_wspace, hspace=inset_hspace)\n",
89 | "height_ratios = [1/main_aspect, 1/insets_aspect]\n",
90 | "outer_aspect = gridspec_aspect(2, 3, 1, height_ratios, wspace=outer_wspace, hspace=outer_hspace)\n",
91 | "\n",
92 | "import matplotlib.gridspec as gridspec\n",
93 | "import matplotlib.patches as patches\n",
94 | "\n",
95 | "fig = plt.figure(1, figsize=(TEXT_WIDTH, TEXT_WIDTH / outer_aspect))\n",
96 | "outer_gs = fig.add_gridspec(2, 3, wspace=outer_wspace, hspace=outer_hspace, height_ratios=height_ratios)\n",
97 | "\n",
98 | "# scales = [2, 2e-1]\n",
99 | "\n",
100 | "for i, method in enumerate(method_names):\n",
101 | " main_gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=outer_gs[0, i], wspace=0.0, hspace=0.0)\n",
102 | " inset_gs = gridspec.GridSpecFromSubplotSpec(2, 2, subplot_spec=outer_gs[1, i], wspace=inset_wspace, hspace=inset_hspace)\n",
103 | " ax = fig.add_subplot(main_gs[0])\n",
104 | " ax.imshow(mi.util.convert_to_bitmap(imgs[i]), interpolation='none')\n",
105 | " disable_ticks(ax)\n",
106 | " if i == 0:\n",
107 | " for l, ((rx, ry, s), color) in enumerate(zip(crops, crop_colors)):\n",
108 | " left = int(rx*w)\n",
109 | " size = int(s*w)\n",
110 | " top = int(ry*h)\n",
111 | " rect = patches.Rectangle((left, top), size, size, linewidth=0.5, edgecolor=color, facecolor='none')\n",
112 | " ax.add_patch(rect)\n",
113 | " ax.set_ylabel(\"Primal\")\n",
114 | "\n",
115 | " for j, loss_name in enumerate(grads):\n",
116 | " for l, ((rx, ry, s), color) in enumerate(zip(crops, crop_colors)):\n",
117 | " ax = fig.add_subplot(inset_gs[j, l])\n",
118 | " left = int(rx*w)\n",
119 | " size = int(s*w)\n",
120 | " right = left + size\n",
121 | " top = int(ry*h)\n",
122 | " bottom = top + size\n",
123 | " im = ax.imshow(gradients[j][i][top:bottom, left:right, channel], cmap='coolwarm', vmin=-scales[j], vmax=scales[j], interpolation='none')\n",
124 | " disable_ticks(ax)\n",
125 | " plt.setp(ax.spines.values(), color=color)\n",
126 | " rect = patches.Rectangle((0, 0), size-1, size-1, linewidth=1.0, edgecolor=color, facecolor='none')\n",
127 | " ax.add_patch(rect)\n",
128 | "\n",
129 | " if i == 0 and l == 0:\n",
130 | " ax.set_ylabel(grad_names[j])\n",
131 | " if i == 2 and l == 1:\n",
132 | " cbax = ax.inset_axes([1.04, 0.0, 0.08, 1.0], transform=ax.transAxes)\n",
133 | " cbar = fig.colorbar(im, cax=cbax, ticks=[-scales[j]*0.9, 0, scales[j]*0.9], format=\"%.1f\")\n",
134 | " cbar.outline.set_visible(False)\n",
135 | " cbar.ax.tick_params(size=0)\n",
136 | " \n",
137 | "\n",
138 | " # Ghost axes for the labels (https://stackoverflow.com/a/69117807)\n",
139 | " ax_label = fig.add_subplot(outer_gs[1, i])\n",
140 | " ax_label.axis('off')\n",
141 | " ax_label.set_title(method)\n",
142 | "\n",
143 | "save_fig(\"gradients\", pad_inches=0.02)"
144 | ]
145 | }
146 | ],
147 | "metadata": {
148 | "kernelspec": {
149 | "display_name": "Python 3.9.12 ('graphics')",
150 | "language": "python",
151 | "name": "python3"
152 | },
153 | "language_info": {
154 | "codemirror_mode": {
155 | "name": "ipython",
156 | "version": 3
157 | },
158 | "file_extension": ".py",
159 | "mimetype": "text/x-python",
160 | "name": "python",
161 | "nbconvert_exporter": "python",
162 | "pygments_lexer": "ipython3",
163 | "version": "3.9.12"
164 | },
165 | "orig_nbformat": 4,
166 | "vscode": {
167 | "interpreter": {
168 | "hash": "83642eaf50c97d4e19d0a23d915e5d4e870af428ff693683146158fe3feeea5a"
169 | }
170 | }
171 | },
172 | "nbformat": 4,
173 | "nbformat_minor": 2
174 | }
175 |
--------------------------------------------------------------------------------
/figures/gradient/generate_data.py:
--------------------------------------------------------------------------------
"""Launch the data-generation run for the gradient-comparison figure."""
import subprocess
import os
from pathlib import Path

# Short (21-step) optimization of the 'ajar' scene with the control-variate
# method; results are written under the 'gradients' output name.
experiment_script = os.path.join(Path(__file__).parents[2], "run_experiment.py")
command = [
    "python", experiment_script, "ajar",
    "--output", "gradients",
    "--n_steps", "21",
    "--method", "cv_ps",
]
subprocess.call(command)
6 |
--------------------------------------------------------------------------------
/figures/landscapes/figure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import sys\n",
10 | "sys.path.append(\"..\")\n",
11 | "from figutils import *\n",
12 | "from tol_colors import tol_cmap, tol_cset"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": null,
18 | "metadata": {},
19 | "outputs": [],
20 | "source": [
21 | "loss_names = [\"l1\", \"l2\"]\n",
22 | "\n",
23 | "losses = []\n",
24 | "grads = []\n",
25 | "output_dir = os.path.join(OUTPUT_DIR, os.path.basename(os.getcwd()))\n",
26 | "spps = np.load(os.path.join(output_dir, \"spps.npy\"))\n",
27 | "param_landscape = np.load(os.path.join(output_dir, \"param_landscape.npy\"))\n",
28 | "\n",
29 | "for loss_name in loss_names:\n",
30 | " losses.append([])\n",
31 | " grads.append([])\n",
32 | " for spp in spps:\n",
33 | " losses[-1].append(np.load(os.path.join(output_dir, f\"loss_{loss_name}_{spp:04d}.npy\")))\n",
34 | " grads[-1].append(np.load(os.path.join(output_dir, f\"grad_{loss_name}_{spp:04d}.npy\")))\n",
35 | "\n"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": null,
41 | "metadata": {},
42 | "outputs": [],
43 | "source": [
44 | "n_rows = len(loss_names)\n",
45 | "n_cols = 2\n",
46 | "\n",
47 | "aspect = 1.75\n",
48 | "fig = plt.figure(1, figsize=(TEXT_WIDTH, TEXT_WIDTH / aspect))\n",
49 | "gs = fig.add_gridspec(n_rows, n_cols, wspace=0.2, hspace=0.05)\n",
50 | "\n",
51 | "# colors = sns.color_palette(\"dark:seagreen\", n_colors=len(spps))\n",
52 | "colors = [tol_cmap(\"rainbow_PuRd\")(0.15 + 0.5 * i/len(spps)) for i in range(len(spps))]\n",
53 | "# colors = tol_cset('bright')\n",
54 | "\n",
55 | "titles = [r\"$\\mathcal{L}^1$\", r\"$\\mathcal{L}^2$\"]\n",
56 | "for i, title in enumerate(titles):\n",
57 | " ax_loss = fig.add_subplot(gs[i, 0])\n",
58 | " ax_grad = fig.add_subplot(gs[i, 1])\n",
59 | " ax_grad.plot([5, 5], [min([grads[i][j].min() for j in range(len(spps))]), max([grads[i][j].max() for j in range(len(spps))])], color=\"grey\", linestyle=\"--\")\n",
60 | " ax_grad.plot(param_landscape, np.zeros_like(param_landscape), color=\"grey\", linestyle=\"--\")\n",
61 | " for j, spp in enumerate(spps):\n",
62 | " ax_loss.semilogy(param_landscape, losses[i][j], label=str(spp), color=colors[j])\n",
63 | " ax_loss.scatter(param_landscape[np.argmin(losses[i][j])], np.min(losses[i][j]), color=colors[j], marker=\"x\")\n",
64 | " ax_grad.plot(param_landscape, grads[i][j], label=str(spp), color=colors[j])\n",
65 | " if i == 0:\n",
66 | " ax_grad.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n",
67 | " ax_loss.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n",
68 | " # disable_border(ax_grad)\n",
69 | " # disable_border(ax_loss)\n",
70 | " ax_loss.set_ylabel(title)\n",
71 | " # ax_loss.yaxis.tick_right()\n",
72 | " # ax_grad.yaxis.tick_right()\n",
73 | "\n",
74 | "ax_grad.legend(title=\"spp\")\n",
75 | "ax_loss.set_title(\"Loss\", y=-0.3)\n",
76 | "ax_grad.set_title(\"Gradient\", y=-0.3)\n",
77 | "save_fig(\"landscapes\", pad_inches=0.015)"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {},
84 | "outputs": [],
85 | "source": []
86 | }
87 | ],
88 | "metadata": {
89 | "kernelspec": {
90 | "display_name": "Python 3.9.12 ('graphics')",
91 | "language": "python",
92 | "name": "python3"
93 | },
94 | "language_info": {
95 | "codemirror_mode": {
96 | "name": "ipython",
97 | "version": 3
98 | },
99 | "file_extension": ".py",
100 | "mimetype": "text/x-python",
101 | "name": "python",
102 | "nbconvert_exporter": "python",
103 | "pygments_lexer": "ipython3",
104 | "version": "3.9.12"
105 | },
106 | "orig_nbformat": 4,
107 | "vscode": {
108 | "interpreter": {
109 | "hash": "83642eaf50c97d4e19d0a23d915e5d4e870af428ff693683146158fe3feeea5a"
110 | }
111 | }
112 | },
113 | "nbformat": 4,
114 | "nbformat_minor": 2
115 | }
116 |
--------------------------------------------------------------------------------
/figures/landscapes/generate_data.py:
--------------------------------------------------------------------------------
"""Generate data for the 'landscapes' figure.

Sweeps the dragon scene's medium extinction coefficient over a 1D range and
records Monte Carlo estimates of the L1/L2 losses and their gradients at
several sample counts (spp), averaged over multiple independent runs.
Results are written as .npy files under <repo>/output/landscapes.
"""
import mitsuba as mi
mi.set_variant('cuda_ad_rgb')
import drjit as dr
from tqdm import trange

import numpy as np

import os
import sys
from pathlib import Path
import argparse

code_dir = str(Path(__file__).parents[2])
sys.path.append(os.path.dirname(code_dir))

import plugins
from experiments import scene_dragon
from utils import l1_loss, l2_loss

# Output directory: <repo>/output/<name of this figure's folder>
output_dir = os.path.join(code_dir, "output", os.path.basename(os.path.dirname(__file__)))
os.makedirs(output_dir, exist_ok=True)

opt_config = scene_dragon(output_dir, "baseline")

spp = opt_config['spp']
spp_grad = opt_config['spp_grad']
spp_ref = opt_config['spp_ref']
integrator = opt_config['integrator']
scene = opt_config['scene']

params = mi.traverse(scene)
# Scalar extinction coefficient of the dragon's interior medium
key = 'PLYMesh_2.interior_medium.sigma_t.value.value'

# High-spp reference rendering used as the optimization target
img_ref = mi.render(scene, params, integrator=integrator, spp=spp_ref)
mi.Bitmap(img_ref).write(os.path.join(output_dir, "img_ref.exr"))

# Sample counts to compare: 2, 8, 32, 128, 512 spp
spps = 2 ** (2*np.arange(5) + 1)
losses = ["l1", "l2"]
# Average more runs at low spp so every curve has comparable noise
n_runs = 2 ** np.arange(8, 0, -1)

# 1D sweep of the extinction coefficient
param_landscape = np.linspace(2, 8, 25)

np.save(os.path.join(output_dir, "spps.npy"), spps)
np.save(os.path.join(output_dir, "param_landscape.npy"), param_landscape)

for loss_name in losses:
    for k, spp in enumerate(spps):
        loss_hist = np.zeros(len(param_landscape))
        grad_hist = np.zeros_like(loss_hist)

        print(f"Loss: {loss_name}, SPP: {spp}")
        for j in trange(n_runs[k]):
            for i, param in enumerate(param_landscape):
                # Update the extinction and re-enable gradient tracking
                params[key] = mi.Float(param)
                dr.set_grad_enabled(params[key], True)
                params.update()

                # Primal rendering; seed is unique per (run, sweep point)
                img = mi.render(scene, params, integrator=integrator,
                                seed=i + j*len(param_landscape), spp=spp, spp_grad=32)

                if loss_name == "l1":
                    loss = l1_loss(img, img_ref)
                elif loss_name == "l2":
                    loss = l2_loss(img, img_ref)
                else:
                    raise NotImplementedError

                dr.backward(loss)

                # Accumulate running means over the n_runs[k] independent runs
                loss_hist[i] += loss[0] / n_runs[k]
                grad_hist[i] += dr.grad(params[key])[0] / n_runs[k]

        np.save(os.path.join(output_dir, f"loss_{loss_name}_{spp:04d}.npy"), loss_hist)
        np.save(os.path.join(output_dir, f"grad_{loss_name}_{spp:04d}.npy"), grad_hist)
79 |
--------------------------------------------------------------------------------
/figures/steady_state/figure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import sys\n",
10 | "sys.path.append(\"..\")\n",
11 | "from figutils import *\n",
12 | "from string import ascii_lowercase"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": null,
18 | "metadata": {},
19 | "outputs": [],
20 | "source": [
21 | "output_dir = os.path.join(OUTPUT_DIR, os.path.basename(os.getcwd()))"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": null,
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "methods = [\"analytic\", \"baseline\", \"recursive\"]\n",
31 | "method_names = [\"Analytic\", \"Ours - naïve\", \"Ours - recursive\"]\n",
32 | "imgs = []\n",
33 | "weights = []\n",
34 | "it = 50\n",
35 | "variances = []\n",
36 | "img_ref = mi.Bitmap(os.path.join(output_dir, \"img_ref.exr\"))\n",
37 | "\n",
38 | "for method in methods:\n",
39 | " imgs.append(mi.Bitmap(os.path.join(output_dir, method, \"img\", f\"{it:03d}.exr\")))\n",
40 | " weights.append(mi.Bitmap(os.path.join(output_dir, method, \"weights\", f\"{it:03d}.exr\")))\n",
41 | " variances.append(np.load(os.path.join(output_dir, method, \"var.npy\")))\n"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": null,
47 | "metadata": {},
48 | "outputs": [],
49 | "source": [
50 | "show_std = True\n",
51 | "import matplotlib.patheffects as pe\n",
52 | "n_rows = len(methods)\n",
53 | "n_cols = 2\n",
54 | "w,h = imgs[0].size()\n",
55 | "wspace = 0.02\n",
56 | "hspace = 0.02*w/h\n",
57 | "aspect = gridspec_aspect(n_rows, n_cols, w, h, wspace, hspace)\n",
58 | "sns.set_style('white')\n",
59 | "fig = plt.figure(1, figsize=(TEXT_WIDTH, TEXT_WIDTH / aspect))\n",
60 | "gs = fig.add_gridspec(n_rows, n_cols, wspace=wspace, hspace=hspace)\n",
61 | "\n",
62 | "for i, method in enumerate(method_names):\n",
63 | " ax = fig.add_subplot(gs[i, 0])\n",
64 | " ax.imshow(mi.util.convert_to_bitmap(imgs[i]), interpolation='none')\n",
65 | " if i == 2:\n",
66 | " ax.set_title(\"Primal\")\n",
67 | " # disable_border(ax)\n",
68 | " disable_ticks(ax)\n",
69 | " ax.set_ylabel(method)\n",
70 | " # ax.set_ylabel(f\"Var: {variances[i][it]:.1e}\\n\" +fr\"\\textbf{{({ascii_lowercase[i]})}} {method}\", y=-0.5)\n",
71 | " # ax.text(0.99*w, 0.99*h, f\"Var: {variances[i][it]:.1e}\", ha=\"right\", va=\"bottom\", color=\"white\", fontsize=DEFAULT_FONTSIZE, path_effects=[pe.withStroke(linewidth=1, foreground=\"black\")])\n",
72 | " v = variances[i][it]\n",
73 | " if show_std:\n",
74 | " v = np.sqrt(v)\n",
75 | " e = int(np.log10(v))\n",
76 | " if e == 0:\n",
77 | " text = f\"$\\sigma{'' if show_std else '^2'}={v/10**e:.3f}$\"\n",
78 | " else:\n",
79 | " text = f\"$\\sigma{'' if show_std else '^2'}={v/10**e:.4f}\\cdot 10^{{{e}}}$\"\n",
80 | " ax.text(0.99*w, 0.99*h, text, ha=\"right\", va=\"bottom\", color=\"white\", fontsize=DEFAULT_FONTSIZE, path_effects=[pe.withStroke(linewidth=1, foreground=\"black\")])\n",
81 | "\n",
82 | "\n",
83 | " ax = fig.add_subplot(gs[i, 1])\n",
84 | " im = ax.imshow(mi.TensorXf(weights[i])[..., 0], cmap='Reds_r', vmin=0.0, vmax=1.0, interpolation='none')\n",
85 | " if i == 2:\n",
86 | " ax.set_title(\"Weights\")\n",
87 | " # disable_border(ax)\n",
88 | " disable_ticks(ax)\n",
89 | "\n",
90 | " if i == 2:\n",
91 | " cbax = ax.inset_axes([1.02, 0, 0.04, 1], transform=ax.transAxes)\n",
92 | " cbar = fig.colorbar(im, cax=cbax, ticks=[0, 0.5, 1])\n",
93 | " cbar.outline.set_visible(False)\n",
94 | " cbar.ax.tick_params(size=0)\n",
95 | "\n",
96 | "\n",
97 | "# gs.update(wspace=0.015, hspace=0.015*aspect)\n",
98 | "save_fig('steady_state')"
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": null,
104 | "metadata": {},
105 | "outputs": [],
106 | "source": []
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": null,
111 | "metadata": {},
112 | "outputs": [],
113 | "source": []
114 | }
115 | ],
116 | "metadata": {
117 | "kernelspec": {
118 | "display_name": "Python 3.9.12 ('graphics')",
119 | "language": "python",
120 | "name": "python3"
121 | },
122 | "language_info": {
123 | "codemirror_mode": {
124 | "name": "ipython",
125 | "version": 3
126 | },
127 | "file_extension": ".py",
128 | "mimetype": "text/x-python",
129 | "name": "python",
130 | "nbconvert_exporter": "python",
131 | "pygments_lexer": "ipython3",
132 | "version": "3.9.12"
133 | },
134 | "orig_nbformat": 4,
135 | "vscode": {
136 | "interpreter": {
137 | "hash": "83642eaf50c97d4e19d0a23d915e5d4e870af428ff693683146158fe3feeea5a"
138 | }
139 | }
140 | },
141 | "nbformat": 4,
142 | "nbformat_minor": 2
143 | }
144 |
--------------------------------------------------------------------------------
/figures/steady_state/generate_data.py:
--------------------------------------------------------------------------------
"""Generate data for the 'steady_state' figure.

Renders a static kitchen scene for n_steps iterations at 1 spp and compares
three control-variate weighting schemes ("analytic", "baseline", "recursive"),
recording per-step MSE against a high-spp reference plus per-step weight and
accumulated images.
"""
import mitsuba as mi
mi.set_variant('cuda_ad_rgb')
import drjit as dr
from tqdm import trange

import numpy as np

import os
import sys
from pathlib import Path
import argparse

code_dir = str(Path(__file__).parents[2])
sys.path.append(os.path.dirname(code_dir))
from experiments import scene_kitchen
from plugins.welford import StatisticsEstimator, WelfordVarianceEstimator

# Output directory: <repo>/output/<name of this figure's folder>
output_dir = os.path.join(code_dir, "output", os.path.basename(os.path.dirname(__file__)))
os.makedirs(output_dir, exist_ok=True)

opt_config = scene_kitchen(output_dir, "baseline")

n_steps = 250
spp = 1  # deliberately noisy primal renderings
integrator = mi.load_dict({'type': 'path', 'max_depth': 64})
scene = opt_config['scene']

params = mi.traverse(scene)

methods = ["analytic", "baseline", "recursive"]
# Reuse a cached reference image if one exists; otherwise accumulate one
# from 16 passes of 4096 spp each.
ref_path = os.path.join(output_dir, "img_ref.exr")
if os.path.exists(ref_path):
    img_ref = mi.Bitmap(ref_path)
else:
    img_ref = mi.TensorXf(0.0)
    n_passes = 16
    for i in trange(n_passes):
        img_ref += mi.render(scene, spp=4096, integrator=integrator, seed=i) / n_passes
    mi.Bitmap(img_ref).write(os.path.join(output_dir, "img_ref.exr"))

for i, method in enumerate(methods):
    img = 0.0  # accumulated (control-variate) image
    F = 0.0    # current-step rendering
    H = 0.0    # control variate; equals F here since the scene is static
    v_n = 0.0  # running variance estimate of the accumulated image
    var_n = WelfordVarianceEstimator()
    stats = StatisticsEstimator()
    weight_dir = os.path.join(output_dir, method, "weights")
    img_dir = os.path.join(output_dir, method, "img")
    os.makedirs(weight_dir, exist_ok=True)
    os.makedirs(img_dir, exist_ok=True)
    var = np.zeros(n_steps)  # per-step MSE against the reference
    for j in trange(n_steps):
        F = mi.render(scene, integrator=integrator, seed=j, spp=spp)
        # No need for a second rendering here, as the scene does not evolve
        H = F
        if j > 0:
            if method == "analytic":
                # Known-optimal weight for averaging j+1 i.i.d. samples
                w_s = dr.full(mi.TensorXf, j / (j + 1), F.shape)
            else:
                if j > 5:
                    # v_0, v_1, cov: presumably Var[H], Var[F] and Cov[H, F]
                    # from the running estimator — see plugins/welford.py
                    v_0, v_1, cov = stats.get()
                    if method == "baseline":
                        # Estimate Var[img] directly from the history
                        v_n = var_n.get()
                    else:
                        # "recursive": propagate the variance of the previous
                        # accumulate step through the CV update formula,
                        # reusing w_s and v_n from the previous iteration
                        v_n = w_s ** 2 * (v_n + v_0) + v_1 - 2*w_s * cov

                    w_s = cov / (v_0 + v_n)
                    dr.schedule(w_s, v_n)

                    # Guard against 0/0 and clamp to the valid weight range
                    w_s = dr.select(dr.isnan(w_s) | dr.isinf(w_s), 0.0, w_s)
                    w_s = dr.clamp(w_s, 0.0, 1.0)
                else:
                    # Warm-up: too few samples for stable statistics
                    w_s = mi.TensorXf(0.0, F.shape)

            stats.update(H, F)

            # Control-variate update of the accumulated image
            img = w_s * (img - H) + F
            var_n.update(img)
            mi.Bitmap(w_s).write(os.path.join(weight_dir, f"{j:03d}.exr"))
        else:
            # First step: nothing to blend with yet
            img = F
        var[j] = dr.mean(dr.sqr(img - img_ref))[0]
        mi.Bitmap(img).write(os.path.join(img_dir, f"{j:03d}.exr"))
    np.save(os.path.join(output_dir, method, "var.npy"), var)
86 |
--------------------------------------------------------------------------------
/figures/stream/figure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import sys\n",
10 | "sys.path.append(\"..\")\n",
11 | "from figutils import *\n",
12 | "import matplotlib.patheffects as pe"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": null,
18 | "metadata": {},
19 | "outputs": [],
20 | "source": [
21 | "scene_name = 'stream'\n",
22 | "method = 'cv_ps'\n",
23 | "output_dir = os.path.join(OUTPUT_DIR, scene_name)\n",
24 | "iterations = [3, 5, 25, 75, 250, 750]"
25 | ]
26 | },
27 | {
28 | "cell_type": "code",
29 | "execution_count": null,
30 | "metadata": {},
31 | "outputs": [],
32 | "source": [
33 | "\n",
34 | "imgs = []\n",
35 | "weights = []\n",
36 | "labels = []\n",
37 | "\n",
38 | "method_dir = os.path.join(output_dir, method)\n",
39 | "\n",
40 | "for i in iterations:\n",
41 | " labels.append(f\"it={i}\")\n",
42 | " imgs.append(mi.Bitmap(os.path.join(method_dir, \"img\", f\"{i:04d}.exr\")))\n",
43 | " weights.append(mi.Bitmap(os.path.join(output_dir, method, \"weights\", f\"{i:04d}.exr\")))\n",
44 | "\n",
45 | "labels.append(\"Target\")\n",
46 | "imgs.append(mi.Bitmap(os.path.join(output_dir, \"img_ref.exr\")))\n",
47 | "weights.append(None)\n",
48 | "img_ref = mi.Bitmap(os.path.join(output_dir, \"img_ref.exr\"))\n",
49 | "result = np.load(os.path.join(method_dir, \"result.npz\"))\n",
50 | "\n",
51 | "variance = np.load(os.path.join(method_dir, \"result.npz\"))['var']"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "sns.set_style('white')"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "metadata": {},
67 | "outputs": [],
68 | "source": [
69 | "var = [variance[it] for it in iterations] "
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": null,
75 | "metadata": {},
76 | "outputs": [],
77 | "source": [
78 | "base_size = 4\n",
79 | "\n",
80 | "w,h = imgs[0].size()\n",
81 | "\n",
82 | "n_cols = len(imgs) + 1\n",
83 | "n_rows = 2 if show_weights else 1\n",
84 | "\n",
85 | "wspace = 0.03\n",
86 | "hspace = 0.04 * w/h\n",
87 | "aspect = gridspec_aspect(n_rows, n_cols, w, h, wspace=wspace, hspace=hspace)\n",
88 | "\n",
89 | "fig = plt.figure(1, figsize=(PAGE_WIDTH, PAGE_WIDTH / aspect))\n",
90 | "\n",
91 | "gs = fig.add_gridspec(n_rows, n_cols, wspace=wspace, hspace=hspace)\n",
92 | "\n",
93 | "for i, (img, weight, label) in enumerate(zip(imgs, weights, labels)):\n",
94 | " ax = fig.add_subplot(gs[0, i])\n",
95 | " ax.imshow(mi.util.convert_to_bitmap(img), interpolation='none')\n",
96 | " disable_ticks(ax)\n",
97 | " if i != n_cols - 2:\n",
98 | " v = np.sqrt(var[i])\n",
99 | " e = int(np.log10(v))-1\n",
100 | " text = f\"$\\sigma={v:.3f}$\"\n",
101 | " ax.text(0.99*w, 0.99*h, text, ha=\"right\", va=\"bottom\", color=\"white\", fontsize=DEFAULT_FONTSIZE, path_effects=[pe.withStroke(linewidth=1, foreground=\"black\")])\n",
102 | "\n",
103 | " if show_weights:\n",
104 | " if i == 0:\n",
105 | " ax.set_ylabel(\"Primal\")\n",
106 | " ax = fig.add_subplot(gs[1, i])\n",
107 | " if weight is not None:\n",
108 | " weight = mi.TensorXf(weights[i])[:,:,0]\n",
109 | " im = ax.imshow(weight, cmap='Reds_r', vmin=0, vmax=1, interpolation='none')\n",
110 | " else:\n",
111 | " weight = np.ones((h, w))\n",
112 | " ax.imshow(weight, cmap='gray', vmin=0, vmax=1)\n",
113 | " ax.text(w/2, h/2, \"N/A\", ha=\"center\", va=\"center\", color=\"darkgrey\")\n",
114 | " disable_border(ax)\n",
115 | "\n",
116 | " disable_ticks(ax)\n",
117 | " \n",
118 | " if i == 0:\n",
119 | " ax.set_ylabel(\"Weights\")\n",
120 | "\n",
121 | " if i != n_cols-2:\n",
122 | " title = f\"Step {iterations[i]}\\n\"\n",
123 | " ax.set_title(title, verticalalignment='top', y=-0.2)\n",
124 | " else:\n",
125 | " ax.set_title(label, verticalalignment='top', y=-0.2)\n",
126 | "\n",
127 | " if i == n_cols - 2:\n",
128 | " cbax = ax.inset_axes([1.04, 0, 0.05, 1], transform=ax.transAxes)\n",
129 | " cbar = fig.colorbar(im, cax=cbax, ticks=[0, 0.5, 1])\n",
130 | " cbar.outline.set_visible(False)\n",
131 | " cbar.ax.tick_params(size=0)\n",
132 | "\n",
133 | "save_fig(\"stream\")\n"
134 | ]
135 | }
136 | ],
137 | "metadata": {
138 | "kernelspec": {
139 | "display_name": "Python 3.9.12 ('graphics')",
140 | "language": "python",
141 | "name": "python3"
142 | },
143 | "language_info": {
144 | "codemirror_mode": {
145 | "name": "ipython",
146 | "version": 3
147 | },
148 | "file_extension": ".py",
149 | "mimetype": "text/x-python",
150 | "name": "python",
151 | "nbconvert_exporter": "python",
152 | "pygments_lexer": "ipython3",
153 | "version": "3.9.12"
154 | },
155 | "orig_nbformat": 4,
156 | "vscode": {
157 | "interpreter": {
158 | "hash": "83642eaf50c97d4e19d0a23d915e5d4e870af428ff693683146158fe3feeea5a"
159 | }
160 | }
161 | },
162 | "nbformat": 4,
163 | "nbformat_minor": 2
164 | }
165 |
--------------------------------------------------------------------------------
/figures/stream/generate_data.py:
--------------------------------------------------------------------------------
"""Generate data for the 'stream' figure.

Runs the 'bunnies' experiment with the cv_ps method for 1000 steps,
writing results under the 'stream' output directory.
"""
import os
import subprocess
import sys
from pathlib import Path

# Use the interpreter running this script (not whatever "python" is on PATH),
# and raise if the experiment exits with a non-zero status instead of
# silently ignoring the return code.
subprocess.run(
    [sys.executable, os.path.join(Path(__file__).parents[2], "run_experiment.py"),
     "bunnies", "--method", "cv_ps", "--n_steps", "1000", "--output", "stream"],
    check=True,
)
6 |
--------------------------------------------------------------------------------
/figures/teaser/figure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import sys\n",
10 | "sys.path.append(\"..\")\n",
11 | "from figutils import *\n",
12 | "import matplotlib.gridspec as gridspec\n",
13 | "import matplotlib.patches as patches"
14 | ]
15 | },
16 | {
17 | "cell_type": "code",
18 | "execution_count": null,
19 | "metadata": {},
20 | "outputs": [],
21 | "source": [
22 | "def l1(x, y):\n",
23 | " return np.mean(np.abs(x - y))"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "scene = \"teaser\"\n",
33 | "methods = [\"baseline\", \"cv_ps\"]\n",
34 | "method_names = [\"Baseline\", \"Ours\"]\n",
35 | "\n",
36 | "\n",
37 | "it = 19\n",
38 | "output_dir = os.path.join(OUTPUT_DIR, scene)\n",
39 | "\n",
40 | "img_ref = mi.TensorXf(mi.Bitmap(os.path.join(output_dir, f\"img_ref.exr\")))\n",
41 | "\n",
42 | "n_steps = 500\n",
43 | "\n",
44 | "noisy_imgs = []\n",
45 | "final_imgs = []\n",
46 | "results = []\n",
47 | "final_errors = []\n",
48 | "for method in methods:\n",
49 | " noisy_img = mi.TensorXf(mi.Bitmap(os.path.join(output_dir, method, \"img\", f\"{it:04d}.exr\")))\n",
50 | " img_final = mi.TensorXf(mi.Bitmap(os.path.join(output_dir, method, f\"img_final.exr\")))\n",
51 | " final_errors.append(l1(img_final, img_ref))\n",
52 | " final_imgs.append(img_final)\n",
53 | " noisy_imgs.append(noisy_img)\n",
54 | " results.append(np.load(os.path.join(output_dir, method, \"result.npz\")))\n",
55 | "\n"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "metadata": {},
62 | "outputs": [],
63 | "source": [
64 | "h, w = final_imgs[0].shape[:2]\n",
65 | "plot_w = h\n",
66 | "\n",
67 | "inset_wspace = 0.02\n",
68 | "inset_hspace = 0.02\n",
69 | "# img_ratios = [1, 2*w/h, 1, 1]\n",
70 | "inset_aspect = gridspec_aspect(2, 1, 1, 1, wspace=inset_wspace, hspace=inset_hspace)\n",
71 | "\n",
72 | "img_wspace = 0.0\n",
73 | "img_aspect = gridspec_aspect(1, 2, w/2, h, wspace=img_wspace)\n",
74 | "\n",
75 | "plot_r = 2.0\n",
76 | "outer_ratios = [inset_aspect, img_aspect, inset_aspect, inset_aspect, plot_r/2]\n",
77 | "outer_wspace = 0.02\n",
78 | "outer_aspect = gridspec_aspect(1, 2, outer_ratios, 1, wspace=outer_wspace)\n",
79 | "\n",
80 | "crop1 = [0.35, 0.22, 0.1]\n",
81 | "crop2 = [0.5, 0.52, 0.12]\n",
82 | "crops = [crop1, crop2]\n",
83 | "crop_colors = [\"r\", \"g\"]\n",
84 | "\n",
85 | "fig = plt.figure(1, figsize=(PAGE_WIDTH, PAGE_WIDTH / outer_aspect))\n",
86 | "outer = fig.add_gridspec(1, 5, width_ratios=outer_ratios, wspace=outer_wspace)\n",
87 | "baseline_gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer[0], hspace=inset_hspace)\n",
88 | "ours_gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer[2], hspace=inset_hspace)\n",
89 | "ref_gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer[3], hspace=inset_hspace)\n",
90 | "img_gs = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=outer[1], wspace=img_wspace, hspace=0)\n",
91 | "plot_gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer[4], hspace=0.02)\n",
92 | "\n",
93 | "img_mixed = noisy_imgs[0].numpy()\n",
94 | "img_mixed[:, w//2:] = noisy_imgs[1][:, w//2:]\n",
95 | "img_mixed[:, w//2-2:w//2+2] = 1.0\n",
96 | "\n",
97 | "main_ax = fig.add_subplot(img_gs[:])\n",
98 | "main_ax.imshow(mi.util.convert_to_bitmap(img_mixed), interpolation='none')\n",
99 | "disable_ticks(main_ax)\n",
100 | "# disable_border(main_ax)\n",
101 | "\n",
102 | "# main_ax.text(2*w/3, 1.0, \"CV (ours)\", fontsize=DEFAULT_FONTSIZE)\n",
103 | "# main_ax.text(0.33, 1.0, \"Baseline\", fontsize=DEFAULT_FONTSIZE)\n",
104 | "\n",
105 | "inset_gridspecs = [baseline_gs, ours_gs]\n",
106 | "# Plot insets\n",
107 | "for l, ((rx, ry, s), color) in enumerate(zip(crops, crop_colors)):\n",
108 | " left = int(rx*w)\n",
109 | " right = left + int(s*w)\n",
110 | " top = int(ry*h)\n",
111 | " bottom = top + int(s*w)\n",
112 | " size = int(s*w)\n",
113 | " main_rect = patches.Rectangle((left, top), size, size, linewidth=1.5, edgecolor=color, facecolor='none')\n",
114 | " main_ax.add_patch(main_rect)\n",
115 | "\n",
116 | "\n",
117 | " for j, method_name in enumerate(method_names):\n",
118 | " ax = fig.add_subplot(inset_gridspecs[j][l])\n",
119 | " ax.imshow(mi.util.convert_to_bitmap(final_imgs[j][top:bottom, left:right]), interpolation='none')\n",
120 | " disable_ticks(ax)\n",
121 | " # disable_border(ax)\n",
122 | " plt.setp(ax.spines.values(), color=color)\n",
123 | " rect = patches.Rectangle((0, 0), size-1, size-1, linewidth=1.5, edgecolor=color, facecolor='none')\n",
124 | " ax.add_patch(rect)\n",
125 | " if l == 0:\n",
126 | " ax.set_title(method_name, y=1.0)\n",
127 | "\n",
128 | " ax = fig.add_subplot(ref_gs[l])\n",
129 | " ax.imshow(mi.util.convert_to_bitmap(img_ref[top:bottom, left:right]), interpolation='none')\n",
130 | " disable_ticks(ax)\n",
131 | " # disable_border(ax)\n",
132 | " plt.setp(ax.spines.values(), color=color)\n",
133 | " rect = patches.Rectangle((0, 0), size-1, size-1, linewidth=1.5, edgecolor=color, facecolor='none')\n",
134 | " ax.add_patch(rect)\n",
135 | "\n",
136 | " if l == 0:\n",
137 | " ax.set_title(\"Reference\", y=1.0)\n",
138 | "\n",
139 | "\n",
140 | "# Plot loss\n",
141 | "from matplotlib.ticker import FormatStrFormatter\n",
142 | "ax_loss = fig.add_subplot(plot_gs[0])\n",
143 | "ax_loss.locator_params(nbins=4, axis='y')\n",
144 | "ax_loss.yaxis.set_major_formatter(FormatStrFormatter('%.1e'))\n",
145 | "ax_var = fig.add_subplot(plot_gs[1])\n",
146 | "for j, method_name in enumerate(method_names):\n",
147 | " loss = results[j][\"loss\"]\n",
148 | " x = np.linspace(0, n_steps, len(loss))\n",
149 | " ax_loss.semilogy(x, loss, label=method_name)\n",
150 | " var = results[j][\"var\"]\n",
151 | " ax_var.semilogy(x, var)\n",
152 | "\n",
153 | "ax_loss.set_title(r\"$\\mathcal{L}^1$ Error\", y=1.0)\n",
154 | "ax_loss.yaxis.tick_right()\n",
155 | "ax_loss.legend()\n",
156 | "ax_var.set_title(\"Variance\", y=-0.4)\n",
157 | "ax_var.yaxis.tick_right()\n",
158 | "\n",
159 | "save_fig(\"teaser\", pad_inches=0.02)\n"
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": null,
165 | "metadata": {},
166 | "outputs": [],
167 | "source": []
168 | }
169 | ],
170 | "metadata": {
171 | "kernelspec": {
172 | "display_name": "Python 3.9.12 ('graphics')",
173 | "language": "python",
174 | "name": "python3"
175 | },
176 | "language_info": {
177 | "codemirror_mode": {
178 | "name": "ipython",
179 | "version": 3
180 | },
181 | "file_extension": ".py",
182 | "mimetype": "text/x-python",
183 | "name": "python",
184 | "nbconvert_exporter": "python",
185 | "pygments_lexer": "ipython3",
186 | "version": "3.9.12"
187 | },
188 | "orig_nbformat": 4,
189 | "vscode": {
190 | "interpreter": {
191 | "hash": "83642eaf50c97d4e19d0a23d915e5d4e870af428ff693683146158fe3feeea5a"
192 | }
193 | }
194 | },
195 | "nbformat": 4,
196 | "nbformat_minor": 2
197 | }
198 |
--------------------------------------------------------------------------------
/figures/teaser/generate_data.py:
--------------------------------------------------------------------------------
"""Generate data for the 'teaser' figure.

Runs the 'ajar' experiment with default settings, writing results
under the 'teaser' output directory.
"""
import os
import subprocess
import sys
from pathlib import Path

# Use the interpreter running this script (not whatever "python" is on PATH),
# and raise if the experiment exits with a non-zero status instead of
# silently ignoring the return code.
subprocess.run(
    [sys.executable, os.path.join(Path(__file__).parents[2], "run_experiment.py"),
     "ajar", "--output", "teaser"],
    check=True,
)
6 |
--------------------------------------------------------------------------------
/figures/teaser/teaser-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rgl-epfl/recursive_control_variates/be19f1be3f054f9497eeff97530e1827ad0d8d56/figures/teaser/teaser-dark.png
--------------------------------------------------------------------------------
/figures/teaser/teaser-light.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rgl-epfl/recursive_control_variates/be19f1be3f054f9497eeff97530e1827ad0d8d56/figures/teaser/teaser-light.png
--------------------------------------------------------------------------------
/figures/teaser/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rgl-epfl/recursive_control_variates/be19f1be3f054f9497eeff97530e1827ad0d8d56/figures/teaser/test.png
--------------------------------------------------------------------------------
/figures/volumes/figure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import sys\n",
10 | "sys.path.append(\"..\")\n",
11 | "from figutils import *\n",
12 | "import matplotlib.patches as patches\n",
13 | "from tol_colors import tol_cmap, tol_cset"
14 | ]
15 | },
16 | {
17 | "cell_type": "code",
18 | "execution_count": null,
19 | "metadata": {},
20 | "outputs": [],
21 | "source": [
22 | "def l1(x, y):\n",
23 | " return(dr.mean(dr.abs(x-y)))[0]"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "def ape(x, y):\n",
33 | " return dr.abs(x-y)"
34 | ]
35 | },
36 | {
37 | "cell_type": "code",
38 | "execution_count": null,
39 | "metadata": {},
40 | "outputs": [],
41 | "source": [
42 | "methods = [\"baseline\", \"cv_ps\"]\n",
43 | "method_names = [\"Equal time\", \"Equal quality\", \"Ours\"]\n",
44 | "sensor_counts = [8, 10, 8]\n",
45 | "sensors = [4, 1, 0]\n",
46 | "\n",
47 | "scenes = ['janga', 'rover', 'dust_devil']\n",
48 | "scene_names = ['Janga', 'Rover', 'Dust Devil']\n",
49 | "imgs = []\n",
50 | "error_imgs = []\n",
51 | "error_scales = [0, 0, 0]\n",
52 | "noisy_imgs = []\n",
53 | "it = 39\n",
54 | "ref_imgs = []\n",
55 | "results = []\n",
56 | "final_losses = []\n",
57 | "\n",
58 | "for i, scene_name in enumerate(scenes):\n",
59 | " imgs.append([])\n",
60 | " error_imgs.append([])\n",
61 | " final_losses.append([])\n",
62 | " results.append([])\n",
63 | " noisy_imgs.append([])\n",
64 | " img_ref = mi.TensorXf(mi.Bitmap(os.path.join(OUTPUT_DIR, scene_name, \"img_ref_display.exr\")))\n",
65 | " if scene_name == \"dust_devil\":\n",
66 | " img_ref_viz = mi.TensorXf(mi.Bitmap(os.path.join(OUTPUT_DIR, scene_name, \"img_ref_re.exr\")))\n",
67 | " for j, method in enumerate(methods):\n",
68 | " dirs = [scene_name]\n",
69 | " if method == \"baseline\":\n",
70 | " dirs.append(f\"{scene_name}_high_spp\")\n",
71 | " for d in dirs:\n",
72 | " noisy_img = mi.TensorXf(mi.Bitmap(os.path.join(OUTPUT_DIR, d, method, \"img\", f\"{it:04d}.exr\")))\n",
73 | " img = mi.TensorXf(mi.Bitmap(os.path.join(OUTPUT_DIR, d, method, \"img_final.exr\")))\n",
74 | " final_losses[i].append(l1(img, img_ref))\n",
75 | " results[i].append(np.load(os.path.join(OUTPUT_DIR, d, method, \"result.npz\")))\n",
76 | " w = img.shape[1] // sensor_counts[i]\n",
77 | " sensor = sensors[i]\n",
78 | "\n",
79 | " if scene_name == \"dust_devil\":\n",
80 | " img = mi.TensorXf(mi.Bitmap(os.path.join(OUTPUT_DIR, d, method, \"img_final_re.exr\")))\n",
81 | " imgs[i].append(img)\n",
82 | " error_imgs[i].append(ape(img, img_ref_viz))\n",
83 | " else:\n",
84 | " imgs[i].append(img[:, w*sensor:w*(sensor+1)])\n",
85 | " error_imgs[i].append(ape(img[:, w*sensor:w*(sensor+1)], img_ref[:, w*sensor:w*(sensor+1)]))\n",
86 | " error_scales[i] = max(error_scales[i], dr.max(error_imgs[i][-1])[0])\n",
87 | "\n",
88 | " wn = noisy_img.shape[1] // sensor_counts[i]\n",
89 | " noisy_imgs[i].append(noisy_img[:, wn*sensor:wn*(sensor+1)])\n",
90 | "\n",
91 | " if scene_name == \"dust_devil\":\n",
92 | " ref_imgs.append(img_ref_viz)\n",
93 | " else:\n",
94 | " ref_imgs.append(img_ref[:, w*sensor:w*(sensor+1)])\n",
95 | "\n"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | "aspect = 4.3/len(scenes)\n",
105 | "n_rows = 3\n",
106 | "\n",
107 | "inset_hspace = 0.02\n",
108 | "inset_wspace = 0.02\n",
109 | "inset_aspect = gridspec_aspect(2, 4, 1, 1, wspace=inset_wspace, hspace=inset_hspace)\n",
110 | "\n",
111 | "row_wspace = 0.2\n",
112 | "bar_r = 0.5\n",
113 | "width_ratios = [1, inset_aspect, bar_r, bar_r]\n",
114 | "row_aspect = gridspec_aspect(1, 4, width_ratios, 1, wspace=row_wspace)\n",
115 | "\n",
116 | "outer_hspace = 0.1\n",
117 | "outer_aspect = gridspec_aspect(n_rows, 1, row_aspect, 1, hspace=outer_hspace)\n",
118 | "\n",
119 | "fig = plt.figure(1, figsize=(PAGE_WIDTH, PAGE_WIDTH / outer_aspect))\n",
120 | "outer = fig.add_gridspec(n_rows, 1, hspace=outer_hspace)\n",
121 | "\n",
122 | "nbins = 4\n",
123 | "sns.set_palette(sns.color_palette(\"colorblind\"))\n",
124 | "error_maps = True\n",
125 | "\n",
126 | "from matplotlib.ticker import FormatStrFormatter\n",
127 | "crops = [\n",
128 | " [0.21, 0.3, 0.25],\n",
129 | " [0.5, 0.35, 0.25],\n",
130 | " [0.4, 0.5, 0.25]\n",
131 | "]\n",
132 | "\n",
133 | "for i, scene_name in enumerate(scene_names):\n",
134 | " gs_row = gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=outer[i], wspace=row_wspace, hspace=0.0, width_ratios=width_ratios)\n",
135 | "\n",
136 | " gs_insets = gridspec.GridSpecFromSubplotSpec(2, 4, subplot_spec=gs_row[1], wspace=inset_wspace, hspace=inset_hspace)\n",
137 | " gs_loss = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_row[2], wspace=0.0, hspace=0.0)\n",
138 | " gs_rt = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_row[3], wspace=0.0, hspace=0.0)\n",
139 | "\n",
140 | " ax = fig.add_subplot(gs_row[0])\n",
141 | " ax.imshow(mi.util.convert_to_bitmap(ref_imgs[i]), interpolation='none')\n",
142 | " \n",
143 | "\n",
144 | " disable_ticks(ax)\n",
145 | " if i == n_rows - 1:\n",
146 | " ax.set_title(\"Reference\", y=-0.15)\n",
147 | " ax.set_ylabel(scene_name)\n",
148 | "\n",
149 | " ax_loss = fig.add_subplot(gs_loss[0])\n",
150 | " ax_loss.tick_params(bottom=False, labelbottom=False)\n",
151 | " ax_loss.locator_params(nbins=nbins)\n",
152 | " ax_loss.yaxis.set_major_formatter(FormatStrFormatter('%.1e'))\n",
153 | "\n",
154 | " if i == n_rows - 1:\n",
155 | " ax_loss.set_title(r\"$\\mathcal{L}^1$\", y=-0.15)\n",
156 | " ax_rt = fig.add_subplot(gs_rt[0])\n",
157 | " ax_rt.tick_params(bottom=False, labelbottom=False)\n",
158 | " ax_rt.locator_params(nbins=nbins)\n",
159 | " if i == n_rows - 1:\n",
160 | " ax_rt.set_title(\"Runtime (s)\", y=-0.15)\n",
161 | "\n",
162 | " wx, wy, ws = crops[i]\n",
163 | " h, w, _ = imgs[i][0].shape\n",
164 | " left = int(wx*w)\n",
165 | " top = int(wy*h)\n",
166 | " size = int(ws*h)\n",
167 | "\n",
168 | " rect = patches.Rectangle((left, top), size, size, linewidth=1.0, edgecolor='g', facecolor='none')\n",
169 | " ax.add_patch(rect)\n",
170 | "\n",
171 | " ax = fig.add_subplot(gs_insets[0 if error_maps else 1, -1])\n",
172 | " ax.imshow(mi.util.convert_to_bitmap(ref_imgs[i][top:top+size, left:left+size]), interpolation='none')\n",
173 | " disable_ticks(ax)\n",
174 | " if i == n_rows - 1:\n",
175 | " with sns.axes_style('white'):\n",
176 | " if error_maps:\n",
177 | " ax = fig.add_subplot(gs_insets[1, -1])\n",
178 | " disable_border(ax)\n",
179 | " disable_ticks(ax)\n",
180 | " ax.set_title(\"Reference\", y=-0.3)\n",
181 | " else:\n",
182 | " ax.set_title(\"Reference\", y=-0.3)\n",
183 | "\n",
184 | " h1, w1, _ = noisy_imgs[i][0].shape\n",
185 | " if scene_name == \"Dust Devil\":\n",
186 | " wx = (left - 280) / 720\n",
187 | " left1 = int(wx*w1)\n",
188 | " top1 = int(wy*h1)\n",
189 | " size1 = int(ws*h1)\n",
190 | "\n",
191 | " for j, method in enumerate(method_names):\n",
192 | " ax = fig.add_subplot(gs_insets[0, j])\n",
193 | " ax.imshow(mi.util.convert_to_bitmap(noisy_imgs[i][j][top1:top1+size1, left1:left1+size1]), interpolation='none')\n",
194 | " disable_ticks(ax)\n",
195 | " if j == 0:\n",
196 | " ax.set_ylabel(\"Primal\")\n",
197 | "\n",
198 | " ax = fig.add_subplot(gs_insets[1, j])\n",
199 | " if error_maps:\n",
200 | " im = ax.imshow(error_imgs[i][j][top:top+size, left:left+size, 0], interpolation='none', cmap='inferno', vmin=0, vmax=0.1)\n",
201 | " else:\n",
202 | " ax.imshow(mi.util.convert_to_bitmap(imgs[i][j][top:top+size, left:left+size]), interpolation='none')\n",
203 | " disable_ticks(ax)\n",
204 | " if i == n_rows - 1:\n",
205 | " ax.set_title(method, y=-0.3)\n",
206 | " if j == 0:\n",
207 | " ax.set_ylabel(r\"$\\mathcal{L}^1$ Error\" if error_maps else \"Final\")\n",
208 | " elif j == 2:\n",
209 | " cbax = ax.inset_axes([1.03, 0.0, 0.05, 1.0], transform=ax.transAxes)\n",
210 | " cbar = fig.colorbar(im, cax=cbax, ticks=[0, 0.05, 0.09])\n",
211 | " cbar.outline.set_visible(False)\n",
212 | " cbar.ax.tick_params(size=0)\n",
213 | " cbar.ax.locator_params(nbins=nbins)\n",
214 | "\n",
215 | " ax_loss.bar(j*(0.8 if j < 2 else 0.9), final_losses[i][j], color=sns.color_palette()[1+(2+j)%3])\n",
216 | " ax_rt.bar(j*(0.8 if j < 2 else 0.9), results[i][j][\"runtime\"].sum() * 1e-3, alpha=0.5, color=sns.color_palette()[1+(2+j)%3], hatch='////')\n",
217 | " ax_rt.bar(j*(0.8 if j < 2 else 0.9), results[i][j][\"runtime\"][:,0].sum() * 1e-3, label=method, alpha=1.0, color=sns.color_palette()[1+(2+j)%3])\n",
218 | "\n",
219 | " if i == 0:\n",
220 | " ax_rt.legend(loc='upper left', bbox_to_anchor=(1.0, 1))\n",
221 | " elif i == 1:\n",
222 | " legend_elements = [patches.Patch(facecolor=sns.color_palette()[3], alpha=1.0),\n",
223 | " patches.Patch(facecolor=sns.color_palette()[3], alpha=0.75, hatch='////') ]\n",
"        ax_rt.legend(legend_elements, ['Primal', 'Adjoint'], loc='upper left', bbox_to_anchor=(1.0, 1.8))\n",
225 | "save_fig(\"volumes\")"
226 | ]
227 | },
228 | {
229 | "cell_type": "code",
230 | "execution_count": null,
231 | "metadata": {},
232 | "outputs": [],
233 | "source": []
234 | }
235 | ],
236 | "metadata": {
237 | "kernelspec": {
238 | "display_name": "Python 3.9.12 ('graphics')",
239 | "language": "python",
240 | "name": "python3"
241 | },
242 | "language_info": {
243 | "codemirror_mode": {
244 | "name": "ipython",
245 | "version": 3
246 | },
247 | "file_extension": ".py",
248 | "mimetype": "text/x-python",
249 | "name": "python",
250 | "nbconvert_exporter": "python",
251 | "pygments_lexer": "ipython3",
252 | "version": "3.9.12"
253 | },
254 | "orig_nbformat": 4,
255 | "vscode": {
256 | "interpreter": {
257 | "hash": "83642eaf50c97d4e19d0a23d915e5d4e870af428ff693683146158fe3feeea5a"
258 | }
259 | }
260 | },
261 | "nbformat": 4,
262 | "nbformat_minor": 2
263 | }
264 |
--------------------------------------------------------------------------------
/figures/volumes/generate_data.py:
--------------------------------------------------------------------------------
import subprocess
import os
from pathlib import Path
import mitsuba as mi
mi.set_variant("cuda_ad_rgb")

# Scenes of the 'volumes' figure and the spp used for the equal-quality
# baseline rerun of each scene.
scenes = ["dust_devil", "janga", "rover"]
spp_high = [128, 32, 32]

# The experiment driver lives at the repository root, two levels up.
run_experiment = os.path.join(Path(__file__).parents[2], "run_experiment.py")

for scene, spp in zip(scenes, spp_high):
    # Equal budget
    subprocess.call(["python", run_experiment, scene])
    # Run baseline at higher spp (equal quality)
    subprocess.call(["python", run_experiment, scene,
                     "--spp", f"{spp}", "--method", "baseline",
                     "--output", f"{scene}_high_spp"])

# Re-render the dust devil results for display.
subprocess.call(["python", os.path.join(os.path.dirname(__file__), "render_dust.py")])
--------------------------------------------------------------------------------
/figures/volumes/render_dust.py:
--------------------------------------------------------------------------------
import mitsuba as mi
mi.set_variant('cuda_ad_rgb')
import drjit as dr
import os
from mitsuba.scalar_rgb import Transform4f as T
import matplotlib.pyplot as plt
import numpy as np

import sys
from pathlib import Path
# This file lives at <root>/figures/volumes/, so the repository root (which
# contains experiments.py) is parents[2], not parents[3]; sys.path entries
# must also be plain strings, not Path objects.
sys.path.append(str(Path(__file__).parents[2]))
from experiments import SCENES_DIR, OUTPUT_DIR

from tqdm import trange
15 |
# Camera: position i of `sensor_count` points on a circle of radius `d` at
# height `h`, looking at the center of the unit cube containing the volume.
target = [0.5, 0.5, 0.5]
d = 1
h = 0.5
sensor_count = 8
i = 0
theta = 2*np.pi / sensor_count * i
sensor = {
    'type': 'perspective',
    'fov': 72.734,
    'to_world': T.look_at(target=target, origin=[target[0]+d*np.cos(theta), h, target[2]+d*np.sin(theta)], up=[0, 1, 0]),
    'film': {
        'type': 'hdrfilm',
        'width': 1280, 'height': 1280,
    },
}

# Heterogeneous participating medium: gridded extinction (sigma_t) and albedo
# volumes with a Henyey-Greenstein phase function.
medium = {
    'type': 'heterogeneous',
    'sigma_t': {
        'type': 'gridvolume',
        'filename': os.path.join(SCENES_DIR, 'dust-devil/volumes/embergen_dust_devil_tornado_a_50-256-256-256.vol'),
        'use_grid_bbox': False,
        'accel': False
    },
    'albedo': {
        'type': 'gridvolume',
        'filename': os.path.join(SCENES_DIR, 'dust-devil/volumes/albedo-constant-sand-256-256-256.vol'),
        'use_grid_bbox': False,
        'accel': False
    },
    'phase': {
        'type': 'hg',
        'g': 0.877  # strongly forward-scattering
    },
    'scale': 100.0,  # global density multiplier applied to sigma_t

    'sample_emitters': True,
    'has_spectral_extinction': False,
    'majorant_resolution_factor': 0
}

# Scene: a unit cube with a null BSDF acting as the medium boundary, lit by
# an environment map.
scene_dict = {
    'type': 'scene',
    'object': {
        'type': 'obj',
        'filename': os.path.join(SCENES_DIR, 'common/meshes/cube_unit.obj'),
        'bsdf': {'type': 'null'},
        'interior': medium
    },
    'envmap': {
        'type': 'envmap',
        'filename': os.path.join(SCENES_DIR, 'common/textures/kloofendal_38d_partly_cloudy_4k.exr'),
        'scale': 1.0,
    },
    'sensor': sensor
}
72 |
scene_name = 'dust_devil'
scene = mi.load_dict(scene_dict)
integrator = mi.load_dict({'type': 'prbvolpath', 'max_depth':64, 'rr_depth': 64})

# Accumulate several independently-seeded passes into a low-noise reference.
n_passes = 32
spp = 128
img_ref = mi.TensorXf(0.0)
for seed in trange(n_passes):
    img_ref += mi.render(scene, integrator=integrator, seed=seed, spp=spp) / n_passes

mi.Bitmap(img_ref).write(os.path.join(OUTPUT_DIR, scene_name, 'img_ref_re.exr'))

# Output directories of the optimization runs to re-render.
dirs = [os.path.join(OUTPUT_DIR, scene_name, 'baseline'),
        os.path.join(OUTPUT_DIR, f"{scene_name}_high_spp", 'baseline'),
        os.path.join(OUTPUT_DIR, scene_name, 'cv_ps'),]

keys = ['object.interior_medium.albedo.data', 'object.interior_medium.sigma_t.data']
# The previous `for i, d in enumerate(dirs)` never used the index, and the
# inner pass loop shadowed it; iterate over the directories directly.
for d in dirs:
    # Load the optimized volume grids of this run into the scene.
    scene_params = mi.traverse(scene)
    for key in keys:
        final_volume = mi.load_dict({
            'type': 'gridvolume',
            'filename': os.path.join(d, f"{key.replace('.', '_')}_final.vol"),
        })
        scene_params[key] = mi.traverse(final_volume)['data']
    scene_params.update()

    # Re-render the final state with the same multi-pass accumulation.
    img = mi.TensorXf(0.0)
    for seed in trange(n_passes):
        img += mi.render(scene, integrator=integrator, seed=seed, spp=spp) / n_passes

    mi.Bitmap(img).write(os.path.join(d, 'img_final_re.exr'))
105 |
--------------------------------------------------------------------------------
/figures/weights/figure.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import sys\n",
10 | "sys.path.append(\"..\")\n",
11 | "from figutils import *"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": null,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "scene_name = 'weights'\n",
21 | "output_dir = os.path.join(OUTPUT_DIR, scene_name)\n",
22 | "i = 49"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": null,
28 | "metadata": {},
29 | "outputs": [],
30 | "source": [
31 | "methods = [\"baseline\", \"cv_pss\", \"cv_ps\"]\n",
32 | "method_names = [r\"\\textbf{(a)} Baseline\", r\"\\textbf{(b)} CV-PSS\", r\"\\textbf{(c)} CV-PS\"]\n",
33 | "results = []\n",
34 | "final_states = []\n",
35 | "intermediate_states = []\n",
36 | "textures = []\n",
37 | "weights = []\n",
38 | "\n",
39 | "ref_img = mi.Bitmap(os.path.join(output_dir, \"img_ref.exr\"))\n",
40 | "start_img = mi.Bitmap(os.path.join(output_dir, \"img_start.exr\"))\n",
41 | "\n",
42 | "for method in methods:\n",
43 | " results.append(np.load(os.path.join(output_dir, method, \"result.npz\")))\n",
44 | " final_states.append(mi.Bitmap(os.path.join(output_dir, method, \"img_final.exr\")))\n",
45 | " intermediate_states.append(mi.Bitmap(os.path.join(output_dir, method, \"img\", f\"{i:04d}.exr\")))\n",
46 | " if method != 'baseline' and i > 1:\n",
47 | " weights.append(mi.Bitmap(os.path.join(output_dir, method, \"weights\", f\"{i:04d}.exr\")))\n",
48 | " else:\n",
49 | " weights.append(None)\n"
50 | ]
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": null,
55 | "metadata": {},
56 | "outputs": [],
57 | "source": [
58 | "sns.set_style('white')"
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": null,
64 | "metadata": {},
65 | "outputs": [],
66 | "source": [
67 | "base_size = 4\n",
68 | "w,h = ref_img.size()\n",
69 | "\n",
70 | "n_cols = 3\n",
71 | "n_rows = 2\n",
72 | "aspect = w * n_cols / h / n_rows\n",
73 | "\n",
74 | "fig = plt.figure(1, figsize=(TEXT_WIDTH, TEXT_WIDTH / aspect))\n",
75 | "wspace= 0.01\n",
76 | "gs = fig.add_gridspec(n_rows, n_cols, wspace=wspace, hspace=wspace*aspect)\n",
77 | "\n",
78 | "for i, method in enumerate(method_names):\n",
79 | " ax = fig.add_subplot(gs[0, i])\n",
80 | " ax.imshow(mi.util.convert_to_bitmap(intermediate_states[i]), interpolation='none')\n",
81 | " disable_ticks(ax)\n",
82 | "\n",
83 | " if i == 0:\n",
84 | " ax.set_ylabel(\"Primal\")\n",
85 | "\n",
86 | " ax = fig.add_subplot(gs[1, i])\n",
87 | " if weights[i] is not None:\n",
88 | " weight = mi.TensorXf(weights[i])[:,:,0]\n",
89 | " im = ax.imshow(weight, cmap='Reds_r', vmin=0, vmax=1, interpolation='none')\n",
90 | " else:\n",
91 | " weight = np.ones((h,w))\n",
92 | " ax.text(0.5, 0.5, \"N/A\", ha=\"center\", va=\"center\", color=\"darkgrey\")\n",
93 | " disable_border(ax)\n",
94 | "\n",
95 | " if i == 0:\n",
96 | " ax.set_ylabel(\"Weights\")\n",
97 | "\n",
98 | " ax.set_title(method, y=-0.25)\n",
99 | " disable_ticks(ax)\n",
100 | " if i == 2:\n",
101 | " cbax = ax.inset_axes([1.02, 0, 0.04, 1], transform=ax.transAxes)\n",
102 | " cbar = fig.colorbar(im, cax=cbax, ticks=[0, 0.5, 1])\n",
103 | " cbar.outline.set_visible(False)\n",
104 | " cbar.ax.tick_params(size=0)\n",
105 | "save_fig(\"weights\")\n"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": null,
111 | "metadata": {},
112 | "outputs": [],
113 | "source": []
114 | }
115 | ],
116 | "metadata": {
117 | "kernelspec": {
118 | "display_name": "Python 3.9.12 ('graphics')",
119 | "language": "python",
120 | "name": "python3"
121 | },
122 | "language_info": {
123 | "codemirror_mode": {
124 | "name": "ipython",
125 | "version": 3
126 | },
127 | "file_extension": ".py",
128 | "mimetype": "text/x-python",
129 | "name": "python",
130 | "nbconvert_exporter": "python",
131 | "pygments_lexer": "ipython3",
132 | "version": "3.9.12"
133 | },
134 | "orig_nbformat": 4,
135 | "vscode": {
136 | "interpreter": {
137 | "hash": "83642eaf50c97d4e19d0a23d915e5d4e870af428ff693683146158fe3feeea5a"
138 | }
139 | }
140 | },
141 | "nbformat": 4,
142 | "nbformat_minor": 2
143 | }
144 |
--------------------------------------------------------------------------------
/figures/weights/generate_data.py:
--------------------------------------------------------------------------------
import subprocess
import os
from pathlib import Path

# Generate the data for the weights figure: run the 'bunnies' scene with all
# methods (plus the forced-baseline variant) for 50 iterations.
repo_root = Path(__file__).parents[2]
command = [
    "python",
    os.path.join(repo_root, "run_experiment.py"),
    "bunnies",
    "--all",
    "--force_baseline",
    "--output", "weights",
    "--n_steps", "50",
]
subprocess.call(command)
6 |
--------------------------------------------------------------------------------
/largesteps.py:
--------------------------------------------------------------------------------
1 | import mitsuba as mi
2 | import drjit as dr
3 | import numpy as np
4 | from cholespy import CholeskySolverF, MatrixType
5 | import scipy.sparse as sp
6 |
def laplacian_2d(tex, lambda_=19):
    """Build the regularized screened-Laplacian system matrix for a 2D texture.

    Returns (I + lambda_ * L) as a sparse CSC matrix, where L is the
    combinatorial 4-neighbor graph Laplacian of the h x w pixel grid.
    Only `tex.shape` is read; texel values are never used.

    Args:
        tex: array-like of shape (h, w, channels); only the shape matters.
        lambda_: strength of the Laplacian (smoothness) term.

    Returns:
        scipy.sparse.csc_matrix of shape (h*w, h*w).
    """
    h, w = tex.shape[:2]
    N = h * w
    idx = np.arange(N)

    # Horizontal edges: i ranges over pixels with a right neighbor, j over
    # pixels with a left neighbor, so element-wise j = i + 1.
    i = idx[(idx+1) % w != 0]
    j = idx[idx % w != 0]
    # Vertical edges: k has a bottom neighbor, l a top neighbor (l = k + w).
    k = idx[idx % (h*w) < w*(h-1)]
    l = idx[idx % (h*w) >= w]

    ij = np.stack((i, j))
    ji = np.stack((j, i))
    ii = np.stack((i, i))
    jj = np.stack((j, j))
    kl = np.stack((k, l))
    lk = np.stack((l, k))
    kk = np.stack((k, k))
    ll = np.stack((l, l))
    # Off-diagonal (adjacency) entries first, diagonal (degree) entries last.
    indices = np.concatenate((ij, ji, kl, lk, ii, jj, kk, ll), axis=1)
    values = np.ones(indices.shape[1])
    # Everything except the trailing diagonal entries is -1 (graph Laplacian).
    values[:-2*(ii.shape[1]+kk.shape[1])] = -1
    # Explicit shape so degenerate grids (w == 1 or h == 1) still yield N x N.
    return sp.csc_matrix((values, indices), shape=(N, N)) * lambda_ + sp.identity(N, format="csc")
29 |
def laplacian_3d(vol, lambda_=19):
    """Build the regularized screened-Laplacian system matrix for a 3D volume.

    Returns (I + lambda_ * L) as a sparse CSC matrix, where L is the
    combinatorial 6-neighbor graph Laplacian of the d x h x w voxel grid.
    Only `vol.shape` is read; voxel values are never used.

    Args:
        vol: array-like of shape (d, h, w, channels); only the shape matters.
        lambda_: strength of the Laplacian (smoothness) term.

    Returns:
        scipy.sparse.csc_matrix of shape (d*h*w, d*h*w).
    """
    d, h, w = vol.shape[:3]
    N = d * h * w
    idx = np.arange(N)

    # x edges: i has a right neighbor, j a left neighbor (j = i + 1).
    i = idx[(idx+1) % w != 0]
    j = idx[idx % w != 0]
    # y edges within each slice: l = k + w.
    k = idx[idx % (h*w) < w*(h-1)]
    l = idx[idx % (h*w) >= w]
    # z edges across slices: n = m + w*h.
    m = idx[idx < w*h*(d-1)]
    n = idx[idx >= w*h]

    ij = np.stack((i, j))
    ji = np.stack((j, i))
    ii = np.stack((i, i))
    jj = np.stack((j, j))
    kl = np.stack((k, l))
    lk = np.stack((l, k))
    kk = np.stack((k, k))
    ll = np.stack((l, l))
    mn = np.stack((m, n))
    nm = np.stack((n, m))
    mm = np.stack((m, m))
    nn = np.stack((n, n))
    # Off-diagonal (adjacency) entries first, diagonal (degree) entries last.
    indices = np.concatenate((ij, ji, kl, lk, mn, nm, ii, jj, kk, ll, mm, nn), axis=1)
    values = np.ones(indices.shape[1])
    # Everything except the trailing diagonal entries is -1 (graph Laplacian).
    values[:-2*(ii.shape[1]+kk.shape[1]+mm.shape[1])] = -1
    # Explicit shape so degenerate grids still yield an N x N matrix.
    return sp.csc_matrix((values, indices), shape=(N, N)) * lambda_ + sp.identity(N, format="csc")
58 |
class CholeskySolve(dr.CustomOp):
    """Differentiable sparse Cholesky solve as a Dr.Jit custom op.

    Solves A x = u with a pre-factorized symmetric matrix (a `cholespy`
    solver). Because A is symmetric, both the forward- and reverse-mode
    derivatives are again a solve with the same factorization.
    """

    def eval(self, solver, u):
        """Solve A x = u.

        `u` may have an arbitrary leading shape; it is flattened to the 2D
        layout (prod(leading dims), channels) expected by cholespy, then the
        result is reshaped back to the original shape.
        """
        self.solver = solver
        self.shape = u.shape
        self.shape_solver = (dr.prod(self.shape[:-1]), self.shape[-1])
        x = dr.zeros(mi.TensorXf, shape=self.shape_solver)
        u = mi.TensorXf(u.array, self.shape_solver)
        solver.solve(u, x)
        return mi.TensorXf(x.array, self.shape)

    def forward(self):
        """Forward-mode derivative: dx = A^{-1} du."""
        # Flatten the incoming tangent to the 2D solver layout, matching eval().
        grad = mi.TensorXf(self.grad_in('u').array, self.shape_solver)
        x = dr.zeros(mi.TensorXf, shape=self.shape_solver)
        self.solver.solve(grad, x)
        self.set_grad_out(mi.TensorXf(x.array, self.shape))

    def backward(self):
        """Reverse-mode derivative: du = A^{-T} dx = A^{-1} dx (A symmetric)."""
        x = dr.zeros(mi.TensorXf, shape=self.shape_solver)
        # Fix: the incoming gradient must be reshaped with `self.shape_solver`;
        # the original code passed `self.solver` (the solver object) as shape.
        self.solver.solve(mi.TensorXf(self.grad_out().array, self.shape_solver), x)
        self.set_grad_in('u', mi.TensorXf(x.array, self.shape))

    def name(self):
        return "Cholesky solve"
86 |
87 |
class CholeskySolver():
    """Pre-factorized screened-Laplacian system usable as a differentiable
    solver and gradient preconditioner for textures (3D) or volumes (4D)."""

    def __init__(self, x, lambda_):
        self.shape = x.shape
        self.channels = x.shape[-1]
        # Supported layouts: (h, w, c) textures and (d, h, w, c) volumes.
        assert len(self.shape) in (3, 4)
        self.N = dr.prod(self.shape[:-1])

        build_laplacian = laplacian_2d if len(self.shape) == 3 else laplacian_3d
        system = build_laplacian(x, lambda_)

        # Factorize (I + lambda * L) once; all solves reuse the factorization.
        self.solver = CholeskySolverF(self.N,
                                      mi.TensorXi(system.indptr),
                                      mi.TensorXi(system.indices),
                                      mi.TensorXd(system.data),
                                      MatrixType.CSC)

    def solve(self, u):
        """Solve the factorized system for the right-hand side `u`."""
        return dr.custom(CholeskySolve, self.solver, u)

    def precondition(self, u):
        """Apply the inverse system twice in a row to `u`."""
        return self.solve(self.solve(u))
106 |
def to_differential(tex, lambda_):
    """Map a texture/volume to its differential representation u = (I + lambda L) v.

    Args:
        tex: tensor of shape (h, w, c) or (d, h, w, c); must expose `.numpy()`.
        lambda_: strength of the Laplacian term.

    Returns:
        mi.TensorXf with the same shape as `tex`.

    Raises:
        ValueError: if `tex` is neither 3- nor 4-dimensional. (Previously this
        case fell through with `L_csc` unbound, raising a confusing NameError.)
    """
    if len(tex.shape) == 3:
        L_csc = laplacian_2d(tex, lambda_)
    elif len(tex.shape) == 4:
        L_csc = laplacian_3d(tex, lambda_)
    else:
        raise ValueError(f"to_differential: expected a 3D or 4D tensor, got shape {tex.shape}")
    # Flatten channels-last, apply the sparse system, and reshape back.
    u = L_csc @ tex.numpy().reshape((-1, tex.shape[-1]))
    return mi.TensorXf(mi.TensorXf(u).array, tex.shape)
113 |
--------------------------------------------------------------------------------
/optimize.py:
--------------------------------------------------------------------------------
1 | import mitsuba as mi
2 | import drjit as dr
3 | import numpy as np
4 | from tqdm import tqdm
5 | import os
6 |
7 | from utils import *
8 | from vgg import VGGLoss
9 |
def run_opt(params):
    """Run a differentiable-rendering optimization described by `params`.

    `params` is a configuration dict; keys read here include:
        scene (required), scene_ref, loss ('L1'|'L2'|'VGG'), method,
        output_dir (required), n_steps, lr, spp, spp_grad, spp_ref,
        benchmark, recomp_freq, integrator (required), denoise,
        adjoint_integrator, beta1, beta2, force_baseline, pre_update,
        upsample, schedule, final_res.

    Returns:
        dict with 'loss' and 'var' arrays (one entry per `recomp_freq`
        iterations) and a per-iteration 'runtime' array of shape (n_steps, 2).
    """

    scene = params['scene']
    # The reference can be rendered from a different scene (e.g. true geometry).
    scene_ref = params.get('scene_ref', scene)

    # Select the image loss.
    loss_name = params.get('loss', 'L1')
    if loss_name == "L1":
        loss_func = l1_loss
    elif loss_name == "L2":
        loss_func = l2_loss
    elif loss_name == "VGG":
        loss_func = VGGLoss()
    else:
        raise ValueError("Unknown loss function")

    method = params.get('method', 'baseline')

    output_dir = params['output_dir']

    n_steps = params.get('n_steps', 0)
    lr = params.get('lr', 1e-2)
    spp = params.get('spp', 1) # SPP for the forward pass
    spp_grad = params.get('spp_grad', spp) # SPP for gradient estimation
    spp_ref = params.get('spp_ref', 1024) # SPP for the reference image
    benchmark = params.get('benchmark', False)

    assert len(scene.sensors()) == 1, "Only scenes with a single sensor are supported. Consider using the batch sensor to use several viewpoints."

    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    scene_params = mi.traverse(scene)

    # Arrays are sized so that loss/variance can be recorded every
    # `recomp_freq` iterations (presumably inside utils.checkpoint — verify).
    recomp_freq = params.get('recomp_freq', 10)
    result_dict = {
        'loss': np.zeros(1 + n_steps // recomp_freq, dtype=np.float32),
        'var': np.zeros(1 + n_steps // recomp_freq, dtype=np.float32),
        'runtime': np.zeros((n_steps, 2), dtype=np.float32)
    }

    if 'integrator' in params:
        base_integrator = params['integrator']
    else:
        raise ValueError("No integrator specified")

    # Wrap the base integrator in the 'meta' integrator, which implements the
    # control-variate / denoising logic for the chosen method.
    integrator_dict = {
        'type': 'meta',
        'method': method,
        'denoise': params.get('denoise', False),
        'integrator': base_integrator,
        'adjoint_integrator': params.get('adjoint_integrator', base_integrator),
        'beta1': params.get('beta1', 0.9),
        'beta2': params.get('beta2', 0.999),
        'force_baseline': params.get('force_baseline', False),
        'pre_update': params.get('pre_update', False)
    }
    integrator = mi.load_dict(integrator_dict)

    # Texture/volume upsampling frequency
    upsample_steps = []
    for w in params.get('upsample', []):
        # Schedule entries are fractions of the total step count.
        assert w > 0 and w < 1
        upsample_steps.append(int(n_steps * w))

    # Step size schedule
    schedule_steps = []
    for w in params.get('schedule', []):
        assert w > 0 and w < 1
        schedule_steps.append(int(n_steps * w))

    # Render the reference image
    img_ref = render_reference(params, scene_ref, base_integrator)

    # Render the reference at the display resolution
    ref_display_path = os.path.join(os.path.dirname(output_dir), "img_ref_display.exr")
    if 'final_res' in params and not os.path.exists(ref_display_path):
        img_display = render_display(params, scene_ref, scene_params, base_integrator)
        mi.Bitmap(img_display).write(ref_display_path)

    # Initialize the optimizer
    opt = mi.ad.Adam(lr=lr)

    # Initialize the parameters
    initialize_parameters(params, opt, scene, scene_params)

    # Render the starting point
    save_path = os.path.join(os.path.dirname(params['output_dir']), f"img_start.exr")
    if not os.path.isfile(save_path):
        with dr.suspend_grad():
            start_img = mi.render(scene, seed = 2048, integrator=base_integrator, spp=spp_ref)
        mi.Bitmap(start_img).write_async(save_path)

    # Main loop
    with tqdm(total=n_steps, bar_format="{l_bar}{bar}| {n:.0f}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]", desc=f"{os.path.basename(os.path.dirname(params['output_dir'])):<20} | {method:<20}") as pbar:
        for i in range(n_steps):
            # Skip the first iterations when timing to avoid measuring
            # kernel compilation warm-up.
            active_benchmark = benchmark and i > 16
            with dr.scoped_set_flag(dr.JitFlag.KernelHistory, active_benchmark):
                scene_params.update(opt)
                img = mi.render(scene, scene_params, integrator=integrator, seed=i, spp=spp, spp_grad=spp_grad)

                loss = loss_func(img, img_ref)
                if loss_name == 'VGG':
                    # The VGG loss is regularized with an additional L1 term.
                    loss += l1_loss(img, img_ref)

                if active_benchmark:
                    dr.schedule(loss)
                    # Column 0: primal (forward rendering + loss) time.
                    result_dict['runtime'][i, 0] = runtime()


            checkpoint(scene, params, i, integrator, loss_func, img, img_ref, result_dict)

            with dr.scoped_set_flag(dr.JitFlag.KernelHistory, active_benchmark):
                dr.backward(loss)

                # Apply gradient preconditioning if needed
                precondition(params, opt)
                # Gradient descent
                opt.step()
                # Clamp parameters if needed
                clamp(params, opt)

                if active_benchmark:
                    # Column 1: adjoint pass + parameter update time.
                    result_dict['runtime'][i, 1] = runtime()

            # Volume upsampling
            if i in upsample_steps:
                upsample(scene, params, opt, integrator)

            # Update step size
            if i in schedule_steps:
                lr *= 0.5
                opt.set_learning_rate(lr)

            pbar.update(1)

    with dr.suspend_grad():
        scene_params.update(opt)
        # Get the final state
        img_final = render_display(params, scene_ref, scene_params, base_integrator)
        mi.Bitmap(img_final).write(os.path.join(output_dir, "img_final.exr"))

        # Save the final state
        save_final_state(params, opt, output_dir)

    return result_dict
155 |
--------------------------------------------------------------------------------
/plugins/__init__.py:
--------------------------------------------------------------------------------
import mitsuba as mi

from .twostatebsdf import TwoStateBSDF
from .twostatemedium import TwoStateMedium
from .twostatepath import *
from .twostatevolpath import *
from .metaintegrator import MetaIntegrator
from .volpathsimple import VolpathSimpleIntegrator
from .cv_integrator import CVIntegrator

# Register all custom plugins with Mitsuba so they can be instantiated via
# mi.load_dict({'type': <name>, ...}).
mi.register_integrator("volpathsimple", lambda props: VolpathSimpleIntegrator(props))
# Two-state path tracer for surfaces (renders correlated image pairs).
mi.register_integrator("twostateprb", TwoStatePRBIntegrator)
# Meta integrator wrapping another integrator with CV / denoising logic.
mi.register_integrator("meta", MetaIntegrator)
# Control-variate integrator (expects a 'twostate' nested integrator).
mi.register_integrator("cv", CVIntegrator)
# Two-state path tracer for volumes.
mi.register_integrator("twostateprbvolpath", TwoStatePRBVolpathIntegrator)
mi.register_bsdf('twostate', TwoStateBSDF)
mi.register_medium('twostatemedium', TwoStateMedium)
--------------------------------------------------------------------------------
/plugins/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rgl-epfl/recursive_control_variates/be19f1be3f054f9497eeff97530e1827ad0d8d56/plugins/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/plugins/__pycache__/cv_integrator.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rgl-epfl/recursive_control_variates/be19f1be3f054f9497eeff97530e1827ad0d8d56/plugins/__pycache__/cv_integrator.cpython-39.pyc
--------------------------------------------------------------------------------
/plugins/__pycache__/metaintegrator.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rgl-epfl/recursive_control_variates/be19f1be3f054f9497eeff97530e1827ad0d8d56/plugins/__pycache__/metaintegrator.cpython-39.pyc
--------------------------------------------------------------------------------
/plugins/__pycache__/twostatebsdf.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rgl-epfl/recursive_control_variates/be19f1be3f054f9497eeff97530e1827ad0d8d56/plugins/__pycache__/twostatebsdf.cpython-39.pyc
--------------------------------------------------------------------------------
/plugins/__pycache__/twostatemedium.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rgl-epfl/recursive_control_variates/be19f1be3f054f9497eeff97530e1827ad0d8d56/plugins/__pycache__/twostatemedium.cpython-39.pyc
--------------------------------------------------------------------------------
/plugins/__pycache__/twostatepath.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rgl-epfl/recursive_control_variates/be19f1be3f054f9497eeff97530e1827ad0d8d56/plugins/__pycache__/twostatepath.cpython-39.pyc
--------------------------------------------------------------------------------
/plugins/__pycache__/twostatevolpath.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rgl-epfl/recursive_control_variates/be19f1be3f054f9497eeff97530e1827ad0d8d56/plugins/__pycache__/twostatevolpath.cpython-39.pyc
--------------------------------------------------------------------------------
/plugins/__pycache__/volpathsimple.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rgl-epfl/recursive_control_variates/be19f1be3f054f9497eeff97530e1827ad0d8d56/plugins/__pycache__/volpathsimple.cpython-39.pyc
--------------------------------------------------------------------------------
/plugins/__pycache__/welford.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rgl-epfl/recursive_control_variates/be19f1be3f054f9497eeff97530e1827ad0d8d56/plugins/__pycache__/welford.cpython-39.pyc
--------------------------------------------------------------------------------
/plugins/cv_integrator.py:
--------------------------------------------------------------------------------
1 | import mitsuba as mi
2 | import drjit as dr
3 | from typing import Union, Any
4 | from .welford import StatisticsEstimator
5 | from .twostatepath import TwoStatePRBIntegrator
6 | from .twostatevolpath import TwoStatePRBVolpathIntegrator
7 | from utils import runtime
8 |
class CVIntegrator(mi.ad.integrators.common.RBIntegrator):
    """
    This integrator encapsulates the control variate logic around an inner
    integrator. It implements the 'path space' variant of the algorithm. This
    means it expects a 'twostate' integrator as inner integrator to render the
    correlated images. For a more general implementation, see metaintegrator.py
    """
    def __init__(self, props):
        super().__init__(props)
        self.integrator = props.get('integrator')
        if type(self.integrator) not in (TwoStatePRBIntegrator, TwoStatePRBVolpathIntegrator):
            raise ValueError("CV integrator expects a 'twostate' integrator as nested integrator !")

        # Integrator used for the differentiable passes; defaults to the
        # primal (two-state) integrator.
        self.adjoint_integrator = props.get('adjoint_integrator', self.integrator)
        # Number of iterations before the control weights start being updated.
        self.warmup = props.get('warmup', 1)
        # Statistics for the control variates
        self.beta1 = props.get('beta1', 0.9)
        self.beta2 = props.get('beta2', 0.999)
        self.reset()

    def reset(self):
        # Invalidate the running buffers; init_buffers() re-creates them
        # lazily on the next render.
        self.init = False

    def init_buffers(self):
        # Initialize the statistics estimators if not already done
        if self.init:
            return

        self.img = mi.TensorXf(0.0)  # running control-variate image estimate
        self.stats = StatisticsEstimator(self.beta1, self.beta2)
        self.v_n = mi.TensorXf(0.0)  # running variance of the CV estimator
        self.w_s = mi.TensorXf(0.0)  # per-pixel control weights
        self.F = mi.TensorXf(0.0)    # rendering of the current scene state
        self.H = mi.TensorXf(0.0)    # correlated rendering of the previous state
        self.it = 0                  # iteration counter

        self.init = True

    def render(self: mi.SamplingIntegrator,
               scene: mi.Scene,
               sensor: int = 0, # WARN: this could also be an object, but we don't support it
               seed: int = 0,
               spp: int = 0,
               develop: bool = True,
               evaluate: bool = True) -> mi.TensorXf:
        """Render and return the variance-reduced image.

        The nested two-state integrator renders the previous (H) and current
        (F) scene states with correlated samples; the output recursively
        combines them with the running estimate via per-pixel weights.
        """

        assert isinstance(sensor, int)
        self.init_buffers()

        self.H, self.F = self.integrator.render_twostates(scene, sensor, seed, spp)

        # Compute the control weights and update statistics
        if self.it > self.warmup:
            v_0, v_1, cov = self.stats.get()
            # Recursive update of the CV estimator variance and of the
            # per-pixel weight (recursive control-variates update rule).
            self.v_n = self.w_s**2 * (self.v_n + v_0) + v_1 - 2*self.w_s * cov
            dr.schedule(self.v_n)
            self.w_s = cov / (v_0 + self.v_n)
            dr.schedule(self.w_s)
            # Guard against degenerate statistics and keep the weight in [0, 1].
            self.w_s = dr.select(dr.isnan(self.w_s) | dr.isinf(self.w_s), 0.0, self.w_s)
            self.w_s = dr.clamp(self.w_s, 0.0, 1.0)

        # Recursive combination: previous estimate corrected by (img - H),
        # anchored at the fresh rendering F.
        self.img = self.w_s * (self.img - self.H) + self.F
        dr.schedule(self.img)

        if self.it > 0:
            self.stats.update(self.H, self.F)

        self.it += 1
        return self.img

    def render_forward(self: mi.SamplingIntegrator,
                       scene: mi.Scene,
                       params: Any,
                       sensor: Union[int, mi.Sensor] = 0,
                       seed: int = 0,
                       spp: int = 0) -> mi.TensorXf:
        # Differentiation is delegated entirely to the adjoint integrator.
        return self.adjoint_integrator.render_forward(scene, params, sensor, seed, spp)

    def render_backward(self: mi.SamplingIntegrator,
                        scene: mi.Scene,
                        params: Any,
                        grad_in: mi.TensorXf,
                        sensor: Union[int, mi.Sensor] = 0,
                        seed: int = 0,
                        spp: int = 0) -> None:
        # Differentiation is delegated entirely to the adjoint integrator.
        return self.adjoint_integrator.render_backward(scene, params, grad_in, sensor, seed, spp)
95 |
--------------------------------------------------------------------------------
/plugins/metaintegrator.py:
--------------------------------------------------------------------------------
1 | import mitsuba as mi
2 | import drjit as dr
3 | from typing import Union, Any
4 | from .welford import StatisticsEstimator
5 | from utils import runtime
6 |
7 | class MetaIntegrator(mi.ad.integrators.common.RBIntegrator):
8 | """
9 | This integrator is a meta integrator that can be used to wrap other integrators and add
10 | control variates to them. It can also be used to denoise the output of the wrapped integrator.
11 | """
    def __init__(self, props):
        super().__init__(props)
        # Post-process the output with the OptiX denoiser.
        self.denoise = props.get('denoise', False)
        # If set, return the plain noisy image (comparison purposes).
        self.force_baseline = props.get('force_baseline', False)
        self.integrator = props.get('integrator')
        # Integrator used for the differentiable passes; defaults to the primal one.
        self.adjoint_integrator = props.get('adjoint_integrator', self.integrator)
        # AOV wrapper around the inner integrator, used to obtain the albedo
        # and normal buffers required by the denoiser.
        self.aov_integrator = mi.load_dict({
            'type': 'aov',
            'aovs': 'albedo:albedo,normals:sh_normal',
            'integrator': self.integrator
        })
        # Placeholders for denoiser and sensor-to-world transform
        self.denoiser = None
        self.to_sensor = None

        # Variance-reduction method: 'baseline', 'cv_pss' or 'cv_ps'.
        self.method = props.get('method', 'baseline')
        # Update the CV statistics before combining (instead of after).
        self.pre_update = props.get('pre_update', False)
        # Number of iterations before the control weights start being updated.
        self.warmup = props.get('warmup', 1)
        # Statistics for the control variates
        self.beta1 = props.get('beta1', 0.9)
        self.beta2 = props.get('beta2', 0.999)
        self.reset()
34 |
    def reset(self):
        # Invalidate the running buffers; init_buffers() re-creates them
        # lazily on the next render.
        self.init = False
37 |
    def init_buffers(self):
        # Initialize the statistics estimators if not already done
        if self.init:
            return

        self.img = mi.TensorXf(0.0)  # running output image

        # Non-CV methods only need the image buffer.
        # NOTE(review): this early return leaves self.init False, so the
        # buffer is re-created on every call for non-CV methods — appears
        # harmless since render() overwrites self.img, but worth confirming.
        if 'cv' not in self.method:
            return

        self.stats = StatisticsEstimator(self.beta1, self.beta2)
        self.v_n = mi.TensorXf(0.0)  # running variance of the CV estimator
        self.w_s = mi.TensorXf(0.0)  # per-pixel control weights
        self.F = mi.TensorXf(0.0)    # rendering of the current scene state
        self.H = mi.TensorXf(0.0)    # correlated rendering of the previous state
        self.it = 0                  # iteration counter

        self.init = True
56 |
    def render(self: mi.SamplingIntegrator,
               scene: mi.Scene,
               sensor: int = 0, # WARN: this could also be an object, but we don't support it
               seed: int = 0,
               spp: int = 0,
               develop: bool = True,
               evaluate: bool = True) -> mi.TensorXf:
        """Render one frame, optionally applying a control variate and/or
        denoising.

        Behavior depends on ``self.method``:
          * 'cv_ps'  -- obtains the previous-state (H) and current-state (F)
            images in a single pass via ``render_twostates()``.
          * 'cv_pss' -- renders F normally, and at the end of the call
            re-renders H with ``seed + 1`` (the seed of the next iteration).
          * anything else -- plain rendering, no control variate.

        The recursive control-variate estimate is accumulated in
        ``self.img``; variances/covariance are tracked by ``self.stats``.
        """
        assert isinstance(sensor, int)
        self.init_buffers()

        if self.method == 'cv_ps':
            # Render old (H) and new (F) parameter states with shared samples
            self.H, self.F = self.integrator.render_twostates(scene, sensor, seed, spp)

        if self.denoise:
            # If not already done, compute the sensor-to-world transform
            # WARN: this does not work if the sensor is animated
            if self.to_sensor is None:
                self.to_sensor = scene.sensors()[sensor].world_transform().inverse()

            # Render with the AOV integrator to also get albedo and normals
            aovs = self.aov_integrator.render(scene, sensor, seed, spp, develop, evaluate)
            img_noisy = aovs[..., :3]
            albedo = aovs[..., 3:6]
            normals = aovs[..., 6:9]

            # Lazily initialize the denoiser at the rendered resolution
            if self.denoiser is None:
                self.denoiser = mi.OptixDenoiser(aovs.shape[:2], albedo=True, normals=True, temporal=False)

        elif self.method != 'cv_ps':
            img_noisy = self.integrator.render(scene, sensor, seed, spp, develop, evaluate)

        if self.method == 'cv_pss':
            # In primary-sample-space CV the current render is the F estimate
            self.F = img_noisy

        if 'cv' in self.method:
            # Compute the control weights and update statistics
            if self.it > 0 and self.pre_update:
                self.stats.update(self.H, self.F)

            if self.it > self.warmup:
                # Recursive variance estimate of the accumulated image, then
                # the control weight w_s = cov / var, with NaN/inf guarded to
                # zero and the result clamped to [0, 1]
                v_0, v_1, cov = self.stats.get()
                self.v_n = self.w_s**2 * (self.v_n + v_0) + v_1 - 2*self.w_s * cov
                dr.schedule(self.v_n)
                self.w_s = cov / (v_0 + self.v_n)
                dr.schedule(self.w_s)
                self.w_s = dr.select(dr.isnan(self.w_s) | dr.isinf(self.w_s), 0.0, self.w_s)
                self.w_s = dr.clamp(self.w_s, 0.0, 1.0)

            # Recursive control-variate update of the accumulated image
            self.img = self.w_s * (self.img - self.H) + self.F
            dr.schedule(self.img)

            if self.it > 0 and not self.pre_update:
                self.stats.update(self.H, self.F)

            self.it += 1
        else:
            self.img = img_noisy

        if self.method == 'cv_pss':
            # Re-render the current state with the seed of the next iteration
            self.H = self.integrator.render(scene, sensor, seed+1, spp, develop, evaluate)

        if self.denoise:
            # Denoise using the auxiliary albedo/normal buffers
            return self.denoiser(self.img, albedo=albedo, normals=normals, to_sensor=self.to_sensor)
        elif self.force_baseline:
            # Return the noisy image. This is only so we can look at images with the different methods, while still taking the same steps.
            if self.method != 'cv_ps':
                return img_noisy
            return self.integrator.render(scene, sensor, seed, spp, develop, evaluate)
        else:
            return self.img
131 |
132 | def render_forward(self: mi.SamplingIntegrator,
133 | scene: mi.Scene,
134 | params: Any,
135 | sensor: Union[int, mi.Sensor] = 0,
136 | seed: int = 0,
137 | spp: int = 0) -> mi.TensorXf:
138 | return self.adjoint_integrator.render_forward(scene, params, sensor, seed, spp)
139 |
140 | def render_backward(self: mi.SamplingIntegrator,
141 | scene: mi.Scene,
142 | params: Any,
143 | grad_in: mi.TensorXf,
144 | sensor: Union[int, mi.Sensor] = 0,
145 | seed: int = 0,
146 | spp: int = 0) -> None:
147 | return self.adjoint_integrator.render_backward(scene, params, grad_in, sensor, seed, spp)
148 |
--------------------------------------------------------------------------------
/plugins/twostatebsdf.py:
--------------------------------------------------------------------------------
1 | import mitsuba as mi
2 | import drjit as dr
3 |
class TwoStateBSDF(mi.BSDF):
    # Wraps two BSDF states: the regular mi.BSDF interface (sample/eval/pdf)
    # is delegated to `new`, while `eval_old` evaluates the `old` state so a
    # single path can be shaded under both parameter states at once.
    # Parameter updates are written to `incoming` and shifted new -> old in
    # parameters_changed().

    def __init__(self, props: mi.Properties):
        # NOTE(review): hardcodes the CUDA AD variant — presumably GPU-only
        mi.set_variant('cuda_ad_rgb')
        mi.BSDF.__init__(self, props)

        self.old = props['old']            # State of the previous iteration
        self.new = props['new']            # State of the current iteration
        self.incoming = props['incoming']  # Receives external parameter updates

        # props.set_plugin_name(props['bsdf_name'])
        # del props['bsdf_name']
        # pmgr = mi.PluginManager.instance()
        # self.old = pmgr.create_object(props, pmgr.get_plugin_class(props.plugin_name(), mi.variant()))
        # self.new = pmgr.create_object(props, pmgr.get_plugin_class(props.plugin_name(), mi.variant()))
        # self.incoming = pmgr.create_object(props, pmgr.get_plugin_class(props.plugin_name(), mi.variant()))

        # Mirror the component list and flags of the wrapped BSDF
        self.m_components = self.old.m_components
        self.m_flags = self.old.m_flags

    def sample(self, ctx, si, sample1, sample2, active):
        # Directions are always sampled from the new state
        return self.new.sample(ctx, si, sample1, sample2, active)

    def eval(self, ctx, si, wo, active):
        return self.new.eval(ctx, si, wo, active)

    def eval_old(self, ctx, si, wo, active):
        # Evaluate the previous parameter state for the same direction
        return self.old.eval(ctx, si, wo, active)

    def pdf(self, ctx, si, wo, active):
        return self.new.pdf(ctx, si, wo, active)

    def eval_pdf(self, ctx, si, wo, active):
        return self.new.eval_pdf(ctx, si, wo, active)

    def eval_diffuse_reflectance(self, si, active):
        return self.new.eval_diffuse_reflectance(si, active)

    def traverse(self, callback):
        # Only expose the `incoming` parameters to the outside; this is the
        # state that optimizers write to
        self.incoming.traverse(callback)

    def parameters_changed(self, keys):
        old_params = mi.traverse(self.old)
        new_params = mi.traverse(self.new)
        incoming_params = mi.traverse(self.incoming)

        # Shift states: old <- new, then new <- incoming. The read of
        # new_params[key] must happen before it is overwritten.
        for key in incoming_params.keys():
            old_params[key] = type(incoming_params[key])(new_params[key])
            new_params[key] = type(incoming_params[key])(incoming_params[key])

        old_params.update()
        new_params.update()

    def to_string(self):
        old_params = mi.traverse(self.old)
        new_params = mi.traverse(self.new)
        incoming_params = mi.traverse(self.incoming)
        keys = incoming_params.keys()

        # For debugging purposes: dump the Dr.Jit variable indices of all
        # parameters of the three wrapped states
        return ('Evolving[\n'
                ' old_indices=%s,\n'
                ' old_indices_ad=%s,\n'
                ' new_indices=%s,\n'
                ' new_indices_ad=%s,\n'
                ' incoming_indices=%s,\n'
                ' incoming_indices_ad=%s,\n'
                ']' % (
                    [old_params[key].index for key in keys if hasattr(incoming_params[key], 'index')],
                    [old_params[key].index_ad for key in keys if hasattr(incoming_params[key], 'index_ad')],
                    [new_params[key].index for key in keys if hasattr(incoming_params[key], 'index')],
                    [new_params[key].index_ad for key in keys if hasattr(incoming_params[key], 'index_ad')],
                    [incoming_params[key].index for key in keys if hasattr(incoming_params[key], 'index')],
                    [incoming_params[key].index_ad for key in keys if hasattr(incoming_params[key], 'index_ad')]
                ))
80 |
81 | mi.register_bsdf('twostate', TwoStateBSDF)
82 |
--------------------------------------------------------------------------------
/plugins/twostatemedium.py:
--------------------------------------------------------------------------------
1 | from re import M
2 | import mitsuba as mi
3 | import drjit as dr
4 |
class TwoStateMedium(mi.Medium):
    """Medium holding two parameter states ('old' and 'new').

    The regular mi.Medium interface is delegated to `new`, while dedicated
    methods (`sample_interaction_twostates`, `eval_tr_old`, ...) evaluate
    both states with shared samples so a path can be estimated under two
    consecutive optimization iterates at once. Parameter updates are
    written to `incoming` and shifted new -> old in parameters_changed().

    Fix over the previous revision: `majorant_resolution_factor` was
    defined twice with identical bodies; the duplicate (which silently
    shadowed the first definition) has been removed.
    """

    def __init__(self, props):
        # NOTE(review): hardcodes the CUDA AD variant — presumably GPU-only
        mi.set_variant('cuda_ad_rgb')
        mi.Medium.__init__(self, props)

        self.old = props["old"]            # State of the previous iteration
        self.new = props["new"]            # State of the current iteration
        self.incoming = props["incoming"]  # Receives external parameter updates

    def sample_interaction_twostates(self: mi.Medium,
                                     ray: mi.Ray3f,
                                     sample: float,
                                     channel: int,
                                     active: bool):
        """Sample a free-flight distance valid for both states at once.

        Uses the pointwise maximum of the two states' majorants so the same
        sampled distance serves both. Returns ``(mei_old, mei_new)``, two
        interaction records sharing `t` and the combined extinction but
        carrying per-state scattering coefficients.
        """
        # Initialize basic medium interaction fields
        mei_new, mint, maxt, active = self.new.prepare_interaction_sampling(ray, active)

        desired_tau = -dr.log(1 - sample)
        if self.new.majorant_grid() is not None:
            # --- Spatially-varying majorant (supergrid).
            # 1. Prepare for DDA traversal
            # Adapted from: https://github.com/francisengelmann/fast_voxel_traversal/blob/9664f0bde1943e69dbd1942f95efc31901fbbd42/main.cpp
            dda_t, dda_tmax, dda_tdelta = self.new.prepare_dda_traversal(
                self.new.majorant_grid(), ray, mint, maxt, active)

            # 2. Traverse the medium with DDA until we reach the desired
            # optical depth.
            active_dda = mi.Bool(active)
            reached = mi.Bool(False)
            tau_acc = mi.Float(0.0)
            dda_loop = mi.Loop(name=f"TwoStateMedium::sample_interaction_dda",
                               state=lambda: (active_dda, reached, dda_t, dda_tmax, tau_acc, mei_new))
            while dda_loop(active_dda):
                # Figure out which axis we hit first.
                # `t_next` is the ray's `t` parameter when hitting that axis.
                t_next = dr.min(dda_tmax)
                got_assigned = mi.Bool(False)
                tmax_update = dr.zeros(mi.Vector3f)
                for k in range(3):
                    active_k = dr.eq(dda_tmax[k], t_next)
                    tmax_update[k] = dr.select(~got_assigned & active_k, dda_tdelta[k], 0)
                    got_assigned |= active_k

                # Lookup and accumulate majorant in current cell. The combined
                # majorant is the per-cell max of the two states' grids.
                mei_new.t[active_dda] = 0.5 * (dda_t + t_next)
                mei_new.p[active_dda] = ray(mei_new.t)
                majorant = dr.maximum(self.old.majorant_grid().eval_1(mei_new, active_dda),
                                      self.new.majorant_grid().eval_1(mei_new, active_dda))
                tau_next = tau_acc + majorant * (t_next - dda_t)

                # For rays that will stop within this cell, figure out
                # the precise `t` parameter where `desired_tau` is reached.
                t_precise = dda_t + (desired_tau - tau_acc) / majorant
                reached |= active_dda & (majorant > 0) & (t_precise < maxt) & (tau_next >= desired_tau)
                dda_t[active_dda] = dr.select(reached, t_precise, t_next)

                # Prepare for next iteration
                active_dda &= ~reached & (t_next < maxt)
                dda_tmax[active_dda] = dda_tmax + tmax_update
                tau_acc[active_dda] = tau_next

            # Adopt the stopping location, making sure to convert to the main
            # ray's parametrization.
            sampled_t = dr.select(reached, dda_t, dr.inf)
        else:
            # --- A single majorant for the whole volume.
            majorant_old = self.old.get_majorant(mei_new, active)
            majorant_new = self.new.get_majorant(mei_new, active)
            combined_extinction = dr.maximum(majorant_old, majorant_new)
            m = mi.ad.integrators.prbvolpath.index_spectrum(combined_extinction, channel)

            sampled_t = mint + (desired_tau / m)

        valid_mi = active & (sampled_t <= maxt)

        if self.new.majorant_grid() is not None:
            # Otherwise it was already looked up above
            combined_extinction = dr.maximum(self.new.majorant_grid().eval_1(mei_new, valid_mi),
                                             self.old.majorant_grid().eval_1(mei_new, valid_mi))
            # mei.combined_extinction = dr.detach(m_majorant_grid.eval_1(mei, valid_mei))

        mei_new.t = dr.select(valid_mi, sampled_t, dr.inf)
        mei_new.p = ray(sampled_t)
        mei_new.medium = mi.MediumPtr(self)
        mei_new.mint = mint

        sigma_s_old, _, sigma_t_old = self.old.get_scattering_coefficients(mei_new, valid_mi)
        sigma_s_new, _, sigma_t_new = self.new.get_scattering_coefficients(mei_new, valid_mi)
        # Adjust sigma_n to the true (combined) majorant
        sigma_n_old = combined_extinction - sigma_t_old
        sigma_n_new = combined_extinction - sigma_t_new

        # Duplicate the interaction record for the old state
        mei_old = mi.MediumInteraction3f(mei_new)

        mei_new.combined_extinction = combined_extinction
        mei_old.combined_extinction = combined_extinction

        mei_new.sigma_s, mei_new.sigma_n, mei_new.sigma_t = sigma_s_new, sigma_n_new, sigma_t_new
        mei_old.sigma_s, mei_old.sigma_n, mei_old.sigma_t = sigma_s_old, sigma_n_old, sigma_t_old

        return mei_old, mei_new

    def eval_tr_old(self, mei, si, active):
        """Transmittance of the old state's majorant up to min(mei.t, si.t)."""
        t = dr.minimum(mei.t, si.t) - mei.mint
        return dr.exp(-t * self.old.get_majorant(mei, active))

    def eval_tr_new(self, mei, si, active):
        """Transmittance of the new state's majorant up to min(mei.t, si.t)."""
        t = dr.minimum(mei.t, si.t) - mei.mint
        return dr.exp(-t * self.new.get_majorant(mei, active))

    def eval_tr_and_pdf(self, mei, si, active):
        return self.new.eval_tr_and_pdf(mei, si, active)
        # t = dr.minimum(mei.t, si.t) - mei.mint
        # return dr.exp(-t * self.new.get_majorant(mei, active)), 1.0

    def intersect_aabb(self, ray):
        return self.new.intersect_aabb(ray)

    def get_majorant(self, mei, active):
        # Here we need to be very careful. We need to make sure that the majorant is always the new one in the adjoint pass,
        # otherwise the adjoint pass will not be able to compute the correct adjoint values for the new medium.
        # For the primal pass, it should be the maximum of the two majorants.
        # return dr.maximum(self.old.get_majorant(mei, active), self.new.get_majorant(mei, active))
        return self.new.get_majorant(mei, active)

    def get_scattering_coefficients(self, mei, active):
        return self.new.get_scattering_coefficients(mei, active)

    def sample_interaction(self, ray, sample, channel, active):
        return self.new.sample_interaction(ray, sample, channel, active)

    def sample_interaction_real(self, ray, sampler, channel, active):
        return self.new.sample_interaction_real(ray, sampler, channel, active)

    def sample_interaction_drt(self, ray, sampler, channel, active):
        return self.new.sample_interaction_drt(ray, sampler, channel, active)

    def sample_interaction_drrt(self, ray, sampler, channel, active):
        return self.new.sample_interaction_drrt(ray, sampler, channel, active)

    def prepare_interaction_sampling(self, ray, active):
        return self.new.prepare_interaction_sampling(ray, active)

    def prepare_dda_traversal(self, majorant_grid, ray, mint, maxt, active = True):
        return self.new.prepare_dda_traversal(majorant_grid, ray, mint, maxt, active)

    def phase_function(self):
        return self.new.phase_function()

    def old_phase_function(self):
        # Phase function of the previous parameter state
        return self.old.phase_function()

    def use_emitter_sampling(self):
        return self.new.use_emitter_sampling()

    def has_spectral_extinction(self):
        return self.new.has_spectral_extinction()

    def is_homogeneous(self):
        return self.new.is_homogeneous()

    def majorant_grid(self):
        return self.new.majorant_grid()

    def majorant_resolution_factor(self):
        return self.new.majorant_resolution_factor()

    def set_majorant_resolution_factor(self, factor):
        # Keep both states consistent and rebuild their majorant structures
        self.old.set_majorant_resolution_factor(factor)
        self.old.parameters_changed()
        self.new.set_majorant_resolution_factor(factor)
        self.new.parameters_changed()

    def has_majorant_grid(self):
        return self.new.has_majorant_grid()

    def traverse(self, callback):
        # Only expose the `incoming` parameters to the outside; this is the
        # state that optimizers write to
        self.incoming.traverse(callback)

    def parameters_changed(self, keys):
        old_params = mi.traverse(self.old)
        new_params = mi.traverse(self.new)
        incoming_params = mi.traverse(self.incoming)

        # Shift states: old <- new, then new <- incoming. The read of
        # new_params[key] must happen before it is overwritten.
        for key in incoming_params.keys():
            old_params[key] = type(incoming_params[key])(new_params[key])
            new_params[key] = type(incoming_params[key])(incoming_params[key])

        old_params.update()
        new_params.update()

    def get_albedo(self, mei, active):
        return self.new.get_albedo(mei, active)

    def to_string(self):
        old_params = mi.traverse(self.old)
        new_params = mi.traverse(self.new)
        incoming_params = mi.traverse(self.incoming)
        keys = incoming_params.keys()
        # For debugging purposes
        return ('TwoStateMedium[\n'
                ' old_indices=%s,\n'
                ' old_indices_ad=%s,\n'
                ' new_indices=%s,\n'
                ' new_indices_ad=%s,\n'
                ' incoming_indices=%s,\n'
                ' incoming_indices_ad=%s,\n'
                ']' % (
                    [old_params[key].index for key in keys if hasattr(incoming_params[key], 'index')],
                    [old_params[key].index_ad for key in keys if hasattr(incoming_params[key], 'index_ad')],
                    [new_params[key].index for key in keys if hasattr(incoming_params[key], 'index')],
                    [new_params[key].index_ad for key in keys if hasattr(incoming_params[key], 'index_ad')],
                    [incoming_params[key].index for key in keys if hasattr(incoming_params[key], 'index')],
                    [incoming_params[key].index_ad for key in keys if hasattr(incoming_params[key], 'index_ad')]
                ))
226 |
--------------------------------------------------------------------------------
/plugins/twostatepath.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations as __annotations__ # Delayed parsing of type annotations
2 | import mitsuba as mi
3 | import drjit as dr
4 | import gc
5 |
6 | mis_weight = mi.ad.common.mis_weight
7 | from .twostatebsdf import TwoStateBSDF
8 | from .twostatemedium import TwoStateMedium
9 |
class _TwoStateRenderOp(dr.CustomOp):
    """
    This class is an implementation detail of the render() function. It
    realizes a CustomOp that provides evaluation, and forward/reverse-mode
    differentiation callbacks that will be invoked as needed (e.g. when a
    rendering operation is encountered by an AD graph traversal).
    """

    def __init__(self) -> None:
        super().__init__()
        # Remember the active variant so the AD callbacks can restore it
        self.variant = mi.variant()

    def eval(self, integrator, scene, sensor, params, seed, spp):
        # Primal evaluation. `seed` and `spp` are (primal, gradient) pairs:
        # entry [0] is consumed here, entry [1] in forward()/backward().
        # The arguments are stashed for use by the AD callbacks.
        self.scene = scene
        self.sensor = sensor
        self.params = params
        self.integrator = integrator
        self.seed = seed
        self.spp = spp

        with dr.suspend_grad():
            # Returns the (old, new) image pair
            return self.integrator.render_twostates(
                scene=self.scene,
                sensor=sensor,
                seed=seed[0],
                spp=spp[0]
            )

    def forward(self):
        # Forward-mode AD: propagate parameter derivatives to the output
        mi.set_variant(self.variant)
        if not isinstance(self.params, mi.SceneParameters):
            raise Exception('An instance of mi.SceneParameter containing the '
                            'scene parameter to be differentiated should be '
                            'provided to mi.render() if forward derivatives are '
                            'desired!')
        self.set_grad_out(0,
            self.integrator.render_forward(self.scene, self.params, self.sensor,
                                           self.seed[1], self.spp[1]))

    def backward(self):
        # Reverse-mode AD. grad_out() matches the (old, new) pair returned by
        # eval(); only the new-state image gradient ([1]) is propagated.
        mi.set_variant(self.variant)
        if not isinstance(self.params, mi.SceneParameters):
            raise Exception('An instance of mi.SceneParameter containing the '
                            'scene parameter to be differentiated should be '
                            'provided to mi.render() if backward derivatives are '
                            'desired!')
        self.integrator.render_backward(self.scene, self.params, self.grad_out()[1],
                                        self.sensor, self.seed[1], self.spp[1])

    def name(self):
        # Label used by Dr.Jit when reporting on this custom operation
        return "TwoStateRenderOp"
61 |
def two_state_render(scene: mi.Scene,
                     integrator,
                     params: Any = None,
                     sensor: Union[int, mi.Sensor] = 0,
                     seed: int = 0,
                     seed_grad: int = 0,
                     spp: int = 0,
                     spp_grad: int = 0) -> mi.TensorXf:
    """Differentiable entry point around ``render_twostates()``.

    Validates the arguments, resolves the sensor, derives gradient-phase
    seed/spp defaults, and dispatches to the ``_TwoStateRenderOp`` custom op
    so that AD traversals can trigger the forward/backward callbacks.
    """
    if params is not None and not isinstance(params, mi.SceneParameters):
        raise Exception('params should be an instance of mi.SceneParameter!')

    assert isinstance(scene, mi.Scene)

    if isinstance(sensor, int):
        sensors = scene.sensors()
        if len(sensors) == 0:
            raise Exception('No sensor specified! Add a sensor in the scene '
                            'description or provide a sensor directly as argument.')
        sensor = sensors[sensor]

    assert isinstance(sensor, mi.Sensor)

    # The gradient phase reuses the primal sample count unless overridden
    if spp_grad == 0:
        spp_grad = spp

    if seed_grad == 0:
        # Compute a seed that de-correlates the primal and differential phase
        seed_grad = mi.sample_tea_32(seed, 1)[0]
    elif seed_grad == seed:
        raise Exception('The primal and differential seed should be different '
                        'to ensure unbiased gradient computation!')

    return dr.custom(_TwoStateRenderOp, integrator, scene, sensor, params,
                     (seed, seed_grad), (spp, spp_grad))
96 |
class TwoStatePRBIntegrator(mi.ad.integrators.prb.PRBIntegrator):
    r"""
    Two-state Path Replay Backpropagation (PRB) integrator.

    Extends the standard :monosp:`prb` integrator with
    ``render_twostates()``, which renders the scene under both the 'old'
    and 'new' parameter states of every :class:`TwoStateBSDF` in a single
    pass. Both images share all random decisions (directions are sampled
    from the new state; the old-state contribution is reweighted), which
    makes them strongly correlated.

    .. pluginparameters::

        * - max_depth
          - |int|
          - Specifies the longest path depth in the generated output image (where -1
            corresponds to :math:`\infty`). A value of 1 will only render directly
            visible light sources. 2 will lead to single-bounce (direct-only)
            illumination, and so on. (Default: 6)

        * - rr_depth
          - |int|
          - Specifies the path depth, at which the implementation will begin to use
            the *russian roulette* path termination criterion. (Default: 5)

        * - xyz
          - |bool|
          - If true, the returned radiance values are converted with
            ``mi.srgb_to_xyz``. (Default: false)

    Like the base PRB integrator, this plugin uses emitter sampling,
    Russian roulette and detached sampling, and is not reparameterized
    (it returns biased gradients for geometric parameters such as vertex
    positions).
    """
    def __init__(self, props):
        super().__init__(props)
        # Optionally convert the returned radiance (see sample_twostates)
        self.xyz = props.get("xyz", False)
        # Scene validation in check_scene() only runs once
        self.is_checked = False

    def check_scene(self, scene):
        """One-time scene validation: require at least one TwoStateBSDF and
        reject any TwoStateMedium (unsupported by this surface variant)."""
        if self.is_checked:
            return

        self.is_checked = True
        has_twostate = False
        for shape in scene.shapes():
            has_twostate = has_twostate or isinstance(shape.bsdf(), TwoStateBSDF)

            for medium in [shape.interior_medium(), shape.exterior_medium()]:
                if isinstance(medium, TwoStateMedium):
                    raise ValueError("TwoStateMedium is not supported in two state prb integrator!")

        if not has_twostate:
            raise RuntimeError("No TwoStateBSDF found in the scene!")

    def develop(self, sensor, ray, weight, pos, spp, L, alpha):
        """Splat the per-sample radiance ``L`` into a fresh image block and
        develop it into an image tensor using the sensor's film."""
        # Prepare an ImageBlock as specified by the film
        block = sensor.film().create_block()

        # Only use the coalescing feature when rendering enough samples
        block.set_coalesce(block.coalesce() and spp >= 4)
        block.put(pos, ray.wavelengths, L * weight, alpha)
        # Perform the weight division and return an image tensor
        sensor.film().put_block(block)
        return sensor.film().develop()

    def render_twostates(self, scene, sensor=0, seed=0, spp=1):
        """Render the old- and new-state images with shared samples.

        Returns the pair ``(image_old, image_new)``; both are also cached
        as ``self.primal_image_old`` / ``self.primal_image_new``.
        """
        # Make sure the scene has at least one twostate bsdf
        self.check_scene(scene)

        if isinstance(sensor, int):
            sensor = scene.sensors()[sensor]

        # Disable derivatives in all of the following
        with dr.suspend_grad():
            # Prepare the film and sample generator for rendering
            sampler, spp = self.prepare(
                sensor=sensor,
                seed=seed,
                spp=spp,
                aovs=self.aovs()
            )

            # Generate a set of rays starting at the sensor
            ray, weight, pos, _ = self.sample_rays(scene, sensor, sampler)

            # Launch the Monte Carlo sampling process in primal mode
            L_old, L_new, valid = self.sample_twostates(
                scene=scene,
                sampler=sampler,
                ray=ray,
                depth=mi.UInt32(0),
                active=mi.Bool(True)
            )

            # Accumulate into the image block
            alpha = dr.select(valid, mi.Float(1), mi.Float(0))

            dr.schedule(L_old, L_new, alpha)

            self.primal_image_old = self.develop(sensor, ray, weight, pos, spp, L_old, alpha)
            # HACK: Reset the film so the second develop() starts from scratch
            sensor.film().prepare([])
            self.primal_image_new = self.develop(sensor, ray, weight, pos, spp, L_new, alpha)
            # self.primal_image_old = self.primal_image_new

            # Explicitly delete any remaining unused variables
            del sampler, ray, weight, pos, L_old, L_new, valid, alpha
            gc.collect()

        return self.primal_image_old, self.primal_image_new

    def sample_twostates(self,
                         scene: mi.Scene,
                         sampler: mi.Sampler,
                         ray: mi.Ray3f,
                         active: mi.Bool,
                         **kwargs # Absorbs unused arguments
    ) -> Tuple[mi.Spectrum,
               mi.Spectrum, mi.Bool]:
        """
        Trace paths sampled from the new BSDF state while accumulating
        radiance under both states simultaneously; returns
        ``(L_old, L_new, valid_ray)``. At every bounce the old-state
        throughput is reweighted by the old BSDF value divided by the
        new-state sampling pdf (delta lobes keep the new-state weight).
        See ``ADIntegrator.sample()`` for the role of the parameters.
        """

        # Standard BSDF evaluation context for path tracing
        bsdf_ctx = mi.BSDFContext()

        # --------------------- Configure loop state ----------------------

        # Copy input arguments to avoid mutating the caller's state
        ray = mi.Ray3f(ray)
        depth = mi.UInt32(0)                          # Depth of current vertex
        L_old = mi.Spectrum(0)                        # Old radiance accumulator
        L_new = mi.Spectrum(0)                        # New radiance accumulator
        β_old = mi.Spectrum(1)                        # Old path throughput weight
        β_new = mi.Spectrum(1)                        # New path throughput weight
        η = mi.Float(1)                               # Index of refraction
        active = mi.Bool(active)                      # Active SIMD lanes

        # Variables caching information from the previous bounce
        prev_si = dr.zeros(mi.SurfaceInteraction3f)
        prev_bsdf_pdf = mi.Float(1.0)
        prev_bsdf_delta = mi.Bool(True)

        # Record the following loop in its entirety
        # NOTE(review): the loop name contains an unfilled '%s' placeholder
        loop = mi.Loop(name="Path Replay Backpropagation (%s)",
                       state=lambda: (sampler, ray, depth, L_old, L_new, β_old, β_new, η, active,
                                      prev_si, prev_bsdf_pdf, prev_bsdf_delta))

        # Specify the max. number of loop iterations (this can help avoid
        # costly synchronization when wavefront-style loops are generated)
        loop.set_max_iterations(self.max_depth)

        while loop(active):
            # Compute a surface interaction that tracks derivatives arising
            # from differentiable shape parameters (position, normals, etc.)
            # In primal mode, this is just an ordinary ray tracing operation.

            si = scene.ray_intersect(ray,
                                     ray_flags=mi.RayFlags.All,
                                     coherent=dr.eq(depth, 0))

            # Get the BSDF, potentially computes texture-space differentials
            bsdf = si.bsdf(ray)

            # ---------------------- Direct emission ----------------------

            # Compute MIS weight for emitter sample from previous bounce
            ds = mi.DirectionSample3f(scene, si=si, ref=prev_si)

            mis = mis_weight(
                prev_bsdf_pdf,
                scene.pdf_emitter_direction(prev_si, ds, ~prev_bsdf_delta)
            )

            # Both states share the emitter contribution, scaled by their
            # respective throughputs
            L_old += β_old * mis * ds.emitter.eval(si)
            L_new += β_new * mis * ds.emitter.eval(si)

            # ---------------------- Emitter sampling ----------------------

            # Should we continue tracing to reach one more vertex?
            active_next = (depth + 1 < self.max_depth) & si.is_valid()

            # Is emitter sampling even possible on the current vertex?
            active_em = active_next & mi.has_flag(bsdf.flags(), mi.BSDFFlags.Smooth)

            # If so, randomly sample an emitter without derivative tracking.
            ds, em_weight = scene.sample_emitter_direction(
                si, sampler.next_2d(), True, active_em)
            active_em &= dr.neq(ds.pdf, 0.0)

            # Evaluate BSDF * cos(theta) under both states
            wo = si.to_local(ds.d)
            bsdf_value_em_new, bsdf_pdf_em = bsdf.eval_pdf(bsdf_ctx, si, wo, active_em)
            bsdf_value_em_old = bsdf.eval_old(bsdf_ctx, si, wo, active_em)
            mis_em = dr.select(ds.delta, 1, mis_weight(ds.pdf, bsdf_pdf_em))

            L_old += β_old * mis_em * bsdf_value_em_old * em_weight
            L_new += β_new * mis_em * bsdf_value_em_new * em_weight

            # ------------------ Detached BSDF sampling -------------------

            # The direction is sampled from the new state; the old state is
            # only re-evaluated along the sampled direction
            bsdf_sample, bsdf_weight = bsdf.sample(bsdf_ctx, si,
                                                   sampler.next_1d(),
                                                   sampler.next_2d(),
                                                   active_next)
            bsdf_value_old = bsdf.eval_old(bsdf_ctx, si, bsdf_sample.wo, active_next)

            prev_bsdf_delta = mi.has_flag(bsdf_sample.sampled_type, mi.BSDFFlags.Delta)

            # ---- Update loop variables based on current interaction -----

            ray = si.spawn_ray(si.to_world(bsdf_sample.wo))
            η *= bsdf_sample.eta
            bsdf_weight_old = dr.select(bsdf_sample.pdf > 0, bsdf_value_old / bsdf_sample.pdf, 0)

            # If the BSDF is delta, use the new value to avoid having a 0 throughput
            # Note that this means we can't differentiate w.r.t. delta BSDFs (which we usually don't want to do anyway)
            β_old *= dr.select(prev_bsdf_delta, bsdf_weight, bsdf_weight_old)
            β_new *= bsdf_weight

            # Information about the current vertex needed by the next iteration

            prev_si = dr.detach(si, True)
            prev_bsdf_pdf = bsdf_sample.pdf

            # -------------------- Stopping criterion ---------------------

            # Don't run another iteration if the throughput has reached zero
            β_max = dr.max(β_new)
            active_next &= dr.neq(β_max, 0)

            # Russian roulette stopping probability (must cancel out ior^2
            # to obtain unitless throughput, enforces a minimum probability)
            rr_prob = dr.minimum(β_max * η**2, .95)

            # Apply only further along the path, since this introduces variance
            rr_active = depth >= self.rr_depth
            β_old[rr_active] *= dr.rcp(rr_prob)
            β_new[rr_active] *= dr.rcp(rr_prob)
            rr_continue = sampler.next_1d() < rr_prob
            active_next &= ~rr_active | rr_continue

            depth[si.is_valid()] += 1
            active = active_next

        valid_ray = dr.neq(depth, 0) # Ray validity flag for alpha blending
        if self.xyz:
            return mi.srgb_to_xyz(L_old), mi.srgb_to_xyz(L_new), valid_ray
        else:
            return L_old, L_new, valid_ray
--------------------------------------------------------------------------------
/plugins/twostatevolpath.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations as __annotations__ # Delayed parsing of type annotations
2 | import mitsuba as mi
3 | import drjit as dr
4 | import gc
5 | from .twostatemedium import TwoStateMedium
6 | from .twostatebsdf import TwoStateBSDF
7 |
8 | mis_weight = mi.ad.common.mis_weight
9 | index_spectrum = mi.ad.integrators.prbvolpath.index_spectrum
10 |
11 | class TwoStatePRBVolpathIntegrator(mi.ad.integrators.prbvolpath.PRBVolpathIntegrator):
12 | r"""
13 | .. _integrator-prb:
14 |
15 | Path Replay Backpropagation (:monosp:`prb`)
16 | -------------------------------------------
17 |
18 | .. pluginparameters::
19 |
20 | * - max_depth
21 | - |int|
22 | - Specifies the longest path depth in the generated output image (where -1
23 | corresponds to :math:`\infty`). A value of 1 will only render directly
24 | visible light sources. 2 will lead to single-bounce (direct-only)
25 | illumination, and so on. (Default: 6)
26 |
27 | * - rr_depth
28 | - |int|
29 | - Specifies the path depth, at which the implementation will begin to use
30 | the *russian roulette* path termination criterion. For example, if set to
31 | 1, then path generation many randomly cease after encountering directly
32 | visible surfaces. (Default: 5)
33 |
34 | This plugin implements a basic Path Replay Backpropagation (PRB) integrator
35 | with the following properties:
36 |
37 | - Emitter sampling (a.k.a. next event estimation).
38 |
39 | - Russian Roulette stopping criterion.
40 |
41 | - No reparameterization. This means that the integrator cannot be used for
42 | shape optimization (it will return incorrect/biased gradients for
43 | geometric parameters like vertex positions.)
44 |
45 | - Detached sampling. This means that the properties of ideal specular
46 | objects (e.g., the IOR of a glass vase) cannot be optimized.
47 |
48 | See ``prb_basic.py`` for an even more reduced implementation that removes
49 | the first two features.
50 |
51 | See the papers :cite:`Vicini2021` and :cite:`Zeltner2021MonteCarlo`
52 | for details on PRB, attached/detached sampling, and reparameterizations.
53 |
54 | .. tabs::
55 |
56 | .. code-tab:: python
57 |
58 | 'type': 'prb',
59 | 'max_depth': 8
60 | """
61 |
62 | def __init__(self, props):
63 | super().__init__(props)
64 | self.xyz = props.get("xyz", False)
65 | self.is_checked = False
66 |
67 | def check_scene(self, scene):
68 | if self.is_checked:
69 | return
70 |
71 | self.is_checked = True
72 | has_twostate = False
73 | for shape in scene.shapes():
74 | for medium in [shape.interior_medium(), shape.exterior_medium()]:
75 | has_twostate = has_twostate or isinstance(medium, TwoStateMedium)
76 |
77 | if not has_twostate:
78 | raise RuntimeError("No TwoStateMedium found in the scene!")
79 |
80 | def develop(self, sensor, ray, weight, pos, spp, L, alpha):
81 | # Prepare an ImageBlock as specified by the film
82 | block = sensor.film().create_block()
83 |
84 | # Only use the coalescing feature when rendering enough samples
85 | block.set_coalesce(block.coalesce() and spp >= 4)
86 | block.put(pos, ray.wavelengths, L * weight, alpha)
87 | # Perform the weight division and return an image tensor
88 | sensor.film().put_block(block)
89 | return sensor.film().develop()
90 |
    def render_twostates(self, scene, sensor=0, seed=0, spp=1):
        """Render the scene once, producing *two* correlated images: one for
        the 'old' state and one for the 'new' state of the two-state media.

        Both images are estimated from the exact same ray/sampler stream (a
        single call to `sample_twostates`), which is what makes them usable
        as correlated estimates. The images are also cached on the instance
        as `self.primal_image_old` / `self.primal_image_new`.

        :param scene: Scene to render. Must contain at least one TwoStateMedium.
        :param sensor: Sensor object or index into ``scene.sensors()``.
        :param seed: Seed for the sample generator.
        :param spp: Samples per pixel (may be adjusted by `prepare`).
        :return: Tuple ``(image_old, image_new)``.
        """
        # Make sure the scene has at least one twostate medium
        self.check_scene(scene)

        if isinstance(sensor, int):
            sensor = scene.sensors()[sensor]

        # Always handle null scattering
        self.prepare_scene(scene) # Make sure the flags are properly set first

        # Disable derivatives in all of the following
        with dr.suspend_grad():
            # Prepare the film and sample generator for rendering
            sampler, spp = self.prepare(
                sensor=sensor,
                seed=seed,
                spp=spp,
                aovs=self.aovs()
            )

            # Generate a set of rays starting at the sensor
            ray, weight, pos, _ = self.sample_rays(scene, sensor, sampler)

            # Launch the Monte Carlo sampling process in primal mode.
            # Both states are estimated along the same paths.
            L_old, L_new, valid = self.sample_twostates(
                scene=scene,
                sampler=sampler,
                ray=ray,
                depth=mi.UInt32(0),
                active=mi.Bool(True)
            )

            # Accumulate into the image block
            alpha = dr.select(valid, mi.Float(1), mi.Float(0))

            dr.schedule(L_old, L_new, alpha)

            self.primal_image_old = self.develop(sensor, ray, weight, pos, spp, L_old, alpha)
            # HACK: Reset the film so it can be reused for the second develop pass
            sensor.film().prepare([])
            self.primal_image_new = self.develop(sensor, ray, weight, pos, spp, L_new, alpha)

            # Explicitly delete any remaining unused variables
            del sampler, ray, weight, pos, L_old, L_new, valid, alpha
            gc.collect()

            return self.primal_image_old, self.primal_image_new
139 |
140 |
    def sample_twostates(self,
                         scene: mi.Scene,
                         sampler: mi.Sampler,
                         ray: mi.Ray3f,
                         active: mi.Bool,
                         **kwargs # Absorbs unused arguments
    ) -> Tuple[mi.Spectrum, mi.Spectrum, mi.Bool]:
        """Jointly estimate the radiance of the 'old' and 'new' scene states
        along a *shared* set of paths.

        A single path per lane is traced with one shared random-number
        sequence; per-state throughputs (`throughput_old` / `throughput_new`)
        re-weight every sampling decision so that both states are estimated
        from the same events. Sampling probabilities (scatter vs. null, phase
        direction, BSDF direction) are taken from the new state or from an
        average of both states, and the other state is corrected by the
        corresponding probability ratio.

        Returns ``(L_old, L_new, valid_ray)``: the two radiance estimates
        (converted to XYZ if ``self.xyz`` is set) and the mask of rays that
        encountered a scattering event.
        """

        ray = mi.Ray3f(ray)
        depth = mi.UInt32(0)  # Depth of current vertex
        L_old = mi.Spectrum(0)  # Old radiance accumulator
        L_new = mi.Spectrum(0)  # New radiance accumulator
        throughput_old = mi.Spectrum(1)  # Path throughput weight
        throughput_new = mi.Spectrum(1)  # Path throughput weight
        η = mi.Float(1)  # Index of refraction
        active = mi.Bool(active)

        si = dr.zeros(mi.SurfaceInteraction3f)
        needs_intersection = mi.Bool(True)
        last_scatter_event = dr.zeros(mi.Interaction3f)
        last_scatter_direction_pdf = mi.Float(1.0)

        medium = dr.zeros(mi.MediumPtr)

        channel = 0
        depth = mi.UInt32(0)
        valid_ray = mi.Bool(False)
        specular_chain = mi.Bool(True)

        if mi.is_rgb: # Sample a color channel to sample free-flight distances
            n_channels = dr.size_v(mi.Spectrum)
            channel = dr.minimum(n_channels * sampler.next_1d(active), n_channels - 1)

        # Symbolic (wavefront) loop over path vertices; all loop-carried
        # state must be registered here.
        loop = mi.Loop(name=f"TwoState Path Replay Backpropagation",
                       state=lambda: (sampler, active, depth, ray, medium, si,
                                      throughput_old, throughput_new, L_old, L_new, needs_intersection,
                                      last_scatter_event, specular_chain, η,
                                      last_scatter_direction_pdf, valid_ray))
        while loop(active):
            # Russian Roulette: survival prob. uses the max over both states
            # so that neither state is terminated prematurely.
            active &= (dr.any(dr.neq(throughput_new, 0.0)) | dr.any(dr.neq(throughput_old, 0.0)))
            q = dr.minimum(dr.max(dr.maximum(throughput_new, throughput_old)) * dr.sqr(η), 0.99)
            perform_rr = (depth > self.rr_depth)
            active &= (sampler.next_1d(active) < q) | ~perform_rr

            throughput_new[perform_rr] = throughput_new * dr.rcp(q)
            throughput_old[perform_rr] = throughput_old * dr.rcp(q)

            active_medium = active & dr.neq(medium, None) # TODO this is not necessary
            active_surface = active & ~active_medium

            # Handle medium sampling and potential medium escape
            u = sampler.next_1d(active_medium)
            # Sample an interaction according to the maximum of the old&new majorants
            mei_old, mei_new = medium.sample_interaction_twostates(ray, u, channel, active_medium)
            mei_new.t = dr.detach(mei_new.t)

            ray.maxt[active_medium & medium.is_homogeneous() & mei_new.is_valid()] = mei_new.t
            intersect = needs_intersection & active_medium
            si_new = scene.ray_intersect(ray, intersect)
            si[intersect] = si_new

            needs_intersection &= ~active_medium
            # Interactions beyond the nearest surface are invalid.
            mei_new.t[active_medium & (si.t < mei_new.t)] = dr.inf
            mei_old.t[active_medium & (si.t < mei_old.t)] = dr.inf

            # Evaluate transmittance. Is only used for homogeneous media
            if not self.handle_null_scattering:
                tr_new, free_flight_pdf = medium.eval_tr_and_pdf(mei_new, si, active_medium)
                tr_pdf = index_spectrum(free_flight_pdf, channel)

            weight_new = mi.Spectrum(1.0)
            weight_old = mi.Spectrum(1.0)

            escaped_medium = active_medium & ~mei_new.is_valid()
            active_medium &= mei_new.is_valid()
            # Handle null and real scatter events
            if self.handle_null_scattering:
                # Scattering probability is the average of the two states, since we need to be sure we sample null interactions
                # with non-zero probability to ensure unbiasedness
                majorant = index_spectrum(mei_new.combined_extinction, channel) # Both medium interactions have the same majorant
                scatter_prob_new = index_spectrum(mei_new.sigma_t, channel) / majorant
                scatter_prob_old = index_spectrum(mei_old.sigma_t, channel) / majorant
                scatter_prob = dr.select(dr.neq(majorant, 0.0), (scatter_prob_new + scatter_prob_old) * 0.5, 0.0)

                act_null_scatter = (sampler.next_1d(active_medium) >= scatter_prob) & active_medium
                act_medium_scatter = ~act_null_scatter & active_medium

                # Per-state correction: each state's null-scatter weight over
                # the average of the two states' null-scatter weights.
                weight_new[act_null_scatter] *= 2 * mei_new.sigma_n / (mei_new.sigma_n + mei_old.sigma_n)
                weight_old[act_null_scatter] *= 2 * mei_old.sigma_n / (mei_new.sigma_n + mei_old.sigma_n)
            else:
                scatter_prob = mi.Float(1.0)
                t = dr.minimum(mei_new.t, si.t) - mei_new.mint
                tr_new_true = dr.exp(-t * mei_new.sigma_t)
                tr_old_true = dr.exp(-t * mei_old.sigma_t)
                ratio_new = dr.select(tr_pdf > 0.0, tr_new_true / dr.detach(tr_pdf), 0.0)
                ratio_old = dr.select(tr_pdf > 0.0, tr_old_true / dr.detach(tr_pdf), 0.0)

                # Rays that exit the medium do not get their throughput modified by the weight, as it usually cancels out
                # Here we need to do it since the pdf is not the usual one
                throughput_new[escaped_medium] *= ratio_new
                throughput_old[escaped_medium] *= ratio_old

                weight_new[active_medium] *= ratio_new
                weight_old[active_medium] *= ratio_old
                act_medium_scatter = active_medium

            depth[act_medium_scatter] += 1
            last_scatter_event[act_medium_scatter] = dr.detach(mei_new)

            # Don't estimate lighting if we exceeded number of bounces
            active &= depth < self.max_depth
            act_medium_scatter &= active
            if self.handle_null_scattering:
                # Null interaction: advance the ray origin, shrink remaining si.t
                ray.o[act_null_scatter] = dr.detach(mei_new.p)
                si.t[act_null_scatter] = si.t - dr.detach(mei_new.t)

                # Real scatter: per-state sigma_s over the average extinction.
                weight_new[act_medium_scatter] *= 2 * mei_new.sigma_s / (mei_new.sigma_t + mei_old.sigma_t)
                weight_old[act_medium_scatter] *= 2 * mei_old.sigma_s / (mei_new.sigma_t + mei_old.sigma_t)
            else:
                weight_new[act_medium_scatter] *= mei_new.sigma_s
                weight_old[act_medium_scatter] *= mei_old.sigma_s

            throughput_new[active_medium] *= dr.detach(weight_new)
            throughput_old[active_medium] *= dr.detach(weight_old)

            phase_ctx = mi.PhaseFunctionContext(sampler)
            phase_new = mei_new.medium.phase_function()
            phase_old = mei_old.medium.phase_function()
            # Mask out phase functions of lanes that did not scatter.
            phase_new[~act_medium_scatter] = dr.zeros(mi.PhaseFunctionPtr)
            phase_old[~act_medium_scatter] = dr.zeros(mi.PhaseFunctionPtr)

            valid_ray |= act_medium_scatter
            # Direction is sampled from the *new* phase function; the old
            # state is corrected with a phase/pdf ratio further below.
            wo, phase_pdf = phase_new.sample(phase_ctx, mei_new, sampler.next_1d(act_medium_scatter), sampler.next_2d(act_medium_scatter), act_medium_scatter)
            act_medium_scatter &= phase_pdf > 0.0

            new_ray = mei_new.spawn_ray(wo)
            ray[act_medium_scatter] = new_ray
            needs_intersection |= act_medium_scatter
            last_scatter_direction_pdf[act_medium_scatter] = phase_pdf

            #--------------------- Surface Interactions ---------------------
            active_surface |= escaped_medium
            intersect = active_surface & needs_intersection
            si[intersect] = scene.ray_intersect(ray, intersect)

            # ---------------- Intersection with emitters ----------------
            ray_from_camera = active_surface & dr.eq(depth, 0)
            count_direct = ray_from_camera | specular_chain
            emitter = si.emitter(scene)
            active_e = active_surface & dr.neq(emitter, None) & ~(dr.eq(depth, 0) & self.hide_emitters)

            # Get the PDF of sampling this emitter using next event estimation
            ds = mi.DirectionSample3f(scene, si, last_scatter_event)
            if self.use_nee:
                emitter_pdf = scene.pdf_emitter_direction(last_scatter_event, ds, active_e)
            else:
                emitter_pdf = 0.0
            emitted = emitter.eval(si, active_e)

            # MIS weight for BSDF/phase sampling vs. emitter sampling.
            mis_bsdf = mis_weight(last_scatter_direction_pdf, emitter_pdf)

            L_new[active_e] += dr.select(count_direct, throughput_new * emitted,
                                         throughput_new * mis_bsdf * emitted)

            L_old[active_e] += dr.select(count_direct, throughput_old * emitted,
                                         throughput_old * mis_bsdf * emitted)

            active_surface &= si.is_valid()
            ctx = mi.BSDFContext()
            bsdf = si.bsdf(ray)

            # --------------------- Emitter sampling ---------------------
            if self.use_nee:
                active_e_surface = active_surface & mi.has_flag(bsdf.flags(), mi.BSDFFlags.Smooth) & (depth + 1 < self.max_depth)
                sample_emitters = mei_new.medium.use_emitter_sampling()
                specular_chain &= ~act_medium_scatter
                specular_chain |= act_medium_scatter & ~sample_emitters
                active_e_medium = act_medium_scatter & sample_emitters
                active_e = active_e_surface | active_e_medium
                ref_interaction = dr.zeros(mi.Interaction3f)
                ref_interaction[act_medium_scatter] = mei_new
                ref_interaction[active_surface] = si
                emitted_old, emitted_new, ds = self.sample_emitter_twostates(ref_interaction, scene, sampler, medium, channel, active_e)
                # Query the BSDF for that emitter-sampled direction
                # For surfaces
                wo_em = si.to_local(ds.d)
                bsdf_val_new, bsdf_pdf = bsdf.eval_pdf(ctx, si, wo_em, active_e_surface)
                bsdf_val_old = bsdf.eval_old(ctx, si, wo_em, active_e_surface)

                # For media
                phase_val_new = phase_new.eval(phase_ctx, mei_new, ds.d, active_e_medium)
                phase_val_old = phase_old.eval(phase_ctx, mei_old, ds.d, active_e_medium)

                nee_weight_new = dr.select(active_e_surface, bsdf_val_new, phase_val_new)
                nee_weight_old = dr.select(active_e_surface, bsdf_val_old, phase_val_old)

                # NOTE(review): for media, the phase *value* is used as the
                # directional pdf here (valid when the phase function samples
                # proportionally to its value) — confirm for non-standard
                # phase functions.
                nee_directional_pdf = dr.select(ds.delta, 0.0, dr.select(active_e_surface, bsdf_pdf, phase_val_new))

                mis_em = mis_weight(ds.pdf, nee_directional_pdf)

                L_new[active] += throughput_new * nee_weight_new * mis_em * emitted_new
                L_old[active] += throughput_old * nee_weight_old * mis_em * emitted_old

            # ----------------------- BSDF sampling ----------------------
            bs, bsdf_weight = bsdf.sample(ctx, si, sampler.next_1d(active_surface),
                                          sampler.next_2d(active_surface), active_surface)
            active_surface &= bs.pdf > 0

            # The old state re-evaluates the sampled direction with its own
            # BSDF; delta lobes keep the sampled weight unchanged.
            bsdf_value_old = bsdf.eval_old(ctx, si, bs.wo, active_surface)
            prev_bsdf_delta = mi.has_flag(bs.sampled_type, mi.BSDFFlags.Delta)

            bsdf_weight_old = dr.select(bs.pdf > 0, bsdf_value_old / bs.pdf, 0)
            throughput_old[active_surface] *= dr.select(prev_bsdf_delta, bsdf_weight, bsdf_weight_old)

            throughput_new[active_surface] *= bsdf_weight

            # Update the old throughput with the phase/pdf ratio, since it does not cancel out perfectly anymore
            phase_val = phase_old.eval(phase_ctx, mei_old, wo, act_medium_scatter)
            throughput_old[act_medium_scatter] *= phase_val / phase_pdf

            η[active_surface] *= bs.eta
            bsdf_ray = si.spawn_ray(si.to_world(bs.wo))
            ray[active_surface] = bsdf_ray

            needs_intersection |= active_surface
            non_null_bsdf = active_surface & ~mi.has_flag(bs.sampled_type, mi.BSDFFlags.Null)
            depth[non_null_bsdf] += 1

            # update the last scatter PDF event if we encountered a non-null scatter event
            last_scatter_event[non_null_bsdf] = si
            last_scatter_direction_pdf[non_null_bsdf] = bs.pdf

            valid_ray |= non_null_bsdf
            specular_chain |= non_null_bsdf & mi.has_flag(bs.sampled_type, mi.BSDFFlags.Delta)
            specular_chain &= ~(active_surface & mi.has_flag(bs.sampled_type, mi.BSDFFlags.Smooth))
            has_medium_trans = active_surface & si.is_medium_transition()
            medium[has_medium_trans] = si.target_medium(ray.d)
            active &= (active_surface | active_medium)

        if self.xyz:
            return mi.srgb_to_xyz(L_old), mi.srgb_to_xyz(L_new), valid_ray
        else:
            return L_old, L_new, valid_ray
386 |
    def sample_emitter_twostates(self, ref_interaction, scene, sampler, medium, channel,
                                 active):
        """Next-event estimation that evaluates the shadow-ray transmittance
        for *both* states of the two-state media simultaneously.

        A single emitter direction is sampled; ratio tracking then walks the
        shadow ray once, maintaining one transmittance accumulator per state
        (the same free-flight samples are reused for both).

        :param ref_interaction: Interaction from which the shadow ray starts.
        :param medium: Medium pointer at the start of the shadow ray.
        :param channel: Color channel used to sample free-flight distances.
        :param active: Mask of lanes performing emitter sampling.
        :return: ``(emitter_val * transmittance_old,
                    emitter_val * transmittance_new, ds)``.
        """

        active = mi.Bool(active)
        medium = dr.select(active, medium, dr.zeros(mi.MediumPtr))

        ds, emitter_val = scene.sample_emitter_direction(ref_interaction, sampler.next_2d(active), False, active)
        ds = dr.detach(ds)
        invalid = dr.eq(ds.pdf, 0.0)
        emitter_val[invalid] = 0.0
        active &= ~invalid

        ray = ref_interaction.spawn_ray(ds.d)
        total_dist = mi.Float(0.0)
        si = dr.zeros(mi.SurfaceInteraction3f)
        needs_intersection = mi.Bool(True)
        transmittance_old = mi.Spectrum(1.0)
        transmittance_new = mi.Spectrum(1.0)
        loop = mi.Loop(name=f"PRB Next Event Estimation (twostates)",
                       state=lambda: (sampler, active, medium, ray, total_dist,
                                      needs_intersection, si, transmittance_old, transmittance_new))
        while loop(active):
            remaining_dist = ds.dist * (1.0 - mi.math.ShadowEpsilon) - total_dist
            ray.maxt = dr.detach(remaining_dist)
            active &= remaining_dist > 0.0

            # This ray will not intersect if it reached the end of the segment
            needs_intersection &= active
            si[needs_intersection] = scene.ray_intersect(ray, needs_intersection)
            needs_intersection &= False

            active_medium = active & dr.neq(medium, None)
            active_surface = active & ~active_medium

            # Handle medium interactions / transmittance
            mei_old, mei_new = medium.sample_interaction_twostates(ray, sampler.next_1d(active_medium), channel, active_medium)
            # Interactions beyond the nearest surface are invalid.
            mei_old.t[active_medium & (si.t < mei_old.t)] = dr.inf
            mei_new.t[active_medium & (si.t < mei_new.t)] = dr.inf

            tr_multiplier_old = mi.Spectrum(1.0)
            tr_multiplier_new = mi.Spectrum(1.0)

            # Special case for homogeneous media: directly advance to the next surface / end of the segment
            if self.nee_handle_homogeneous:
                active_homogeneous = active_medium & medium.is_homogeneous()
                mei_old.t[active_homogeneous] = dr.minimum(remaining_dist, si.t)
                mei_new.t[active_homogeneous] = dr.minimum(remaining_dist, si.t)

                # Closed-form transmittance per state.
                tr_multiplier_old[active_homogeneous] = medium.eval_tr_old(mei_old, si, active_homogeneous)
                tr_multiplier_new[active_homogeneous] = medium.eval_tr_new(mei_new, si, active_homogeneous)

                # Invalidate the interactions so these lanes escape the medium.
                mei_old.t[active_homogeneous] = dr.inf
                mei_new.t[active_homogeneous] = dr.inf

            escaped_medium = active_medium & ~mei_new.is_valid()

            # Ratio tracking transmittance computation
            active_medium &= mei_new.is_valid()
            ray.o[active_medium] = dr.detach(mei_new.p)
            si.t[active_medium] = dr.detach(si.t - mei_new.t)
            tr_multiplier_old[active_medium] *= dr.select(dr.neq(mei_old.combined_extinction, 0.0),
                                                          mei_old.sigma_n / mei_old.combined_extinction,
                                                          mei_old.sigma_n)
            tr_multiplier_new[active_medium] *= dr.select(dr.neq(mei_new.combined_extinction, 0.0),
                                                          mei_new.sigma_n / mei_new.combined_extinction,
                                                          mei_new.sigma_n)

            # Handle interactions with surfaces
            active_surface |= escaped_medium
            active_surface &= si.is_valid() & ~active_medium
            bsdf = si.bsdf(ray)
            # Only null (transmissive) surfaces let the shadow ray continue.
            bsdf_val = bsdf.eval_null_transmission(si, active_surface)
            tr_multiplier_old[active_surface] = tr_multiplier_old * bsdf_val
            tr_multiplier_new[active_surface] = tr_multiplier_new * bsdf_val

            transmittance_old *= dr.detach(tr_multiplier_old)
            transmittance_new *= dr.detach(tr_multiplier_new)

            # Update the ray with new origin & t parameter
            new_ray = si.spawn_ray(mi.Vector3f(ray.d))
            ray[active_surface] = dr.detach(new_ray)
            ray.maxt = dr.detach(remaining_dist)
            needs_intersection |= active_surface

            # Continue tracing through scene if non-zero weights exist
            active &= (active_medium | active_surface) & (dr.any(dr.neq(transmittance_new, 0.0)) | dr.any(dr.neq(transmittance_old, 0.0)))
            total_dist[active] += dr.select(active_medium, mei_new.t, si.t)

            # If a medium transition is taking place: Update the medium pointer
            has_medium_trans = active_surface & si.is_medium_transition()
            medium[has_medium_trans] = si.target_medium(ray.d)

        return emitter_val * transmittance_old, emitter_val * transmittance_new, ds
480 |
481 | def to_string(self):
482 | return f'TwoStatePRBVolpathIntegrator[max_depth = {self.max_depth}]'
483 |
--------------------------------------------------------------------------------
/plugins/welford.py:
--------------------------------------------------------------------------------
1 | import mitsuba as mi
2 | import drjit as dr
3 |
class WelfordVarianceEstimator():
    """
    Estimates the variance of a random variable in an online manner, using
    Welford's online algorithm:
    - https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
    - https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm

    The estimates use exponential moving averages akin to Adam [Kingma and Ba
    2015] to downweight earlier samples that follow a different distribution in
    an optimization.
    """

    def __init__(self, beta1=0.9, beta2=0.999):
        # NOTE: the redundant function-level `import mitsuba as mi` was
        # removed; the module already imports mitsuba at the top.
        # Sample counter, kept opaque so updating it does not retrigger
        # kernel recompilation.
        self.n = mi.TensorXi(0)
        dr.make_opaque(self.n)
        self.mean = 0
        self.var = 0
        # EMA weight for the mean
        self.beta1 = mi.TensorXf(beta1)
        # EMA weight for the variance
        self.beta2 = mi.TensorXf(beta2)

    def update(self, x):
        """Accumulate one sample ``x`` into the running mean/variance.

        ``dx1``/``dx2`` are the deviations from the bias-corrected mean
        before and after the mean update, as in Welford's algorithm. On the
        first sample the mean estimate is still 0, so ``x`` is used directly.
        """
        dx1 = dr.select(self.n > 0, x - self.mean / (1 - self.beta1**self.n), x)
        self.mean = self.beta1 * self.mean + (1 - self.beta1) * x
        self.n += 1
        dx2 = x - self.mean / (1 - self.beta1**self.n)
        self.var = self.beta2 * self.var + (1 - self.beta2) * dx1 * dx2
        dr.schedule(self.mean, self.var, self.n)

    def get(self):
        """Return the bias-corrected variance estimate."""
        return self.var / (1 - self.beta2**self.n)
37 |
class StatisticsEstimator():
    """
    Estimates the mean, variance and covariance of 2 given random variables,
    using Welford's online algorithm with EWMAs.
    """

    def __init__(self, beta1=0.9, beta2=0.999):
        # NOTE: the redundant function-level `import mitsuba as mi` was
        # removed; the module already imports mitsuba at the top.
        # Sample counter, opaque to avoid kernel recompilation on update.
        self.n = mi.TensorXi(0)
        dr.make_opaque(self.n)
        self.mean_x = 0
        self.mean_y = 0
        self.var_x = 0
        self.var_y = 0
        self.cov = 0

        # EMA weight for the mean
        self.beta1 = mi.TensorXf(beta1)
        # EMA weight for the (co)variance
        self.beta2 = mi.TensorXf(beta2)

    def update(self, x, y):
        """Accumulate one sample pair ``(x, y)`` into the running statistics.

        ``dx1``/``dy1`` are deviations from the bias-corrected means *before*
        the mean update, ``dx2``/``dy2`` *after* it (Welford's update).
        """
        dx1 = dr.select(self.n > 0, x - self.mean_x / (1 - self.beta1**self.n), x)
        dy1 = dr.select(self.n > 0, y - self.mean_y / (1 - self.beta1**self.n), y)

        self.mean_y = self.beta1 * self.mean_y + (1 - self.beta1) * y
        self.mean_x = self.beta1 * self.mean_x + (1 - self.beta1) * x

        self.n += 1
        dx2 = x - self.mean_x / (1 - self.beta1**self.n)
        dy2 = y - self.mean_y / (1 - self.beta1**self.n)

        self.var_x = self.beta2 * self.var_x + (1 - self.beta2) * dx1 * dx2
        self.var_y = self.beta2 * self.var_y + (1 - self.beta2) * dy1 * dy2
        self.cov = self.beta2 * self.cov + (1 - self.beta2) * dx1 * dy2

        dr.schedule(self.mean_x, self.mean_y, self.var_x, self.var_y, self.cov, self.n)

    def get(self):
        """Return the bias-corrected ``(var_x, var_y, cov)`` estimates."""
        # Hoist the shared bias-correction factor (computed once, not thrice).
        correction = 1 - self.beta2**self.n
        return self.var_x / correction, self.var_y / correction, self.cov / correction
79 |
--------------------------------------------------------------------------------
/run_experiment.py:
--------------------------------------------------------------------------------
1 | import os
2 | import multiprocessing as mp
3 | import argparse
4 | import time
5 |
6 | from experiments import *
7 |
# All supported optimization methods.
AVAILABLE_METHODS = ["baseline", "cv_ps", "cv_pss"]
# Methods launched by default; "cv_pss" must be requested explicitly
# via --all or --method (see the argument handling below).
DISPATCH_METHODS = ["baseline", "cv_ps"]
10 |
def make_video(dir, prefix="img"):
    """Encode the frame sequence ``<dir>/<prefix>/%04d.png`` into an MP4.

    The video is written as ``<parent>/<method>_<prefix>.mp4``, where
    ``dir`` is assumed to have the form ``<parent>/<method>``.

    :param dir: Method output directory containing the frame subdirectory.
    :param prefix: Name of the frame subdirectory / output file suffix.
    """
    import subprocess
    dir, method = os.path.split(dir)
    # Bug fix: the codec option was "-c:v:" (trailing colon), which ffmpeg
    # rejects as an invalid stream specifier; "-c:v" selects the video codec.
    subprocess.run(["ffmpeg", "-i", f"{dir}/{method}/{prefix}/%04d.png", "-y",
                    "-loglevel", "error", "-c:v", "libx264", "-movflags",
                    "+faststart", "-crf", "15", f"{dir}/{method}_{prefix}.mp4"])
15 |
def run_experiment(config):
    """Run one optimization job described by the ``config`` dictionary.

    Expected keys: 'experiment', 'method', optionally 'output' plus any
    scene-parameter overrides. Results are saved to
    ``output/<output-or-experiment>/<method>/result.npz``.
    """
    root = os.path.join(os.path.dirname(__file__), "output")
    subdir = config['output'] if 'output' in config else config['experiment']
    output_dir = os.path.join(root, subdir, config['method'])

    # Look up the scene-construction function, e.g. `scene_<experiment>`,
    # which was brought in by `from experiments import *`.
    scene_fn = globals()[f"scene_{config['experiment']}"]
    params = scene_fn(config['method'], output_dir)

    # Apply any command-line overrides on top of the scene defaults.
    for key, value in config.items():
        if key not in ('device', 'experiment', 'method'):
            params[key] = value

    result_dict = run_opt(params)
    np.savez(os.path.join(output_dir, "result.npz"), **result_dict)
34 |
if __name__ == "__main__":
    # Change the default start method to 'spawn' to avoid CUDA errors
    mp.set_start_method('spawn')

    parser = argparse.ArgumentParser(description='Run a Mitsuba experiment')
    # AVAILABLE_SCENES comes from `from experiments import *`.
    parser.add_argument('experiment', type=str, nargs='*', choices=AVAILABLE_SCENES, help='Name(s) of the experiment to run')
    parser.add_argument('--method', type=str, choices=AVAILABLE_METHODS, help="Optimization method to use")
    parser.add_argument('--all', action='store_true', help="use all the methods")
    parser.add_argument('--video', action='store_true', help="Generate videos")
    parser.add_argument('--output', type=str, help='Output directory name')

    # Optional overrides of scene parameters
    parser.add_argument('--n_steps', type=int, help='Number of optimization steps')
    parser.add_argument('--lr', type=float, help='Learning rate')
    parser.add_argument('--spp', type=int, help='Samples per pixel for the primal rendering')
    parser.add_argument('--spp_grad', type=int, help='Samples per pixel for the adjoint rendering')
    parser.add_argument('--beta1', type=float, help='β₁ parameter for statistics')
    parser.add_argument('--beta2', type=float, help='β₂ parameter for statistics')
    parser.add_argument('--force_baseline', action='store_true', help='Force the use of the baseline primal rendering for gradient computation')
    parser.add_argument('--denoise', action='store_true', help='Denoise the primal rendering')
    parser.add_argument('--loss', type=str, choices=['L1', 'L2', 'VGG'], help='Loss function to use')
    parser.add_argument('--pre_update', action='store_true', help='Update the statistics before computing the control weight')

    args = parser.parse_args()

    print(f"{'Experiment':^20} | {'Method':^20}")
    print(f"{'-'*20:<20} | {'-'*20:<20}")

    # By default, cv_pss is not run, this allows to run it as well
    if args.all:
        methods = AVAILABLE_METHODS
    else:
        methods = DISPATCH_METHODS

    # Build the list of (experiment, method) jobs; any CLI override that was
    # actually set is forwarded into each job's config.
    job_queue = []
    for experiment in args.experiment:
        for method in methods if args.method is None else [args.method]:
            job_queue.append({
                'experiment': experiment,
                'method': method,
                **{k: v for k, v in vars(args).items() if v is not None and k not in ['experiment', 'method', 'video', 'all']}
            })

    # Run the jobs one at a time, each in a fresh process (start is followed
    # immediately by join, so execution is sequential).
    for config in job_queue:
        process = mp.Process(target=run_experiment, args=(config,))
        process.start()
        process.join()

    # Generate the videos
    if args.video:
        print("Generating videos...")
        for experiment in args.experiment:
            for i, method in enumerate(methods):
                if args.method is not None and args.method != method:
                    continue

                output_dir = os.path.join(os.path.dirname(__file__), 'output', experiment, method)

                make_video(output_dir, 'img') # Main video
                make_video(output_dir, 'img_inf') # Converged video
95 |
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import mitsuba as mi
3 | import drjit as dr
4 | from plugins.volpathsimple import get_single_medium
5 | import json
6 | from tqdm import trange
7 |
def save_img(img, output_dir, img_name, it, save_png=False):
    """Write ``img`` asynchronously to ``<output_dir>/<img_name>/<it:04d>.exr``.

    :param img: Image data accepted by ``mi.Bitmap``.
    :param it: Iteration index, used as the zero-padded file name.
    :param save_png: If True, also write a tonemapped PNG next to the EXR.
    """
    filename = os.path.join(output_dir, img_name, f"{it:04d}.exr")
    # Idiom fix: `os.makedirs(..., exist_ok=True)` replaces the manual
    # isdir check (avoids the check-then-create race).
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    mi.Bitmap(img).write_async(filename)
    if save_png:
        mi.util.convert_to_bitmap(img).write_async(filename.replace(".exr", ".png"))
15 |
def runtime():
    """Return the total execution + codegen time of all kernels launched
    since the last call to ``dr.kernel_history()``.

    ``dr.eval()`` and ``dr.sync_thread()`` force pending kernels to complete
    so that the history is up to date before it is read.
    """
    dr.eval()
    dr.sync_thread()
    # Generator instead of a throwaway list inside sum() (perf idiom).
    return sum(k['execution_time'] + k['codegen_time'] for k in dr.kernel_history())
20 |
def l2_loss(x, y):
    """Mean squared error between `x` and `y`."""
    residual = x - y
    return dr.mean(dr.sqr(residual))
23 |
def l1_loss(x, y):
    """Mean absolute error between `x` and `y`."""
    residual = x - y
    return dr.mean(dr.abs(residual))
26 |
def adjust_majorant_res_factor(scene, density_res, max_factor=8):
    """Choose a majorant supergrid resolution factor suited to `density_res`
    and apply it to the scene's (single) medium.

    Starting from `max_factor` (generalized from the previous hard-coded 8;
    the default preserves the old behavior), the factor is decreased until
    the density grid's smallest spatial side divided by the factor is at
    least 4 voxels. If no factor > 1 qualifies, the supergrid is disabled
    (factor 0).

    :param scene: Scene whose single medium is adjusted via `get_single_medium`.
    :param density_res: Shape of the density grid; the first 3 entries are
        the spatial resolution.
    :param max_factor: Largest supergrid factor to consider.
    """
    res_factor = max_factor

    if res_factor > 1:
        min_side = dr.min(density_res[:3])
        # For the current density res, find the largest factor that
        # results in a meaningful supergrid resolution.
        while (res_factor > 1) and (min_side // res_factor) < 4:
            res_factor -= 1
        # Otherwise, just disable the supergrid.
        if res_factor <= 1:
            res_factor = 0

    medium = get_single_medium(scene)
    current = medium.majorant_resolution_factor()
    # Avoid a redundant parameter update when the factor is unchanged.
    if current != res_factor:
        medium.set_majorant_resolution_factor(res_factor)
44 |
def save_final_state(params, opt, output_dir):
    """Serialize the optimized variables to `output_dir`.

    Tensor variables are written as `.vol` files (4D volumes) or `.exr`
    files (3D textures); every other variable is collected into a single
    `final_state.json`.
    """
    scalars = {}
    for key in params['variables'].keys():
        value = opt[key]
        if isinstance(value, mi.TensorXf):
            safe_name = key.replace('.', '_')
            rank = len(dr.shape(value))
            if rank == 4:
                # Volume grid
                mi.VolumeGrid(value).write(os.path.join(output_dir, f"{safe_name}_final.vol"))
            elif rank == 3:
                # Texture
                mi.Bitmap(value).write(os.path.join(output_dir, f"{safe_name}_final.exr"))
        else:
            scalars[key] = value.numpy().tolist()

    if scalars:
        with open(os.path.join(output_dir, "final_state.json"), 'w') as f:
            json.dump(scalars, f)
61 |
def initialize_parameters(params, opt, scene, scene_params):
    """Load each variable's initial state into the optimizer and the scene.

    For the 'cv_ps' method the whole assignment pass runs twice: two-state
    BSDFs/media snapshot their 'old' state from the previous committed
    parameter values, so both states must be written once each.

    :param params: Experiment description; uses 'variables', 'method',
        optionally 'use_majorant_supergrid' and 'lr'.
    :param opt: Mitsuba optimizer receiving the variables.
    :param scene: Scene (needed to adjust the majorant supergrid).
    :param scene_params: `mi.SceneParameters` to write through to.
    """
    for i in range(2 if params['method'] == 'cv_ps' else 1):
        # We do this twice to properly set the initial states of the twostate BSDFs/Media
        for key, param in params['variables'].items():
            init_state = param['init']
            if 'sigma_t' in key:
                # Spectrally varying extinction is not supported
                if isinstance(init_state, float):
                    opt[key] = mi.Float(init_state)
                elif isinstance(init_state, mi.TensorXf):
                    # Single-channel extinction grids only.
                    assert init_state.shape[-1] == 1
                    opt[key] = init_state

                if params.get('use_majorant_supergrid', False):
                    adjust_majorant_res_factor(scene, init_state.shape)

                scene_params[key] = opt[key]
            else:
                scene_params[key] = init_state
                opt[key] = scene_params[key]

            # Adjust learning rate if needed
            if 'lr_factor' in param:
                opt.set_learning_rate({key: params.get('lr', 1e-2) * param['lr_factor']})

        # Commit this pass so two-state plugins can capture the state.
        scene_params.update()
88 |
89 |
def d_l(x, y, name):
    """Derivative of the loss `name` with respect to its first argument.

    Supported losses: 'L1' -> sign(x - y), 'L2' -> 2 * (x - y).

    :raises NotImplementedError: for any other loss name.
    """
    residual = x - y
    if name == 'L2':
        return 2 * residual
    if name == 'L1':
        return dr.sign(residual)
    raise NotImplementedError(f"Unknown loss function {name}")
97 |
def render_reference(params, scene, integrator):
    """Return the reference image, rendering it only if no cached copy exists.

    The reference is accumulated over `ref_passes` renders of `spp_ref`
    samples each and cached as `img_ref.exr` one level above the method
    output directory (recomputing it is expensive).
    """
    save_path = os.path.join(os.path.dirname(params['output_dir']), f"img_ref.exr")

    if os.path.isfile(save_path):
        # Reuse the cached reference image.
        return mi.TensorXf(mi.Bitmap(save_path))

    ref_passes = params.get('ref_passes', 1)
    spp_ref = params.get('spp_ref', params.get('spp', 1))
    img_ref = mi.TensorXf(0.0)
    for j in trange(ref_passes):
        img_ref += mi.render(scene, seed = 17843 + j, integrator=integrator, spp=spp_ref) / ref_passes
    mi.Bitmap(img_ref).write_async(save_path)
    return img_ref
112 |
def render_display(params, scene, scene_params, integrator):
    """Render the current scene state at the final (display) resolution.

    The film is temporarily switched to `final_res`, `ref_passes` renders of
    `spp_ref` samples are accumulated, then the optimization-time resolution
    is restored before returning.
    """
    opt_res = scene.sensors()[0].film().size()
    final_res = params.get('final_res', opt_res)

    # Switch to the display resolution.
    scene_params['sensor.film.size'] = final_res
    scene_params.update()

    ref_passes = params.get('ref_passes', 1)
    spp_ref = params.get('spp_ref', params.get('spp', 1))
    img = mi.TensorXf(0.0)
    for j in trange(ref_passes):
        img += mi.render(scene, seed = 17843 + j, integrator=integrator, spp=spp_ref) / ref_passes

    # Restore the optimization resolution.
    scene_params['sensor.film.size'] = opt_res
    scene_params.update()
    return img
128 |
def checkpoint(scene, params, i, integrator, loss_func, img, img_ref, result_dict):
    """Record periodic statistics and intermediate images at iteration `i`.

    Every `recomp_freq` iterations the current state is re-rendered at high
    sample count (`spp_inf`) to evaluate the loss with low noise, and the
    variance of the noisy iterate `img` is estimated against it. Entries are
    written into ``result_dict['loss']`` / ``result_dict['var']`` at index
    ``i // recomp_freq``. With 'save' enabled, the images (and, for
    control-variate methods past warmup, the CV weights and H/F buffers) are
    written to disk. If `i` is listed in params['bias_steps'], a converged
    rendering of the current state is additionally saved for bias analysis.
    """
    recomp_freq = params.get('recomp_freq', 10)
    save = params.get('save', False)
    spp_inf = params.get('spp_inf', 64)
    output_dir = params['output_dir']
    denoise = params.get('denoise', False)
    method = params.get('method', 'baseline')

    if i % recomp_freq == 0:
        with dr.suspend_grad():
            # Re render the current state with a higher sample count, to avoid bias in the loss evaluation
            img_inf = mi.render(scene, integrator=integrator.integrator, seed=i+1, spp=spp_inf)
            if save:
                save_img(img_inf, output_dir, "img_inf", i//recomp_freq, save_png=True)

            result_dict["loss"][i//recomp_freq] = loss_func(img_ref, img_inf).numpy()[0]
            # Compute variance as MSE between the noisy and the high spp rendering
            result_dict["var"][i//recomp_freq] = dr.mean(dr.sqr(img_inf - img)).numpy()[0]

            if save:
                save_img(integrator.img, output_dir, "img", i//recomp_freq, save_png=True)
                if denoise:
                    save_img(img, output_dir, "img_denoised", i//recomp_freq, save_png=True)
                # Only meaningful once the CV statistics passed their warmup.
                if method != "baseline" and dr.all(integrator.stats.n > integrator.warmup):
                    save_img(integrator.w_s, output_dir, "weights", i//recomp_freq)
                    save_img(integrator.H, output_dir, "img_H", i//recomp_freq)
                    save_img(integrator.F, output_dir, "img_F", i//recomp_freq)

    if 'bias_steps' in params and i in params['bias_steps']:
        with dr.suspend_grad():
            ref_passes = params.get('ref_passes', 1)
            spp_ref = params.get('spp_ref', params.get('spp', 1))
            img_gt = mi.TensorXf(0.0)
            # NOTE(review): this passes `integrator` directly, whereas the
            # loss evaluation above uses `integrator.integrator` — confirm
            # this asymmetry is intentional.
            for j in trange(ref_passes):
                img_gt += mi.render(scene, seed = 17843 + j, integrator=integrator, spp=spp_ref) / ref_passes
            save_img(img_gt, output_dir, "bias_gt", i)
            save_img(img, output_dir, "bias_img", i)
166 |
def precondition(params, opt):
    """Apply the large-steps preconditioner to the gradient of every
    optimized variable that declares one in `params['variables']`."""
    variables = params['variables']
    for key in variables:
        config = variables[key]
        if 'largesteps' not in config:
            continue
        preconditioned = config['largesteps'].precondition(dr.grad(opt[key]))
        dr.set_grad(opt[key], preconditioned)
171 |
def clamp(params, opt):
    """Clamp each optimized variable that declares a 'clamp' range into
    its [low, high] bounds, detaching it from the AD graph first."""
    for key, config in params['variables'].items():
        if 'clamp' not in config:
            continue
        low = config['clamp'][0]
        high = config['clamp'][1]
        opt[key] = dr.clamp(dr.detach(opt[key]), low, high)
176 |
def upsample(scene, params, opt, integrator):
    """Double the spatial resolution of every optimized tensor variable.

    Each variable in `params['variables']` must be an `mi.TensorXf` of rank
    3 or 4; its spatial dimensions are doubled (the last, channel dimension
    is preserved) via `dr.upsample`. For extinction grids (keys containing
    '.sigma_t.'), the scene's majorant supergrid resolution is adjusted when
    `params['use_majorant_supergrid']` is set. Finally the integrator's
    resolution-dependent state is reset.

    Raises:
        TypeError: if a variable is not an `mi.TensorXf`.
        ValueError: if a tensor is not 3- or 4-dimensional.
    """
    use_majorant_supergrid = params.get('use_majorant_supergrid', False)
    # Only the keys are needed; values come from the optimizer itself.
    for key in params['variables']:
        tensor = opt[key]
        # isinstance (rather than exact type comparison) also accepts
        # TensorXf subclasses while rejecting everything else.
        if not isinstance(tensor, mi.TensorXf):
            raise TypeError(f"Upsampling is only supported for mi.TensorXf, got type {type(opt[key])}.")
        old_res = tensor.shape
        if len(old_res) not in (3, 4):
            raise ValueError(f"Upsampling expects a 3 or 4D tensor. Got {len(old_res)}.")
        # Double every spatial dimension, keep the channel count.
        new_res = (*[2 * x for x in old_res[:-1]], old_res[-1])
        opt[key] = dr.upsample(tensor, shape=new_res)
        if '.sigma_t.' in key and use_majorant_supergrid:
            adjust_majorant_res_factor(scene, new_res)

    # Invalidate integrator state that depends on the previous resolution.
    integrator.reset()
193 |
--------------------------------------------------------------------------------
/vgg.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision
3 | from torchvision.models import vgg19
4 | import drjit as dr
5 |
class VGGLoss():
    """Perceptual loss comparing VGG19 feature maps of two images.

    Feature maps are extracted at four layers of a pretrained VGG19 (as in
    [Hu et al. 2022]) and compared with an MSE per layer. The features of
    the most recent reference image are cached (keyed by its Dr.Jit index)
    so repeated calls with the same target skip recomputation.
    """

    def __init__(self):
        # Load VGG19 on the GPU.
        # NOTE: `pretrained=True` is deprecated in recent torchvision; the
        # equivalent is `weights=VGG19_Weights.IMAGENET1K_V1`. Kept as-is
        # for compatibility with the torchvision version pinned here.
        vgg = vgg19(pretrained=True).cuda()

        # Feature extraction layers, as in [Hu et al. 2022]
        self.features = [
            torch.nn.Sequential(vgg.features[:4]),    # relu1_2
            torch.nn.Sequential(vgg.features[4:9]),   # relu2_2
            torch.nn.Sequential(vgg.features[9:14]),  # relu3_2
            torch.nn.Sequential(vgg.features[16:23])  # relu4_2
        ]

        # Freeze the network: only the input image is differentiated.
        for f in self.features:
            for p in f.parameters():
                p.requires_grad = False

        # ImageNet standardization expected by VGG19.
        self.preprocess = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

        # Cached feature maps of the last reference image.
        self.target_features = []
        self.target_index = -1  # JIT index of the cached target image

    @dr.wrap_ad(source="drjit", target="torch")
    def compute_target_features(self, target):
        """Compute and cache the VGG feature maps of `target`."""
        with torch.no_grad():
            # BUGFIX: rebuild the cache from scratch. Previously features of
            # a new target were *appended* after the stale ones, so `loss`
            # (which indexes 0..len(self.features)-1) kept comparing against
            # the features of the very first target forever.
            features = []
            p_img = self.preprocess(target.T[None, ...])
            for f in self.features:
                p_img = f(p_img)
                features.append(p_img)
            self.target_features = features

    @dr.wrap_ad(source="drjit", target="torch")
    def loss(self, source, enable_grad=True):
        """Return the multi-layer MSE between `source` and cached target features."""
        # Temporary fix to avoid GPU memory leaks due to wrap_ad in detached mode
        source.requires_grad = enable_grad

        # Standardize the image
        p_img = self.preprocess(source.T[None, ...])
        loss = 0
        for i, f in enumerate(self.features):
            p_img = f(p_img)
            loss += torch.mean((p_img - self.target_features[i])**2)

        if enable_grad:
            return loss
        # Temporary fix to avoid GPU memory leaks due to wrap_ad
        return loss.cpu()

    def __call__(self, source, target):
        # Recompute the cached target features only when the reference
        # image actually changed (detected via its Dr.Jit variable index).
        if target.index != self.target_index:
            self.target_index = target.index
            self.compute_target_features(target)
        return self.loss(source, enable_grad=dr.grad_enabled(source))
62 |
--------------------------------------------------------------------------------