├── .envrc ├── .gitignore ├── .gitmodules ├── WoodFloor.jpg ├── __pycache__ ├── pssmlt.cpython-310.pyc ├── pssmltpath.cpython-310.pyc └── pssmltsimple.cpython-310.pyc ├── bdpt.py ├── bdpt02.py ├── custom-prop.py ├── download.py ├── drjitstruct.py ├── hashgrid.py ├── metropolis.py ├── mlt.py ├── patch.py ├── path-mis.py ├── path-record.py ├── path.py ├── pathrecord.py ├── pixi.lock ├── pixi.toml ├── prefix_sum.py ├── pssmlt.py ├── pssmltpath.py ├── pssmltsimple.py ├── reductions.py ├── reproject.py ├── restirgi.py ├── simple.py ├── specular_from_bsdf.py ├── sppm.py ├── test-restir-dynamic.py ├── test-restir-jacobian.py ├── test-restir-spatial-spatial.py ├── test-restir-spatial.py ├── test.py ├── testpssmlt.py ├── to_dict.py └── util.py /.envrc: -------------------------------------------------------------------------------- 1 | watch_file pixi.lock 2 | eval "$(pixi shell-hook)" 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | scenes.zip 2 | scenes/ 3 | .venv 4 | out/ 5 | __pycache__ 6 | data/ 7 | flip/ 8 | .pixi/ 9 | build*/ 10 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "mitsuba3"] 2 | path = mitsuba3 3 | url = https://github.com/mitsuba-renderer/mitsuba3 4 | -------------------------------------------------------------------------------- /WoodFloor.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DoeringChristian/mitsuba3-experiments/caf7c122da0ad868bb4da2c27feabdfd6ef505c6/WoodFloor.jpg -------------------------------------------------------------------------------- /__pycache__/pssmlt.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DoeringChristian/mitsuba3-experiments/caf7c122da0ad868bb4da2c27feabdfd6ef505c6/__pycache__/pssmlt.cpython-310.pyc -------------------------------------------------------------------------------- /__pycache__/pssmltpath.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DoeringChristian/mitsuba3-experiments/caf7c122da0ad868bb4da2c27feabdfd6ef505c6/__pycache__/pssmltpath.cpython-310.pyc -------------------------------------------------------------------------------- /__pycache__/pssmltsimple.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DoeringChristian/mitsuba3-experiments/caf7c122da0ad868bb4da2c27feabdfd6ef505c6/__pycache__/pssmltsimple.cpython-310.pyc -------------------------------------------------------------------------------- /bdpt.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | import matplotlib.pyplot as plt 4 | from typing import Type, TypeVar, overload 5 | 6 | mi.set_variant("llvm_ad_rgb") 7 | # dr.set_log_level(dr.LogLevel.Debug) 8 | 9 | T = TypeVar("T") 10 | 11 | 12 | class Path: 13 | idx: mi.UInt32 14 | 15 | def __init__(self, dtype: Type[T], n_rays: int, max_depth: int): 16 | self.n_rays = n_rays 17 | self.max_depth = max_depth 18 | self.idx = dr.arange(mi.UInt32, n_rays) 19 | self.dtype = dtype 20 | 21 | self.vertices = dr.zeros(dtype, shape=(self.max_depth * 
self.n_rays)) 22 | 23 | def __setitem__(self, depth: mi.UInt32, value: T): 24 | dr.scatter(self.vertices, value, depth * self.n_rays + self.idx) 25 | 26 | # Return vertex at depth 27 | @overload 28 | def __getitem__(self, depth: mi.UInt32) -> T: 29 | ... 30 | 31 | # Return a vertex at (depth, ray_index) 32 | @overload 33 | def __getitem__(self, idx: tuple[mi.UInt32, mi.UInt32]) -> T: 34 | ... 35 | 36 | def __getitem__(self, idx): 37 | if isinstance(idx, mi.UInt32): 38 | return dr.gather(self.dtype, self.vertices, idx * self.n_rays + self.idx) 39 | if ( 40 | isinstance(idx, tuple) 41 | and isinstance(idx[0], mi.UInt32) 42 | and isinstance(idx[1], mi.UInt32) 43 | ): 44 | return dr.gather(self.dtype, self.vertices, idx[0] * self.n_rays + idx[1]) 45 | -------------------------------------------------------------------------------- /bdpt02.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | import matplotlib.pyplot as plt 4 | from typing import Generic, Type, TypeVar, overload 5 | from dataclasses import dataclass 6 | 7 | mi.set_variant("llvm_ad_rgb") 8 | # dr.set_log_level(dr.LogLevel.Debug) 9 | 10 | 11 | def mis_weight(pdf_a: mi.Float, pdf_b: mi.Float) -> mi.Float: 12 | """ 13 | Compute the Multiple Importance Sampling (MIS) weight given the densities 14 | of two sampling strategies according to the power heuristic. 15 | """ 16 | a2 = dr.sqr(pdf_a) 17 | return dr.detach(dr.select(pdf_a > 0, a2 / dr.fma(pdf_b, pdf_b, a2), 0), True) 18 | 19 | 20 | T = TypeVar("T") 21 | 22 | 23 | @dataclass 24 | class Vertex: 25 | def __init__(self) -> None: 26 | self.p: mi.Point3f = dr.zeros(mi.Point3f) 27 | self.f: mi.Color3f = dr.zeros(mi.Color3f) 28 | self.L: mi.Color3f = dr.zeros(mi.Color3f) 29 | self.wi: mi.Vector3f = dr.zeros(mi.Vector3f) 30 | 31 | DRJIT_STRUCT = { 32 | "p": mi.Point3f, 33 | "f": mi.Color3f, 34 | "L": mi.Color3f, 35 | "wi": mi.Vector3f, 36 | } 37 | 38 | 39 | class Path(Generic[T]): 40 | idx: mi.UInt32 41 | 42 | def __init__(self, dtype: Type[T], n_rays: int, max_depth: int): 43 | self.n_rays = n_rays 44 | self.max_depth = max_depth 45 | self.idx = dr.arange(mi.UInt32, n_rays) 46 | self.dtype = dtype 47 | 48 | self.vertices = dr.zeros(dtype, shape=(self.max_depth * self.n_rays)) 49 | 50 | def __setitem__(self, depth: mi.UInt32, value: T): 51 | dr.scatter(self.vertices, value, depth * self.n_rays + self.idx) 52 | 53 | # Return vertex at depth 54 | @overload 55 | def __getitem__(self, depth: mi.UInt32) -> T: 56 | ... 57 | 58 | # Return a vertex at (depth, ray_index) 59 | @overload 60 | def __getitem__(self, idx: tuple[mi.UInt32, mi.UInt32]) -> T: 61 | ... 
62 | 63 | def __getitem__(self, idx): 64 | if isinstance(idx, mi.UInt32): 65 | return dr.gather(self.dtype, self.vertices, idx * self.n_rays + self.idx) 66 | if ( 67 | isinstance(idx, tuple) 68 | and isinstance(idx[0], mi.UInt32) 69 | and isinstance(idx[1], mi.UInt32) 70 | ): 71 | return dr.gather(self.dtype, self.vertices, idx[0] * self.n_rays + idx[1]) 72 | 73 | 74 | class BDPTIntegrator(mi.SamplingIntegrator): 75 | def __init__(self, props: mi.Properties) -> None: 76 | self.max_depth = int(props.get("max_depth", def_value=16)) 77 | self.rr_depth = int(props.get("rr_depth", def_value=4)) 78 | super().__init__(props) 79 | 80 | def record_light_path( 81 | self, scene: mi.Scene, sampler: mi.Sampler, active: bool = True 82 | ) -> Path[Vertex]: 83 | wavefront_size = sampler.wavefront_size() 84 | path = Path(Vertex, wavefront_size, self.max_depth + 1) 85 | 86 | ray, ray_weight, emitter = scene.sample_emitter_ray( 87 | 0.0, sampler.next_1d(), sampler.next_2d(), sampler.next_2d(), active 88 | ) 89 | 90 | vertex = Vertex() 91 | vertex.f = mi.Color3f(1.0) 92 | vertex.L = ray_weight 93 | vertex.p = ray.o 94 | vertex.wi = dr.zeros(mi.Vector3f) 95 | path[mi.UInt32(0)] = vertex 96 | 97 | bsdf_ctx = mi.BSDFContext() 98 | 99 | depth = mi.UInt32(0) 100 | f = mi.Color3f(1.0) 101 | L = mi.Color3f(ray_weight) 102 | active = mi.Bool(active) 103 | 104 | prev_si = dr.zeros(mi.SurfaceInteraction3f) 105 | 106 | loop = mi.Loop( 107 | name="Record Light", 108 | state=lambda: (sampler, ray, depth, f, L, active, prev_si), 109 | ) 110 | loop.set_max_iterations(self.max_depth) 111 | 112 | while loop(active): 113 | si: mi.SurfaceInteraction3f = scene.ray_intersect( 114 | ray, 115 | ) 116 | 117 | bsdf: mi.BSDF = si.bsdf() 118 | 119 | ds = mi.DirectionSample3f(scene, si=si, ref=prev_si) 120 | 121 | Le = ds.emitter.eval(si) 122 | 123 | active_next = (depth + 1 < self.max_depth) & si.is_valid() 124 | 125 | bsdf_sample, bsdf_val = bsdf.sample( 126 | bsdf_ctx, si, sampler.next_1d(), sampler.next_2d(), active_next 127 | ) 128 | ray = si.spawn_ray(si.to_world(bsdf_sample.wo)) 129 | 130 | f *= bsdf_val 131 | L = f * L + Le 132 | 133 | vertex = Vertex() 134 | vertex.f = f 135 | vertex.L = L 136 | vertex.p = si.p 137 | vertex.wi = si.to_world(si.wi) 138 | path[depth + 1] = vertex 139 | 140 | prev_si = dr.detach(si, True) 141 | 142 | active = active_next 143 | depth += 1 144 | 145 | return path 146 | 147 | def record_camera_path( 148 | self, scene: mi.Scene, sampler: mi.Sampler, ray: mi.Ray3f, active: bool = True 149 | ) -> Path[Vertex]: 150 | wavefront_size = sampler.wavefront_size() 151 | path = Path(Vertex, wavefront_size, self.max_depth + 1) 152 | 153 | vertex = Vertex() 154 | vertex.f = mi.Color3f(1.0) 155 | vertex.L = mi.Color3f(1.0) 156 | vertex.p = ray.o 157 | vertex.wi = dr.zeros(mi.Vector3f) 158 | path[mi.UInt32(0)] = vertex 159 | 160 | bsdf_ctx = mi.BSDFContext() 161 | 162 | ray = mi.Ray3f(ray) 163 | depth = mi.UInt32(0) 164 | f = mi.Color3f(1.0) 165 | L = mi.Color3f(0.0) 166 | active = mi.Bool(active) 167 | 168 | prev_si = dr.zeros(mi.SurfaceInteraction3f) 169 | 170 | loop = mi.Loop( 171 | name="Record View", 172 | state=lambda: (sampler, ray, depth, f, L, active, prev_si), 173 | ) 174 | 175 | loop.set_max_iterations(self.max_depth) 176 | 177 | while loop(active): 178 | si: mi.SurfaceInteraction3f = scene.ray_intersect( 179 | ray, ray_flags=mi.RayFlags.All, coherent=dr.eq(depth, 0) 180 | ) 181 | 182 | bsdf: mi.BSDF = si.bsdf(ray) 183 | 184 | ds = mi.DirectionSample3f(scene, si=si, ref=prev_si) 185 | 186 | Le = f * 
ds.emitter.eval(si) 187 | 188 | active_next = (depth + 1 < self.max_depth) & si.is_valid() 189 | 190 | bsdf_sample, bsdf_val = bsdf.sample( 191 | bsdf_ctx, si, sampler.next_1d(), sampler.next_2d(), active_next 192 | ) 193 | 194 | ray = si.spawn_ray(si.to_world(bsdf_sample.wo)) 195 | 196 | L = L + Le 197 | f *= bsdf_val 198 | 199 | vertex = Vertex() 200 | vertex.f = f 201 | vertex.L = L 202 | vertex.p = si.p 203 | vertex.wi = si.to_world(si.wi) 204 | path[depth + 1] = vertex 205 | 206 | prev_si = dr.detach(si, True) 207 | 208 | active = active_next 209 | depth += 1 210 | 211 | return path 212 | 213 | def connect_s2t( 214 | self, 215 | scene: mi.Scene, 216 | s: mi.UInt32, 217 | t: mi.UInt32, 218 | s_path: Path[Vertex], 219 | t_path: Path[Vertex], 220 | ) -> tuple[mi.Color3f, mi.Color3f]: 221 | """ 222 | Perform connection between vectex s and t. 223 | Returns bsdf weight at vertex s and radiance emitted from s in direction of t. 224 | 225 | s0 s1 s2 t2 t1 t0 226 | o -- o -- o .. o -- o -- o 227 | 228 | first ray is cast from t2 to s2 to test if the points are visible to each other and to get surface interaction at s2. 229 | Then we use wi (s2 -> s1) to calculate the bsdf weight. 230 | """ 231 | s_p = s_path[s].p 232 | t_p = t_path[t].p 233 | 234 | t2s_dir = dr.normalize(s_p - t_p) 235 | 236 | t2s_ray = mi.Ray3f(t_p, t2s_dir) 237 | 238 | active = scene.ray_test(mi.Ray3f(t2s_ray, dr.norm(s_p - t_p))) 239 | 240 | si: mi.SurfaceInteraction3f = scene.ray_intersect(t2s_ray, active) 241 | 242 | bsdf: mi.BSDF = si.bsdf() 243 | 244 | wo = si.to_local(s_path[s].wi) 245 | weight, pdf = bsdf.eval_pdf(mi.BSDFContext(), si, wo, active) 246 | weight = dr.select(pdf > 0, weight / pdf, 0.0) 247 | weight = dr.select(active, weight, 0.0) 248 | 249 | emitter: mi.Emitter = si.emitter(scene, active) 250 | Le = emitter.eval(si, active) 251 | 252 | return weight, Le 253 | 254 | def connect_bdpt( 255 | self, 256 | scene: mi.Scene, 257 | s: mi.UInt32, 258 | t: mi.UInt32, 259 | camera_path: Path[Vertex], 260 | light_path: Path[Vertex], 261 | ) -> mi.Color3f: 262 | camera_weight, camera_Le = self.connect_s2t( 263 | scene, s, t, camera_path, light_path 264 | ) 265 | light_weight, light_Le = self.connect_s2t(scene, t, s, light_path, camera_path) 266 | 267 | camera_weight = dr.select(s == 0, 1.0, camera_weight) 268 | 269 | L = ( 270 | camera_path[s].L 271 | + camera_path[s].f * camera_weight * light_Le 272 | + camera_path[s].f * camera_weight * light_weight * light_path[t].L 273 | ) 274 | return light_weight 275 | 276 | def sample( 277 | self, 278 | scene: mi.Scene, 279 | sampler: mi.Sampler, 280 | ray: mi.RayDifferential3f, 281 | medium: mi.Medium = None, 282 | active: bool = True, 283 | ) -> tuple[mi.Color3f, mi.Bool, list]: 284 | camera_path = self.record_camera_path(scene, sampler, ray, active) 285 | light_path = self.record_light_path(scene, sampler, active) 286 | f = camera_path[mi.UInt32(1)].f 287 | L = self.connect_bdpt( 288 | scene, mi.UInt32(1), mi.UInt32(1), camera_path, light_path 289 | ) 290 | return L, mi.Bool(True), [] 291 | 292 | 293 | mi.register_integrator("bdpt", lambda props: BDPTIntegrator(props)) 294 | 295 | scene = mi.cornell_box() 296 | scene = mi.load_dict(scene) 297 | integrator = mi.load_dict( 298 | { 299 | "type": "bdpt", 300 | "max_depth": 16, 301 | "rr_depth": 2, 302 | } 303 | ) 304 | 305 | img = mi.render(scene, integrator=integrator) 306 | plt.imshow(mi.util.convert_to_bitmap(img)) 307 | plt.show() 308 | -------------------------------------------------------------------------------- 
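Note on the flat vertex storage used by the `Path` class in `bdpt.py` and `bdpt02.py` above: vertices for all rays at a given bounce are written into one linear buffer and addressed as `depth * n_rays + ray_index`, so each loop iteration performs a single `dr.scatter` and later lookups a single `dr.gather`. The sketch below is a minimal NumPy illustration of that indexing only; it is not part of the repository, and the 6-float record standing in for the `Vertex` DRJIT_STRUCT is an assumption made just to keep the example self-contained.

```python
import numpy as np

# Depth-major layout used by Path: one record per (depth, ray) pair,
# addressed as depth * n_rays + ray_index.
n_rays, max_depth = 4, 3

# Stand-in for the Vertex DRJIT_STRUCT (p: 3 floats, f: 3 floats).
vertices = np.zeros((max_depth * n_rays, 6), dtype=np.float32)

def set_slice(depth, values):
    # Mirrors dr.scatter(self.vertices, value, depth * self.n_rays + self.idx)
    idx = depth * n_rays + np.arange(n_rays)
    vertices[idx] = values

def get_slice(depth):
    # Mirrors dr.gather(self.dtype, self.vertices, depth * self.n_rays + self.idx)
    idx = depth * n_rays + np.arange(n_rays)
    return vertices[idx]

set_slice(1, np.full((n_rays, 6), 0.5, dtype=np.float32))
assert np.allclose(get_slice(1), 0.5)  # all rays' depth-1 vertices form one contiguous block
assert np.allclose(get_slice(0), 0.0)  # other depths remain untouched
```

Because consecutive ray indices at a fixed depth map to consecutive buffer entries, the per-bounce writes inside the recorded loops stay contiguous, which is why the integrators scatter into one preallocated buffer instead of keeping per-ray Python lists.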
/custom-prop.py: -------------------------------------------------------------------------------- 1 | import drjit as dr 2 | import mitsuba as mi 3 | 4 | if __name__ == "__main__": 5 | 6 | mi.set_variant("cuda_ad_rgb") 7 | 8 | scene = mi.cornell_box() 9 | scene = mi.load_dict(scene) 10 | 11 | dynamic_shape = scene.shapes()[0] 12 | dynamic_shape.set_id("dynamic_back") 13 | 14 | def is_dynamic(self, _) -> mi.Bool: 15 | if "dynamic" in self.id(): 16 | return mi.Bool(True) 17 | return mi.Bool(False) 18 | 19 | bsdf_ptrs = scene.shapes_dr() 20 | 21 | is_dynamic = dr.dispatch(bsdf_ptrs, is_dynamic, None) 22 | 23 | print(f"{is_dynamic=}") 24 | -------------------------------------------------------------------------------- /download.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import zipfile 3 | 4 | url = "https://rgl.s3.eu-central-1.amazonaws.com/scenes/tutorials/scenes.zip" 5 | 6 | req = requests.get(url) 7 | 8 | filename = url.split('/')[-1] 9 | 10 | with open(filename, 'wb') as out_file: 11 | out_file.write(req.content) 12 | 13 | 14 | with zipfile.ZipFile(filename, "r") as zip_ref: 15 | zip_ref.extractall(''.join(map(str, filename.split('.')[:-1]))) 16 | -------------------------------------------------------------------------------- /drjitstruct.py: -------------------------------------------------------------------------------- 1 | from typing import get_type_hints 2 | 3 | 4 | def drjitstruct(cls): 5 | drjit_struct = {} 6 | 7 | type_hints = get_type_hints(cls) 8 | 9 | for name, ty in type_hints.items(): 10 | drjit_struct[name] = ty 11 | cls.DRJIT_STRUCT = drjit_struct 12 | return cls 13 | -------------------------------------------------------------------------------- /hashgrid.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | 4 | if __name__ == "__main__": 5 | mi.set_variant("cuda_ad_rgb") 6 | 7 | 8 | def hash(p: mi.Point3u | mi.Point3f, hash_size: int): 9 | if isinstance(p, mi.Point3f): 10 | p = mi.Point3u(mi.UInt(p.x), mi.UInt(p.y), mi.UInt(p.z)) 11 | return hash(p, hash_size) 12 | return ((p.x * 73856093) ^ (p.y * 19349663) ^ (p.z * 83492791)) % hash_size 13 | 14 | 15 | class HashGrid: 16 | def __init__( 17 | self, sample: mi.Point3f, resolution: int, n_cells: None | int = None 18 | ) -> None: 19 | """ 20 | Constructs a 3D Hash Grid with the samples inserted. 
21 | 22 | It uses the hash function from the pbrt-v3 SPPM implementaiton (https://github.com/mmp/pbrt-v3/blob/master/src/integrators/sppm.cpp) 23 | 24 | @param sample: Samples to insert into the Hash Grid 25 | @param resolution: The number of grid cells in each dimension 26 | """ 27 | n_samples = dr.shape(sample)[-1] 28 | if n_cells is None: 29 | n_cells = n_samples 30 | self.n_cells = n_cells 31 | self.n_samples = n_samples 32 | self.resolution = resolution 33 | self.bbmin = dr.minimum( 34 | dr.min(sample.x), dr.minimum(dr.min(sample.y), dr.min(sample.z)) 35 | ) 36 | self.bbmax = dr.maximum( 37 | dr.max(sample.x), dr.maximum(dr.max(sample.y), dr.max(sample.z)) 38 | ) 39 | self.bbmax = dr.maximum( 40 | dr.max(sample.x), dr.maximum(dr.max(sample.y), dr.max(sample.z)) 41 | ) 42 | 43 | from prefix_sum import prefix_sum 44 | 45 | cell = self.cell_idx(sample) 46 | 47 | cell_size = dr.zeros(mi.UInt, n_cells) 48 | index_in_cell = mi.UInt(0) 49 | processing = dr.zeros(mi.UInt, n_cells) 50 | queued = mi.Bool(True) 51 | 52 | while dr.any(queued): 53 | dr.scatter(processing, dr.arange(mi.UInt, n_samples), cell, active=queued) 54 | selected = ( 55 | dr.eq( 56 | dr.gather(mi.UInt, processing, cell, queued), 57 | dr.arange(mi.UInt, n_samples), 58 | ) 59 | & queued 60 | ) 61 | index_in_cell[selected] = dr.gather(mi.UInt, cell_size, cell, selected) 62 | dr.scatter(cell_size, index_in_cell + 1, cell, selected) 63 | queued &= ~selected 64 | 65 | first_cell = dr.eq(dr.arange(mi.UInt, n_cells), 0) 66 | cell_offset = prefix_sum(cell_size) 67 | cell_offset = dr.select( 68 | first_cell, 69 | 0, 70 | dr.gather( 71 | mi.UInt, 72 | cell_offset, 73 | dr.arange(mi.UInt, n_cells) - 1, 74 | ~first_cell, 75 | ), 76 | ) 77 | self.cell_offset = cell_offset 78 | self.cell_size = cell_size 79 | self.sample_idx = dr.zeros(mi.UInt, n_samples) 80 | dr.scatter( 81 | self.sample_idx, 82 | dr.arange(mi.UInt, n_samples), 83 | dr.gather(mi.UInt, cell_offset, cell) + index_in_cell, 84 | ) 85 | 86 | def cell_idx(self, p: mi.Point3f): 87 | return hash( 88 | (p - self.bbmin) / (self.bbmax - self.bbmin) * self.resolution, 89 | self.n_cells, 90 | ) 91 | 92 | 93 | if __name__ == "__main__": 94 | x = mi.Float(0, 0.1, 0.6, 1) 95 | y = mi.Float(0, 0.1, 0.6, 1) 96 | z = mi.Float(0, 0.1, 0.6, 1) 97 | 98 | grid = HashGrid(mi.Point3f(x, y, z), 2, 2) 99 | -------------------------------------------------------------------------------- /metropolis.py: -------------------------------------------------------------------------------- 1 | import drjit as dr 2 | import mitsuba as mi 3 | import os 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | import tqdm 7 | from scipy import stats 8 | from dataclasses import dataclass, field 9 | from matplotlib.lines import Line2D 10 | 11 | 12 | if __name__ == "__main__": 13 | mi.set_variant("cuda_ad_rgb") 14 | 15 | 16 | def interval_to_exp(sample: mi.Float): 17 | return -dr.log(1 - sample + 1e-8) 18 | 19 | 20 | def normal_pdf(x: mi.Float, mu: mi.Float, sigma: mi.Float): 21 | return ( 22 | (dr.inv_sqrt_two_pi) 23 | * (1.0 / sigma) 24 | * dr.exp(-dr.square((x - mu)) / (2 * dr.square(sigma))) 25 | ) 26 | 27 | 28 | class MetropolisSampler(mi.Sampler): 29 | """ 30 | Implementation of the Metropolis sampler, that works with python loops, but not with mitsuba loops. 
31 | """ 32 | 33 | def __init__(self, sigma=0.1, p_large=0.1) -> None: 34 | super().__init__(mi.Properties()) 35 | self.sigma = dr.opaque(mi.Float, sigma) 36 | self.p_large = dr.opaque(mi.Float, p_large) 37 | 38 | # State variables 39 | self.independent: mi.Sampler = mi.load_dict({"type": "independent"}) 40 | self.proposed = [] 41 | self.i = 0 42 | self.f = mi.Float(0) 43 | self.samples = None 44 | 45 | self.wavefront_size = 0 46 | 47 | def seed(self, seed=0, wavefront_size=1024): 48 | self.independent.seed(seed, wavefront_size) 49 | self.wavefront_size = wavefront_size 50 | 51 | def initial_1d(self, active: mi.Bool) -> mi.Float: 52 | return self.independent.next_1d(active) 53 | 54 | def next_1d(self, active: mi.Bool = True) -> mi.Float: 55 | if len(self.proposed) > self.i: 56 | result = self.proposed[self.i] 57 | else: 58 | result = self.initial_1d(active) 59 | self.proposed.append(result) 60 | self.i += 1 61 | return result 62 | 63 | def next_2d(self, active: mi.Bool = True) -> mi.Point2f: 64 | return mi.Point2f(self.next_1d(active), self.next_1d(active)) 65 | 66 | def sample_proposal(self, x: mi.Float) -> mi.Float: 67 | y = x + mi.warp.square_to_std_normal(self.independent.next_2d()).x * self.sigma 68 | y = y - dr.floor(y) 69 | 70 | large = self.independent.next_1d() < self.p_large 71 | y = dr.select(large, self.independent.next_1d(), y) 72 | 73 | return y 74 | 75 | def pdf_proposal(self, x: mi.Float, y: mi.Float) -> mi.Float: 76 | return normal_pdf(x, y, self.sigma) 77 | 78 | def advance(self, f: mi.Float): 79 | acceptance = dr.minimum(1, f / self.f) 80 | accept = self.independent.next_1d() <= acceptance 81 | 82 | if self.samples: 83 | self.samples = [ 84 | dr.select(accept, proposed, sample) 85 | for sample, proposed in zip(self.samples, self.proposed) 86 | ] 87 | else: 88 | self.samples = [mi.Float(sample) for sample in self.proposed] 89 | 90 | # Update with new proposal 91 | self.proposed = [self.sample_proposal(x) for x in self.samples] 92 | self.f = mi.Float(f) 93 | self.i = 0 94 | 95 | def schedule_state(self): 96 | self.independent.schedule_state() 97 | dr.schedule(self.samples) 98 | dr.schedule(self.proposed) 99 | dr.schedule(self.f) 100 | 101 | def set_sample_count(self, spp: int): 102 | self.spp = spp 103 | 104 | def sample_count(self) -> int: 105 | return self.spp 106 | 107 | def set_samples_per_wavefront(self, spp_per_pass: int): 108 | self.spp_per_pass = spp_per_pass 109 | 110 | 111 | def gaussian(x, mu, sig): 112 | return ( 113 | 1.0 / (np.sqrt(2.0 * np.pi) * sig) * np.exp(-np.power((x - mu) / sig, 2.0) / 2) 114 | ) 115 | 116 | 117 | std = 0.1 118 | mean = 0.5 119 | 120 | 121 | def target(x): 122 | 123 | def f(x): 124 | return gaussian(x, 0.2, 0.01) + gaussian(x, 0.7, 0.1) 125 | 126 | between_0_1 = np.logical_and(0.0 < x, x < 1.0) 127 | outside_05_06 = np.logical_or(x < 0.5, 0.6 < x) 128 | 129 | range = np.logical_and(between_0_1, outside_05_06) 130 | 131 | target = np.select([range], [f(x)], 0) 132 | 133 | return target 134 | 135 | 136 | def Dkl(p, q): 137 | return np.nanmean(np.where(p > 0, p * np.log(p / q), 0)) 138 | 139 | 140 | def KL(P: np.ndarray, Q: np.ndarray) -> float: 141 | epsilon = 0.00001 142 | 143 | P = P + epsilon 144 | Q = Q + epsilon 145 | 146 | divergence = np.mean(P * np.log(P / Q)) 147 | return divergence 148 | 149 | 150 | @dataclass(init=True) 151 | class Result: 152 | it: list | np.ndarray = field(default_factory=list) 153 | kl: list | np.ndarray = field(default_factory=list) 154 | mean: list | np.ndarray = field(default_factory=list) 155 | var: 
list | np.ndarray = field(default_factory=list) 156 | std: list | np.ndarray = field(default_factory=list) 157 | 158 | def numpy(self) -> "Result": 159 | result = Result() 160 | result.it = np.array(self.it) 161 | result.kl = np.array(self.kl) 162 | result.mean = np.array(self.mean) 163 | result.var = np.array(self.var) 164 | result.std = np.array(self.std) 165 | return result 166 | 167 | 168 | def test(name: str, iterations, batch_size, log_interval, bins, sampler) -> Result: 169 | x_ref = np.linspace(0, 1, 1000) 170 | y_ref = target(x_ref) 171 | y_ref = y_ref / np.mean(y_ref) 172 | 173 | result = Result() 174 | 175 | sampler.seed(0, batch_size) 176 | iterator = tqdm.tqdm(range(iterations)) 177 | for i in iterator: 178 | dr.kernel_history_clear() 179 | 180 | x = sampler.next_1d().numpy() 181 | 182 | f = target(x) 183 | 184 | sampler.advance(mi.Float(f)) 185 | sampler.schedule_state() 186 | dr.eval() 187 | 188 | if i % log_interval == 0: 189 | 190 | mean = np.mean(x) 191 | var = np.mean((x - mean) ** 2) 192 | std = np.sqrt(var) 193 | 194 | plt.clf() 195 | plt.hist(x, bins=bins, density=True, label="Metropolis Histogram") 196 | plt.plot(x_ref, y_ref, label="Ref") 197 | plt.vlines( 198 | [mean + std, mean - std], 199 | 0, 200 | 1, 201 | transform=plt.gca().get_xaxis_transform(), 202 | colors="r", 203 | label="std deviation", 204 | ) 205 | # kde = stats.gaussian_kde(x) 206 | # plt.plot(x_ref, kde(x_ref), label="Metropolis KDE") 207 | plt.legend() 208 | os.makedirs(f"out/{name}", exist_ok=True) 209 | plt.savefig(f"out/{name}/{i}.svg") 210 | 211 | target_pdf = target(np.linspace(0, 1, bins)) 212 | target_pdf = target_pdf / np.mean(target_pdf) 213 | sample_pdf = np.histogram(x, bins, density=True)[0] 214 | dkl = KL(sample_pdf, target_pdf) 215 | iterator.set_postfix({"dkl": dkl}) 216 | 217 | result.it.append(i) 218 | result.kl.append(dkl) 219 | result.mean.append(mean) 220 | result.var.append(mean) 221 | result.std.append(std) 222 | 223 | return result 224 | 225 | 226 | if __name__ == "__main__": 227 | iterations = 1_000 228 | batch_size = 16384 229 | bins = 128 230 | dr.set_flag(dr.JitFlag.KernelHistory, True) 231 | 232 | sampler = MetropolisSampler(0.01, 0.01) 233 | metropolis = test("metropolis", iterations, batch_size, 10, bins, sampler) 234 | metropolis = metropolis.numpy() 235 | 236 | # print(f"{metropolis=}") 237 | # print(f"{jump_restore=}") 238 | 239 | plt.clf() 240 | plt.plot(metropolis.it, metropolis.kl, label="Metropolis") 241 | plt.xlabel("iteration") 242 | plt.ylabel("$D_{KL}$") 243 | plt.yscale("log") 244 | plt.legend() 245 | plt.savefig("out/dkl.svg") 246 | 247 | plt.clf() 248 | plt.plot(metropolis.it, metropolis.mean + metropolis.std, color="C0") 249 | plt.plot(metropolis.it, metropolis.mean - metropolis.std, color="C0") 250 | plt.plot([0, metropolis.it[-1]], [mean + std, mean + std], color="C1") 251 | plt.plot([0, metropolis.it[-1]], [mean - std, mean - std], color="C1") 252 | plt.xlabel("iteration") 253 | plt.ylabel("Standard Deviation") 254 | plt.legend( 255 | [ 256 | Line2D([0], [0], color="C0"), 257 | Line2D([0], [0], color="C1"), 258 | ], 259 | [ 260 | "Metropolis", 261 | "Target", 262 | ], 263 | ) 264 | plt.savefig("out/std.svg") 265 | -------------------------------------------------------------------------------- /mlt.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import mitsuba as mi 4 | import drjit as dr 5 | import matplotlib.pyplot as plt 6 | 7 | mi.set_variant("cuda_ad_rgb") 8 | from pathrecord import Path, 
drjitstruct # noqa 9 | 10 | 11 | @drjitstruct 12 | class PVert: 13 | wo: mi.Vector3f 14 | f: mi.Spectrum 15 | 16 | def __init__(self, wo=mi.Vector3f(), f=mi.Spectrum()): 17 | self.wo = wo 18 | self.f = f 19 | 20 | 21 | class MltSampler: 22 | pass 23 | 24 | 25 | class Simple(mi.SamplingIntegrator): 26 | def __init__(self, props=mi.Properties()): 27 | super().__init__(props) 28 | self.max_depth = props.get("max_depth") 29 | self.rr_depth = props.get("rr_depth") 30 | 31 | def render(self: mi.Integrator, scene: mi.Scene, sensor: mi.Sensor, seed: int = 0, spp: int = 0, develop: bool = True, evaluate: bool = True) -> dr.scalar.TensorXf: 32 | film = sensor.film() 33 | sampler = sensor.sampler() 34 | 35 | spp = sampler.sample_count() 36 | self.spp = spp 37 | 38 | film_size = film.crop_size() 39 | n_chanels = film.prepare(self.aov_names()) 40 | self.n_chanels = n_chanels 41 | 42 | wavefront_size = film_size.x * film_size.y 43 | 44 | # sampler.set_samples_per_wavefront() 45 | sampler.seed(0, wavefront_size) 46 | 47 | block: mi.ImageBlock = film.create_block() 48 | block.set_offset(film.crop_offset()) 49 | 50 | idx = dr.arange(mi.UInt32, wavefront_size) 51 | 52 | pos = mi.Vector2f() 53 | pos.y = idx // film_size[0] 54 | pos.x = idx % film_size[0] 55 | 56 | pos += film.crop_offset() 57 | 58 | aovs = [mi.Float(0)] * n_chanels 59 | path = Path(wavefront_size, self.max_depth, dtype=PVert) 60 | 61 | print(spp) 62 | 63 | for i in range(spp): 64 | self.render_sample(scene, sensor, sampler, 65 | block, aovs, pos, path, idx) 66 | # Trigger kernel launch 67 | sampler.advance() 68 | sampler.schedule_state() 69 | dr.eval(path.vertices) 70 | dr.eval(block.tensor()) 71 | 72 | film.put_block(block) 73 | 74 | result = film.develop() 75 | dr.schedule(result) 76 | dr.eval() 77 | return result 78 | 79 | def render_sample(self, scene: mi.Scene, sensor: mi.Sensor, sampler: mi.Sampler, block: mi.ImageBlock, aovs, pos: mi.Vector2f, path: Path, idx: mi.UInt32, active=True): 80 | film = sensor.film() 81 | scale = 1. / mi.Vector2f(film.crop_size()) 82 | offset = - mi.Vector2f(film.crop_offset()) 83 | sample_pos = pos + offset + sampler.next_2d() 84 | time = 1. 85 | s1, s3 = sampler.next_1d(), sampler.next_2d() 86 | ray, ray_weight = sensor.sample_ray(time, s1, sample_pos * scale, s3) 87 | medium = sensor.medium() 88 | 89 | active = mi.Bool(True) 90 | (spec, mask, aov) = self.sample( 91 | scene, sampler, ray, path, idx, medium, active) 92 | 93 | spec = ray_weight * spec 94 | 95 | rgb = mi.Color3f() 96 | 97 | if mi.is_spectral: 98 | rgb = mi.spectrum_list_to_srgb(spec, ray.wavelengths, active) 99 | elif mi.is_monochromatic: 100 | rgb = spec.x 101 | else: 102 | rgb = spec 103 | 104 | # Debug: 105 | aovs[0] = rgb.x 106 | aovs[1] = rgb.y 107 | aovs[2] = rgb.z 108 | aovs[3] = 1. 109 | 110 | block.put(sample_pos, aovs) 111 | 112 | def sample(self, scene: mi.Scene, sampler: mi.Sampler, ray_: mi.RayDifferential3f, path: Path, idx: mi.UInt32, medium: mi.Medium = None, active: mi.Bool = True): 113 | bsdf_ctx = mi.BSDFContext() 114 | 115 | ray = mi.Ray3f(ray_) 116 | depth = mi.UInt32(0) 117 | f = mi.Spectrum(1.) 118 | L = mi.Spectrum(0.) 
119 | active = mi.Bool(active) 120 | 121 | prev_si = dr.zeros(mi.SurfaceInteraction3f) 122 | 123 | loop = mi.Loop(name="Path Tracing", state=lambda: ( 124 | sampler, ray, depth, f, L, active, prev_si)) 125 | 126 | loop.set_max_iterations(self.max_depth) 127 | 128 | while loop(active): 129 | 130 | pvert: PVert = path[depth] 131 | wo_new = dr.erfinv(mi.warp.square_to_uniform_sphere( 132 | sampler.next_2d()))-pvert.wo 133 | 134 | si: mi.SurfaceInteraction3f = scene.ray_intersect( 135 | ray, ray_flags=mi.RayFlags.All, coherent=dr.eq(depth, 0)) 136 | 137 | bsdf: mi.BSDF = si.bsdf(ray) 138 | 139 | # Direct emission 140 | 141 | ds = mi.DirectionSample3f(scene, si=si, ref=prev_si) 142 | 143 | Le = f * ds.emitter.eval(si) 144 | 145 | active_next = (depth + 1 < self.max_depth) & si.is_valid() 146 | 147 | # BSDF Sampling 148 | bsdf_smaple, bsdf_val = bsdf.sample( 149 | bsdf_ctx, si, sampler.next_1d(), sampler.next_2d(), active_next) 150 | 151 | # Update loop variables 152 | path[depth] = PVert(bsdf_smaple.wo, bsdf_val) 153 | 154 | ray = si.spawn_ray(si.to_world(bsdf_smaple.wo)) 155 | L = (L + Le) 156 | f *= bsdf_val 157 | 158 | prev_si = dr.detach(si, True) 159 | 160 | # Stopping criterion (russian roulettte) 161 | 162 | active_next &= dr.neq(dr.max(f), 0) 163 | 164 | rr_prop = dr.maximum(f.x, dr.maximum(f.y, f.z)) 165 | rr_prop[depth < self.rr_depth] = 1. 166 | f *= dr.rcp(rr_prop) 167 | active_next &= (sampler.next_1d() < rr_prop) 168 | 169 | active = active_next 170 | depth += 1 171 | return (L, dr.neq(depth, 0), []) 172 | 173 | 174 | mi.register_integrator("integrator", lambda props: Simple(props)) 175 | 176 | scene = mi.cornell_box() 177 | scene['integrator']['type'] = 'integrator' 178 | scene['integrator']['max_depth'] = 16 179 | scene['integrator']['rr_depth'] = 2 180 | scene['sensor']['sampler']['sample_count'] = 16 181 | scene['sensor']['film']['width'] = 1024 182 | scene['sensor']['film']['height'] = 1024 183 | scene = mi.load_dict(scene) 184 | 185 | img = mi.render(scene) 186 | 187 | plt.imshow(img ** (1. / 2.2)) 188 | plt.axis("off") 189 | plt.show() 190 | -------------------------------------------------------------------------------- /patch.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | import matplotlib.pyplot as plt 4 | 5 | """ 6 | Proof of concept implementation of automated monkeypatching of mitsuba vcalls. 
7 | """ 8 | 9 | if __name__ == "__main__": 10 | mi.set_variant("cuda_ad_spectral") 11 | dr.set_flag(dr.JitFlag.KernelHistory, True) 12 | 13 | 14 | def patch(*functions: list[str]): 15 | def patch_decorator(cls): 16 | import mitsuba as mi 17 | 18 | assert len(cls.__bases__) == 1 19 | 20 | base = cls.__bases__[0] 21 | 22 | ptr_class = getattr(mi, f"{base.__name__}Ptr") 23 | 24 | for function in functions: 25 | 26 | if not hasattr(base, function): 27 | 28 | def default_impl(self, *args, **kwargs): 29 | raise RuntimeError("Method not implemented!") 30 | 31 | setattr(base, function, default_impl) 32 | 33 | def _impl(self, *args, **kwargs): 34 | if hasattr(self, "sample_wavelength"): 35 | return self.sample_wavelength(*args, **kwargs) 36 | 37 | def dispatch_impl(self, *args, **kwargs): 38 | return dr.dispatch(self, _impl, *args, **kwargs) 39 | 40 | setattr(ptr_class, function, dispatch_impl) 41 | 42 | return cls 43 | 44 | return patch_decorator 45 | 46 | 47 | def sample_wavelength(self, wavelengths: mi.Spectrum) -> mi.Spectrum: 48 | print(f"default {self.id()}") 49 | return wavelengths 50 | 51 | 52 | # Add default implementation 53 | mi.BSDF.sample_wavelength = sample_wavelength 54 | 55 | 56 | @patch("sample_wavelength") 57 | class Flouresent(mi.BSDF): 58 | def __init__(self, props: mi.Properties) -> None: 59 | super().__init__(props) 60 | 61 | def sample_wavelength(self, wavelengths: mi.Spectrum) -> mi.Spectrum: 62 | print(f"flouresent {self.id()}") 63 | return wavelengths + 1 64 | 65 | def to_string(self): 66 | return "Flouresent[]" 67 | 68 | 69 | mi.register_bsdf("flouresent", lambda props: Flouresent(props)) 70 | 71 | if __name__ == "__main__": 72 | scene = mi.cornell_box() 73 | scene["white"] = {"type": "flouresent"} 74 | scene: mi.Scene = mi.load_dict(scene) 75 | 76 | shape = scene.shapes_dr() 77 | bsdf = shape.bsdf() 78 | 79 | wavelengths = mi.Spectrum(0) 80 | 81 | # dr.dispatch(bsdf, sample_wavelength, wavelengths) 82 | 83 | result = bsdf.sample_wavelength(wavelengths) 84 | print(f"{result=}") 85 | for shape in scene.shapes(): 86 | print(f"{shape.bsdf().id()=}") 87 | -------------------------------------------------------------------------------- /path-mis.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | import matplotlib.pyplot as plt 4 | import matplotlib.pyplot as plt 5 | 6 | mi.set_variant("llvm_ad_rgb") 7 | 8 | 9 | def mis_weight(pdf_a: mi.Float, pdf_b: mi.Float) -> mi.Float: 10 | """ 11 | Compute the Multiple Importance Sampling (MIS) weight given the densities 12 | of two sampling strategies according to the power heuristic. 
13 | """ 14 | a2 = dr.sqr(pdf_a) 15 | return dr.detach(dr.select(pdf_a > 0, a2 / dr.fma(pdf_b, pdf_b, a2), 0), True) 16 | 17 | 18 | class PathIntegrator(mi.SamplingIntegrator): 19 | def __init__(self, props: mi.Properties): 20 | super().__init__(props) 21 | self.max_depth: int = props.get("max_depth", 8) 22 | self.rr_depth: int = props.get("rr_depth", 2) 23 | 24 | def sample( 25 | self, 26 | scene: mi.Scene, 27 | sampler: mi.Sampler, 28 | ray: mi.RayDifferential3f, 29 | medium: mi.Medium = None, 30 | active: bool = True, 31 | ) -> tuple[mi.Color3f, bool, list[float]]: 32 | # --------------------- Configure loop state ---------------------- 33 | 34 | ray = mi.Ray3f(ray) 35 | active = mi.Bool(active) 36 | throughput = mi.Spectrum(1.0) 37 | result = mi.Spectrum(0.0) 38 | eta = mi.Float(1.0) 39 | depth = mi.UInt32(0) 40 | 41 | valid_ray = mi.Bool(scene.environment() is not None) 42 | 43 | # Variables caching information from the previous bounce 44 | prev_si: mi.Interaction3f = dr.zeros(mi.Interaction3f) 45 | prev_bsdf_pdf = mi.Float(1.0) 46 | prev_bsdf_delta = mi.Bool(True) 47 | bsdf_ctx = mi.BSDFContext() 48 | 49 | loop = mi.Loop( 50 | "Path Tracer", 51 | state=lambda: ( 52 | sampler, 53 | ray, 54 | throughput, 55 | result, 56 | eta, 57 | depth, 58 | valid_ray, 59 | prev_si, 60 | prev_bsdf_pdf, 61 | prev_bsdf_delta, 62 | active, 63 | ), 64 | ) 65 | 66 | loop.set_max_iterations(self.max_depth) 67 | 68 | while loop(active): 69 | si: mi.SurfaceInteraction3f = scene.ray_intersect( 70 | ray, mi.RayFlags.All, dr.eq(depth, 0) 71 | ) 72 | 73 | # ---------------------- Direct emission ---------------------- 74 | 75 | ds = mi.DirectionSample3f(scene, si, prev_si) 76 | em_pdf = mi.Float(0.0) 77 | 78 | em_pdf = scene.pdf_emitter_direction(prev_si, ds, ~prev_bsdf_delta) 79 | 80 | mis_bsdf = mis_weight(prev_bsdf_pdf, em_pdf) 81 | 82 | result = dr.fma( 83 | throughput, 84 | ds.emitter.eval(si, prev_bsdf_pdf > 0.0) * mis_bsdf, 85 | result, 86 | ) 87 | 88 | active_next = ((depth + 1) < self.max_depth) & si.is_valid() 89 | 90 | bsdf: mi.BSDF = si.bsdf(ray) 91 | 92 | # ---------------------- Emitter sampling ---------------------- 93 | 94 | active_em = active_next & mi.has_flag(bsdf.flags(), mi.BSDFFlags.Smooth) 95 | 96 | ds, em_weight = scene.sample_emitter_direction( 97 | si, sampler.next_2d(), True, active_em 98 | ) 99 | 100 | wo = si.to_local(ds.d) 101 | 102 | # ------ Evaluate BSDF * cos(theta) and sample direction ------- 103 | 104 | sample1 = sampler.next_1d() 105 | sample2 = sampler.next_2d() 106 | 107 | bsdf_val, bsdf_pdf, bsdf_sample, bsdf_weight = bsdf.eval_pdf_sample( 108 | bsdf_ctx, si, wo, sample1, sample2 109 | ) 110 | 111 | # --------------- Emitter sampling contribution ---------------- 112 | 113 | bsdf_val = si.to_world_mueller(bsdf_val, -wo, si.wi) 114 | 115 | mi_em = dr.select(ds.delta, 1.0, mis_weight(ds.pdf, bsdf_pdf)) 116 | 117 | result[active_em] = dr.fma(throughput, bsdf_val * em_weight * mi_em, result) 118 | 119 | # ---------------------- BSDF sampling ---------------------- 120 | 121 | bsdf_weight = si.to_world_mueller(bsdf_weight, -bsdf_sample.wo, si.wi) 122 | 123 | ray = si.spawn_ray(si.to_world(bsdf_sample.wo)) 124 | 125 | # ------ Update loop variables based on current interaction ------ 126 | 127 | throughput *= bsdf_weight 128 | eta *= bsdf_sample.eta 129 | valid_ray |= ( 130 | active 131 | & si.is_valid() 132 | & ~mi.has_flag(bsdf_sample.sampled_type, mi.BSDFFlags.Null) 133 | ) 134 | 135 | prev_si = mi.Interaction3f(si) 136 | prev_bsdf_pdf = bsdf_sample.pdf 137 | 
prev_bsdf_delta = mi.has_flag(bsdf_sample.sampled_type, mi.BSDFFlags.Delta) 138 | 139 | # -------------------- Stopping criterion --------------------- 140 | 141 | depth[si.is_valid()] += 1 142 | 143 | throughput_max = dr.max(throughput) 144 | 145 | rr_prop = dr.minimum(throughput_max * dr.sqr(eta), 0.95) 146 | rr_active = depth >= self.rr_depth 147 | rr_continue = sampler.next_1d() < rr_prop 148 | 149 | throughput[rr_active] *= dr.rcp(rr_prop) 150 | 151 | active = ( 152 | active_next & (~rr_active | rr_continue) & (dr.neq(throughput_max, 0.0)) 153 | ) 154 | 155 | return dr.select(valid_ray, result, 0.0), valid_ray, [] 156 | 157 | 158 | mi.register_integrator("path_test", lambda props: PathIntegrator(props)) 159 | 160 | if __name__ == "__main__": 161 | with dr.suspend_grad(): 162 | scene = mi.load_dict(mi.cornell_box()) 163 | 164 | integrator = mi.load_dict( 165 | { 166 | "type": "path_test", 167 | } 168 | ) 169 | 170 | img = mi.render(scene, integrator=integrator, spp=128) 171 | 172 | plt.imshow(mi.util.convert_to_bitmap(img)) 173 | plt.show() 174 | -------------------------------------------------------------------------------- /path-record.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | import matplotlib.pyplot as plt 4 | from dataclasses import dataclass 5 | 6 | mi.set_variant("cuda_ad_rgb") 7 | 8 | 9 | def drjitstruct(cls): 10 | annotations = cls.__dict__.get("__annotations__", {}) 11 | drjit_struct = {} 12 | for name, type in annotations.items(): 13 | drjit_struct[name] = type 14 | cls.DRJIT_STRUCT = drjit_struct 15 | return cls 16 | 17 | 18 | @drjitstruct 19 | class PVert: 20 | f: mi.Spectrum 21 | L: mi.Spectrum 22 | p: mi.Point3f 23 | 24 | def __init__(self, f=mi.Spectrum(), L=mi.Spectrum(), p=mi.Point3f()): 25 | self.f = f 26 | self.L = L 27 | self.p = p 28 | 29 | 30 | class Path: 31 | idx: mi.UInt32 32 | vertices: PVert 33 | 34 | def __init__(self, n_rays: int, max_depth: int): 35 | self.n_rays = n_rays 36 | self.max_depth = max_depth 37 | self.idx = dr.arange(mi.UInt32, n_rays) 38 | 39 | self.vertices = dr.zeros(PVert, shape=(self.max_depth * self.n_rays)) 40 | 41 | def __setitem__(self, depth: mi.UInt32, value: PVert): 42 | dr.scatter(self.vertices, value, depth * self.n_rays + self.idx) 43 | 44 | def __getitem__(self, depth: mi.UInt32) -> PVert: 45 | return dr.gather(PVert, self.vertices, depth * self.n_rays + self.idx) 46 | 47 | 48 | class Simple(mi.SamplingIntegrator): 49 | def __init__(self, props=mi.Properties()): 50 | super().__init__(props) 51 | self.max_depth = props.get("max_depth") 52 | self.rr_depth = props.get("rr_depth") 53 | 54 | # record a path 55 | def record( 56 | self, scene: mi.Scene, sampler: mi.Sampler, ray: mi.Ray3f, active 57 | ) -> Path: 58 | ray = mi.Ray3f(ray) 59 | bsdf_ctx = mi.BSDFContext() 60 | 61 | prev_si = dr.zeros(mi.SurfaceInteraction3f) 62 | 63 | n_rays = dr.shape(ray.o)[1] 64 | 65 | path = Path(n_rays, self.max_depth) 66 | depth = mi.UInt32(0) 67 | active = mi.Bool(active) 68 | 69 | loop = mi.Loop( 70 | name="Path Tracing", state=lambda: (sampler, ray, depth, active, prev_si) 71 | ) 72 | 73 | loop.set_max_iterations(self.max_depth) 74 | 75 | while loop(active): 76 | si: mi.SurfaceInteraction3f = scene.ray_intersect( 77 | ray, ray_flags=mi.RayFlags.All, coherent=dr.eq(depth, 0) 78 | ) 79 | 80 | bsdf: mi.BSDF = si.bsdf(ray) 81 | 82 | ds = mi.DirectionSample3f(scene, si=si, ref=prev_si) 83 | 84 | L = ds.emitter.eval(si) 85 | 86 | active_next = (depth + 1 < 
self.max_depth) & si.is_valid() 87 | 88 | bsdf_sample, f = bsdf.sample( 89 | bsdf_ctx, si, sampler.next_1d(), sampler.next_2d(), active_next 90 | ) 91 | 92 | ray = si.spawn_ray(si.to_world(bsdf_sample.wo)) 93 | 94 | path[depth] = PVert(f, L, si.p) 95 | 96 | prev_si = dr.detach(si, True) 97 | 98 | active_next &= dr.neq(dr.max(f), 0) 99 | 100 | active = active_next 101 | depth += 1 102 | 103 | return path 104 | 105 | def sample( 106 | self, 107 | scene: mi.Scene, 108 | sampler: mi.Sampler, 109 | ray: mi.RayDifferential3f, 110 | medium: mi.Medium = None, 111 | active: mi.Bool = True, 112 | ): 113 | index, emitter_pdf, emitter_sample = scene.sample_emitter( 114 | sampler.next_1d(), active 115 | ) 116 | 117 | ld = mi.warp.square_to_uniform_sphere(sampler.next_2d()) 118 | 119 | vpath = self.record(scene, sampler, ray, True) 120 | vert = vpath[mi.UInt32(0)] 121 | return (vert.f, mi.Bool(True), []) 122 | 123 | 124 | mi.register_integrator("integrator", lambda props: Simple(props)) 125 | 126 | scene = mi.cornell_box() 127 | scene["integrator"]["type"] = "integrator" 128 | scene["integrator"]["max_depth"] = 16 129 | scene["integrator"]["rr_depth"] = 2 130 | scene["sensor"]["sampler"]["sample_count"] = 1 131 | scene["sensor"]["film"]["width"] = 1024 132 | scene["sensor"]["film"]["height"] = 1024 133 | scene = mi.load_dict(scene) 134 | 135 | img = mi.render(scene) 136 | 137 | plt.imshow(img ** (1.0 / 2.2)) 138 | plt.axis("off") 139 | plt.show() 140 | -------------------------------------------------------------------------------- /path.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | import matplotlib.pyplot as plt 4 | 5 | if __name__ == "__main__": 6 | mi.set_variant("cuda_ad_rgb") 7 | dr.set_flag(dr.JitFlag.KernelHistory, True) 8 | 9 | 10 | def mis_weight(pdf_a: mi.Float, pdf_b: mi.Float) -> mi.Float: 11 | """ 12 | Compute the Multiple Importance Sampling (MIS) weight given the densities 13 | of two sampling strategies according to the power heuristic. 
14 | """ 15 | a2 = dr.square(pdf_a) 16 | b2 = dr.square(pdf_b) 17 | w = a2 / (a2 + b2) 18 | return dr.detach(dr.select(dr.isfinite(w), w, 0)) 19 | 20 | 21 | class Path(mi.SamplingIntegrator): 22 | def __init__(self, props: mi.Properties) -> None: 23 | self.max_depth = props.get("max_depth", def_value=16) 24 | self.rr_depth = props.get("rr_depth", def_value=4) 25 | super().__init__(props) 26 | 27 | # def render_sample( 28 | # self, 29 | # scene: mi.Scene, 30 | # sensor: mi.Sensor, 31 | # sampler: mi.Sampler, 32 | # block: mi.ImageBlock, 33 | # aovs: list[mi.Float32], 34 | # pos: mi.Vector2f, 35 | # diff_scale_factor: mi.ScalarFloat32, 36 | # active: mi.Bool = True, 37 | # ): 38 | # film = sensor.film() 39 | # has_alpha = mi.has_flag(film.flags(), mi.FilmFlags.Alpha) 40 | # box_filter = film.rfilter().is_box_filter() 41 | # 42 | # scale = 1.0 / mi.ScalarVector2f(film.crop_size()) 43 | # offset = -mi.ScalarVector2f(film.crop_offset()) * scale 44 | # 45 | # sample_pos = pos + sampler.next_2d(active) 46 | # adjusted_pos = dr.fma(sample_pos, scale, offset) 47 | # 48 | # apperature_sample = mi.Point2f(0.5) 49 | # if sensor.needs_aperture_sample(): 50 | # apperature_sample = sampler.next_2d(active) 51 | # 52 | # time = sensor.shutter_open() 53 | # if sensor.shutter_open_time() > 0.0: 54 | # time += sampler.next_1d(active) * sensor.shutter_open_time() 55 | # 56 | # wavelength_sample = 0.0 57 | # if mi.is_spectral: 58 | # wavelength_sample = sampler.next_1d(active) 59 | # 60 | # ray, ray_weight = sensor.sample_ray_differential( 61 | # time, wavelength_sample, adjusted_pos, apperature_sample 62 | # ) 63 | # 64 | # if ray.has_differentials: 65 | # ray.scale_differential(diff_scale_factor) 66 | # 67 | # medium = sensor.get_medium() 68 | # 69 | # spec, valid, _ = self.sample(scene, sampler, ray, medium, active) 70 | # 71 | # spec_u = mi.unpolarized_spectrum(ray_weight * spec) 72 | # 73 | # if mi.has_flag(film.flags(), mi.FilmFlags.Special): 74 | # film.prepare_sample( 75 | # spec_u, 76 | # ray.wavelengths, 77 | # aovs, 78 | # 1.0, 79 | # dr.select(valid, mi.Float32(1.0), mi.Float32(0.0)), 80 | # valid, 81 | # ) 82 | # else: 83 | # rgb = mi.Color3f() 84 | # if mi.is_spectral: 85 | # rgb = mi.spectrum_list_to_srgb(spec_u, ray.wavelengths, active) 86 | # elif mi.is_monochromatic: 87 | # rgb = spec_u.x 88 | # else: 89 | # rgb = spec_u 90 | # 91 | # aovs[0] = rgb.x 92 | # aovs[1] = rgb.y 93 | # aovs[2] = rgb.z 94 | # 95 | # if has_alpha: 96 | # aovs[3] = dr.select(valid, mi.Float32(1.0), mi.Float32(0.0)) 97 | # aovs[4] = 1.0 98 | # else: 99 | # aovs[3] = 1.0 100 | # 101 | # block.put(pos if box_filter else sample_pos, aovs, active) 102 | # 103 | # def render( 104 | # self, 105 | # scene: mi.Scene, 106 | # sensor: mi.Sensor, 107 | # seed: int, 108 | # spp: int, 109 | # develop: bool, 110 | # evaluate: bool, 111 | # ) -> mi.TensorXf: 112 | # m_stop = False 113 | # m_samples_per_pass = -1 114 | # 115 | # film = sensor.film() 116 | # film_size = film.crop_size() 117 | # if film.sample_border(): 118 | # film_size += 2 * film.rfilter().border_size() 119 | # 120 | # sampler = sensor.sampler() 121 | # 122 | # if spp > 0: 123 | # sampler.set_sample_count(spp) 124 | # spp = sampler.sample_count() 125 | # 126 | # spp_per_pass = spp if m_samples_per_pass == -1 else min(m_samples_per_pass, spp) 127 | # 128 | # if spp % spp_per_pass != 0: 129 | # raise Exception( 130 | # "sample_count (%d) must be a multiple of spp_per_pass (%d).", 131 | # spp, 132 | # spp_per_pass, 133 | # ) 134 | # 135 | # n_passes = spp / spp_per_pass 
136 | # 137 | # n_channels = film.prepare(self.aov_names()) 138 | # 139 | # result = mi.TensorXf() 140 | # 141 | # if dr.is_jit_v(mi.Float): 142 | # if n_passes > 1 and not evaluate: 143 | # evaluate = True 144 | # 145 | # wavefront_size = film_size.x * film_size.y * spp_per_pass 146 | # 147 | # sampler.set_samples_per_wavefront(spp_per_pass) 148 | # 149 | # sampler.seed(seed, int(wavefront_size)) 150 | # 151 | # block: mi.ImageBlock = film.create_block() 152 | # block.set_offset(film.crop_offset()) 153 | # 154 | # block.set_coalesce(block.coalesce() & spp_per_pass >= 4) 155 | # 156 | # idx = dr.arange(mi.UInt32, wavefront_size) 157 | # idx //= spp_per_pass 158 | # 159 | # pos = mi.Vector2f() 160 | # pos.y = idx // film_size.x 161 | # pos.x = idx % film_size.x 162 | # 163 | # if film.sample_border(): 164 | # pos -= film.rfilter().border_size() 165 | # 166 | # pos += film.crop_offset() 167 | # 168 | # diff_scale_factor = dr.rsqrt(spp) 169 | # 170 | # aovs = [mi.Float32] * n_channels 171 | # 172 | # for i in range(int(n_passes)): 173 | # self.render_sample( 174 | # scene, sensor, sampler, block, aovs, pos, diff_scale_factor 175 | # ) 176 | # if n_passes > 1: 177 | # sampler.advance() 178 | # sampler.schedule_state() 179 | # dr.eval(block.tensor()) 180 | # 181 | # film.put_block(block) 182 | # 183 | # if develop: 184 | # result = film.develop() 185 | # dr.schedule(result) 186 | # else: 187 | # film.schedule_storage() 188 | # 189 | # if evaluate: 190 | # dr.eval() 191 | # 192 | # return result 193 | 194 | @dr.syntax 195 | def sample( 196 | self: mi.SamplingIntegrator, 197 | scene: mi.Scene, 198 | sampler: mi.Sampler, 199 | ray: mi.RayDifferential3f, 200 | medium: mi.Medium = None, 201 | active: bool = True, 202 | ) -> tuple[mi.Color3f, mi.Bool]: 203 | """ 204 | Contrary to the Mitsbua path tracer implementation, we start with a 205 | surface interaction instead of a ray. This should reduce the loop state 206 | and make it easier to comprehend the path tracing algorithm. 207 | 208 | We start with the first surface interaction si0. At every iteration of 209 | the loop, we try to estimate the outgoing radiance of the given surface 210 | interaction. The estimate of the next si and the emitter sample are 211 | combined with MIS. 
212 | 213 | ```python 214 | # get first si 215 | L += Le(si) 216 | 217 | loop: 218 | # sample emitter sample `e` 219 | L += β * mis * f(si -> e) * Le(e) 220 | # sample next ``si`` 221 | L += β * mis * f(si -> si2) * Le(si2) 222 | 223 | β *= f(si -> si2) 224 | ``` 225 | """ 226 | # --------------------- Configure loop state ---------------------- 227 | L = mi.Spectrum(0.0) 228 | f = mi.Spectrum(1.0) 229 | eta = mi.Float(1.0) 230 | depth = mi.UInt32(1) 231 | ray = mi.Ray3f(ray) 232 | 233 | bsdf_ctx = mi.BSDFContext() 234 | active = mi.Bool(active) 235 | active &= depth < max_depth 236 | 237 | # ----------------------- Primary emission ------------------------ 238 | si: mi.SurfaceInteraction3f = scene.ray_intersect(ray, active) 239 | L += si.emitter(scene, active).eval(si, active) 240 | 241 | while active: 242 | bsdf = si.bsdf(ray) 243 | # ---------------------- Emitter sampling ---------------------- 244 | 245 | active_em = active & mi.has_flag(bsdf.flags(), mi.BSDFFlags.Smooth) 246 | 247 | ds, em_weight = scene.sample_emitter_direction( 248 | si, sampler.next_2d(), True, active_em 249 | ) 250 | active_em &= ds.pdf != 0.0 251 | 252 | wo = si.to_local(ds.d) 253 | 254 | bsdf_val, bsdf_pdf, bsdf_sample, bsdf_weight = bsdf.eval_pdf_sample( 255 | bsdf_ctx, si, wo, sampler.next_1d(), sampler.next_2d(), active 256 | ) 257 | 258 | mis_em = dr.select(ds.delta, 1, mis_weight(ds.pdf, bsdf_pdf)) 259 | L[active_em] += f * bsdf_val * em_weight * mis_em 260 | 261 | # -------------- Sample next Surface Interaction -------------- 262 | 263 | f *= bsdf_weight 264 | eta *= bsdf_sample.eta 265 | 266 | # -------------------- Stopping criterion --------------------- 267 | 268 | fmax = dr.max(f) 269 | 270 | rr_prob = dr.minimum(fmax * dr.square(eta), 0.95) 271 | rr_active = depth >= rr_depth 272 | rr_continue = sampler.next_1d() < rr_prob 273 | 274 | f[rr_active] *= dr.rcp(dr.detach(rr_prob)) 275 | 276 | active &= fmax != 0.0 277 | active &= ~rr_active | rr_continue 278 | 279 | # ---------------------- Direct emission ---------------------- 280 | ray = si.spawn_ray(si.to_world(bsdf_sample.wo)) 281 | si2: mi.SurfaceInteraction3f = scene.ray_intersect(ray, active) 282 | 283 | bsdf_delta: mi.Bool = mi.has_flag( 284 | bsdf_sample.sampled_type, mi.BSDFFlags.Delta 285 | ) 286 | 287 | ds = mi.DirectionSample3f(scene, si=si2, ref=si) 288 | em_pdf = scene.pdf_emitter_direction(si, ds, ~bsdf_delta) 289 | 290 | mis_bsdf = mis_weight(bsdf_sample.pdf, em_pdf) 291 | 292 | L[active] += f * ds.emitter.eval(si2, bsdf_sample.pdf > 0.0) * mis_bsdf 293 | 294 | si = dr.detach(si2, True) 295 | 296 | # ----------------------- Depth Update ------------------------ 297 | depth[active] += 1 298 | 299 | active &= depth < max_depth 300 | active &= si.is_valid() 301 | 302 | return L, (depth != 0), [] 303 | 304 | 305 | mi.register_integrator("mypath", lambda props: Path(props)) 306 | 307 | if __name__ == "__main__": 308 | scene = mi.cornell_box() 309 | scene = mi.load_dict(scene) 310 | 311 | # scene = mi.load_file("scenes/rings/scene.xml") 312 | 313 | max_depth = 3 314 | rr_depth = 1 315 | 316 | mypath = mi.load_dict( 317 | { 318 | "type": "mypath", 319 | "max_depth": max_depth, 320 | "rr_depth": rr_depth, 321 | } 322 | ) 323 | 324 | path = mi.load_dict( 325 | { 326 | "type": "path", 327 | "max_depth": max_depth, 328 | "rr_depth": rr_depth, 329 | } 330 | ) 331 | 332 | dr.kernel_history_clear() 333 | res = mi.render(scene, integrator=mypath, spp=1024) 334 | kernels = dr.kernel_history() 335 | optix_kernels = [ 336 | kernel 337 | for kernel in 
kernels 338 | if "uses_optix" in kernel and kernel["uses_optix"] == 1 339 | ] 340 | print(f"My Path: {optix_kernels}") 341 | print("") 342 | 343 | dr.kernel_history_clear() 344 | ref = mi.render(scene, integrator=path, spp=1024) 345 | kernels = dr.kernel_history() 346 | optix_kernels = [ 347 | kernel 348 | for kernel in kernels 349 | if "uses_optix" in kernel and kernel["uses_optix"] == 1 350 | ] 351 | print(f"Default Path: {optix_kernels}") 352 | 353 | mi.util.write_bitmap("out/res.exr", res) 354 | mi.util.write_bitmap("out/ref.exr", ref) 355 | 356 | diff = dr.abs(res - ref) 357 | 358 | mse = dr.mean(dr.square(diff), axis=None) 359 | print(f"{mse=}") 360 | 361 | # fig, ax = plt.subplots(1, 3, figsize=(9, 3)) 362 | 363 | # ax[0].imshow(mi.util.convert_to_bitmap(res)) 364 | # ax[0].set_title("img") 365 | # ax[1].imshow(mi.util.convert_to_bitmap(ref)) 366 | # ax[1].set_title("ref") 367 | # ax[2].imshow(mi.util.convert_to_bitmap(diff)) 368 | # ax[2].set_title("diff") 369 | # plt.show() 370 | -------------------------------------------------------------------------------- /pathrecord.py: -------------------------------------------------------------------------------- 1 | 2 | import mitsuba as mi 3 | from typing import overload 4 | import drjit as dr 5 | 6 | 7 | def drjitstruct(cls): 8 | annotations = cls.__dict__.get('__annotations__', {}) 9 | drjit_struct = {} 10 | for name, type in annotations.items(): 11 | drjit_struct[name] = type 12 | cls.DRJIT_STRUCT = drjit_struct 13 | return cls 14 | 15 | 16 | # Need to record parameters to reconstruct surface intaraction 17 | @drjitstruct 18 | class PVert: 19 | f: mi.Spectrum 20 | L: mi.Spectrum 21 | i: mi.Interaction3f 22 | ps: mi.PositionSample3f 23 | 24 | def __init__(self, f=mi.Spectrum(), L=mi.Spectrum(), i=mi.Interaction3f(), ps=mi.PositionSample3f): 25 | self.f = f 26 | self.L = L 27 | self.i = i 28 | self.ps = ps 29 | 30 | 31 | class Path: 32 | idx: mi.UInt32 33 | 34 | def __init__(self, n_rays: int, max_depth: int, dtype=PVert): 35 | self.n_rays = n_rays 36 | self.max_depth = max_depth 37 | self.idx = dr.arange(mi.UInt32, n_rays) 38 | self.dtype = dtype 39 | 40 | self.vertices = dr.zeros(dtype, shape=(self.max_depth * self.n_rays)) 41 | 42 | def __setitem__(self, depth: mi.UInt32, value): 43 | dr.scatter(self.vertices, value, depth * self.n_rays + self.idx) 44 | 45 | # Return vertex at depth 46 | @overload 47 | def __getitem__(self, depth: mi.UInt32) -> PVert: 48 | ... 49 | 50 | # Return a vertex at (depth, ray_index) 51 | @overload 52 | def __getitem__(self, idx: (mi.UInt32, mi.UInt32)) -> PVert: 53 | ... 
54 | 55 | def __getitem__(self, idx): 56 | if isinstance(idx, mi.UInt32): 57 | return dr.gather(self.dtype, self.vertices, idx * self.n_rays + self.idx) 58 | if isinstance(idx, tuple) and isinstance(idx[0], mi.UInt32) and isinstance(idx[1], mi.UInt32): 59 | return dr.gather(self.dtype, self.vertices, idx[0] * self.n_rays + idx[1]) 60 | -------------------------------------------------------------------------------- /pixi.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | authors = ["Christian Döring "] 3 | channels = ["conda-forge"] 4 | description = "Add a short description here" 5 | name = "neural_interference" 6 | platforms = ["linux-64"] 7 | version = "0.1.0" 8 | 9 | [tasks] 10 | test-mitsuba = { cmd = "echo $PYTHONPATH && python -m pytest", cwd = "build-mitsuba", env = { PYTHONPATH = "python:$PYTHONPATH" }, depends-on = [ 11 | "build-mitsuba", 12 | ] } 13 | debug-mitsuba = { cmd = "echo $PYTHONPATH && gdb --args python -m pytest", cwd = "build-mitsuba", env = { PYTHONPATH = "python:$PYTHONPATH" }, depends-on = [ 14 | "build-mitsuba", 15 | ] } 16 | build-mitsuba = { cwd = "build-mitsuba", cmd = "ninja", depends-on = [ 17 | "configure-mitsuba", 18 | ] } 19 | configure-mitsuba = { cmd = "cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=on -DCMAKE_BUILD_TYPE=RelWithDebInfo -G Ninja -S mitsuba3 -B build-mitsuba" } 20 | clean-mitsuba = { cmd = "rm -r build", cwd = "mitsuba3" } 21 | 22 | [dependencies] 23 | python = "3.12.*" 24 | 25 | # Mitsuba3 build tools 26 | gcc = { version = "13.2.*", channel = "conda-forge" } 27 | gxx = { version = "13.2.*", channel = "conda-forge" } 28 | zlib = "==1.3.1" 29 | libcxx = ">=19.1.1,<20" 30 | libcxx-devel = ">=19.1.1,<20" 31 | libcxxabi = ">=19.1.1,<20" 32 | ninja = ">=1.12.1,<2" 33 | 34 | cuda = "*" 35 | 36 | pip = ">=24.2,<25" 37 | 38 | [system-requirements] 39 | cuda = "12.4" 40 | 41 | [pypi-options] 42 | no-build-isolation = ["tinycudann"] 43 | 44 | [pypi-dependencies] 45 | tensorboardx = "*" 46 | mitsuba = ">=3.6.0, <4" 47 | tinycudann = { git = "https://github.com/NVlabs/tiny-cuda-nn.git", subdirectory = "bindings/torch", rev = "c91138bcd4c6877c8d5e60e483c0581aafc70cce" } 48 | 49 | torch = "*" 50 | torchvision = ">=0.20.1, <0.21" 51 | torchaudio = ">=2.5.1, <3" 52 | 53 | ipython = ">=8.28.0,<9" 54 | matplotlib = ">=3.9.2,<4" 55 | numpy = ">=2.1.2,<3" 56 | omegaconf = ">=2.3.0,<3" 57 | tqdm = ">=4.66.5,<5" 58 | 59 | [activation] 60 | scripts = ["build-mitsuba/setpath.sh"] 61 | # PYTHONPATH = "mitsuba3/build/python:$PYTHONPATH" 62 | 63 | [target.linux-64.activation.env] 64 | # Have to expose /lib64 for libcuda.so (cuda driver) 65 | LIBRARY_PATH = "/usr/lib/x86_64-linux-gnu:/lib64:$LIBRARY_PATH" 66 | CMAKE_PREFIX_PATH = "$CONDA_PREFIX" 67 | LD_LIBRARY_PATH = "$CONDA_PREFIX/lib" 68 | -------------------------------------------------------------------------------- /prefix_sum.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | import math 4 | 5 | if __name__ == "__main__": 6 | mi.set_variant("cuda_ad_rgb") 7 | 8 | 9 | def prefix_sum(x: mi.Float) -> mi.Float: 10 | """ 11 | Implementation of a paralell prefix-sum described in 12 | 13 | W. Daniel Hillis and Guy L. Steele. 1986. Data parallel algorithms. Commun. ACM 29, 12 (Dec. 1986), 1170–1183. 
https://doi.org/10.1145/7902.7903 14 | """ 15 | x = type(x)(x) 16 | 17 | loop_record = dr.flag(dr.JitFlag.LoopRecord) 18 | dr.set_flag(dr.JitFlag.LoopRecord, False) 19 | 20 | n = dr.shape(x)[-1] 21 | i = 0 22 | 23 | loop = mi.Loop("prefix-sum", lambda: ()) 24 | 25 | while loop(i <= math.floor(math.log2(n))): 26 | j = dr.arange(mi.UInt, 2**i, n) 27 | if dr.shape(j)[-1] == 0: 28 | break 29 | res = dr.gather(type(x), x, j) + dr.gather(type(x), x, j - 2**i) 30 | dr.scatter(x, res, j) 31 | 32 | i += 1 33 | 34 | dr.set_flag(dr.JitFlag.LoopRecord, loop_record) 35 | 36 | return x 37 | 38 | 39 | if __name__ == "__main__": 40 | import numpy as np 41 | 42 | sampler: mi.Sampler = mi.load_dict({"type": "independent"}) 43 | sampler.seed(0, 1_000_000) 44 | 45 | x = sampler.next_1d() 46 | x_np = x.numpy() 47 | 48 | prefix_sum(x) 49 | x_np = np.cumsum(x_np) 50 | 51 | x = x.numpy() 52 | print(f"{x=}") 53 | print(f"{x_np=}") 54 | assert np.any(x != x_np) 55 | -------------------------------------------------------------------------------- /pssmlt.py: -------------------------------------------------------------------------------- 1 | from typing import overload 2 | import mitsuba as mi 3 | import drjit as dr 4 | import matplotlib.pyplot as plt 5 | from dataclasses import dataclass 6 | import gc 7 | 8 | 9 | def mis_weight(pdf_a: mi.Float, pdf_b: mi.Float) -> mi.Float: 10 | """ 11 | Compute the Multiple Importance Sampling (MIS) weight given the densities 12 | of two sampling strategies according to the power heuristic. 13 | """ 14 | a2 = dr.sqr(pdf_a) 15 | return dr.detach(dr.select(pdf_a > 0, a2 / dr.fma(pdf_b, pdf_b, a2), 0), True) 16 | 17 | 18 | def drjitstruct(cls): 19 | annotations = cls.__dict__.get("__annotations__", {}) 20 | drjit_struct = {} 21 | for name, type in annotations.items(): 22 | drjit_struct[name] = type 23 | cls.DRJIT_STRUCT = drjit_struct 24 | return cls 25 | 26 | 27 | class Path: 28 | def __init__(self, dtype, wavefront_size: int, max_depth: int): 29 | self.wavefront_size = wavefront_size 30 | self.max_depth = max_depth 31 | # self.idx = dr.arange(mi.UInt32, wavefront_size) 32 | self.dtype = dtype 33 | 34 | self.vertices = dr.zeros(dtype, shape=(self.max_depth * self.wavefront_size)) 35 | 36 | def __setitem__(self, depth: mi.UInt32, value): 37 | dr.scatter( 38 | self.vertices, 39 | value, 40 | depth * self.wavefront_size + dr.arange(mi.UInt32, self.wavefront_size), 41 | ) 42 | 43 | # Return vertex at depth 44 | @overload 45 | def __getitem__(self, depth: mi.UInt32): 46 | ... 47 | 48 | # Return a vertex at (depth, ray_index) 49 | @overload 50 | def __getitem__(self, idx: tuple[mi.UInt32, mi.UInt32]): 51 | ... 
52 | 53 | def __getitem__(self, idx): 54 | if isinstance(idx, mi.UInt32): 55 | return dr.gather( 56 | self.dtype, 57 | self.vertices, 58 | idx * self.wavefront_size + dr.arange(mi.UInt32, self.wavefront_size), 59 | ) 60 | if ( 61 | isinstance(idx, tuple) 62 | and isinstance(idx[0], mi.UInt32) 63 | and isinstance(idx[1], mi.UInt32) 64 | ): 65 | return dr.gather( 66 | self.dtype, self.vertices, idx[0] * self.wavefront_size + idx[1] 67 | ) 68 | 69 | 70 | class MLTSampler(mi.Sampler): 71 | def __init__(self, props: mi.Properties) -> None: 72 | super().__init__(props) 73 | self.prng = mi.PCG32() 74 | 75 | def next_1d(self, active: bool = True) -> float: 76 | return super().next_1d(active) 77 | 78 | def next_2d(self, active: bool = True) -> mi.Point2f: 79 | return super().next_2d(active) 80 | 81 | def advance(self) -> None: 82 | return super().advance() 83 | 84 | def seed(self, seed: int, wavefront_size: int = 4294967295) -> None: 85 | super().seed(seed, wavefront_size) 86 | 87 | self.mutation_idx = dr.arange(mi.UInt32, self.wavefront_size()) 88 | 89 | idx = dr.arange(mi.UInt32, self.wavefront_size()) 90 | tmp = dr.opaque(seed) 91 | 92 | v0, v1 = mi.sample_tea_32(idx, tmp) 93 | self.prng.seed(1, v0, v1) 94 | 95 | 96 | class Pssmlt(mi.SamplingIntegrator): 97 | wo: Path 98 | L: mi.Color3f 99 | offset: mi.Vector2f 100 | # sample_count = 0 101 | cumulative_weight: mi.Float32 102 | path_type: ... 103 | 104 | def __init__(self, props: mi.Properties) -> None: 105 | self.max_depth = props.get("max_depth", def_value=16) 106 | self.rr_depth = props.get("rr_depth", def_value=4) 107 | super().__init__(props) 108 | 109 | def reset(self): 110 | ... 111 | 112 | def render_sample( 113 | self, 114 | scene: mi.Scene, 115 | sampler: mi.Sampler, 116 | sensor: mi.Sensor, 117 | block: mi.ImageBlock, 118 | pos: mi.Vector2u, 119 | large_step: mi.Bool, 120 | agregate: mi.Bool, 121 | ): 122 | large_step = mi.Bool(large_step) 123 | agregate = mi.Bool(agregate) 124 | film = sensor.film() 125 | 126 | proposed_offset = self.mutate_offset(self.offset, sampler.next_2d(), large_step) 127 | 128 | sample_pos = (mi.Point2f(pos) + proposed_offset) / mi.Point2f(film.crop_size()) 129 | ray, ray_weight = sensor.sample_ray(0.0, 0.0, sample_pos, mi.Point2f(0.5)) 130 | 131 | L = ( 132 | self.sample(scene, sampler, ray, self.proposed, large_step=large_step) 133 | * ray_weight 134 | ) 135 | dr.schedule(self.proposed.vertices) 136 | 137 | a = dr.clamp(mi.luminance(L) / mi.luminance(self.L), 0.0, 1.0) 138 | u = sampler.next_1d() 139 | 140 | accept = u < a 141 | proposed_weight = a 142 | current_weight = 1.0 - a 143 | self.cumulative_weight[accept] = proposed_weight 144 | self.cumulative_weight[~accept] += current_weight 145 | dr.schedule(self.cumulative_weight) 146 | 147 | # self.offset = dr.select(u < a, offset, self.offset) 148 | self.offset[accept] = proposed_offset 149 | dr.schedule(self.offset) 150 | 151 | # self.L = dr.select(accept, L, self.L) 152 | self.L[accept] = L 153 | dr.schedule(self.L) 154 | 155 | accept = dr.tile(accept, self.max_depth) 156 | self.path.vertices = dr.select( 157 | accept, self.proposed.vertices, self.path.vertices 158 | ) 159 | dr.schedule(self.path.vertices) 160 | 161 | res = self.L / self.cumulative_weight 162 | dr.schedule(self.cumulative_weight) 163 | 164 | aovs = [res.x, res.y, res.z, mi.Float(1.0)] 165 | block.put(pos, aovs, active=agregate) 166 | 167 | def render( 168 | self, 169 | scene: mi.Scene, 170 | sensor: mi.Sensor, 171 | seed: int = 0, 172 | spp: int = 1, 173 | develop: bool = True, 174 | 
evaluate: bool = True, 175 | ) -> mi.TensorXf: 176 | film = sensor.film() 177 | 178 | film_size = film.crop_size() 179 | 180 | wavefront_size = film_size.x * film_size.y * spp 181 | print(f"{wavefront_size=}") 182 | 183 | sampler = sensor.sampler() 184 | sampler.set_sample_count(spp) 185 | sampler.set_samples_per_wavefront(spp) 186 | sampler.seed(seed, wavefront_size) 187 | 188 | idx = dr.arange(mi.UInt, wavefront_size) 189 | idx //= spp 190 | 191 | pos = mi.Vector2u() 192 | pos.y = idx // film_size.x 193 | pos.x = dr.fma(-film_size.x, pos.y, idx) 194 | 195 | # Initialize State: 196 | self.path = Path(self.path_type, wavefront_size, self.max_depth) 197 | self.proposed = Path(self.path_type, wavefront_size, self.max_depth) 198 | self.offset = mi.Vector2f(0.5) 199 | self.L = mi.Color3f(0) 200 | self.cumulative_weight = mi.Float32(0.0) 201 | 202 | film.prepare(self.aov_names()) 203 | 204 | block: mi.ImageBlock = film.create_block() 205 | 206 | reset_interval = 50 207 | bootstrapping_count = 40 208 | for i in range(200): 209 | large_step = i % reset_interval == 0 210 | agregate = i % reset_interval > bootstrapping_count 211 | print(f"Iteration: {i}") 212 | print(f"{large_step=}") 213 | print(f"{agregate=}") 214 | 215 | self.render_sample(scene, sampler, sensor, block, pos, large_step, agregate) 216 | 217 | sampler.advance() 218 | sampler.schedule_state() 219 | dr.eval(block.tensor()) 220 | 221 | film.put_block(block) 222 | 223 | img = film.develop() 224 | dr.schedule(img) 225 | dr.eval() 226 | 227 | # self.sample_count += 1 228 | return img 229 | 230 | def sample( 231 | self, 232 | scene: mi.Scene, 233 | sampler: mi.Sampler, 234 | ray: mi.Ray3f, 235 | path: Path, 236 | large_step: mi.Bool, 237 | medium: mi.Medium = None, 238 | active: bool = True, 239 | ) -> tuple[mi.Color3f, Path]: 240 | ... 241 | 242 | def init_path(self, wavefront_size): 243 | ... 
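# render_sample() above is essentially the Kelemen-style PSSMLT accept/reject step: the proposal
# is accepted with probability a = min(1, lum(L_proposed) / lum(L_current)), an accepted state's
# cumulative weight is reset to a, and a rejected proposal adds (1 - a) to the current state's
# weight. A scalar sketch of that logic (lum, mutate and render_path are stand-ins, not functions
# from this repository):
#
#     def metropolis_step(rng, state, L_state, cum_weight):
#         proposal = mutate(rng, state)              # small or large step in primary sample space
#         L_prop = render_path(proposal)             # contribution of the proposed path
#         a = min(1.0, lum(L_prop) / max(lum(L_state), 1e-8))
#         if rng.random() < a:                       # accept: proposal becomes the current state
#             return proposal, L_prop, a
#         return state, L_state, cum_weight + (1.0 - a)  # reject: keep the current state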
244 | 245 | def mutate_offset(self, x_old: mi.Vector2f, xnew: mi.Vector2f, large_step: mi.Bool): 246 | large_step = mi.Bool(large_step) 247 | return dr.select( 248 | large_step, 249 | xnew, 250 | dr.clamp( 251 | mi.warp.square_to_std_normal(xnew) * dr.sqrt(0.1) + x_old, 252 | 0.0, 253 | 1.0, 254 | ), 255 | ) 256 | -------------------------------------------------------------------------------- /pssmltpath.py: -------------------------------------------------------------------------------- 1 | from pssmlt import Path, Pssmlt, drjitstruct, mis_weight 2 | import mitsuba as mi 3 | import drjit as dr 4 | 5 | 6 | @drjitstruct 7 | class PathVert: 8 | wo: mi.Vector3f 9 | emitter_sample: mi.Point2f 10 | 11 | 12 | class PssmltPath(Pssmlt): 13 | def __init__(self, props: mi.Properties) -> None: 14 | self.path_type = PathVert 15 | super().__init__(props) 16 | 17 | def sample( 18 | self, 19 | scene: mi.Scene, 20 | sampler: mi.Sampler, 21 | ray: mi.RayDifferential3f, 22 | path: Path, 23 | large_step: mi.Bool, 24 | medium: mi.Medium = None, 25 | active: bool = True, 26 | ) -> mi.Color3f: 27 | # if initialize: 28 | # self.emitter_offset = Path(wavefront_size, self.max_depth, mi.Vector2f) 29 | # path = Path(PathVert, len(ray.d.x), self.max_depth) 30 | large_step = mi.Bool(large_step) 31 | 32 | ray = mi.Ray3f(ray) 33 | active = mi.Bool(active) 34 | f = mi.Spectrum(1.0) 35 | L = mi.Spectrum(0.0) 36 | eta = mi.Float(1.0) 37 | depth = mi.UInt32(0) 38 | 39 | valid_ray = mi.Bool(scene.environment() is not None) 40 | 41 | # Variables caching information from the previous bounce 42 | prev_si: mi.SurfaceInteraction3f = dr.zeros(mi.SurfaceInteraction3f) 43 | prev_bsdf_pdf = mi.Float(1.0) 44 | prev_bsdf_delta = mi.Bool(True) 45 | bsdf_ctx = mi.BSDFContext() 46 | 47 | loop = mi.Loop( 48 | "Path Tracer", 49 | state=lambda: ( 50 | sampler, 51 | ray, 52 | f, 53 | L, 54 | eta, 55 | depth, 56 | valid_ray, 57 | prev_si, 58 | prev_bsdf_pdf, 59 | prev_bsdf_delta, 60 | active, 61 | ), 62 | ) 63 | 64 | loop.set_max_iterations(self.max_depth) 65 | 66 | while loop(active): 67 | si = scene.ray_intersect(ray) # TODO: not necesarry in first interaction 68 | 69 | # ---------------------- Direct emission ---------------------- 70 | 71 | ds = mi.DirectionSample3f(scene, si, prev_si) 72 | em_pdf = mi.Float(0.0) 73 | 74 | em_pdf = scene.pdf_emitter_direction(prev_si, ds, ~prev_bsdf_delta) 75 | 76 | mis_bsdf = mis_weight(prev_bsdf_pdf, em_pdf) 77 | 78 | L = dr.fma( 79 | f, 80 | ds.emitter.eval(si, prev_bsdf_pdf > 0.0) * mis_bsdf, 81 | L, 82 | ) 83 | 84 | active_next = ((depth + 1) < self.max_depth) & si.is_valid() 85 | 86 | bsdf: mi.BSDF = si.bsdf(ray) 87 | 88 | # ------ Evaluate BSDF * cos(theta) and sample direction ------- 89 | 90 | # sample1 = sampler.next_1d() 91 | # sample2 = sampler.next_2d() 92 | 93 | # bsdf_val, bsdf_pdf, bsdf_sample, bsdf_weight = bsdf.eval_pdf_sample( 94 | # bsdf_ctx, si, wo, sample1, sample2 95 | # ) 96 | 97 | # ---------------------- BSDF sampling ---------------------- 98 | 99 | bsdf_sample, bsdf_weight = bsdf.sample( 100 | bsdf_ctx, si, sampler.next_1d(), sampler.next_2d() 101 | ) 102 | 103 | vert: PathVert = self.mutate( 104 | self.path[depth], bsdf_sample.wo, sampler.next_2d(), large_step 105 | ) 106 | 107 | bsdf_val, bsdf_pdf = bsdf.eval_pdf(bsdf_ctx, si, vert.wo, active) 108 | 109 | vert.wo[bsdf_pdf <= 0.0] = bsdf_sample.wo 110 | bsdf_weight[bsdf_pdf > 0.0] = bsdf_val / bsdf_pdf 111 | 112 | bsdf_weight = si.to_world_mueller(bsdf_weight, -bsdf_sample.wo, si.wi) 113 | 114 | ray = 
si.spawn_ray(si.to_world(vert.wo)) 115 | 116 | # ---------------------- Emitter sampling ---------------------- 117 | 118 | active_em = active_next & mi.has_flag(bsdf.flags(), mi.BSDFFlags.Smooth) 119 | 120 | ds, em_weight = scene.sample_emitter_direction( 121 | si, vert.emitter_sample, True, active_em 122 | ) 123 | 124 | wo = si.to_local(ds.d) 125 | 126 | bsdf_val, bsdf_pdf = bsdf.eval_pdf(bsdf_ctx, si, wo) 127 | 128 | # --------------- Emitter sampling contribution ---------------- 129 | 130 | bsdf_val = si.to_world_mueller(bsdf_val, -wo, si.wi) 131 | 132 | mi_em = dr.select(ds.delta, 1.0, mis_weight(ds.pdf, bsdf_pdf)) 133 | 134 | L[active_em] = dr.fma(f, bsdf_val * em_weight * mi_em, L) 135 | 136 | # ------ Update loop variables based on current interaction ------ 137 | 138 | path[depth] = vert 139 | 140 | f *= bsdf_weight 141 | eta *= bsdf_sample.eta 142 | valid_ray |= ( 143 | active 144 | & si.is_valid() 145 | & ~mi.has_flag(bsdf_sample.sampled_type, mi.BSDFFlags.Null) 146 | ) 147 | 148 | prev_si = si 149 | prev_bsdf_pdf = bsdf_sample.pdf 150 | prev_bsdf_delta = mi.has_flag(bsdf_sample.sampled_type, mi.BSDFFlags.Delta) 151 | 152 | # -------------------- Stopping criterion --------------------- 153 | 154 | depth[si.is_valid()] += 1 155 | 156 | throughput_max = dr.max(f) 157 | 158 | rr_prop = dr.minimum(throughput_max * dr.sqr(eta), 0.95) 159 | rr_active = depth >= self.rr_depth 160 | rr_continue = sampler.next_1d() < rr_prop 161 | 162 | f[rr_active] *= dr.rcp(rr_prop) 163 | 164 | active = ( 165 | active_next & (~rr_active | rr_continue) & (dr.neq(throughput_max, 0.0)) 166 | ) 167 | 168 | return L 169 | 170 | def mutate( 171 | self, old: PathVert, wo: mi.Vector3f, sample1: mi.Point2f, large_step: mi.Bool 172 | ) -> PathVert: 173 | large_step = mi.Bool(large_step) 174 | vert = PathVert() 175 | # vert.wo = dr.select(large_step, wo, dr.normalize(old.wo + wo * 10000.0)) 176 | a = 0.01 177 | vert.wo = dr.select(large_step, wo, dr.normalize(old.wo * (1 - a) + wo * a)) 178 | 179 | vert.emitter_sample = dr.select( 180 | large_step, 181 | sample1, 182 | dr.clamp( 183 | mi.warp.square_to_std_normal(sample1) * dr.sqrt(0.01) 184 | + old.emitter_sample, 185 | 0.0, 186 | 1.0, 187 | ), 188 | ) 189 | 190 | return vert 191 | 192 | 193 | mi.register_integrator("pssmlt", lambda props: PssmltPath(props)) 194 | -------------------------------------------------------------------------------- /pssmltsimple.py: -------------------------------------------------------------------------------- 1 | from pssmlt import Path, Pssmlt, drjitstruct 2 | import mitsuba as mi 3 | import drjit as dr 4 | 5 | 6 | @drjitstruct 7 | class PathVert: 8 | wo: mi.Vector3f 9 | 10 | 11 | class PssmltSimple(Pssmlt): 12 | def __init__(self, props: mi.Properties) -> None: 13 | self.path_type = PathVert 14 | super().__init__(props) 15 | 16 | def sample( 17 | self, 18 | scene: mi.Scene, 19 | sampler: mi.Sampler, 20 | ray: mi.RayDifferential3f, 21 | path: Path, 22 | large_step: mi.Bool, 23 | medium: mi.Medium = None, 24 | active: bool = True, 25 | ) -> mi.Color3f: 26 | large_step = mi.Bool(large_step) 27 | 28 | # --------------------- Configure loop state ---------------------- 29 | ray = mi.Ray3f(ray) 30 | f = mi.Spectrum(1.0) 31 | L = mi.Spectrum(0.0) 32 | eta = mi.Float(1.0) 33 | depth = mi.UInt32(0) 34 | bsdf_ctx = mi.BSDFContext() 35 | 36 | # Variables caching information from the previous bounce 37 | prev_si = dr.zeros(mi.SurfaceInteraction3f) 38 | prev_bsdf_pdf = mi.Float(1.0) 39 | prev_bsdf_delta = mi.Bool(True) 40 | active = 
mi.Bool(active) 41 | 42 | loop = mi.Loop( 43 | "Path Tracer", 44 | state=lambda: ( 45 | sampler, 46 | ray, 47 | f, 48 | L, 49 | eta, 50 | depth, 51 | prev_si, 52 | prev_bsdf_pdf, 53 | prev_bsdf_delta, 54 | active, 55 | ), 56 | ) 57 | 58 | loop.set_max_iterations(self.max_depth) 59 | 60 | while loop(active): 61 | with dr.resume_grad(): 62 | si: mi.SurfaceInteraction3f = scene.ray_intersect( 63 | ray, mi.RayFlags.All, coherent=mi.Bool(False) 64 | ) 65 | 66 | # ---------------------- Direct emission ---------------------- 67 | ds = mi.DirectionSample3f(scene, si, prev_si) 68 | em_pdf = scene.eval_emitter_direction(prev_si, ds, ~prev_bsdf_delta) 69 | # mis_bsdf = 1.0 70 | 71 | # L = dr.fma(f, ds.emitter.eval(si, prev_bsdf_pdf > 0.) * mis_bsdf, L) 72 | with dr.resume_grad(): 73 | # Le = f * mis_bsdf * ds.emitter.eval(si) 74 | L = dr.fma(f, ds.emitter.eval(si, prev_bsdf_pdf > 0.0), L) 75 | 76 | active_next = (depth + 1 < self.max_depth) & si.is_valid() 77 | 78 | # ---------------------- BSDF sampling ---------------------- 79 | bsdf: mi.BSDF = si.bsdf(ray) 80 | 81 | s1 = sampler.next_1d() 82 | s2 = sampler.next_2d() 83 | 84 | bsdf_sample, bsdf_weight = bsdf.sample(bsdf_ctx, si, s1, s2, active_next) 85 | bsdf_weight = si.to_world_mueller(bsdf_weight, -bsdf_sample.wo, si.wi) 86 | 87 | # Pssmlt mutating 88 | 89 | vert: PathVert = self.mutate(self.path[depth], bsdf_sample.wo, large_step) 90 | # wo = vert.wo 91 | 92 | # Reevaluate bsdf_weight after mutating wo 93 | bsdf_val, bsdf_pdf = bsdf.eval_pdf(bsdf_ctx, si, vert.wo, active) 94 | 95 | vert.wo[bsdf_pdf <= 0.0] = bsdf_sample.wo 96 | bsdf_weight[bsdf_pdf > 0.0] = bsdf_val / bsdf_pdf 97 | 98 | # vert.wo = wo 99 | path[depth] = vert 100 | 101 | ray = si.spawn_ray(si.to_world(vert.wo)) 102 | 103 | if False: 104 | ray = dr.detach(ray) 105 | 106 | wo = si.to_local(ray.d) 107 | bsdf_val, bsdf_pdf = bsdf.eval_pdf(bsdf_ctx, si, wo, active) 108 | bsdf_weight[bsdf_pdf > 0.0] = bsdf_val / dr.detach(bsdf_pdf) 109 | 110 | # ------ Update loop variables based on current interaction ------ 111 | 112 | f *= bsdf_weight 113 | eta *= bsdf_sample.eta 114 | 115 | prev_si = dr.detach(si) 116 | prev_bsdf_pdf = bsdf_sample.pdf 117 | prev_bsdf_delta = mi.has_flag(bsdf_sample.sampled_type, mi.BSDFFlags.Delta) 118 | 119 | # -------------------- Stopping criterion --------------------- 120 | 121 | depth[si.is_valid()] += 1 122 | 123 | fmax = dr.max(f) 124 | 125 | rr_prob = dr.minimum(fmax * dr.sqr(eta), 0.95) 126 | rr_active = depth >= self.rr_depth 127 | rr_continue = sampler.next_1d() < rr_prob 128 | 129 | f[rr_active] *= dr.rcp(dr.detach(rr_prob)) 130 | 131 | active = active_next & (~rr_active | rr_continue) & dr.neq(fmax, 0.0) 132 | 133 | return L 134 | 135 | def mutate(self, old: PathVert, wo: mi.Vector3f, large_step: mi.Bool) -> PathVert: 136 | large_step = mi.Bool(large_step) 137 | 138 | vert = PathVert() 139 | a = 0.1 140 | vert.wo = dr.select(large_step, wo, dr.normalize(old.wo * (1 - a) + wo * a)) 141 | 142 | return vert 143 | 144 | 145 | mi.register_integrator("pssmlt_simple", lambda props: PssmltSimple(props)) 146 | -------------------------------------------------------------------------------- /reductions.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | annotations as __annotations__, 3 | ) # Delayed parsing of type annotations 4 | 5 | import mitsuba as mi 6 | import drjit as dr 7 | 8 | if __name__ == "__main__": 9 | mi.set_variant("cuda_ad_rgb") 10 | 11 | 12 | def scatter_reduce_with(func, 
target, value, index, active=True): 13 | # n_value = dr.shape(value)[-1] 14 | # n_target = dr.shape(target)[-1] 15 | n_value = dr.width(value) 16 | n_target = dr.width(target) 17 | # print(f"{n_value=}") 18 | # print(f"{n_target=}") 19 | 20 | current_scatter = dr.zeros(mi.UInt, n_target) 21 | queued_values = dr.arange(mi.UInt, n_value) 22 | 23 | while len(queued_values) > 0: 24 | """ 25 | First we scatter into the `current_scatter` array. 26 | For every double index, a random element is selected 27 | """ 28 | 29 | target_idx = dr.gather(mi.UInt, index, queued_values) 30 | lane_idx = dr.gather(mi.UInt, dr.arange(mi.UInt, n_value), queued_values) 31 | dr.scatter( 32 | current_scatter, 33 | lane_idx, 34 | dr.gather(mi.UInt, index, queued_values), 35 | ) 36 | 37 | """ 38 | We now get the selected values for scattering in this loop iteration 39 | """ 40 | current = dr.eq(dr.gather(mi.UInt, current_scatter, target_idx), lane_idx) 41 | 42 | current_idx = dr.gather(mi.UInt, queued_values, dr.compress(current)) 43 | 44 | queued_values = dr.gather(mi.UInt, queued_values, dr.compress(~current)) 45 | 46 | target_idx = dr.gather(mi.UInt, index, current_idx) 47 | 48 | a = dr.gather(type(target), target, target_idx) 49 | b = dr.gather(type(value), value, current_idx) 50 | """ 51 | After gathering the target and values of the current lanes we compute the result 52 | """ 53 | res = func(a, b) 54 | dr.scatter(target, res, target_idx) 55 | 56 | 57 | if __name__ == "__main__": 58 | target = dr.zeros(mi.Float, 10) 59 | index = dr.arange(mi.UInt, 25) % 10 60 | value = dr.ones(mi.Float, 25) 61 | 62 | scatter_reduce_with(lambda a, b: a + b, target, value, index) 63 | print(f"{target=}") 64 | -------------------------------------------------------------------------------- /reproject.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | annotations as __annotations__, 3 | ) # Delayed parsing of type annotations 4 | 5 | import mitsuba as mi 6 | import drjit as dr 7 | 8 | 9 | def w2c(sensor: mi.ProjectiveCamera) -> mi.Transform4f: 10 | params = mi.traverse(sensor) 11 | 12 | C2S = mi.perspective_projection( 13 | params["film.size"], 14 | params["film.crop_size"], 15 | params["film.crop_offset"], 16 | params["x_fov"], 17 | params["near_clip"], 18 | params["far_clip"], 19 | ) 20 | W2C = params["to_world"] 21 | return C2S @ W2C 22 | 23 | 24 | if __name__ == "__main__": 25 | mi.set_variant("cuda_ad_rgb") 26 | scene = mi.load_dict(mi.cornell_box()) # type: mi.Scene 27 | 28 | params = mi.traverse(scene.sensors()[0]) 29 | 30 | C2S = mi.perspective_projection( 31 | params["film.size"], 32 | params["film.crop_size"], 33 | params["film.crop_offset"], 34 | params["x_fov"], 35 | params["near_clip"], 36 | params["far_clip"], 37 | ) 38 | W2C = params["to_world"] 39 | p = mi.Point3f(0.0, 0.0, -100.0) 40 | s = C2S @ (W2C @ p) 41 | print(f"{s=}") 42 | -------------------------------------------------------------------------------- /restirgi.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | from tqdm import tqdm 4 | from drjitstruct import drjitstruct 5 | 6 | if __name__ == "__main__": 7 | mi.set_variant("cuda_ad_rgb") 8 | 9 | 10 | def RTXDI_Partial( 11 | receiver_pos: mi.Vector3f, sample_pos: mi.Vector3f, sample_normal: mi.Vector3f 12 | ) -> tuple[mi.Float, mi.Float]: 13 | vec = receiver_pos - sample_pos 14 | 15 | distance_to_surface = dr.norm(vec) 16 | cosine_emission_angle = dr.clamp( 
17 | dr.dot(sample_normal, vec / distance_to_surface), 0, 1 18 | ) 19 | 20 | return distance_to_surface, cosine_emission_angle 21 | 22 | 23 | def RTXDI_J( 24 | receiver_pos: mi.Vector3f, 25 | neighbor_pos: mi.Vector3f, 26 | neighbor_res: "RestirReservoir", 27 | ) -> mi.Float: 28 | new_distance, new_cosine = RTXDI_Partial( 29 | receiver_pos, neighbor_res.z.x_s, neighbor_res.z.n_s 30 | ) 31 | original_distance, original_cosine = RTXDI_Partial( 32 | neighbor_pos, neighbor_res.z.x_s, neighbor_res.z.n_s 33 | ) 34 | 35 | jacobian = (new_cosine * original_distance * original_distance) / ( 36 | original_cosine * new_distance * new_distance 37 | ) 38 | jacobian = dr.select(dr.isinf(jacobian) | dr.isnan(jacobian), 0, jacobian) 39 | return jacobian 40 | 41 | 42 | def J(receiver_pos: mi.Vector3f, neighbor_res: mi.Vector3f) -> mi.Float: 43 | v_new = receiver_pos - neighbor_res.z.x_s 44 | d_new = dr.norm(v_new) 45 | cos_new = dr.clamp(dr.dot(v_new, neighbor_res.z.n_s) / d_new, 0, 1) 46 | 47 | v_old = neighbor_res.z.x_v - neighbor_res.z.x_s 48 | d_old = dr.norm(v_old) 49 | cos_old = dr.clamp(dr.dot(v_old, neighbor_res.z.n_s) / d_old, 0, 1) 50 | 51 | div = cos_old * dr.sqr(d_new) 52 | jacobian = dr.select(div > 0, cos_new * dr.sqr(d_old) / div, 0) 53 | return jacobian 54 | 55 | 56 | def J_rcp(q: "RestirSample", r: "RestirSample") -> mi.Float: 57 | """ 58 | Calculate the Reciprocal of the absolute of the Jacobian determinant. 59 | J_rcp = |J_{q\\rightarrow r}|^{-1} // Equation 11 from paper 60 | """ 61 | w_qq = q.x_v - q.x_s 62 | w_qq_len = dr.norm(w_qq) 63 | w_qq /= w_qq_len 64 | cos_psi_q = dr.clamp(dr.dot(w_qq, q.n_s), 0, 1) 65 | 66 | w_qr = r.x_v - q.x_s 67 | w_qr_len = dr.norm(w_qr) 68 | w_qr /= w_qr_len 69 | cos_psi_r = dr.clamp(dr.dot(w_qr, q.n_s), 0, 1) 70 | 71 | div = dr.abs(cos_psi_r) * dr.sqr(w_qq_len) 72 | return dr.select(div > 0, dr.abs(cos_psi_q) * dr.sqr(w_qr_len) / div, 0.0) 73 | 74 | 75 | def mis_weight(pdf_a: mi.Float, pdf_b: mi.Float) -> mi.Float: 76 | """ 77 | Compute the Multiple Importance Sampling (MIS) weight given the densities 78 | of two sampling strategies according to the power heuristic. 
79 | """ 80 | a2 = dr.sqr(pdf_a) 81 | return dr.detach(dr.select(pdf_a > 0, a2 / dr.fma(pdf_b, pdf_b, a2), 0), True) 82 | 83 | 84 | def p_hat(f): 85 | return dr.norm(f) 86 | 87 | 88 | class ReuseSet: 89 | def __init__(self): 90 | self.M = [] 91 | self.active = [] 92 | self.p = [] 93 | self.n = [] 94 | 95 | def put(self, M: mi.UInt, pos: mi.Vector3f, n: mi.Vector3f, active: mi.Bool = True): 96 | self.M.append(M) 97 | self.p.append(pos) 98 | self.n.append(n) 99 | self.active.append(mi.Bool(active)) 100 | 101 | def __len__(self) -> int: 102 | assert len(self.M) == len(self.p) == len(self.active) == len(self.n) 103 | return len(self.M) 104 | 105 | 106 | @drjitstruct 107 | class RestirSample: 108 | x_v: mi.Vector3f 109 | n_v: mi.Vector3f 110 | x_s: mi.Vector3f 111 | n_s: mi.Vector3f 112 | 113 | L_o: mi.Color3f 114 | p_q: mi.Float 115 | valid: mi.Bool 116 | 117 | 118 | @drjitstruct 119 | class RestirReservoir: 120 | z: RestirSample 121 | w: mi.Float 122 | W: mi.Float 123 | M: mi.UInt 124 | 125 | def update( 126 | self, 127 | sampler: mi.Sampler, 128 | snew: RestirSample, 129 | wnew: mi.Float, 130 | active: mi.Bool = True, 131 | ): 132 | active = mi.Bool(active) 133 | if dr.shape(active)[-1] == 1: 134 | dr.make_opaque(active) 135 | 136 | self.w += dr.select(active, wnew, 0) 137 | self.M += dr.select(active, 1, 0) 138 | self.z: RestirSample = dr.select( 139 | active & (sampler.next_1d() < wnew / self.w), snew, self.z 140 | ) 141 | 142 | def merge( 143 | self, sampler: mi.Sampler, r: "RestirReservoir", p, active: mi.Bool = True 144 | ): 145 | active = mi.Bool(active) 146 | M0 = mi.UInt(self.M) 147 | self.update(sampler, r.z, p * r.W * r.M, active) 148 | self.M = dr.select(active, M0 + r.M, M0) 149 | 150 | 151 | class RestirIntegrator(mi.SamplingIntegrator): 152 | dist_threshold = 0.1 153 | angle_threshold = 25 * dr.pi / 180 154 | 155 | def __init__(self, props: mi.Properties): 156 | super().__init__(props) 157 | self.max_depth: int = props.get("max_depth", 8) 158 | self.rr_depth: int = props.get("rr_depth", 2) 159 | self.bias_correction = props.get("bias_correction", True) 160 | self.jacobian = props.get("jacobian", True) 161 | self.bsdf_sampling = props.get("bsdf_sampling", True) 162 | self.max_M_temporal = props.get("max_M_temporal", None) 163 | self.max_M_spatial = props.get("max_M_spatial", None) 164 | self.initial_search_radius = props.get("initial_search_radius", 10.0) 165 | self.minimal_search_radius = props.get("minimal_search_radius", 3.00) 166 | self.spatial_spatial_reuse = props.get("spatial_spatial_reuse", False) 167 | self.n = 0 168 | self.film_size: None | mi.Vector2u = None 169 | 170 | def to_idx(self, pos: mi.Vector2u) -> mi.UInt: 171 | pos = dr.clamp(mi.Point2u(pos), mi.Point2u(0), self.film_size) 172 | assert self.film_size is not None 173 | return (pos.y * self.film_size.x + pos.x) * self.spp + self.sample_offset 174 | 175 | def similar(self, s1: RestirSample, s2: RestirSample) -> mi.Bool: 176 | dist = dr.norm(s1.x_v - s2.x_v) 177 | similar = dist < self.dist_threshold 178 | similar &= dr.dot(s1.n_v, s2.n_v) > dr.cos(self.angle_threshold) 179 | 180 | return similar 181 | 182 | def render( 183 | self, 184 | scene: mi.Scene, 185 | sensor: mi.Sensor, 186 | seed: int = 0, 187 | spp: int = 1, 188 | develop: bool = True, 189 | evaluate: bool = True, 190 | ): 191 | film = sensor.film() 192 | 193 | film_size = film.crop_size() 194 | 195 | if self.film_size is None: 196 | self.film_size = film_size 197 | 198 | wavefront_size = film_size.x * film_size.y * spp 199 | 200 | sampler = 
sensor.sampler() 201 | sampler.set_sample_count(spp) 202 | sampler.set_samples_per_wavefront(spp) 203 | sampler.seed(seed, wavefront_size) 204 | 205 | idx = dr.arange(mi.UInt, wavefront_size) 206 | 207 | pos = mi.Vector2u() 208 | pos.x = idx // spp % film_size.x 209 | pos.y = idx // film_size.x // spp 210 | self.sample_offset = idx % spp 211 | self.spp = spp 212 | 213 | sample_pos = (mi.Point2f(pos) + sampler.next_2d()) / mi.Point2f( 214 | film.crop_size() 215 | ) 216 | 217 | if self.n == 0: 218 | self.temporal_reservoir: RestirReservoir = dr.zeros( 219 | RestirReservoir, wavefront_size 220 | ) 221 | self.spatial_reservoir: RestirReservoir = dr.zeros( 222 | RestirReservoir, wavefront_size 223 | ) 224 | self.search_radius = dr.full( 225 | mi.Float, self.initial_search_radius, wavefront_size 226 | ) 227 | 228 | self.prev_sensor: mi.Sensor = mi.load_dict({"type": "perspective"}) 229 | mi.traverse(self.prev_sensor).update(mi.traverse(sensor)) 230 | 231 | self.sample_initial(scene, sampler, sensor, sample_pos) 232 | dr.eval(self.sample) 233 | if self.n == 0: 234 | self.prev_sample = self.sample 235 | self.temporal_resampling(sampler, mi.Vector2f(pos)) 236 | dr.eval(self.temporal_reservoir) 237 | self.spatial_resampling(scene, sampler, pos) 238 | dr.eval(self.spatial_reservoir, self.search_radius) 239 | 240 | res = self.render_final() 241 | 242 | film.prepare(self.aov_names()) 243 | block: mi.ImageBlock = film.create_block() 244 | 245 | aovs = [res.x, res.y, res.z, mi.Float(1)] 246 | 247 | block.put(pos, aovs) 248 | 249 | film.put_block(block) 250 | 251 | img = film.develop() 252 | dr.eval(img) 253 | 254 | # Update n, prev_sensor and prev_sample 255 | self.n += 1 256 | mi.traverse(self.prev_sensor).update(mi.traverse(sensor)) 257 | self.prev_sample = self.sample 258 | 259 | return img 260 | 261 | def render_final(self) -> tuple[mi.Color3f, mi.Color3f, mi.Color3f]: 262 | assert self.film_size is not None 263 | R = self.spatial_reservoir 264 | S = R.z 265 | 266 | si = self.si_v 267 | bsdf: mi.BSDF = self.si_v.bsdf() 268 | β = bsdf.eval(mi.BSDFContext(), si, si.to_local(dr.normalize(S.x_s - si.p))) 269 | 270 | result = β * S.L_o * R.W + self.emittance 271 | 272 | return result 273 | 274 | def spatial_resampling( 275 | self, 276 | scene: mi.Scene, 277 | sampler: mi.Sampler, 278 | pos: mi.Vector2u, 279 | ): 280 | Rs = self.spatial_reservoir 281 | 282 | """ 283 | Create a new reservoir to merge the spatial reservoirs into. 284 | This is neccesary so we can clamp M. 
285 | """ 286 | Rnew: RestirReservoir = dr.zeros(RestirReservoir) 287 | Q = ReuseSet() 288 | 289 | q: RestirSample = self.sample 290 | 291 | Z = mi.UInt(0) 292 | 293 | if self.spatial_spatial_reuse: 294 | Rnew.merge(sampler, Rs, p_hat(Rs.z.L_o)) 295 | Z += Rs.M 296 | 297 | max_iter = dr.select(Rs.M < self.max_M_spatial / 2, 9, 3) 298 | 299 | any_reused = dr.full(mi.Bool, False, len(pos.x)) 300 | 301 | for s in range(9): 302 | active = s < max_iter 303 | 304 | offset = ( 305 | mi.warp.square_to_uniform_disk(sampler.next_2d()) * self.search_radius 306 | ) 307 | p = dr.clamp(pos + mi.Vector2i(offset), mi.Point2u(0), self.film_size) 308 | 309 | qn: RestirSample = dr.gather(RestirSample, self.sample, self.to_idx(p)) 310 | 311 | active &= self.similar(qn, q) 312 | 313 | Rn: RestirReservoir = dr.gather( 314 | RestirReservoir, self.temporal_reservoir, self.to_idx(p), active 315 | ) # l.9 316 | 317 | si: mi.SurfaceInteraction3f = dr.zeros(mi.SurfaceInteraction3f) 318 | si.p = q.x_v 319 | si.n = q.n_v 320 | shadowed = scene.ray_test(si.spawn_ray_to(Rn.z.x_s), active) 321 | 322 | phat = dr.select( 323 | ~active | shadowed, 324 | 0, 325 | p_hat(Rn.z.L_o) 326 | * (dr.clamp(J(q.x_v, Rn), 0, 1000) if self.jacobian else 1.0), 327 | ) # l.11 - 13 328 | 329 | Rnew.merge(sampler, Rn, phat, active) 330 | 331 | Q.put(Rn.M, Rn.z.x_v, Rn.z.n_v, active) 332 | 333 | any_reused |= active 334 | 335 | phat = p_hat(Rnew.z.L_o) 336 | if self.bias_correction: 337 | for i in range(len(Q)): 338 | active = Q.active[i] 339 | 340 | si: mi.SurfaceInteraction3f = dr.zeros(mi.SurfaceInteraction3f) 341 | si.p = Rnew.z.x_s 342 | si.n = Rnew.z.n_s 343 | ray = si.spawn_ray_to(Q.p[i]) 344 | 345 | # active &= dr.dot(ray.d, Q.n[i]) < 0 346 | active &= ~scene.ray_test(ray, active) 347 | 348 | Z += dr.select(active, Q.M[i], 0) 349 | 350 | Rnew.W = dr.select(Z * phat > 0, Rnew.w / (Z * phat), 0.0) 351 | else: 352 | Rnew.W = dr.select(phat * Rnew.M > 0, Rnew.w / (Rnew.M * phat), 0) 353 | 354 | # Decrease search radius: 355 | self.search_radius = dr.maximum( 356 | dr.select(any_reused, self.search_radius, self.search_radius / 2), 357 | self.minimal_search_radius, 358 | ) 359 | 360 | if self.max_M_spatial is not None: 361 | Rnew.M = dr.minimum(Rnew.M, self.max_M_spatial) 362 | 363 | self.spatial_reservoir = Rnew 364 | 365 | def temporal_resampling( 366 | self, 367 | sampler: mi.Sampler, 368 | pos: mi.Vector2f, 369 | ): 370 | S = self.sample 371 | 372 | si: mi.SurfaceInteraction3f = dr.zeros(mi.SurfaceInteraction3f) 373 | si.p = S.x_v 374 | ds, _ = self.prev_sensor.sample_direction( 375 | si, mi.Point2f(0.0) 376 | ) # type: tuple[mi.DirectionSample3f, mi.Color3f] 377 | ds: mi.DirectionSample3f = ds 378 | 379 | valid = ds.pdf > 0 380 | 381 | Sprev: RestirSample = dr.gather( 382 | RestirSample, self.prev_sample, self.to_idx(mi.Point2u(ds.uv)), valid 383 | ) 384 | 385 | valid &= self.similar(S, Sprev) 386 | 387 | R = dr.select(valid, self.temporal_reservoir, dr.zeros(RestirReservoir)) 388 | 389 | """ 390 | Create a new reservoir to update with the new sample and merge the old temporal reservoir into. 391 | This is necesarry to limit the samples in the old temporal reservoir. 
392 | """ 393 | Rnew: RestirReservoir = dr.zeros(RestirReservoir) 394 | 395 | phat = p_hat(S.L_o) 396 | w = dr.select(S.p_q > 0, phat / S.p_q, 0.0) # Weight for new sample 397 | Rnew.update(sampler, S, w) # Add new sample to Rnew 398 | 399 | # add min(R.M, CLAMP) samples from R 400 | Rnew.merge(sampler, R, p_hat(R.z.L_o)) 401 | 402 | phat = p_hat(Rnew.z.L_o) 403 | Rnew.W = dr.select( 404 | phat * Rnew.M > 0, Rnew.w / (Rnew.M * phat), 0 405 | ) # Update Contribution Weight W in Rnew 406 | 407 | if self.max_M_temporal is not None: 408 | Rnew.M = dr.minimum(Rnew.M, self.max_M_temporal) 409 | 410 | self.temporal_reservoir = Rnew 411 | 412 | def sample_initial( 413 | self, 414 | scene: mi.Scene, 415 | sampler: mi.Sampler, 416 | sensor: mi.Sensor, 417 | # pos: mi.Vector2u, 418 | sample_pos: mi.Point2f, 419 | ) -> RestirSample: 420 | S = RestirSample() 421 | ray, ray_weight = sensor.sample_ray(0.0, 0.0, sample_pos, mi.Point2f(0.5)) 422 | 423 | si: mi.SurfaceInteraction3f = scene.ray_intersect(ray) 424 | bsdf: mi.BSDF = si.bsdf() 425 | 426 | ds = mi.DirectionSample3f(scene, si, dr.zeros(mi.SurfaceInteraction3f)) 427 | emitter: mi.Emitter = ds.emitter 428 | self.emittance = emitter.eval(si) 429 | 430 | S.x_v = si.p 431 | S.n_v = si.n 432 | S.valid = si.is_valid() 433 | self.si_v = si 434 | 435 | if self.bsdf_sampling: 436 | bsdf_sample, bsdf_weight = bsdf.sample( 437 | mi.BSDFContext(), si, sampler.next_1d(), sampler.next_2d() 438 | ) 439 | 440 | wo = bsdf_sample.wo 441 | pdf = bsdf_sample.pdf 442 | else: 443 | wo = mi.warp.square_to_uniform_hemisphere(sampler.next_2d()) 444 | pdf = mi.warp.square_to_uniform_hemisphere_pdf(wo) 445 | 446 | S.p_q = pdf 447 | 448 | ray = si.spawn_ray(si.to_world(wo)) 449 | 450 | S.L_o = self.sample_ray(scene, sampler, ray) 451 | 452 | si: mi.SurfaceInteraction3f = scene.ray_intersect(ray) 453 | 454 | S.x_s = si.p 455 | S.n_s = si.n 456 | 457 | self.sample = S 458 | 459 | def sample_ray( 460 | self, 461 | scene: mi.Scene, 462 | sampler: mi.Sampler, 463 | ray: mi.Ray3f, 464 | active: bool = True, 465 | ) -> mi.Color3f: 466 | # --------------------- Configure loop state ---------------------- 467 | 468 | ray = mi.Ray3f(ray) 469 | active = mi.Bool(active) 470 | throughput = mi.Spectrum(1.0) 471 | result = mi.Spectrum(0.0) 472 | eta = mi.Float(1.0) 473 | depth = mi.UInt32(0) 474 | 475 | valid_ray = mi.Bool(scene.environment() is not None) 476 | 477 | # Variables caching information from the previous bounce 478 | prev_si: mi.SurfaceInteraction3f = dr.zeros(mi.SurfaceInteraction3f) 479 | prev_bsdf_pdf = mi.Float(1.0) 480 | prev_bsdf_delta = mi.Bool(True) 481 | bsdf_ctx = mi.BSDFContext() 482 | 483 | loop = mi.Loop( 484 | "Path Tracer", 485 | state=lambda: ( 486 | sampler, 487 | ray, 488 | throughput, 489 | result, 490 | eta, 491 | depth, 492 | valid_ray, 493 | prev_si, 494 | prev_bsdf_pdf, 495 | prev_bsdf_delta, 496 | active, 497 | ), 498 | ) 499 | 500 | loop.set_max_iterations(self.max_depth) 501 | 502 | while loop(active): 503 | # TODO: not necesarry in first interaction 504 | si = scene.ray_intersect(ray) 505 | 506 | # ---------------------- Direct emission ---------------------- 507 | 508 | ds = mi.DirectionSample3f(scene, si, prev_si) 509 | em_pdf = mi.Float(0.0) 510 | 511 | em_pdf = scene.pdf_emitter_direction(prev_si, ds, ~prev_bsdf_delta) 512 | 513 | mis_bsdf = mis_weight(prev_bsdf_pdf, em_pdf) 514 | 515 | result = dr.fma( 516 | throughput, 517 | ds.emitter.eval(si, prev_bsdf_pdf > 0.0) * mis_bsdf, 518 | result, 519 | ) 520 | 521 | active_next = ((depth + 1) < 
self.max_depth) & si.is_valid() 522 | 523 | bsdf: mi.BSDF = si.bsdf(ray) 524 | 525 | # ---------------------- Emitter sampling ---------------------- 526 | 527 | active_em = active_next & mi.has_flag(bsdf.flags(), mi.BSDFFlags.Smooth) 528 | 529 | ds, em_weight = scene.sample_emitter_direction( 530 | si, sampler.next_2d(), True, active_em 531 | ) 532 | 533 | wo = si.to_local(ds.d) 534 | 535 | # ------ Evaluate BSDF * cos(theta) and sample direction ------- 536 | 537 | sample1 = sampler.next_1d() 538 | sample2 = sampler.next_2d() 539 | 540 | bsdf_val, bsdf_pdf, bsdf_sample, bsdf_weight = bsdf.eval_pdf_sample( 541 | bsdf_ctx, si, wo, sample1, sample2 542 | ) 543 | 544 | # --------------- Emitter sampling contribution ---------------- 545 | 546 | bsdf_val = si.to_world_mueller(bsdf_val, -wo, si.wi) 547 | 548 | mi_em = dr.select(ds.delta, 1.0, mis_weight(ds.pdf, bsdf_pdf)) 549 | 550 | result[active_em] = dr.fma(throughput, bsdf_val * em_weight * mi_em, result) 551 | 552 | # ---------------------- BSDF sampling ---------------------- 553 | 554 | bsdf_weight = si.to_world_mueller(bsdf_weight, -bsdf_sample.wo, si.wi) 555 | 556 | ray = si.spawn_ray(si.to_world(bsdf_sample.wo)) 557 | 558 | # ------ Update loop variables based on current interaction ------ 559 | 560 | throughput *= bsdf_weight 561 | eta *= bsdf_sample.eta 562 | valid_ray |= ( 563 | active 564 | & si.is_valid() 565 | & ~mi.has_flag(bsdf_sample.sampled_type, mi.BSDFFlags.Null) 566 | ) 567 | 568 | prev_si = si 569 | prev_bsdf_pdf = bsdf_sample.pdf 570 | prev_bsdf_delta = mi.has_flag(bsdf_sample.sampled_type, mi.BSDFFlags.Delta) 571 | 572 | # -------------------- Stopping criterion --------------------- 573 | 574 | depth[si.is_valid()] += 1 575 | 576 | throughput_max = dr.max(throughput) 577 | 578 | rr_prop = dr.minimum(throughput_max * dr.sqr(eta), 0.95) 579 | rr_active = depth >= self.rr_depth 580 | rr_continue = sampler.next_1d() < rr_prop 581 | 582 | throughput[rr_active] *= dr.rcp(rr_prop) 583 | 584 | active = ( 585 | active_next & (~rr_active | rr_continue) & (dr.neq(throughput_max, 0.0)) 586 | ) 587 | 588 | return dr.select(valid_ray, result, 0.0) 589 | 590 | 591 | mi.register_integrator("restirgi", lambda props: RestirIntegrator(props)) 592 | 593 | if __name__ == "__main__": 594 | with dr.suspend_grad(): 595 | scene = mi.cornell_box() 596 | scene["sensor"]["film"]["width"] = 1024 597 | scene["sensor"]["film"]["height"] = 1024 598 | scene["sensor"]["film"]["rfilter"] = mi.load_dict({"type": "box"}) 599 | scene: mi.Scene = mi.load_dict(scene) 600 | # scene: mi.Scene = mi.load_file("./data/scenes/wall/scene.xml") 601 | # scene: mi.Scene = mi.load_file("./data/scenes/living-room-3/scene.xml") 602 | # scene: mi.Scene = mi.load_file("data/scenes/staircase/scene.xml") 603 | # scene: mi.Scene = mi.load_file("data/scenes/shadow-mask/scene.xml") 604 | # scene: mi.Scene = mi.load_file("data/scenes/dining-room/scene.xml") 605 | 606 | print("Rendering Reference Image:") 607 | ref = mi.render(scene, spp=256) 608 | mi.util.write_bitmap("out/ref.jpg", ref) 609 | 610 | integrator: RestirIntegrator = mi.load_dict( 611 | { 612 | "type": "restirgi", 613 | "jacobian": False, 614 | "bias_correction": False, 615 | "bsdf_sampling": True, 616 | "max_M_spatial": 500, 617 | "max_M_temporal": 30, 618 | "initial_search_radius": 10, 619 | } 620 | ) 621 | 622 | print("ReSTIRGI:") 623 | for i in tqdm(range(200)): 624 | img = mi.render(scene, integrator=integrator, seed=i, spp=1) 625 | 626 | mi.util.write_bitmap(f"out/{i}.jpg", img) 627 | 
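RestirReservoir.update() and merge() above implement the streaming weighted reservoir sampling step from the ReSTIR/ReSTIR GI papers: each candidate adds its weight to the running sum w, is counted in M, and replaces the stored sample z with probability w_new / w; the unbiased contribution weight is then W = w / (M * p_hat(z)). A minimal scalar sketch of the same bookkeeping, in plain Python with illustrative names (not code from this repository):

import random

class ScalarReservoir:
    def __init__(self):
        self.z = None      # currently selected sample
        self.w_sum = 0.0   # running sum of candidate weights
        self.M = 0         # number of candidates seen so far
        self.W = 0.0       # unbiased contribution weight

    def update(self, z_new, w_new, rng=random):
        # Streaming reservoir update: keep the new candidate with probability w_new / w_sum.
        self.w_sum += w_new
        self.M += 1
        if self.w_sum > 0 and rng.random() < w_new / self.w_sum:
            self.z = z_new

    def finalize(self, p_hat):
        # W = w_sum / (M * p_hat(z)); zero when nothing was selected or the target density vanishes.
        denom = self.M * p_hat(self.z) if self.z is not None else 0.0
        self.W = self.w_sum / denom if denom > 0 else 0.0

merge() follows the same pattern: the neighbour reservoir enters as a single candidate with weight p_hat(r.z) * r.W * r.M, after which its M is added to the merged count, which matches how the temporal and spatial resampling passes above combine reservoirs.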
-------------------------------------------------------------------------------- /simple.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | import matplotlib.pyplot as plt 4 | 5 | mi.set_variant("cuda_ad_rgb") 6 | 7 | 8 | class Simple(mi.SamplingIntegrator): 9 | def __init__(self, props=mi.Properties()): 10 | super().__init__(props) 11 | self.max_depth = props.get("max_depth", def_value=8) 12 | self.rr_depth = props.get("rr_depth", def_value=2) 13 | 14 | def sample( 15 | self, 16 | scene: mi.Scene, 17 | sampler: mi.Sampler, 18 | ray: mi.RayDifferential3f, 19 | medium: mi.Medium = None, 20 | active: mi.Bool = True, 21 | ): 22 | # --------------------- Configure loop state ---------------------- 23 | ray = mi.Ray3f(ray) 24 | f = mi.Spectrum(1.0) 25 | L = mi.Spectrum(0.0) 26 | eta = mi.Float(1.0) 27 | depth = mi.UInt32(0) 28 | bsdf_ctx = mi.BSDFContext() 29 | 30 | # Variables caching information from the previous bounce 31 | prev_si = dr.zeros(mi.SurfaceInteraction3f) 32 | prev_bsdf_pdf = mi.Float(1.0) 33 | prev_bsdf_delta = mi.Bool(True) 34 | active = mi.Bool(active) 35 | 36 | loop = mi.Loop( 37 | "Path Tracer", 38 | state=lambda: ( 39 | sampler, 40 | ray, 41 | f, 42 | L, 43 | eta, 44 | depth, 45 | prev_si, 46 | prev_bsdf_pdf, 47 | prev_bsdf_delta, 48 | active, 49 | ), 50 | ) 51 | 52 | loop.set_max_iterations(self.max_depth) 53 | 54 | while loop(active): 55 | with dr.resume_grad(): 56 | si: mi.SurfaceInteraction3f = scene.ray_intersect( 57 | ray, mi.RayFlags.All, coherent=mi.Bool(False) 58 | ) 59 | 60 | # ---------------------- Direct emission ---------------------- 61 | ds = mi.DirectionSample3f(scene, si, prev_si) 62 | em_pdf = scene.eval_emitter_direction(prev_si, ds, ~prev_bsdf_delta) 63 | # mis_bsdf = 1.0 64 | 65 | # L = dr.fma(f, ds.emitter.eval(si, prev_bsdf_pdf > 0.) 
* mis_bsdf, L) 66 | with dr.resume_grad(): 67 | # Le = f * mis_bsdf * ds.emitter.eval(si) 68 | L = dr.fma(f, ds.emitter.eval(si, prev_bsdf_pdf > 0.0), L) 69 | 70 | active_next = (depth + 1 < self.max_depth) & si.is_valid() 71 | 72 | # ---------------------- BSDF sampling ---------------------- 73 | bsdf: mi.BSDF = si.bsdf(ray) 74 | 75 | s1 = sampler.next_1d() 76 | s2 = sampler.next_2d() 77 | 78 | bsdf_sample, bsdf_weight = bsdf.sample(bsdf_ctx, si, s1, s2, active_next) 79 | bsdf_weight = si.to_world_mueller(bsdf_weight, -bsdf_sample.wo, si.wi) 80 | 81 | # Pssmlt adjusting 82 | wo = bsdf_sample.wo 83 | 84 | ray = si.spawn_ray(si.to_world(wo)) 85 | 86 | if dr.grad_enabled(ray): 87 | ray = dr.detach(ray) 88 | 89 | wo = si.to_local(ray.d) 90 | bsdf_val, bsdf_pdf = bsdf.eval_pdf(bsdf_ctx, si, wo, active) 91 | bsdf_weight[bsdf_pdf > 0.0] = bsdf_val / dr.detach(bsdf_pdf) 92 | 93 | # ------ Update loop variables based on current interaction ------ 94 | 95 | f *= bsdf_weight 96 | eta *= bsdf_sample.eta 97 | 98 | prev_si = dr.detach(si) 99 | prev_bsdf_pdf = bsdf_sample.pdf 100 | prev_bsdf_delta = mi.has_flag(bsdf_sample.sampled_type, mi.BSDFFlags.Delta) 101 | 102 | # -------------------- Stopping criterion --------------------- 103 | 104 | depth[si.is_valid()] += 1 105 | 106 | fmax = dr.max(f) 107 | 108 | rr_prob = dr.minimum(fmax * dr.sqr(eta), 0.95) 109 | rr_active = depth >= self.rr_depth 110 | rr_continue = sampler.next_1d() < rr_prob 111 | 112 | f[rr_active] *= dr.rcp(dr.detach(rr_prob)) 113 | 114 | active = active_next & (~rr_active | rr_continue) & dr.neq(fmax, 0.0) 115 | 116 | return L, dr.neq(depth, 0), [] 117 | 118 | 119 | mi.register_integrator("integrator", lambda props: Simple(props)) 120 | 121 | if __name__ == "__main__": 122 | scene = mi.cornell_box() 123 | scene["integrator"]["type"] = "integrator" 124 | scene["integrator"]["max_depth"] = 16 125 | scene["integrator"]["rr_depth"] = 2 126 | scene["sensor"]["sampler"]["sample_count"] = 64 127 | scene["sensor"]["film"]["width"] = 1024 128 | scene["sensor"]["film"]["height"] = 1024 129 | scene = mi.load_dict(scene) 130 | 131 | img = mi.render(scene) 132 | 133 | plt.imshow(img ** (1.0 / 2.2)) 134 | plt.axis("off") 135 | plt.show() 136 | -------------------------------------------------------------------------------- /specular_from_bsdf.py: -------------------------------------------------------------------------------- 1 | import drjit as dr 2 | import mitsuba as mi 3 | from typing import Tuple 4 | 5 | mi.set_variant("cuda_ad_rgb") 6 | 7 | 8 | if __name__ == "__main__": 9 | 10 | scene = mi.cornell_box() 11 | scene["red"]["type"] = "conductor" 12 | scene["red"] = { 13 | "type": "dielectric", 14 | "specular_reflectance": { 15 | "type": "bitmap", 16 | "filename": "WoodFloor.jpg", 17 | }, 18 | } 19 | scene = mi.load_dict(scene) 20 | 21 | img = mi.render(scene) 22 | 23 | mi.util.write_bitmap("out/test.jpg", img) 24 | 25 | def specular(self, si: mi.SurfaceInteraction3f) -> mi.Color3f: 26 | params = mi.traverse(self) 27 | 28 | sr = "specular_reflectance" 29 | if f"{sr}.data" in params and f"{sr}.to_uv": 30 | data = params[f"{sr}.data"] 31 | to_uv = params[f"{sr}.to_uv"] 32 | 33 | texture = mi.Texture2f(data) 34 | 35 | to_uv = mi.Transform3f(to_uv) 36 | 37 | uv = to_uv @ si.uv 38 | 39 | return mi.Color3f(texture.eval(uv)) 40 | sr = "specular_reflectance" 41 | if f"{sr}.value" in params: 42 | return mi.Color3f(params[f"{sr}.value"]) 43 | else: 44 | return mi.Color3f(0) 45 | 46 | bsdf_ptrs = scene.shapes_dr().bsdf() 47 | 
print(f"{dr.width(bsdf_ptrs)=}") 48 | 49 | si = dr.zeros(mi.SurfaceInteraction3f, 8) 50 | specular_color = dr.dispatch(bsdf_ptrs, specular, si) 51 | print(f"{specular_color=}") 52 | -------------------------------------------------------------------------------- /sppm.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | 4 | if __name__ == "__main__": 5 | mi.set_variant("cuda_ad_rgb") 6 | dr.set_flag(dr.JitFlag.LoopRecord, False) 7 | 8 | 9 | def hash(p: mi.Point3u | mi.Point3f, hash_size: int): 10 | if isinstance(p, mi.Point3f): 11 | p = mi.Point3u(mi.UInt(p.x), mi.UInt(p.y), mi.UInt(p.z)) 12 | return hash(p, hash_size) 13 | return ((p.x * 73856093) ^ (p.y * 19349663) ^ (p.z * 83492791)) % hash_size 14 | 15 | 16 | def cumsum(src: mi.UInt | mi.Float): 17 | N = dr.shape(src)[-1] 18 | idx = dr.arange(mi.UInt, N) 19 | dst = dr.zeros(type(src), N) 20 | depth = mi.UInt(0) 21 | 22 | loop = mi.Loop("cumsum", lambda: (idx, dst, depth)) 23 | 24 | loop.set_max_iterations(N) 25 | 26 | while loop(depth < idx): 27 | dst += dr.gather(mi.UInt, src, depth, depth < idx) 28 | depth += 1 29 | 30 | return dst 31 | 32 | 33 | class HashGrid: 34 | def expand_sample( 35 | self, sample: mi.Point3f, radius: mi.Float 36 | ) -> tuple[mi.UInt, mi.UInt]: 37 | initial_sample_size = dr.shape(sample)[-1] 38 | pmin = self.to_grid(sample - mi.Vector3f(radius)) 39 | pmax = self.to_grid(sample + mi.Vector3f(radius)) + 1 40 | 41 | grid_size: mi.Vector3u = pmax - pmin 42 | bins_per_grid = grid_size.x * grid_size.y * grid_size.z 43 | sample_size = dr.sum(bins_per_grid)[0] 44 | 45 | dr.eval(bins_per_grid) 46 | grid_offset = cumsum(bins_per_grid) 47 | print(f"{grid_offset=}") 48 | print(f"{bins_per_grid=}") 49 | 50 | sample_idx = dr.zeros(mi.UInt, sample_size) 51 | sample_h = dr.zeros(mi.UInt, sample_size) 52 | inside = dr.zeros(mi.Bool, sample_size) 53 | 54 | idx = mi.UInt(0) 55 | dr.set_flag(dr.JitFlag.LoopRecord, False) 56 | loop = mi.Loop("Bin Size", lambda: (idx,)) 57 | 58 | while loop(idx < bins_per_grid): 59 | z = idx // grid_size.x * grid_size.y 60 | y = idx % grid_size.z // grid_size.x 61 | x = idx % grid_size.z % grid_size.y 62 | p = mi.Point3u(x, y, z) 63 | p = p - grid_size // 2 + pmin 64 | h = hash(p, sample_size) 65 | # print(f"{sample_idx=}") 66 | # print(f"{grid_offset + idx=}") 67 | # dr.scatter( 68 | # inside, 69 | # (p > 0) & (p < self.resolution), 70 | # grid_offset + idx, 71 | # idx < bins_per_grid, 72 | # ) 73 | dr.scatter( 74 | sample_idx, 75 | dr.arange(mi.UInt, initial_sample_size), 76 | grid_offset + idx, 77 | idx < bins_per_grid, 78 | ) 79 | # print(f"{grid_offset+idx=}") 80 | dr.scatter( 81 | sample_h, 82 | h, 83 | grid_offset + idx, 84 | ) 85 | idx += 1 86 | 87 | print(f"{sample_size=}") 88 | print(f"{dr.count(inside)=}") 89 | print(f"{sample_idx=}") 90 | 91 | idx = dr.compress(inside) 92 | sample_h = dr.gather(mi.UInt, sample_h, idx) 93 | sample_idx = dr.gather(mi.UInt, sample_idx, idx) 94 | 95 | print(f"{sample_h=}") 96 | return sample_idx, sample_h 97 | 98 | def __init__(self, sample: mi.Point3f, radius: mi.Float, resolution: int) -> None: 99 | """ 100 | Constructs a 3D Hash Grid with the samples inserted. 
101 | 102 | It uses the hash function from the pbrt-v3 SPPM implementaiton (https://github.com/mmp/pbrt-v3/blob/master/src/integrators/sppm.cpp) 103 | 104 | @param sample: Samples to insert into the Hash Grid 105 | @param resolution: The number of grid cells in each dimension 106 | """ 107 | # First expand samples 108 | 109 | # hash_size = dr.shape(sample)[-1] 110 | self.resolution = resolution 111 | self.bbmin = mi.Point3f(dr.min(sample.x), dr.min(sample.y), dr.min(sample.z)) 112 | self.bbmax = ( 113 | mi.Point3f(dr.max(sample.x), dr.max(sample.y), dr.max(sample.z)) + 0.0001 114 | ) 115 | 116 | ref_sample_idx, h = self.expand_sample(sample, radius) 117 | sample_size = dr.shape(ref_sample_idx)[-1] 118 | 119 | """ 120 | In order to calculate the offset for every bin we first calculate the 121 | size of every bin using the `scatter_reduce` function. 122 | The size is written into the `bin_size` array at the hash position `h`. 123 | Afterwards we calculate the cumulative sum in order to get an offset for 124 | every bin. 125 | Now querying the `bin_offset` array at position `h` gets the offset for the 126 | bin corresponding to that hash. 127 | """ 128 | bin_size = dr.zeros(mi.UInt, sample_size) 129 | dr.scatter_reduce(dr.ReduceOp.Add, bin_size, 1, h) 130 | dr.eval(bin_size) 131 | bin_offset = cumsum(bin_size) # This represents 132 | 133 | sample_bin_offset = dr.gather(mi.UInt, bin_offset, h) 134 | 135 | sample_idx = dr.zeros(mi.UInt, sample_size) 136 | sample_cell_cap = dr.gather(mi.UInt, bin_size, h) 137 | active_sample = dr.full(mi.Bool, True, sample_size) 138 | 139 | """ 140 | In this loop we iterate through all cells in a bin and from high to low insert 141 | the index of the sample into the `sample_idx` array. 142 | In order to not insert indices twice we need to 'deactivate' samples that have 143 | already been inserted. 144 | To do so we need to get the last inserted index which is only possible after 145 | calling `dr.eval` on `sample_idx`. 146 | Therefore the loop cannot be a Dr.Jit loop. 
147 | """ 148 | 149 | depth = mi.UInt(0) 150 | max_depth = dr.max(bin_size)[0] 151 | loop_record = dr.flag(dr.JitFlag.LoopRecord) 152 | dr.set_flag(dr.JitFlag.LoopRecord, False) 153 | 154 | loop = mi.Loop("Fill bins", lambda: (depth)) 155 | 156 | while loop(depth < max_depth): 157 | dr.scatter_reduce( 158 | dr.ReduceOp.Max, 159 | sample_idx, 160 | ref_sample_idx, 161 | # dr.arange(mi.UInt, hash_size), 162 | depth + sample_bin_offset, 163 | (depth < sample_cell_cap) & active_sample, 164 | ) 165 | dr.eval(sample_idx) 166 | 167 | selected_sample = dr.gather( 168 | mi.UInt, sample_idx, depth + sample_bin_offset, depth < sample_cell_cap 169 | ) 170 | is_selected_sample = dr.eq(selected_sample, ref_sample_idx) 171 | active_sample &= ~is_selected_sample 172 | 173 | dr.set_flag(dr.JitFlag.LoopRecord, loop_record) 174 | 175 | self.__bin_size = bin_size 176 | self.__bin_offset = bin_offset 177 | self.__sample_idx = sample_idx 178 | self.__sample = sample 179 | 180 | def to_grid(self, p: mi.Point2f) -> mi.Point3u: 181 | p_grid = dr.clamp( 182 | mi.Point3u((p - self.bbmin) / (self.bbmax - self.bbmin) * self.resolution), 183 | mi.Point3u(0), 184 | mi.Point3u(self.resolution), 185 | ) 186 | return p_grid 187 | 188 | def hash(self, sample: mi.Point2f): 189 | return hash( 190 | (sample - self.bbmin) / (self.bbmax - self.bbmin) * self.resolution, 191 | self.sample_size, 192 | ) 193 | 194 | 195 | class SPPMIntegrator(mi.SamplingIntegrator): 196 | def __init__(self, props: mi.Properties) -> None: 197 | super().__init__(props) 198 | 199 | def sample_visible_point( 200 | self, 201 | scene: mi.Scene, 202 | sensor: mi.Sensor, 203 | sampler: mi.Sampler, 204 | sample_pos: mi.Point2f, 205 | ) -> tuple[mi.SurfaceInteraction3f, mi.Spectrum]: 206 | ray, ray_weight = sensor.sample_ray(0.0, 0.0, sample_pos, mi.Point2f(0.5)) 207 | max_depth = 6 208 | β = mi.Spectrum(1.0) 209 | depth = mi.UInt(0) 210 | active = mi.Bool(True) 211 | bsdf_ctx = mi.BSDFContext() 212 | si: mi.SurfaceInteraction3f = dr.zeros(mi.SurfaceInteraction3f) 213 | 214 | loop = mi.Loop("Camera Tracing", lambda: (depth, active, β, ray, si)) 215 | loop.set_max_iterations(max_depth) 216 | 217 | while loop(active): 218 | si: mi.SurfaceInteraction3f = scene.ray_intersect(ray, active) 219 | 220 | bsdf: mi.BSDF = si.bsdf(ray) 221 | bsdf_sample, bsdf_weight = bsdf.sample( 222 | bsdf_ctx, si, sampler.next_1d(), sampler.next_2d(), active 223 | ) 224 | 225 | active &= si.is_valid() 226 | active &= ~mi.has_flag(bsdf_sample.sampled_type, mi.BSDFFlags.Smooth) 227 | active &= depth < max_depth 228 | 229 | ray = si.spawn_ray(si.to_world(bsdf_sample.wo)) 230 | 231 | β[active] *= bsdf_weight 232 | depth += 1 233 | 234 | return si, β 235 | 236 | def render( 237 | self, 238 | scene: mi.Scene, 239 | sensor: mi.Sensor, 240 | seed: int, 241 | spp: int, 242 | develop: bool, 243 | evaluate: bool, 244 | ) -> mi.TensorXf: 245 | film = sensor.film() 246 | film_size = film.crop_size() 247 | 248 | wavefront_size = film_size.x * film_size.y 249 | 250 | sampler = sensor.sampler() 251 | sampler.set_sample_count(1) 252 | sampler.set_samples_per_wavefront(1) 253 | sampler.seed(seed, wavefront_size) 254 | 255 | idx = dr.arange(mi.UInt, wavefront_size) 256 | pos = mi.Vector2u() 257 | pos.y = idx // film_size.x 258 | pos.x = dr.fma(-film_size.x, pos.y, idx) 259 | 260 | sample_pos = (mi.Point2f(pos) + sampler.next_2d()) / mi.Point2f( 261 | film.crop_size() 262 | ) 263 | 264 | # Sample visible points: 265 | 266 | vp_si, vp_β = self.sample_visible_point(scene, sensor, sampler, sample_pos) 267 | 
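# The HashGrid above groups samples into cells with a counting-sort pattern: scatter_reduce(Add)
# counts how many samples fall into each hash bin, the exclusive cumulative sum of those counts
# gives every bin its offset into one flat index array, and the fill loop then writes the sample
# indices into their bin's slots. The equivalent grouping in plain NumPy, shown as an illustrative
# sketch rather than code from this repository:
#
#     import numpy as np
#     h = np.array([2, 0, 2, 1, 0])                            # hash/bin id per sample
#     counts = np.bincount(h, minlength=3)                     # bin sizes       -> [2, 1, 2]
#     offsets = np.concatenate(([0], np.cumsum(counts)[:-1]))  # exclusive scan  -> [0, 2, 3]
#     order = np.argsort(h, kind="stable")                     # sample indices grouped by bin
#     # the samples of bin b are order[offsets[b] : offsets[b] + counts[b]]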
268 | dr.eval(vp_si, vp_β) 269 | 270 | grid = HashGrid(vp_si.p, 100) 271 | print(f"{vp_β=}") 272 | print(f"{dr.count(vp_si.is_valid())=}") 273 | 274 | ... 275 | 276 | 277 | if __name__ == "__main__": 278 | sampler: mi.Sampler = mi.load_dict({"type": "independent"}) 279 | sampler.seed(2, 10) 280 | gird = HashGrid( 281 | mi.Point3f(sampler.next_1d(), sampler.next_1d(), sampler.next_1d()), 282 | sampler.next_1d() * 0.01, 283 | 100, 284 | ) 285 | # scene: mi.Scene = mi.load_dict(mi.cornell_box()) 286 | # 287 | # integrator = SPPMIntegrator(mi.Properties()) 288 | # 289 | # integrator.render(scene, scene.sensors()[0], 0, 1, True, True) 290 | # mi.render(scene, integrator=integrator) 291 | -------------------------------------------------------------------------------- /test-restir-dynamic.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | from tqdm import tqdm 3 | 4 | if __name__ == "__main__": 5 | mi.set_variant("cuda_ad_rgb") 6 | 7 | import restirgi 8 | 9 | if __name__ == "__main__": 10 | integrator: restirgi.RestirReservoir = mi.load_dict( 11 | { 12 | "type": "restirgi", 13 | "jacobian": True, 14 | "spatial_biased": True, 15 | "bsdf_sampling": True, 16 | "max_M_spatial": 500, 17 | "max_M_temporal": 30, 18 | } 19 | ) 20 | 21 | scene: mi.Scene = mi.load_file("data/scenes/staircase/scene.xml") 22 | params = mi.traverse(scene) 23 | print(f"{params=}") 24 | 25 | for i in tqdm(range(200)): 26 | params["PerspectiveCamera.to_world"] @= mi.Transform4f.translate( 27 | [0.0, 0.0, 0.01] 28 | ) 29 | params.update() 30 | 31 | img = mi.render(scene, params, seed=i, integrator=integrator, spp=1) 32 | mi.util.write_bitmap(f"out/{i}.jpg", img) 33 | -------------------------------------------------------------------------------- /test-restir-jacobian.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | import matplotlib.pyplot as plt 4 | from tqdm import tqdm 5 | 6 | mi.set_variant("cuda_ad_rgb") 7 | 8 | import restirgi 9 | 10 | if __name__ == "__main__": 11 | n_iterations = 100 12 | spp = 1 13 | 14 | scene = mi.cornell_box() 15 | scene["sensor"]["film"]["width"] = 1024 16 | scene["sensor"]["film"]["height"] = 1024 17 | scene["sensor"]["film"]["rfilter"] = mi.load_dict({"type": "box"}) 18 | scene: mi.Scene = mi.load_dict(scene) 19 | scene = mi.load_file("./data/scenes/staircase/scene.xml") 20 | # scene = mi.load_file("./data/scenes/wall/scene.xml") 21 | # scene = mi.load_file("./data/scenes/living-room-3/scene.xml") 22 | # scene: mi.Scene = mi.load_file("data/scenes/shadow-mask/scene.xml") 23 | 24 | ref = mi.render(scene, spp=256) 25 | mi.util.write_bitmap("out/ref.exr", ref) 26 | 27 | biased: restirgi.RestirIntegrator = mi.load_dict( 28 | { 29 | "type": "restirgi", 30 | "jacobian": False, 31 | "bias_correction": False, 32 | "bsdf_sampling": True, 33 | "max_M_spatial": 500, 34 | "max_M_temporal": 30, 35 | } 36 | ) 37 | 38 | unbiased: restirgi.RestirIntegrator = mi.load_dict( 39 | { 40 | "type": "restirgi", 41 | "jacobian": True, 42 | "bias_correction": False, 43 | "bsdf_sampling": True, 44 | "max_M_spatial": 500, 45 | "max_M_temporal": 30, 46 | } 47 | ) 48 | 49 | var_biased = [] 50 | bias_biased = [] 51 | mse_biased = [] 52 | 53 | print("Biased") 54 | for i in tqdm(range(n_iterations)): 55 | img = mi.render(scene, integrator=biased, seed=i, spp=spp) 56 | var_biased.append(dr.mean_nested(dr.sqr(img - dr.mean_nested(img)))[0]) 57 | bias_biased.append(dr.mean_nested(img 
- ref)[0]) 58 | mse_biased.append(dr.mean_nested(dr.sqr(img - ref))) 59 | 60 | img_biased = img 61 | 62 | mi.util.write_bitmap("out/biased.exr", img_biased) 63 | 64 | var_unbiased = [] 65 | bias_unbiased = [] 66 | mse_unbiased = [] 67 | 68 | print("Unbiased") 69 | for i in tqdm(range(n_iterations)): 70 | img = mi.render(scene, integrator=unbiased, seed=i, spp=spp) 71 | var_unbiased.append(dr.mean_nested(dr.sqr(img - dr.mean_nested(img)))[0]) 72 | bias_unbiased.append(dr.mean_nested(img - ref)[0]) 73 | mse_unbiased.append(dr.mean_nested(dr.sqr(img - ref))) 74 | 75 | img_unbiased = img 76 | 77 | mi.util.write_bitmap("out/unbiased.exr", img_unbiased) 78 | 79 | fig, ax = plt.subplots(2, 3, figsize=(20, 10)) 80 | fig.patch.set_visible(False) 81 | 82 | ax[0][0].axis("off") 83 | ax[0][0].imshow(mi.util.convert_to_bitmap(ref)) 84 | ax[0][0].set_title("Reference") 85 | 86 | ax[0][1].plot(bias_biased, label="Biased") 87 | ax[0][1].plot(bias_unbiased, label="Bias Corrected") 88 | ax[0][1].legend(loc="best") 89 | ax[0][1].set_title("Sample Bias") 90 | 91 | ax[1][0].axis("off") 92 | ax[1][0].set_title("Biased") 93 | ax[1][0].imshow(mi.util.convert_to_bitmap(img_biased)) 94 | 95 | ax[1][1].axis("off") 96 | ax[1][1].set_title("Bias Corrected") 97 | ax[1][1].imshow(mi.util.convert_to_bitmap(img_unbiased)) 98 | 99 | ax[0][2].plot(mse_biased, label="Biased") 100 | ax[0][2].plot(mse_unbiased, label="Bias Corrected") 101 | ax[0][2].legend(loc="best") 102 | ax[0][2].set_title("MSE") 103 | 104 | ax[1][2].plot(var_biased, label="Biased") 105 | ax[1][2].plot(var_unbiased, label="Bias Corrected") 106 | ax[1][2].legend(loc="best") 107 | ax[1][2].set_title("Variance") 108 | 109 | fig.tight_layout() 110 | plt.show() 111 | -------------------------------------------------------------------------------- /test-restir-spatial-spatial.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | import matplotlib.pyplot as plt 4 | from tqdm import tqdm 5 | 6 | mi.set_variant("cuda_ad_rgb") 7 | 8 | import restirgi 9 | 10 | if __name__ == "__main__": 11 | n_iterations = 100 12 | spp = 1 13 | 14 | scene = mi.cornell_box() 15 | scene["sensor"]["film"]["width"] = 1024 16 | scene["sensor"]["film"]["height"] = 1024 17 | scene["sensor"]["film"]["rfilter"] = mi.load_dict({"type": "box"}) 18 | scene: mi.Scene = mi.load_dict(scene) 19 | # scene = mi.load_file("./data/scenes/wall/scene.xml") 20 | scene = mi.load_file("./data/scenes/staircase/scene.xml") 21 | # scene: mi.Scene = mi.load_file("data/scenes/shadow-mask/scene.xml") 22 | 23 | ref = mi.render(scene, spp=256) 24 | mi.util.write_bitmap("out/ref.exr", ref) 25 | 26 | biased: restirgi.RestirIntegrator = mi.load_dict( 27 | { 28 | "type": "restirgi", 29 | "jacobian": False, 30 | "bias_correction": False, 31 | "spatial_spatial_reuse": True, 32 | "bsdf_sampling": True, 33 | "max_M_spatial": 500, 34 | "max_M_temporal": 30, 35 | } 36 | ) 37 | 38 | unbiased: restirgi.RestirIntegrator = mi.load_dict( 39 | { 40 | "type": "restirgi", 41 | "jacobian": False, 42 | "bias_correction": False, 43 | "spatial_spatial_reuse": False, 44 | "bsdf_sampling": True, 45 | "max_M_spatial": 500, 46 | "max_M_temporal": 30, 47 | } 48 | ) 49 | 50 | var_biased = [] 51 | bias_biased = [] 52 | mse_biased = [] 53 | 54 | print("Biased") 55 | for i in tqdm(range(n_iterations)): 56 | img = mi.render(scene, integrator=biased, seed=i, spp=spp) 57 | var_biased.append(dr.mean_nested(dr.sqr(img - dr.mean_nested(img)))[0]) 58 | 
bias_biased.append(dr.mean_nested(img - ref)[0]) 59 | mse_biased.append(dr.mean_nested(dr.sqr(img - ref))) 60 | 61 | img_biased = img 62 | 63 | mi.util.write_bitmap("out/biased.exr", img_biased) 64 | 65 | var_unbiased = [] 66 | bias_unbiased = [] 67 | mse_unbiased = [] 68 | 69 | print("Unbiased") 70 | for i in tqdm(range(n_iterations)): 71 | img = mi.render(scene, integrator=unbiased, seed=i, spp=spp) 72 | var_unbiased.append(dr.mean_nested(dr.sqr(img - dr.mean_nested(img)))[0]) 73 | bias_unbiased.append(dr.mean_nested(img - ref)[0]) 74 | mse_unbiased.append(dr.mean_nested(dr.sqr(img - ref))) 75 | 76 | img_unbiased = img 77 | 78 | mi.util.write_bitmap("out/unbiased.exr", img_unbiased) 79 | 80 | fig, ax = plt.subplots(2, 3, figsize=(20, 10)) 81 | fig.patch.set_visible(False) 82 | 83 | ax[0][0].axis("off") 84 | ax[0][0].imshow(mi.util.convert_to_bitmap(ref)) 85 | ax[0][0].set_title("Reference") 86 | 87 | ax[0][1].plot(bias_biased, label="Biased") 88 | ax[0][1].plot(bias_unbiased, label="Bias Corrected") 89 | ax[0][1].legend(loc="best") 90 | ax[0][1].set_title("Sample Bias") 91 | 92 | ax[1][0].axis("off") 93 | ax[1][0].set_title("Biased") 94 | ax[1][0].imshow(mi.util.convert_to_bitmap(img_biased)) 95 | 96 | ax[1][1].axis("off") 97 | ax[1][1].set_title("Bias Corrected") 98 | ax[1][1].imshow(mi.util.convert_to_bitmap(img_unbiased)) 99 | 100 | ax[0][2].plot(mse_biased, label="Biased") 101 | ax[0][2].plot(mse_unbiased, label="Bias Corrected") 102 | ax[0][2].legend(loc="best") 103 | ax[0][2].set_title("MSE") 104 | 105 | ax[1][2].plot(var_biased, label="Biased") 106 | ax[1][2].plot(var_unbiased, label="Bias Corrected") 107 | ax[1][2].legend(loc="best") 108 | ax[1][2].set_title("Variance") 109 | 110 | fig.tight_layout() 111 | plt.show() 112 | -------------------------------------------------------------------------------- /test-restir-spatial.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | import matplotlib.pyplot as plt 4 | from tqdm import tqdm 5 | 6 | mi.set_variant("cuda_ad_rgb") 7 | 8 | import restirgi 9 | 10 | if __name__ == "__main__": 11 | n_iterations = 100 12 | spp = 1 13 | 14 | scene = mi.cornell_box() 15 | scene["sensor"]["film"]["width"] = 1024 16 | scene["sensor"]["film"]["height"] = 1024 17 | scene["sensor"]["film"]["rfilter"] = mi.load_dict({"type": "box"}) 18 | scene: mi.Scene = mi.load_dict(scene) 19 | # scene = mi.load_file("./data/scenes/wall/scene.xml") 20 | scene = mi.load_file("./data/scenes/staircase/scene.xml") 21 | # scene: mi.Scene = mi.load_file("data/scenes/shadow-mask/scene.xml") 22 | 23 | ref = mi.render(scene, spp=256) 24 | mi.util.write_bitmap("out/ref.exr", ref) 25 | 26 | biased: restirgi.RestirIntegrator = mi.load_dict( 27 | { 28 | "type": "restirgi", 29 | "jacobian": False, 30 | "bias_correction": False, 31 | "bsdf_sampling": True, 32 | "max_M_spatial": 500, 33 | "max_M_temporal": 30, 34 | } 35 | ) 36 | 37 | unbiased: restirgi.RestirIntegrator = mi.load_dict( 38 | { 39 | "type": "restirgi", 40 | "jacobian": False, 41 | "bias_correction": True, 42 | "bsdf_sampling": True, 43 | "max_M_spatial": 500, 44 | "max_M_temporal": 30, 45 | } 46 | ) 47 | 48 | var_biased = [] 49 | bias_biased = [] 50 | mse_biased = [] 51 | 52 | print("Biased") 53 | for i in tqdm(range(n_iterations)): 54 | img = mi.render(scene, integrator=biased, seed=i, spp=spp) 55 | var_biased.append(dr.mean_nested(dr.sqr(img - dr.mean_nested(img)))[0]) 56 | bias_biased.append(dr.mean_nested(img - ref)[0]) 57 | 
mse_biased.append(dr.mean_nested(dr.sqr(img - ref))) 58 | 59 | img_biased = img 60 | 61 | mi.util.write_bitmap("out/biased.exr", img_biased) 62 | 63 | var_unbiased = [] 64 | bias_unbiased = [] 65 | mse_unbiased = [] 66 | 67 | print("Unbiased") 68 | for i in tqdm(range(n_iterations)): 69 | img = mi.render(scene, integrator=unbiased, seed=i, spp=spp) 70 | var_unbiased.append(dr.mean_nested(dr.sqr(img - dr.mean_nested(img)))[0]) 71 | bias_unbiased.append(dr.mean_nested(img - ref)[0]) 72 | mse_unbiased.append(dr.mean_nested(dr.sqr(img - ref))) 73 | 74 | img_unbiased = img 75 | 76 | mi.util.write_bitmap("out/unbiased.exr", img_unbiased) 77 | 78 | fig, ax = plt.subplots(2, 3, figsize=(20, 10)) 79 | fig.patch.set_visible(False) 80 | 81 | ax[0][0].axis("off") 82 | ax[0][0].imshow(mi.util.convert_to_bitmap(ref)) 83 | ax[0][0].set_title("Reference") 84 | 85 | ax[0][1].plot(bias_biased, label="Biased") 86 | ax[0][1].plot(bias_unbiased, label="Bias Corrected") 87 | ax[0][1].legend(loc="best") 88 | ax[0][1].set_title("Sample Bias") 89 | 90 | ax[1][0].axis("off") 91 | ax[1][0].set_title("Biased") 92 | ax[1][0].imshow(mi.util.convert_to_bitmap(img_biased)) 93 | 94 | ax[1][1].axis("off") 95 | ax[1][1].set_title("Bias Corrected") 96 | ax[1][1].imshow(mi.util.convert_to_bitmap(img_unbiased)) 97 | 98 | ax[0][2].plot(mse_biased, label="Biased") 99 | ax[0][2].plot(mse_unbiased, label="Bias Corrected") 100 | ax[0][2].legend(loc="best") 101 | ax[0][2].set_title("MSE") 102 | 103 | ax[1][2].plot(var_biased, label="Biased") 104 | ax[1][2].plot(var_unbiased, label="Bias Corrected") 105 | ax[1][2].legend(loc="best") 106 | ax[1][2].set_title("Variance") 107 | 108 | fig.tight_layout() 109 | plt.show() 110 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | 4 | mi.set_variant("cuda_ad_rgb") 5 | 6 | dr.set_flag(dr.JitFlag.LoopRecord, False) 7 | dr.set_log_level(dr.LogLevel.Trace) 8 | 9 | sampler: mi.Sampler = mi.load_dict({"type": "independent"}) 10 | sampler.seed(0, 10) 11 | 12 | depth = mi.UInt(0, 0, 0) 13 | 14 | loop = mi.Loop("test", lambda: (depth)) 15 | while loop(depth < 10): 16 | depth += 1 17 | 18 | print(f"{depth=}") 19 | -------------------------------------------------------------------------------- /testpssmlt.py: -------------------------------------------------------------------------------- 1 | import gc 2 | from typing import overload 3 | import mitsuba as mi 4 | import drjit as dr 5 | import matplotlib.pyplot as plt 6 | from dataclasses import dataclass 7 | 8 | mi.set_variant("cuda_ad_rgb") 9 | # dr.set_log_level(dr.LogLevel.Debug) 10 | # mi.set_log_level(mi.LogLevel.Debug) 11 | import pssmltpath 12 | import pssmltsimple 13 | import simple 14 | import pssmlt 15 | 16 | 17 | if __name__ == "__main__": 18 | scene = mi.load_file("data/caustics/scene.xml") 19 | 20 | img = None 21 | with dr.suspend_grad(): 22 | integrator = mi.load_dict( 23 | { 24 | "type": "ptracer", 25 | } 26 | ) # type: ignore 27 | ref_pt = mi.render(scene, integrator=integrator, spp=128) 28 | mi.util.write_bitmap("out/ref_pt.png", ref_pt) 29 | 30 | integrator = mi.load_dict( 31 | { 32 | "type": "path", 33 | } 34 | ) # type: ignore 35 | ref_path = mi.render(scene, integrator=integrator, spp=128) 36 | mi.util.write_bitmap("out/ref_path.png", ref_path) 37 | 38 | integrator: pssmltsimple.Pssmlt = mi.load_dict( 39 | { 40 | "type": "pssmlt", 41 | "max_depth": 8, 42 | 
"rr_depth": 2, 43 | } 44 | ) # type: ignore 45 | img = integrator.render(scene, scene.sensors()[0], seed=0, spp=4) 46 | mi.util.write_bitmap("out/img.png", img) 47 | 48 | mi.util.write_bitmap("out/dif-pt.png", img - ref_pt) 49 | mi.util.write_bitmap("out/dif-pt-1.png", ref_pt - img) 50 | -------------------------------------------------------------------------------- /to_dict.py: -------------------------------------------------------------------------------- 1 | import mitsuba as mi 2 | import drjit as dr 3 | 4 | if __name__ == "__main__": 5 | mi.set_variant("cuda_ad_rgb") 6 | 7 | 8 | def to_dict(scene: mi.Scene): 9 | assert isinstance(scene, mi.Scene) 10 | 11 | unknown_counter = 0 12 | 13 | def get_id(child: mi.Object): 14 | nonlocal unknown_counter 15 | id = child.id() 16 | if id == "": 17 | id = f"unknown{unknown_counter}" 18 | unknown_counter += 1 19 | return id 20 | 21 | children = [ 22 | *scene.shapes(), 23 | *scene.emitters(), 24 | *scene.sensors(), 25 | scene.integrator(), 26 | ] 27 | return { 28 | "type": "scene", 29 | **{get_id(child): child for child in children}, 30 | } 31 | 32 | 33 | if __name__ == "__main__": 34 | scene = mi.cornell_box() 35 | scene["sg"] = { 36 | "type": "shapegroup", 37 | "second_object": { 38 | "type": "sphere", 39 | "to_world": mi.ScalarTransform4f() 40 | .translate([-0.5, 0, 0]) 41 | .scale([0.2, 0.2, 0.2]), 42 | "bsdf": { 43 | "type": "diffuse", 44 | }, 45 | }, 46 | } 47 | scene["first_instance"] = { 48 | "type": "instance", 49 | "shapegroup": {"type": "ref", "id": "sg"}, 50 | } 51 | 52 | scene = mi.load_dict(scene) 53 | 54 | ref = mi.render(scene, spp=128) 55 | mi.util.write_bitmap("out/ref.exr", ref) 56 | 57 | scene = to_dict(scene) 58 | print(f"{scene=}") 59 | 60 | scene = mi.load_dict(scene) 61 | 62 | res = mi.render(scene, spp=128) 63 | mi.util.write_bitmap("out/res.exr", ref) 64 | 65 | assert dr.allclose(ref, res) 66 | -------------------------------------------------------------------------------- /util.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations # Delayed parsing of type annotations 2 | 3 | import mitsuba as mi 4 | import drjit as dr 5 | 6 | 7 | def concat_gather(arrays: list): 8 | final_width = 0 9 | for array in arrays: 10 | final_width += dr.width(array) 11 | 12 | index = dr.arange(mi.UInt, final_width) 13 | final_array = None 14 | 15 | for i in range(len(arrays)): 16 | array = arrays[i] 17 | gathered_array = dr.gather( 18 | type(array), array, index, index < dr.width(array) 19 | ) # relies on wrapping behaviour of UInt 20 | if final_array is None: 21 | final_array = gathered_array 22 | else: 23 | final_array = dr.select( 24 | index < dr.width(array), gathered_array, final_array 25 | ) 26 | 27 | index = index - dr.width(array) 28 | 29 | return final_array 30 | 31 | 32 | def concat_scatter(arrays: list): 33 | final_width = 0 34 | for array in arrays: 35 | final_width += dr.width(array) 36 | 37 | dst = dr.zeros(type(arrays[0]), shape=final_width) 38 | count = 0 39 | for array in arrays: 40 | n = dr.shape(array)[-1] 41 | i = dr.arange(mi.UInt32, count, count + n) 42 | dr.scatter(dst, array, i, i < final_width) 43 | 44 | count += n 45 | return dst 46 | 47 | 48 | if __name__ == "__main__": 49 | mi.set_variant("cuda_ad_rgb") 50 | 51 | sampler1: mi.Sampler = mi.load_dict({"type": "independent"}) 52 | sampler1.seed(0, 126) 53 | sampler2: mi.Sampler = mi.load_dict({"type": "independent"}) 54 | sampler2.seed(1, 2) 55 | 56 | a = sampler1.next_1d() 57 | b = 
sampler2.next_1d() 58 | 59 | result = concat_gather([a, b]) 60 | 61 | result2 = concat_scatter([a, b]) 62 | 63 | assert dr.all(result == result2) 64 | --------------------------------------------------------------------------------
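
The three test-restir-*.py scripts above share the same measurement loop: render n_iterations frames at spp=1 with a fixed integrator and record per-frame variance, signed bias, and MSE against a 256-spp reference before plotting. A minimal sketch of that shared bookkeeping is given below; the helper name render_metrics and its return layout are illustrative only and not part of the repository, and it reuses the dr.mean_nested/dr.sqr reductions already used by the scripts.

import mitsuba as mi
import drjit as dr
from tqdm import tqdm


def render_metrics(scene, integrator, ref, n_iterations=100, spp=1):
    # Hypothetical helper: renders `n_iterations` frames and records, per frame,
    # the variance around the frame mean, the signed bias against `ref`, and the MSE.
    var, bias, mse = [], [], []
    img = None
    for i in tqdm(range(n_iterations)):
        img = mi.render(scene, integrator=integrator, seed=i, spp=spp)
        var.append(dr.mean_nested(dr.sqr(img - dr.mean_nested(img)))[0])
        bias.append(dr.mean_nested(img - ref)[0])
        mse.append(dr.mean_nested(dr.sqr(img - ref))[0])
    return img, var, bias, mse

With such a helper, each comparison script reduces to loading its two integrator configurations, calling the function once per configuration, and plotting the returned lists.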
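
util.py checks concat_gather against concat_scatter element-wise; a slightly stronger sanity check is to verify that the concatenation reproduces the inputs at the expected offsets. The sketch below assumes the same cuda_ad_rgb variant and uses only dr.gather/dr.width calls that already appear in the file; the function name check_concat is made up for illustration.

import mitsuba as mi
import drjit as dr

mi.set_variant("cuda_ad_rgb")

from util import concat_gather, concat_scatter


def check_concat(a: mi.Float, b: mi.Float):
    # Both concatenation strategies should agree element-wise...
    cat = concat_gather([a, b])
    assert dr.all(cat == concat_scatter([a, b]))
    # ...and the result should contain `a` followed by `b`.
    assert dr.all(dr.gather(mi.Float, cat, dr.arange(mi.UInt32, dr.width(a))) == a)
    assert dr.all(
        dr.gather(mi.Float, cat, dr.arange(mi.UInt32, dr.width(b)) + dr.width(a)) == b
    )


sampler: mi.Sampler = mi.load_dict({"type": "independent"})
sampler.seed(0, 126)
check_concat(sampler.next_1d(), dr.ones(mi.Float, 2))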