Blender V5.0
path_state.h
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
2 *
3 * SPDX-License-Identifier: Apache-2.0 */
4
5#pragma once
6
8
10
12
13/* Initialize queues, so that this path is considered terminated.
14 * Used for early outputs in the camera ray initialization, as well as initialization of split
15 * states for shadow catcher. */
17{
18 INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = 0;
19#ifndef __KERNEL_GPU__
20 INTEGRATOR_STATE_WRITE(&state->shadow, shadow_path, queued_kernel) = 0;
21 INTEGRATOR_STATE_WRITE(&state->ao, shadow_path, queued_kernel) = 0;
22#endif
23}
24
25/* Minimalistic initialization of the path state, which is needed for early outputs in the
26 * integrator initialization to work. */
29 const int x,
30 const int y)
31{
32 const uint render_pixel_index = (uint)tile->offset + x + y * tile->stride;
33
34 INTEGRATOR_STATE_WRITE(state, path, render_pixel_index) = render_pixel_index;
35
37}
38
39/* Initialize the rest of the path state needed to continue the path integration. */
42 const int sample,
43 const uint rng_pixel,
44 const Spectrum throughput)
45{
47 INTEGRATOR_STATE_WRITE(state, path, bounce) = 0;
48 INTEGRATOR_STATE_WRITE(state, path, diffuse_bounce) = 0;
49 INTEGRATOR_STATE_WRITE(state, path, glossy_bounce) = 0;
50 INTEGRATOR_STATE_WRITE(state, path, transmission_bounce) = 0;
51 INTEGRATOR_STATE_WRITE(state, path, transparent_bounce) = 0;
52 INTEGRATOR_STATE_WRITE(state, path, volume_bounce) = 0;
53 INTEGRATOR_STATE_WRITE(state, path, volume_bounds_bounce) = 0;
54 if ((kernel_data.kernel_features & KERNEL_FEATURE_NODE_PORTAL)) {
55 INTEGRATOR_STATE_WRITE(state, path, portal_bounce) = 0;
56 }
57 INTEGRATOR_STATE_WRITE(state, path, rng_pixel) = rng_pixel;
58 INTEGRATOR_STATE_WRITE(state, path, rng_offset) = PRNG_BOUNCE_NUM;
61 INTEGRATOR_STATE_WRITE(state, path, mis_ray_pdf) = 0.0f;
62 INTEGRATOR_STATE_WRITE(state, path, min_ray_pdf) = FLT_MAX;
63 INTEGRATOR_STATE_WRITE(state, path, continuation_probability) = 1.0f;
64 INTEGRATOR_STATE_WRITE(state, path, throughput) = throughput;
65 INTEGRATOR_STATE_WRITE(state, path, optical_depth) = 0.0f;
66#if defined(__PATH_GUIDING__)
67 if ((kernel_data.kernel_features & KERNEL_FEATURE_PATH_GUIDING)) {
68 INTEGRATOR_STATE_WRITE(state, path, unguided_throughput) = 1.0f;
69 INTEGRATOR_STATE_WRITE(state, guiding, path_segment) = nullptr;
70 INTEGRATOR_STATE_WRITE(state, guiding, use_surface_guiding) = false;
71 INTEGRATOR_STATE_WRITE(state, guiding, sample_surface_guiding_rand) = 0.5f;
72 INTEGRATOR_STATE_WRITE(state, guiding, surface_guiding_sampling_prob) = 0.0f;
73 INTEGRATOR_STATE_WRITE(state, guiding, bssrdf_sampling_prob) = 0.0f;
74 INTEGRATOR_STATE_WRITE(state, guiding, use_volume_guiding) = false;
75 INTEGRATOR_STATE_WRITE(state, guiding, sample_volume_guiding_rand) = 0.5f;
76 INTEGRATOR_STATE_WRITE(state, guiding, volume_guiding_sampling_prob) = 0.0f;
77 }
78#endif
79#ifdef __MNEE__
80 INTEGRATOR_STATE_WRITE(state, path, mnee) = 0;
81#endif
82
86
87 if (kernel_data.kernel_features & KERNEL_FEATURE_VOLUME) {
89 state, volume_stack, 0, object) = kernel_data.background.object_index;
91 state, volume_stack, 0, shader) = kernel_data.background.volume_shader;
92 INTEGRATOR_STATE_ARRAY_WRITE(state, volume_stack, 1, object) = OBJECT_NONE;
93 INTEGRATOR_STATE_ARRAY_WRITE(state, volume_stack, 1, shader) = SHADER_NONE;
94 }
95
96#ifdef __DENOISING_FEATURES__
97 if (kernel_data.kernel_features & KERNEL_FEATURE_DENOISING) {
99 INTEGRATOR_STATE_WRITE(state, path, denoising_feature_throughput) = one_spectrum();
100 }
101#endif
102
103#ifdef __LIGHT_LINKING__
104 if (kernel_data.kernel_features & KERNEL_FEATURE_LIGHT_LINKING) {
105 INTEGRATOR_STATE_WRITE(state, path, mis_ray_object) = OBJECT_NONE;
106 }
107#endif
108}
109
112 const int label,
113 const int shader_flag)
114{
115 uint32_t flag = INTEGRATOR_STATE(state, path, flag);
116
117 /* ray through transparent keeps same flags from previous ray and is
118 * not counted as a regular bounce, transparent has separate max */
119 if (label & (LABEL_TRANSPARENT | LABEL_RAY_PORTAL)) {
120 const int transparent_bounce = INTEGRATOR_STATE(state, path, transparent_bounce) + 1;
121
123 if (transparent_bounce >= kernel_data.integrator.transparent_max_bounce) {
124 /* FIXME: `transparent_max_bounce` could be 0, but `transparent_bounce` is at least 1 when we
125 * enter this path. */
127 }
128
129 if (shader_flag & SD_RAY_PORTAL) {
132 state, path, portal_bounce) = INTEGRATOR_STATE(state, path, portal_bounce) + 1;
133 }
134
136 INTEGRATOR_STATE_WRITE(state, path, transparent_bounce) = transparent_bounce;
137 /* Random number generator next bounce. */
138 INTEGRATOR_STATE_WRITE(state, path, rng_offset) += PRNG_BOUNCE_NUM;
139 return;
140 }
141
142 const int bounce = INTEGRATOR_STATE(state, path, bounce) + 1;
143 if (bounce >= kernel_data.integrator.max_bounce) {
145 }
146
148
149#ifdef __VOLUME__
150 if (label & LABEL_VOLUME_SCATTER) {
151 /* volume scatter */
154 if (!(flag & PATH_RAY_ANY_PASS)) {
156 }
157
158 const int volume_bounce = INTEGRATOR_STATE(state, path, volume_bounce) + 1;
159 INTEGRATOR_STATE_WRITE(state, path, volume_bounce) = volume_bounce;
160 if (volume_bounce >= kernel_data.integrator.max_volume_bounce) {
162 }
163
164 if (bounce == 1) {
166 }
167 }
168 else
169#endif
170 {
171 /* surface reflection/transmission */
172 if (label & LABEL_REFLECT) {
175
176 if (label & LABEL_DIFFUSE) {
177 const int diffuse_bounce = INTEGRATOR_STATE(state, path, diffuse_bounce) + 1;
178 INTEGRATOR_STATE_WRITE(state, path, diffuse_bounce) = diffuse_bounce;
179 if (diffuse_bounce >= kernel_data.integrator.max_diffuse_bounce) {
181 }
182 }
183 else {
184 const int glossy_bounce = INTEGRATOR_STATE(state, path, glossy_bounce) + 1;
185 INTEGRATOR_STATE_WRITE(state, path, glossy_bounce) = glossy_bounce;
186 if (glossy_bounce >= kernel_data.integrator.max_glossy_bounce) {
188 }
189 }
190 }
191 else {
193
195
196 if (!(label & LABEL_TRANSMIT_TRANSPARENT)) {
198 }
199
200 const int transmission_bounce = INTEGRATOR_STATE(state, path, transmission_bounce) + 1;
201 INTEGRATOR_STATE_WRITE(state, path, transmission_bounce) = transmission_bounce;
202 if (transmission_bounce >= kernel_data.integrator.max_transmission_bounce) {
204 }
205 }
206
207 /* diffuse/glossy/singular */
208 if (label & LABEL_DIFFUSE) {
210 }
211 else if (label & LABEL_GLOSSY) {
213 }
214 else {
217 }
218
219 /* Flag for consistent MIS weights with light tree. */
220 if (shader_flag & SD_BSDF_HAS_TRANSMISSION) {
222 }
223
224 /* Render pass categories. */
227 }
228 }
229
231 INTEGRATOR_STATE_WRITE(state, path, bounce) = bounce;
232
233 /* Random number generator next bounce. */
234 INTEGRATOR_STATE_WRITE(state, path, rng_offset) += PRNG_BOUNCE_NUM;
235}
236
#ifdef __VOLUME__
/* Count a pass through a volume bounding mesh.
 *
 * Such passes are deliberately not counted as transparent bounces; the
 * counter only exists as a sanity check so a path cannot get stuck forever
 * on a self intersection. Returns false when the path must be terminated. */
ccl_device_inline bool path_state_volume_next(IntegratorState state)
{
  const uint32_t volume_bounds_bounce = INTEGRATOR_STATE(state, path, volume_bounds_bounce) + 1;
  INTEGRATOR_STATE_WRITE(state, path, volume_bounds_bounce) = volume_bounds_bounce;
  if (volume_bounds_bounce > VOLUME_BOUNDS_MAX) {
    return false;
  }

  /* Random number generator next bounce. */
  INTEGRATOR_STATE_WRITE(state, path, rng_offset) += PRNG_BOUNCE_NUM;

  return true;
}
#endif
254
256{
257 const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
258
259 uint32_t visibility = path_flag & PATH_RAY_ALL_VISIBILITY;
260
261 /* For visibility, diffuse/glossy are for reflection only. */
262 if (visibility & PATH_RAY_TRANSMIT) {
263 visibility &= ~(PATH_RAY_DIFFUSE | PATH_RAY_GLOSSY);
264 }
265
266 visibility = SHADOW_CATCHER_PATH_VISIBILITY(path_flag, visibility);
267
268 return visibility;
269}
270
273 const uint32_t path_flag)
274{
275 if (path_flag & PATH_RAY_TRANSPARENT) {
276 const int transparent_bounce = INTEGRATOR_STATE(state, path, transparent_bounce);
277 /* Do at least specified number of bounces without RR. */
278 if (transparent_bounce <= kernel_data.integrator.transparent_min_bounce) {
279 return 1.0f;
280 }
281 }
282 else {
283 const int bounce = INTEGRATOR_STATE(state, path, bounce);
284 /* Do at least specified number of bounces without RR. */
285 if (bounce <= kernel_data.integrator.min_bounce) {
286 return 1.0f;
287 }
288 }
289
290 /* Probabilistic termination: use `sqrt()` to roughly match typical view
291 * transform and do path termination a bit later on average. */
292 Spectrum throughput = INTEGRATOR_STATE(state, path, throughput);
293#if defined(__PATH_GUIDING__) && PATH_GUIDING_LEVEL >= 4
294 if ((kernel_data.kernel_features & KERNEL_FEATURE_PATH_GUIDING)) {
295 throughput *= INTEGRATOR_STATE(state, path, unguided_throughput);
296 }
297#endif
298 return min(sqrtf(reduce_max(fabs(throughput))), 1.0f);
299}
300
302{
303 if (!kernel_data.integrator.ao_bounces) {
304 return false;
305 }
306
307 const int bounce = INTEGRATOR_STATE(state, path, bounce) -
308 INTEGRATOR_STATE(state, path, transmission_bounce) -
309 (INTEGRATOR_STATE(state, path, glossy_bounce) > 0) + 1;
310 return (bounce > kernel_data.integrator.ao_bounces);
311}
312
313/* Random Number Sampling Utility Functions
314 *
315 * For each random number in each step of the path we must have a unique
316 * dimension to avoid using the same sequence twice.
317 *
318 * For branches in the path we must be careful not to reuse the same number
319 * in a sequence and offset accordingly.
320 */
321
322/* RNG State loaded onto stack. */
328
330 ccl_private RNGState *rng_state)
331{
332 rng_state->rng_pixel = INTEGRATOR_STATE(state, path, rng_pixel);
333 rng_state->rng_offset = INTEGRATOR_STATE(state, path, rng_offset);
334 rng_state->sample = INTEGRATOR_STATE(state, path, sample);
335}
336
338 ccl_private RNGState *rng_state)
339{
340 rng_state->rng_pixel = INTEGRATOR_STATE(state, shadow_path, rng_pixel);
341 rng_state->rng_offset = INTEGRATOR_STATE(state, shadow_path, rng_offset);
342 rng_state->sample = INTEGRATOR_STATE(state, shadow_path, sample);
343}
344
346{
347 /* To get an uncorrelated sequence of samples (e.g. for subsurface random walk), just change
348 * the dimension offset since all implemented samplers can generate unlimited numbers of
349 * dimensions anyways. The only thing to ensure is that the offset is divisible by 4. */
350 rng_state->rng_offset = hash_hp_seeded_uint(rng_state->rng_offset, seed) & ~0x3;
351}
352
354 const ccl_private RNGState *rng_state,
355 const int dimension)
356{
357 return path_rng_1D(
358 kg, rng_state->rng_pixel, rng_state->sample, rng_state->rng_offset + dimension);
359}
360
362 const ccl_private RNGState *rng_state,
363 const int dimension)
364{
365 return path_rng_2D(
366 kg, rng_state->rng_pixel, rng_state->sample, rng_state->rng_offset + dimension);
367}
368
370 const ccl_private RNGState *rng_state,
371 const int dimension)
372{
373 return path_rng_3D(
374 kg, rng_state->rng_pixel, rng_state->sample, rng_state->rng_offset + dimension);
375}
376
378 const ccl_private RNGState *rng_state,
379 const int branch,
380 const int num_branches,
381 const int dimension)
382{
383 return path_rng_1D(kg,
384 rng_state->rng_pixel,
385 rng_state->sample * num_branches + branch,
386 rng_state->rng_offset + dimension);
387}
388
390 const ccl_private RNGState *rng_state,
391 const int branch,
392 const int num_branches,
393 const int dimension)
394{
395 return path_rng_2D(kg,
396 rng_state->rng_pixel,
397 rng_state->sample * num_branches + branch,
398 rng_state->rng_offset + dimension);
399}
400
402 const ccl_private RNGState *rng_state,
403 const int branch,
404 const int num_branches,
405 const int dimension)
406{
407 return path_rng_3D(kg,
408 rng_state->rng_pixel,
409 rng_state->sample * num_branches + branch,
410 rng_state->rng_offset + dimension);
411}
412
413/* Utility functions to get light termination value,
414 * since it might not be needed in many cases.
415 */
418{
419 if (kernel_data.integrator.light_inv_rr_threshold > 0.0f) {
421 }
422 return 0.0f;
423}
424
unsigned int uint
static unsigned long seed
Definition btSoftBody.h:39
reduce_max(value.rgb)") DEFINE_VALUE("REDUCE(lhs
#define kernel_assert(cond)
#define KERNEL_FEATURE_VOLUME
#define KERNEL_FEATURE_NODE_PORTAL
#define kernel_data
#define ccl_restrict
#define VOLUME_BOUNDS_MAX
#define KERNEL_FEATURE_PATH_GUIDING
#define SHADER_NONE
#define one_spectrum
#define PRIM_NONE
#define OBJECT_NONE
#define ccl_private
const ThreadKernelGlobalsCPU * KernelGlobals
#define ccl_device_inline
#define KERNEL_FEATURE_DENOISING
#define KERNEL_FEATURE_LIGHT_LINKING
#define ccl_global
#define SHADOW_CATCHER_PATH_VISIBILITY(path_flag, visibility)
#define CCL_NAMESPACE_END
ccl_device_inline uint hash_hp_seeded_uint(const uint i, uint seed)
Definition hash.h:526
const ccl_global KernelWorkTile * tile
@ SD_RAY_PORTAL
@ SD_BSDF_HAS_TRANSMISSION
@ PRIMITIVE_NONE
@ PRNG_BOUNCE_NUM
@ PRNG_LIGHT_TERMINATE
@ PATH_RAY_VOLUME_PASS
@ PATH_RAY_TERMINATE_AFTER_TRANSPARENT
@ PATH_RAY_SINGULAR
@ PATH_RAY_REFLECT
@ PATH_RAY_MIS_HAD_TRANSMISSION
@ PATH_RAY_TRANSPARENT
@ PATH_RAY_TRANSMIT
@ PATH_RAY_VOLUME_SCATTER
@ PATH_RAY_MIS_SKIP
@ PATH_RAY_DENOISING_FEATURES
@ PATH_RAY_GLOSSY
@ PATH_RAY_SURFACE_PASS
@ PATH_RAY_ALL_VISIBILITY
@ PATH_RAY_DIFFUSE
@ PATH_RAY_TRANSPARENT_BACKGROUND
@ PATH_RAY_CAMERA
@ PATH_RAY_ANY_PASS
@ PATH_RAY_TERMINATE_ON_NEXT_SURFACE
@ PATH_RAY_VOLUME_PRIMARY_TRANSMIT
@ PATH_RAY_DIFFUSE_ANCESTOR
@ LABEL_TRANSMIT
@ LABEL_RAY_PORTAL
@ LABEL_TRANSMIT_TRANSPARENT
@ LABEL_VOLUME_SCATTER
@ LABEL_DIFFUSE
@ LABEL_SINGULAR
@ LABEL_GLOSSY
@ LABEL_REFLECT
@ LABEL_TRANSPARENT
ccl_device_inline float2 fabs(const float2 a)
static ulong state[N]
ccl_device_inline void path_state_init_integrator(KernelGlobals kg, IntegratorState state, const int sample, const uint rng_pixel, const Spectrum throughput)
Definition path_state.h:40
ccl_device_inline float path_state_continuation_probability(KernelGlobals kg, ConstIntegratorState state, const uint32_t path_flag)
Definition path_state.h:271
ccl_device_inline bool path_state_ao_bounce(KernelGlobals kg, ConstIntegratorState state)
Definition path_state.h:301
ccl_device_inline float path_state_rng_1D(KernelGlobals kg, const ccl_private RNGState *rng_state, const int dimension)
Definition path_state.h:353
ccl_device_inline void shadow_path_state_rng_load(ConstIntegratorShadowState state, ccl_private RNGState *rng_state)
Definition path_state.h:337
ccl_device_inline float path_branched_rng_1D(KernelGlobals kg, const ccl_private RNGState *rng_state, const int branch, const int num_branches, const int dimension)
Definition path_state.h:377
ccl_device_inline float path_state_rng_light_termination(KernelGlobals kg, const ccl_private RNGState *state)
Definition path_state.h:416
ccl_device_inline void path_state_rng_load(ConstIntegratorState state, ccl_private RNGState *rng_state)
Definition path_state.h:329
ccl_device_inline uint path_state_ray_visibility(ConstIntegratorState state)
Definition path_state.h:255
ccl_device_inline float2 path_branched_rng_2D(KernelGlobals kg, const ccl_private RNGState *rng_state, const int branch, const int num_branches, const int dimension)
Definition path_state.h:389
ccl_device_inline void path_state_next(KernelGlobals kg, IntegratorState state, const int label, const int shader_flag)
Definition path_state.h:110
ccl_device_inline float3 path_branched_rng_3D(KernelGlobals kg, const ccl_private RNGState *rng_state, const int branch, const int num_branches, const int dimension)
Definition path_state.h:401
CCL_NAMESPACE_BEGIN ccl_device_inline void path_state_init_queues(IntegratorState state)
Definition path_state.h:16
ccl_device_inline void path_state_rng_scramble(ccl_private RNGState *rng_state, const int seed)
Definition path_state.h:345
ccl_device_inline void path_state_init(IntegratorState state, const ccl_global KernelWorkTile *ccl_restrict tile, const int x, const int y)
Definition path_state.h:27
ccl_device_inline float2 path_state_rng_2D(KernelGlobals kg, const ccl_private RNGState *rng_state, const int dimension)
Definition path_state.h:361
ccl_device_inline float3 path_state_rng_3D(KernelGlobals kg, const ccl_private RNGState *rng_state, const int dimension)
Definition path_state.h:369
ccl_device_forceinline float2 path_rng_2D(KernelGlobals kg, const uint rng_pixel, const int sample, const int dimension)
Definition pattern.h:83
ccl_device_forceinline float3 path_rng_3D(KernelGlobals kg, const uint rng_pixel, const int sample, const int dimension)
Definition pattern.h:100
ccl_device_forceinline float path_rng_1D(KernelGlobals kg, const uint rng_pixel, const uint sample, const int dimension)
Definition pattern.h:66
#define sqrtf
#define min(a, b)
Definition sort.cc:36
#define INTEGRATOR_STATE_ARRAY_WRITE(state, nested_struct, array_index, member)
Definition state.h:240
#define INTEGRATOR_STATE_WRITE(state, nested_struct, member)
Definition state.h:236
#define INTEGRATOR_STATE(state, nested_struct, member)
Definition state.h:235
const IntegratorShadowStateCPU * ConstIntegratorShadowState
Definition state.h:231
IntegratorStateCPU * IntegratorState
Definition state.h:228
const IntegratorStateCPU * ConstIntegratorState
Definition state.h:229
#define FLT_MAX
Definition stdcycles.h:14
uint rng_pixel
Definition path_state.h:324
uint rng_offset
Definition path_state.h:325
float3 Spectrum
uint8_t flag
Definition wm_window.cc:145