Blender V5.0
state_util.h
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
2 *
3 * SPDX-License-Identifier: Apache-2.0 */
4
5#pragma once
6
7#include "kernel/globals.h"
8
10
12
14
15/* Ray */
16
18 const ccl_private Ray *ccl_restrict ray)
19{
20#if defined(__INTEGRATOR_GPU_PACKED_STATE__) && defined(__KERNEL_GPU__)
21 static_assert(sizeof(ray->P) == sizeof(float4), "Bad assumption about float3 padding");
 22 /* dP and dD are packed based on the assumption that float3 is padded to 16 bytes.
 23 * This assumption holds true on Metal, but not CUDA.
 24 */
25 ((ccl_private float4 &)ray->P).w = ray->dP;
26 ((ccl_private float4 &)ray->D).w = ray->dD;
27 INTEGRATOR_STATE_WRITE(state, ray, packed) = (ccl_private packed_ray &)*ray;
28
29 /* Ensure that we can correctly cast between Ray and the generated packed_ray struct. */
30 static_assert(offsetof(packed_ray, P) == offsetof(Ray, P),
31 "Generated packed_ray struct is misaligned with Ray struct");
32 static_assert(offsetof(packed_ray, D) == offsetof(Ray, D),
33 "Generated packed_ray struct is misaligned with Ray struct");
34 static_assert(offsetof(packed_ray, tmin) == offsetof(Ray, tmin),
35 "Generated packed_ray struct is misaligned with Ray struct");
36 static_assert(offsetof(packed_ray, tmax) == offsetof(Ray, tmax),
37 "Generated packed_ray struct is misaligned with Ray struct");
38 static_assert(offsetof(packed_ray, time) == offsetof(Ray, time),
39 "Generated packed_ray struct is misaligned with Ray struct");
40 static_assert(offsetof(packed_ray, dP) == 12 + offsetof(Ray, P),
41 "Generated packed_ray struct is misaligned with Ray struct");
42 static_assert(offsetof(packed_ray, dD) == 12 + offsetof(Ray, D),
43 "Generated packed_ray struct is misaligned with Ray struct");
44#else
45 INTEGRATOR_STATE_WRITE(state, ray, P) = ray->P;
46 INTEGRATOR_STATE_WRITE(state, ray, D) = ray->D;
47 INTEGRATOR_STATE_WRITE(state, ray, tmin) = ray->tmin;
48 INTEGRATOR_STATE_WRITE(state, ray, tmax) = ray->tmax;
49 INTEGRATOR_STATE_WRITE(state, ray, time) = ray->time;
50 INTEGRATOR_STATE_WRITE(state, ray, dP) = ray->dP;
51 INTEGRATOR_STATE_WRITE(state, ray, dD) = ray->dD;
52#endif
53}
54
57{
58#if defined(__INTEGRATOR_GPU_PACKED_STATE__) && defined(__KERNEL_GPU__)
59 *((ccl_private packed_ray *)ray) = INTEGRATOR_STATE(state, ray, packed);
60 ray->dP = ((ccl_private float4 &)ray->P).w;
61 ray->dD = ((ccl_private float4 &)ray->D).w;
62#else
63 ray->P = INTEGRATOR_STATE(state, ray, P);
64 ray->D = INTEGRATOR_STATE(state, ray, D);
65 ray->tmin = INTEGRATOR_STATE(state, ray, tmin);
66 ray->tmax = INTEGRATOR_STATE(state, ray, tmax);
67 ray->time = INTEGRATOR_STATE(state, ray, time);
68 ray->dP = INTEGRATOR_STATE(state, ray, dP);
69 ray->dD = INTEGRATOR_STATE(state, ray, dD);
70#endif
71}
72
73/* Shadow Ray */
74
77{
78 INTEGRATOR_STATE_WRITE(state, shadow_ray, P) = ray->P;
79 INTEGRATOR_STATE_WRITE(state, shadow_ray, D) = ray->D;
80 INTEGRATOR_STATE_WRITE(state, shadow_ray, tmin) = ray->tmin;
81 INTEGRATOR_STATE_WRITE(state, shadow_ray, tmax) = ray->tmax;
82 INTEGRATOR_STATE_WRITE(state, shadow_ray, time) = ray->time;
83 INTEGRATOR_STATE_WRITE(state, shadow_ray, dP) = ray->dP;
84}
85
88{
89 ray->P = INTEGRATOR_STATE(state, shadow_ray, P);
90 ray->D = INTEGRATOR_STATE(state, shadow_ray, D);
91 ray->tmin = INTEGRATOR_STATE(state, shadow_ray, tmin);
92 ray->tmax = INTEGRATOR_STATE(state, shadow_ray, tmax);
93 ray->time = INTEGRATOR_STATE(state, shadow_ray, time);
94 ray->dP = INTEGRATOR_STATE(state, shadow_ray, dP);
95 ray->dD = differential_zero_compact();
96}
97
100{
101 /* There is a bit of implicit knowledge about the way how the kernels are invoked and what the
102 * state is actually storing. Special logic here is needed because the intersect_shadow kernel
103 * might be called multiple times. This happens when the total number of intersections by the
104 * ray (shadow_path.num_hits) exceeds INTEGRATOR_SHADOW_ISECT_SIZE.
105 *
106 * Writing of the shadow_ray.self to the state happens only during the shadow ray setup, and
107 * the shadow_isect array gets overwritten by the intersect_shadow kernel. It is important to
108 * preserve the exact values of the light_object and light_prim for all invocations of the
109 * intersect_shadow kernel. Hence they are written to dedicated fields in the state.
110 *
111 * The self.object and self.prim are kept at the latest handled intersection: during shadow path
112 * branch-off it matches the main ray.self. For the consecutive calls of the intersect_shadow
113 * kernels it comes from the furthest intersection (the last element of the shadow_isect). So we
114 * use INTEGRATOR_SHADOW_ISECT_SIZE - 1 index for both writing and reading. This utilizes
115 * knowledge that intersect_shadow kernel is only called for either initial intersection, or when
116 * the number of ray intersections exceeds the shadow_isect size.
117 *
118 * This should help avoiding situations when the same intersection is recorded multiple times
119 * throughout separate invocations of the intersect_shadow kernel. However, it is still not
120 * fully reliable as there might be more than INTEGRATOR_SHADOW_ISECT_SIZE intersections at the
 121 * same ray->t. There is no fully reliable way to deal with such a situation; offsetting the
 122 * ray from the shade_shadow kernel avoids a potential false-positive detection of light being
 123 * fully blocked at the expense of potentially ignoring some intersections. If the offset is
124 * used then preserving self.object and self.prim might not be as useful, but it definitely does
125 * not harm. */
126
128 state, shadow_isect, INTEGRATOR_SHADOW_ISECT_SIZE - 1, object) = ray->self.object;
130 state, shadow_isect, INTEGRATOR_SHADOW_ISECT_SIZE - 1, prim) = ray->self.prim;
131
132 INTEGRATOR_STATE_WRITE(state, shadow_ray, self_light_object) = ray->self.light_object;
133 INTEGRATOR_STATE_WRITE(state, shadow_ray, self_light_prim) = ray->self.light_prim;
134}
135
138{
139 ray->self.object = INTEGRATOR_STATE_ARRAY(
140 state, shadow_isect, INTEGRATOR_SHADOW_ISECT_SIZE - 1, object);
141 ray->self.prim = INTEGRATOR_STATE_ARRAY(
142 state, shadow_isect, INTEGRATOR_SHADOW_ISECT_SIZE - 1, prim);
143 ray->self.light_object = INTEGRATOR_STATE(state, shadow_ray, self_light_object);
144 ray->self.light_prim = INTEGRATOR_STATE(state, shadow_ray, self_light_prim);
145}
146
147/* Intersection */
148
151{
152#if defined(__INTEGRATOR_GPU_PACKED_STATE__) && defined(__KERNEL_GPU__)
153 INTEGRATOR_STATE_WRITE(state, isect, packed) = (ccl_private packed_isect &)*isect;
154
155 /* Ensure that we can correctly cast between Intersection and the generated packed_isect struct.
156 */
157 static_assert(offsetof(packed_isect, t) == offsetof(Intersection, t),
158 "Generated packed_isect struct is misaligned with Intersection struct");
159 static_assert(offsetof(packed_isect, u) == offsetof(Intersection, u),
160 "Generated packed_isect struct is misaligned with Intersection struct");
161 static_assert(offsetof(packed_isect, v) == offsetof(Intersection, v),
162 "Generated packed_isect struct is misaligned with Intersection struct");
163 static_assert(offsetof(packed_isect, object) == offsetof(Intersection, object),
164 "Generated packed_isect struct is misaligned with Intersection struct");
165 static_assert(offsetof(packed_isect, prim) == offsetof(Intersection, prim),
166 "Generated packed_isect struct is misaligned with Intersection struct");
167 static_assert(offsetof(packed_isect, type) == offsetof(Intersection, type),
168 "Generated packed_isect struct is misaligned with Intersection struct");
169#else
170 INTEGRATOR_STATE_WRITE(state, isect, t) = isect->t;
171 INTEGRATOR_STATE_WRITE(state, isect, u) = isect->u;
172 INTEGRATOR_STATE_WRITE(state, isect, v) = isect->v;
173 INTEGRATOR_STATE_WRITE(state, isect, object) = isect->object;
174 INTEGRATOR_STATE_WRITE(state, isect, prim) = isect->prim;
175 INTEGRATOR_STATE_WRITE(state, isect, type) = isect->type;
176#endif
177}
178
181{
182#if defined(__INTEGRATOR_GPU_PACKED_STATE__) && defined(__KERNEL_GPU__)
183 *((ccl_private packed_isect *)isect) = INTEGRATOR_STATE(state, isect, packed);
184#else
185 isect->prim = INTEGRATOR_STATE(state, isect, prim);
186 isect->object = INTEGRATOR_STATE(state, isect, object);
187 isect->type = INTEGRATOR_STATE(state, isect, type);
188 isect->u = INTEGRATOR_STATE(state, isect, u);
189 isect->v = INTEGRATOR_STATE(state, isect, v);
190 isect->t = INTEGRATOR_STATE(state, isect, t);
191#endif
192}
193
194#ifdef __VOLUME__
195ccl_device_forceinline VolumeStack integrator_state_read_volume_stack(ConstIntegratorState state,
196 const int i)
197{
198 VolumeStack entry = {INTEGRATOR_STATE_ARRAY(state, volume_stack, i, object),
199 INTEGRATOR_STATE_ARRAY(state, volume_stack, i, shader)};
200 return entry;
201}
202
203ccl_device_forceinline void integrator_state_write_volume_stack(IntegratorState state,
204 const int i,
205 VolumeStack entry)
206{
207 INTEGRATOR_STATE_ARRAY_WRITE(state, volume_stack, i, object) = entry.object;
208 INTEGRATOR_STATE_ARRAY_WRITE(state, volume_stack, i, shader) = entry.shader;
209}
210
211ccl_device_forceinline bool integrator_state_volume_stack_is_empty(KernelGlobals kg,
213{
214 return (kernel_data.kernel_features & KERNEL_FEATURE_VOLUME) ?
215 INTEGRATOR_STATE_ARRAY(state, volume_stack, 0, shader) == SHADER_NONE :
216 true;
217}
218
219ccl_device_forceinline void integrator_state_copy_volume_stack_to_shadow(
221{
222 if (kernel_data.kernel_features & KERNEL_FEATURE_VOLUME) {
223 int index = 0;
224 int shader;
225 do {
226 shader = INTEGRATOR_STATE_ARRAY(state, volume_stack, index, shader);
227
228 INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_volume_stack, index, object) =
229 INTEGRATOR_STATE_ARRAY(state, volume_stack, index, object);
230 INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_volume_stack, index, shader) = shader;
231
232 ++index;
233 } while (shader != SHADER_NONE);
234 }
235}
236
237ccl_device_forceinline void integrator_state_copy_volume_stack(KernelGlobals kg,
238 IntegratorState to_state,
240{
241 if (kernel_data.kernel_features & KERNEL_FEATURE_VOLUME) {
242 int index = 0;
243 int shader;
244 do {
245 shader = INTEGRATOR_STATE_ARRAY(state, volume_stack, index, shader);
246
247 INTEGRATOR_STATE_ARRAY_WRITE(to_state, volume_stack, index, object) = INTEGRATOR_STATE_ARRAY(
248 state, volume_stack, index, object);
249 INTEGRATOR_STATE_ARRAY_WRITE(to_state, volume_stack, index, shader) = shader;
250
251 ++index;
252 } while (shader != SHADER_NONE);
253 }
254}
255
257integrator_state_read_shadow_volume_stack(ConstIntegratorShadowState state, const int i)
258{
259 VolumeStack entry = {INTEGRATOR_STATE_ARRAY(state, shadow_volume_stack, i, object),
260 INTEGRATOR_STATE_ARRAY(state, shadow_volume_stack, i, shader)};
261 return entry;
262}
263
264ccl_device_forceinline bool integrator_state_shadow_volume_stack_is_empty(
266{
267 return (kernel_data.kernel_features & KERNEL_FEATURE_VOLUME) ?
268 INTEGRATOR_STATE_ARRAY(state, shadow_volume_stack, 0, shader) == SHADER_NONE :
269 true;
270}
271
272ccl_device_forceinline void integrator_state_write_shadow_volume_stack(IntegratorShadowState state,
273 const int i,
274 VolumeStack entry)
275{
276 INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_volume_stack, i, object) = entry.object;
277 INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_volume_stack, i, shader) = entry.shader;
278}
279
280#endif /* __VOLUME__*/
281
282/* Shadow Intersection */
283
287 const int index)
288{
289 INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, index, t) = isect->t;
290 INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, index, u) = isect->u;
291 INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, index, v) = isect->v;
292 INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, index, object) = isect->object;
293 INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, index, prim) = isect->prim;
294 INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, index, type) = isect->type;
295}
296
300 const int index)
301{
302 isect->prim = INTEGRATOR_STATE_ARRAY(state, shadow_isect, index, prim);
303 isect->object = INTEGRATOR_STATE_ARRAY(state, shadow_isect, index, object);
304 isect->type = INTEGRATOR_STATE_ARRAY(state, shadow_isect, index, type);
305 isect->u = INTEGRATOR_STATE_ARRAY(state, shadow_isect, index, u);
306 isect->v = INTEGRATOR_STATE_ARRAY(state, shadow_isect, index, v);
307 isect->t = INTEGRATOR_STATE_ARRAY(state, shadow_isect, index, t);
308}
309
310#if defined(__KERNEL_GPU__)
311ccl_device_inline void integrator_state_copy_only(KernelGlobals kg,
312 ConstIntegratorState to_state,
314{
315 int index;
316
317 /* Rely on the compiler to optimize out unused assignments and `while(false)`'s. */
318
319# define KERNEL_STRUCT_BEGIN(name) \
320 index = 0; \
321 do {
322
323# define KERNEL_STRUCT_MEMBER(parent_struct, type, name, feature) \
324 if (kernel_integrator_state.parent_struct.name != nullptr) { \
325 kernel_integrator_state.parent_struct.name[to_state] = \
326 kernel_integrator_state.parent_struct.name[state]; \
327 }
328
329# ifdef __INTEGRATOR_GPU_PACKED_STATE__
330# define KERNEL_STRUCT_BEGIN_PACKED(parent_struct, feature) \
331 KERNEL_STRUCT_BEGIN(parent_struct) \
332 KERNEL_STRUCT_MEMBER(parent_struct, packed_##parent_struct, packed, feature)
333# define KERNEL_STRUCT_MEMBER_PACKED(parent_struct, type, name, feature)
334# else
335# define KERNEL_STRUCT_MEMBER_PACKED KERNEL_STRUCT_MEMBER
336# define KERNEL_STRUCT_BEGIN_PACKED(parent_struct, feature) KERNEL_STRUCT_BEGIN(parent_struct)
337# endif
338
339# define KERNEL_STRUCT_ARRAY_MEMBER(parent_struct, type, name, feature) \
340 if (kernel_integrator_state.parent_struct[index].name != nullptr) { \
341 kernel_integrator_state.parent_struct[index].name[to_state] = \
342 kernel_integrator_state.parent_struct[index].name[state]; \
343 }
344
345# define KERNEL_STRUCT_END(name) \
346 } \
347 while (false) \
348 ;
349
350# define KERNEL_STRUCT_END_ARRAY(name, cpu_array_size, gpu_array_size) \
351 ++index; \
352 } \
353 while (index < gpu_array_size) \
354 ;
355
356# define KERNEL_STRUCT_VOLUME_STACK_SIZE kernel_data.volume_stack_size
357
359
360# undef KERNEL_STRUCT_BEGIN
361# undef KERNEL_STRUCT_BEGIN_PACKED
362# undef KERNEL_STRUCT_MEMBER
363# undef KERNEL_STRUCT_MEMBER_PACKED
364# undef KERNEL_STRUCT_ARRAY_MEMBER
365# undef KERNEL_STRUCT_END
366# undef KERNEL_STRUCT_END_ARRAY
367# undef KERNEL_STRUCT_VOLUME_STACK_SIZE
368}
369
370ccl_device_inline void integrator_state_move(KernelGlobals kg,
371 ConstIntegratorState to_state,
373{
374 integrator_state_copy_only(kg, to_state, state);
375
376 INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = 0;
377}
378
379ccl_device_inline void integrator_shadow_state_copy_only(KernelGlobals kg,
382{
383 int index;
384
385 /* Rely on the compiler to optimize out unused assignments and `while(false)`'s. */
386
387# define KERNEL_STRUCT_BEGIN(name) \
388 index = 0; \
389 do {
390
391# define KERNEL_STRUCT_MEMBER(parent_struct, type, name, feature) \
392 if (kernel_integrator_state.parent_struct.name != nullptr) { \
393 kernel_integrator_state.parent_struct.name[to_state] = \
394 kernel_integrator_state.parent_struct.name[state]; \
395 }
396
397# ifdef __INTEGRATOR_GPU_PACKED_STATE__
398# define KERNEL_STRUCT_BEGIN_PACKED(parent_struct, feature) \
399 KERNEL_STRUCT_BEGIN(parent_struct) \
400 KERNEL_STRUCT_MEMBER(parent_struct, type, packed, feature)
401# define KERNEL_STRUCT_MEMBER_PACKED(parent_struct, type, name, feature)
402# else
403# define KERNEL_STRUCT_MEMBER_PACKED KERNEL_STRUCT_MEMBER
404# define KERNEL_STRUCT_BEGIN_PACKED(parent_struct, feature) KERNEL_STRUCT_BEGIN(parent_struct)
405# endif
406
407# define KERNEL_STRUCT_ARRAY_MEMBER(parent_struct, type, name, feature) \
408 if (kernel_integrator_state.parent_struct[index].name != nullptr) { \
409 kernel_integrator_state.parent_struct[index].name[to_state] = \
410 kernel_integrator_state.parent_struct[index].name[state]; \
411 }
412
413# define KERNEL_STRUCT_END(name) \
414 } \
415 while (false) \
416 ;
417
418# define KERNEL_STRUCT_END_ARRAY(name, cpu_array_size, gpu_array_size) \
419 ++index; \
420 } \
421 while (index < gpu_array_size) \
422 ;
423
424# define KERNEL_STRUCT_VOLUME_STACK_SIZE kernel_data.volume_stack_size
425
427
428# undef KERNEL_STRUCT_BEGIN
429# undef KERNEL_STRUCT_BEGIN_PACKED
430# undef KERNEL_STRUCT_MEMBER
431# undef KERNEL_STRUCT_MEMBER_PACKED
432# undef KERNEL_STRUCT_ARRAY_MEMBER
433# undef KERNEL_STRUCT_END
434# undef KERNEL_STRUCT_END_ARRAY
435# undef KERNEL_STRUCT_VOLUME_STACK_SIZE
436}
437
438ccl_device_inline void integrator_shadow_state_move(KernelGlobals kg,
439 ConstIntegratorState to_state,
441{
442 integrator_shadow_state_copy_only(kg, to_state, state);
443
444 INTEGRATOR_STATE_WRITE(state, shadow_path, queued_kernel) = 0;
445}
446
447#endif
448
449/* NOTE: Leaves kernel scheduling information untouched. Use INIT semantic for one of the paths
450 * after this function. */
453{
454#if defined(__KERNEL_GPU__)
456 &kernel_integrator_state.next_main_path_index[0], 1);
457
458 integrator_state_copy_only(kg, to_state, state);
459#else
460 IntegratorStateCPU *ccl_restrict to_state = state + 1;
461
462 /* Only copy the required subset for performance. */
463 to_state->path = state->path;
464 to_state->ray = state->ray;
465 to_state->isect = state->isect;
466# ifdef __VOLUME__
467 integrator_state_copy_volume_stack(kg, to_state, state);
468# endif
469#endif
470
471 return to_state;
472}
473
474#ifndef __KERNEL_GPU__
476{
477 return INTEGRATOR_STATE(state, path, bounce);
478}
479
481 const int /*unused*/)
482{
483 return INTEGRATOR_STATE(state, shadow_path, bounce);
484}
485
487 const int /*unused*/)
488{
489 return INTEGRATOR_STATE(state, path, diffuse_bounce);
490}
491
493 const int /*unused*/)
494{
495 return INTEGRATOR_STATE(state, shadow_path, diffuse_bounce);
496}
497
499 const int /*unused*/)
500{
501 return INTEGRATOR_STATE(state, path, glossy_bounce);
502}
503
505 const int /*unused*/)
506{
507 return INTEGRATOR_STATE(state, shadow_path, glossy_bounce);
508}
509
511 const int /*unused*/)
512{
513 return INTEGRATOR_STATE(state, path, transmission_bounce);
514}
515
517 const int /*unused*/)
518{
519 return INTEGRATOR_STATE(state, shadow_path, transmission_bounce);
520}
521
523 const int /*unused*/)
524{
525 return INTEGRATOR_STATE(state, path, transparent_bounce);
526}
527
529 const int /*unused*/)
530{
531 return INTEGRATOR_STATE(state, shadow_path, transparent_bounce);
532}
533
536 const int /*unused*/)
537{
538 return (kernel_data.kernel_features & KERNEL_FEATURE_NODE_PORTAL) ?
539 INTEGRATOR_STATE(state, path, portal_bounce) :
540 0;
541}
542
545 const int /*unused*/)
546{
547 return (kernel_data.kernel_features & KERNEL_FEATURE_NODE_PORTAL) ?
548 INTEGRATOR_STATE(state, shadow_path, portal_bounce) :
549 0;
550}
551
552#else
554 const uint32_t path_flag)
555{
556 return (path_flag & PATH_RAY_SHADOW) ? INTEGRATOR_STATE(state, shadow_path, bounce) :
557 INTEGRATOR_STATE(state, path, bounce);
558}
559
561 const uint32_t path_flag)
562{
563 return (path_flag & PATH_RAY_SHADOW) ? INTEGRATOR_STATE(state, shadow_path, diffuse_bounce) :
564 INTEGRATOR_STATE(state, path, diffuse_bounce);
565}
566
568 const uint32_t path_flag)
569{
570 return (path_flag & PATH_RAY_SHADOW) ? INTEGRATOR_STATE(state, shadow_path, glossy_bounce) :
571 INTEGRATOR_STATE(state, path, glossy_bounce);
572}
573
575 const uint32_t path_flag)
576{
577 return (path_flag & PATH_RAY_SHADOW) ?
578 INTEGRATOR_STATE(state, shadow_path, transmission_bounce) :
579 INTEGRATOR_STATE(state, path, transmission_bounce);
580}
581
583 const uint32_t path_flag)
584{
585 return (path_flag & PATH_RAY_SHADOW) ? INTEGRATOR_STATE(state, shadow_path, transparent_bounce) :
586 INTEGRATOR_STATE(state, path, transparent_bounce);
587}
588
591 const uint32_t path_flag)
592{
593 if ((kernel_data.kernel_features & KERNEL_FEATURE_NODE_PORTAL) == 0) {
594 return 0;
595 }
596 return (path_flag & PATH_RAY_SHADOW) ? INTEGRATOR_STATE(state, shadow_path, portal_bounce) :
597 INTEGRATOR_STATE(state, path, portal_bounce);
598}
599
600#endif
601
#define D
ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x)
ATTR_WARN_UNUSED_RESULT const BMVert * v
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition btQuadWord.h:119
#define KERNEL_FEATURE_VOLUME
#define KERNEL_FEATURE_NODE_PORTAL
#define kernel_data
#define ccl_restrict
#define ccl_device_forceinline
#define SHADER_NONE
#define INTEGRATOR_SHADOW_ISECT_SIZE
#define ccl_private
const ThreadKernelGlobalsCPU * KernelGlobals
#define ccl_device_inline
#define CCL_NAMESPACE_END
#define kernel_integrator_state
#define offsetof(t, d)
ccl_device_forceinline float differential_zero_compact()
#define packed
@ PATH_RAY_SHADOW
static ulong state[N]
IntegratorShadowStateCPU * IntegratorShadowState
Definition state.h:230
#define INTEGRATOR_STATE_ARRAY_WRITE(state, nested_struct, array_index, member)
Definition state.h:240
#define INTEGRATOR_STATE_WRITE(state, nested_struct, member)
Definition state.h:236
#define INTEGRATOR_STATE(state, nested_struct, member)
Definition state.h:235
const IntegratorShadowStateCPU * ConstIntegratorShadowState
Definition state.h:231
#define INTEGRATOR_STATE_ARRAY(state, nested_struct, array_index, member)
Definition state.h:238
IntegratorStateCPU * IntegratorState
Definition state.h:228
const IntegratorStateCPU * ConstIntegratorState
Definition state.h:229
ccl_device_forceinline void integrator_state_read_shadow_ray(ConstIntegratorShadowState state, ccl_private Ray *ccl_restrict ray)
Definition state_util.h:86
ccl_device_inline int integrator_state_bounce(ConstIntegratorState state, const int)
Definition state_util.h:475
ccl_device_forceinline void integrator_state_write_shadow_ray_self(IntegratorShadowState state, const ccl_private Ray *ccl_restrict ray)
Definition state_util.h:98
ccl_device_inline int integrator_state_transmission_bounce(ConstIntegratorState state, const int)
Definition state_util.h:510
ccl_device_forceinline void integrator_state_write_shadow_isect(IntegratorShadowState state, const ccl_private Intersection *ccl_restrict isect, const int index)
Definition state_util.h:284
ccl_device_forceinline void integrator_state_read_ray(ConstIntegratorState state, ccl_private Ray *ccl_restrict ray)
Definition state_util.h:55
ccl_device_inline int integrator_state_transparent_bounce(ConstIntegratorState state, const int)
Definition state_util.h:522
ccl_device_forceinline void integrator_state_write_shadow_ray(IntegratorShadowState state, const ccl_private Ray *ccl_restrict ray)
Definition state_util.h:75
ccl_device_inline IntegratorState integrator_state_shadow_catcher_split(KernelGlobals kg, IntegratorState state)
Definition state_util.h:451
ccl_device_forceinline void integrator_state_read_shadow_isect(ConstIntegratorShadowState state, ccl_private Intersection *ccl_restrict isect, const int index)
Definition state_util.h:297
ccl_device_forceinline void integrator_state_read_isect(ConstIntegratorState state, ccl_private Intersection *ccl_restrict isect)
Definition state_util.h:179
ccl_device_inline int integrator_state_glossy_bounce(ConstIntegratorState state, const int)
Definition state_util.h:498
ccl_device_inline int integrator_state_diffuse_bounce(ConstIntegratorState state, const int)
Definition state_util.h:486
CCL_NAMESPACE_BEGIN ccl_device_forceinline void integrator_state_write_ray(IntegratorState state, const ccl_private Ray *ccl_restrict ray)
Definition state_util.h:17
ccl_device_inline int integrator_state_portal_bounce(KernelGlobals kg, ConstIntegratorState state, const int)
Definition state_util.h:534
ccl_device_forceinline void integrator_state_write_isect(IntegratorState state, const ccl_private Intersection *ccl_restrict isect)
Definition state_util.h:149
ccl_device_forceinline void integrator_state_read_shadow_ray_self(ConstIntegratorShadowState state, ccl_private Ray *ccl_restrict ray)
Definition state_util.h:136
i
Definition text_draw.cc:230