Blender V4.3
state_util.h
/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
 *
 * SPDX-License-Identifier: Apache-2.0 */

#pragma once

#include "kernel/integrator/state.h"

#include "kernel/util/differential.h"

CCL_NAMESPACE_BEGIN

/* Ray */

ccl_device_forceinline void integrator_state_write_ray(IntegratorState state,
                                                       ccl_private const Ray *ccl_restrict ray)
{
#if defined(__INTEGRATOR_GPU_PACKED_STATE__) && defined(__KERNEL_GPU__)
  static_assert(sizeof(ray->P) == sizeof(float4), "Bad assumption about float3 padding");
  /* dP and dD are packed based on the assumption that float3 is padded to 16 bytes.
   * This assumption holds true on Metal, but not on CUDA. */
  ((ccl_private float4 &)ray->P).w = ray->dP;
  ((ccl_private float4 &)ray->D).w = ray->dD;
  INTEGRATOR_STATE_WRITE(state, ray, packed) = (ccl_private packed_ray &)*ray;

  /* Ensure that we can correctly cast between Ray and the generated packed_ray struct. */
  static_assert(offsetof(packed_ray, P) == offsetof(Ray, P),
                "Generated packed_ray struct is misaligned with Ray struct");
  static_assert(offsetof(packed_ray, D) == offsetof(Ray, D),
                "Generated packed_ray struct is misaligned with Ray struct");
  static_assert(offsetof(packed_ray, tmin) == offsetof(Ray, tmin),
                "Generated packed_ray struct is misaligned with Ray struct");
  static_assert(offsetof(packed_ray, tmax) == offsetof(Ray, tmax),
                "Generated packed_ray struct is misaligned with Ray struct");
  static_assert(offsetof(packed_ray, time) == offsetof(Ray, time),
                "Generated packed_ray struct is misaligned with Ray struct");
  static_assert(offsetof(packed_ray, dP) == 12 + offsetof(Ray, P),
                "Generated packed_ray struct is misaligned with Ray struct");
  static_assert(offsetof(packed_ray, dD) == 12 + offsetof(Ray, D),
                "Generated packed_ray struct is misaligned with Ray struct");
#else
  INTEGRATOR_STATE_WRITE(state, ray, P) = ray->P;
  INTEGRATOR_STATE_WRITE(state, ray, D) = ray->D;
  INTEGRATOR_STATE_WRITE(state, ray, tmin) = ray->tmin;
  INTEGRATOR_STATE_WRITE(state, ray, tmax) = ray->tmax;
  INTEGRATOR_STATE_WRITE(state, ray, time) = ray->time;
  INTEGRATOR_STATE_WRITE(state, ray, dP) = ray->dP;
  INTEGRATOR_STATE_WRITE(state, ray, dD) = ray->dD;
#endif
}

ccl_device_forceinline void integrator_state_read_ray(ConstIntegratorState state,
                                                      ccl_private Ray *ccl_restrict ray)
{
#if defined(__INTEGRATOR_GPU_PACKED_STATE__) && defined(__KERNEL_GPU__)
  *((ccl_private packed_ray *)ray) = INTEGRATOR_STATE(state, ray, packed);
  ray->dP = ((ccl_private float4 &)ray->P).w;
  ray->dD = ((ccl_private float4 &)ray->D).w;
#else
  ray->P = INTEGRATOR_STATE(state, ray, P);
  ray->D = INTEGRATOR_STATE(state, ray, D);
  ray->tmin = INTEGRATOR_STATE(state, ray, tmin);
  ray->tmax = INTEGRATOR_STATE(state, ray, tmax);
  ray->time = INTEGRATOR_STATE(state, ray, time);
  ray->dP = INTEGRATOR_STATE(state, ray, dP);
  ray->dD = INTEGRATOR_STATE(state, ray, dD);
#endif
}
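
/* Usage sketch (illustrative, not part of the original header): a round trip
 * through the ray state. On GPU builds with __INTEGRATOR_GPU_PACKED_STATE__ the
 * compact differentials travel in the otherwise unused .w padding of P and D,
 * so a write followed by a read restores dP/dD exactly. The values are made up.
 *
 *   Ray ray;
 *   ray.P = make_float3(0.0f, 0.0f, 0.0f);
 *   ray.D = make_float3(0.0f, 0.0f, 1.0f);
 *   ray.tmin = 0.0f;
 *   ray.tmax = FLT_MAX;
 *   ray.time = 0.5f;
 *   ray.dP = differential_zero_compact();
 *   ray.dD = differential_zero_compact();
 *   integrator_state_write_ray(state, &ray);
 *
 *   Ray restored;
 *   integrator_state_read_ray(state, &restored);  // Restores all members, dP/dD included.
 */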

/* Shadow Ray */

ccl_device_forceinline void integrator_state_write_shadow_ray(
    IntegratorShadowState state, ccl_private const Ray *ccl_restrict ray)
{
  INTEGRATOR_STATE_WRITE(state, shadow_ray, P) = ray->P;
  INTEGRATOR_STATE_WRITE(state, shadow_ray, D) = ray->D;
  INTEGRATOR_STATE_WRITE(state, shadow_ray, tmin) = ray->tmin;
  INTEGRATOR_STATE_WRITE(state, shadow_ray, tmax) = ray->tmax;
  INTEGRATOR_STATE_WRITE(state, shadow_ray, time) = ray->time;
  INTEGRATOR_STATE_WRITE(state, shadow_ray, dP) = ray->dP;
}

ccl_device_forceinline void integrator_state_read_shadow_ray(ConstIntegratorShadowState state,
                                                             ccl_private Ray *ccl_restrict ray)
{
  ray->P = INTEGRATOR_STATE(state, shadow_ray, P);
  ray->D = INTEGRATOR_STATE(state, shadow_ray, D);
  ray->tmin = INTEGRATOR_STATE(state, shadow_ray, tmin);
  ray->tmax = INTEGRATOR_STATE(state, shadow_ray, tmax);
  ray->time = INTEGRATOR_STATE(state, shadow_ray, time);
  ray->dP = INTEGRATOR_STATE(state, shadow_ray, dP);
  ray->dD = differential_zero_compact();
}
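
/* Note with a small sketch (illustrative, not part of the original header): the
 * shadow ray state stores no dD member, so the write above drops the direction
 * differential and the read substitutes a zero compact differential:
 *
 *   Ray shadow_ray;
 *   integrator_state_read_shadow_ray(state, &shadow_ray);
 *   // shadow_ray.dD == differential_zero_compact(), whatever the original ray carried.
 */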

ccl_device_forceinline void integrator_state_write_shadow_ray_self(
    KernelGlobals kg, IntegratorShadowState state, ccl_private const Ray *ccl_restrict ray)
{
  if (kernel_data.kernel_features & KERNEL_FEATURE_SHADOW_LINKING) {
    INTEGRATOR_STATE_WRITE(state, shadow_ray, self_light) = ray->self.light;
  }

  /* Save memory by storing the light and object indices in the shadow_isect. */
  /* TODO(sergey): This optimization does not work on GPU, where multiple intersection
   * iterations are needed if there are more than 4 transparent intersections. The indices
   * start to conflict with each other. */
  INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, 0, object) = ray->self.object;
  INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, 0, prim) = ray->self.prim;
  INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, 1, object) = ray->self.light_object;
  INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, 1, prim) = ray->self.light_prim;
}

ccl_device_forceinline void integrator_state_read_shadow_ray_self(
    KernelGlobals kg, ConstIntegratorShadowState state, ccl_private Ray *ccl_restrict ray)
{
  if (kernel_data.kernel_features & KERNEL_FEATURE_SHADOW_LINKING) {
    ray->self.light = INTEGRATOR_STATE(state, shadow_ray, self_light);
  }

  ray->self.object = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 0, object);
  ray->self.prim = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 0, prim);
  ray->self.light_object = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 1, object);
  ray->self.light_prim = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 1, prim);
}
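
/* Usage sketch (illustrative, not part of the original header): the self data
 * borrows the first two shadow_isect slots, so it is presumably written
 * together with the shadow ray and read back before those slots are reused for
 * actual shadow intersections:
 *
 *   integrator_state_write_shadow_ray(shadow_state, &ray);
 *   integrator_state_write_shadow_ray_self(kg, shadow_state, &ray);
 *   // ... later, before intersecting:
 *   Ray ray;
 *   integrator_state_read_shadow_ray(shadow_state, &ray);
 *   integrator_state_read_shadow_ray_self(kg, shadow_state, &ray);
 */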

/* Intersection */

ccl_device_forceinline void integrator_state_write_isect(
    IntegratorState state, ccl_private const Intersection *ccl_restrict isect)
{
#if defined(__INTEGRATOR_GPU_PACKED_STATE__) && defined(__KERNEL_GPU__)
  INTEGRATOR_STATE_WRITE(state, isect, packed) = (ccl_private packed_isect &)*isect;

  /* Ensure that we can correctly cast between Intersection and the generated packed_isect
   * struct. */
  static_assert(offsetof(packed_isect, t) == offsetof(Intersection, t),
                "Generated packed_isect struct is misaligned with Intersection struct");
  static_assert(offsetof(packed_isect, u) == offsetof(Intersection, u),
                "Generated packed_isect struct is misaligned with Intersection struct");
  static_assert(offsetof(packed_isect, v) == offsetof(Intersection, v),
                "Generated packed_isect struct is misaligned with Intersection struct");
  static_assert(offsetof(packed_isect, object) == offsetof(Intersection, object),
                "Generated packed_isect struct is misaligned with Intersection struct");
  static_assert(offsetof(packed_isect, prim) == offsetof(Intersection, prim),
                "Generated packed_isect struct is misaligned with Intersection struct");
  static_assert(offsetof(packed_isect, type) == offsetof(Intersection, type),
                "Generated packed_isect struct is misaligned with Intersection struct");
#else
  INTEGRATOR_STATE_WRITE(state, isect, t) = isect->t;
  INTEGRATOR_STATE_WRITE(state, isect, u) = isect->u;
  INTEGRATOR_STATE_WRITE(state, isect, v) = isect->v;
  INTEGRATOR_STATE_WRITE(state, isect, object) = isect->object;
  INTEGRATOR_STATE_WRITE(state, isect, prim) = isect->prim;
  INTEGRATOR_STATE_WRITE(state, isect, type) = isect->type;
#endif
}

ccl_device_forceinline void integrator_state_read_isect(
    ConstIntegratorState state, ccl_private Intersection *ccl_restrict isect)
{
#if defined(__INTEGRATOR_GPU_PACKED_STATE__) && defined(__KERNEL_GPU__)
  *((ccl_private packed_isect *)isect) = INTEGRATOR_STATE(state, isect, packed);
#else
  isect->prim = INTEGRATOR_STATE(state, isect, prim);
  isect->object = INTEGRATOR_STATE(state, isect, object);
  isect->type = INTEGRATOR_STATE(state, isect, type);
  isect->u = INTEGRATOR_STATE(state, isect, u);
  isect->v = INTEGRATOR_STATE(state, isect, v);
  isect->t = INTEGRATOR_STATE(state, isect, t);
#endif
}
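
/* Usage sketch (illustrative, not part of the original header): the packed
 * variant lets the whole Intersection move as one bulk copy instead of six
 * scalar SoA accesses; callers look the same either way:
 *
 *   Intersection isect;
 *   integrator_state_read_isect(state, &isect);
 *   if (isect.prim != PRIM_NONE) {
 *     // Shade the recorded hit.
 *   }
 */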

#ifdef __VOLUME__
ccl_device_forceinline VolumeStack integrator_state_read_volume_stack(ConstIntegratorState state,
                                                                      int i)
{
  VolumeStack entry = {INTEGRATOR_STATE_ARRAY(state, volume_stack, i, object),
                       INTEGRATOR_STATE_ARRAY(state, volume_stack, i, shader)};
  return entry;
}

ccl_device_forceinline void integrator_state_write_volume_stack(IntegratorState state,
                                                                int i,
                                                                VolumeStack entry)
{
  INTEGRATOR_STATE_ARRAY_WRITE(state, volume_stack, i, object) = entry.object;
  INTEGRATOR_STATE_ARRAY_WRITE(state, volume_stack, i, shader) = entry.shader;
}
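
/* Usage sketch (illustrative, not part of the original header): the stack is a
 * fixed-size array terminated by a SHADER_NONE entry, so a push writes at the
 * first free index and re-writes the terminator behind it (bounds checks
 * against kernel_data.volume_stack_size omitted here):
 *
 *   int i = 0;
 *   while (integrator_state_read_volume_stack(state, i).shader != SHADER_NONE) {
 *     ++i;
 *   }
 *   const VolumeStack entry = {object, shader};
 *   integrator_state_write_volume_stack(state, i, entry);
 *   const VolumeStack terminator = {OBJECT_NONE, SHADER_NONE};
 *   integrator_state_write_volume_stack(state, i + 1, terminator);
 */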

ccl_device_forceinline bool integrator_state_volume_stack_is_empty(KernelGlobals kg,
                                                                   ConstIntegratorState state)
{
  return (kernel_data.kernel_features & KERNEL_FEATURE_VOLUME) ?
             INTEGRATOR_STATE_ARRAY(state, volume_stack, 0, shader) == SHADER_NONE :
             true;
}

ccl_device_forceinline void integrator_state_copy_volume_stack_to_shadow(
    KernelGlobals kg, IntegratorShadowState shadow_state, ConstIntegratorState state)
{
  if (kernel_data.kernel_features & KERNEL_FEATURE_VOLUME) {
    int index = 0;
    int shader;
    do {
      shader = INTEGRATOR_STATE_ARRAY(state, volume_stack, index, shader);

      INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_volume_stack, index, object) =
          INTEGRATOR_STATE_ARRAY(state, volume_stack, index, object);
      INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_volume_stack, index, shader) = shader;

      ++index;
    } while (shader != OBJECT_NONE);
  }
}

ccl_device_forceinline void integrator_state_copy_volume_stack(KernelGlobals kg,
                                                               IntegratorState to_state,
                                                               ConstIntegratorState state)
{
  if (kernel_data.kernel_features & KERNEL_FEATURE_VOLUME) {
    int index = 0;
    int shader;
    do {
      shader = INTEGRATOR_STATE_ARRAY(state, volume_stack, index, shader);

      INTEGRATOR_STATE_ARRAY_WRITE(to_state, volume_stack, index, object) = INTEGRATOR_STATE_ARRAY(
          state, volume_stack, index, object);
      INTEGRATOR_STATE_ARRAY_WRITE(to_state, volume_stack, index, shader) = shader;

      ++index;
    } while (shader != OBJECT_NONE);
  }
}
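
/* Equivalent sketch (illustrative, not part of the original header) of the copy
 * loops above, written against the VolumeStack accessors. Both loops copy
 * through the terminating entry, so the destination stack always ends with its
 * own sentinel; comparing the shader value against OBJECT_NONE works because
 * OBJECT_NONE and SHADER_NONE are both ~0 in the kernel types:
 *
 *   int i = 0;
 *   VolumeStack entry;
 *   do {
 *     entry = integrator_state_read_volume_stack(state, i);
 *     integrator_state_write_volume_stack(to_state, i, entry);
 *     ++i;
 *   } while (entry.shader != SHADER_NONE);
 */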

ccl_device_forceinline VolumeStack
integrator_state_read_shadow_volume_stack(ConstIntegratorShadowState state, int i)
{
  VolumeStack entry = {INTEGRATOR_STATE_ARRAY(state, shadow_volume_stack, i, object),
                       INTEGRATOR_STATE_ARRAY(state, shadow_volume_stack, i, shader)};
  return entry;
}

ccl_device_forceinline bool integrator_state_shadow_volume_stack_is_empty(
    KernelGlobals kg, ConstIntegratorShadowState state)
{
  return (kernel_data.kernel_features & KERNEL_FEATURE_VOLUME) ?
             INTEGRATOR_STATE_ARRAY(state, shadow_volume_stack, 0, shader) == SHADER_NONE :
             true;
}

ccl_device_forceinline void integrator_state_write_shadow_volume_stack(IntegratorShadowState state,
                                                                       int i,
                                                                       VolumeStack entry)
{
  INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_volume_stack, i, object) = entry.object;
  INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_volume_stack, i, shader) = entry.shader;
}

#endif /* __VOLUME__ */

/* Shadow Intersection */

ccl_device_forceinline void integrator_state_write_shadow_isect(
    IntegratorShadowState state,
    ccl_private const Intersection *ccl_restrict isect,
    const int index)
{
  INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, index, t) = isect->t;
  INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, index, u) = isect->u;
  INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, index, v) = isect->v;
  INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, index, object) = isect->object;
  INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, index, prim) = isect->prim;
  INTEGRATOR_STATE_ARRAY_WRITE(state, shadow_isect, index, type) = isect->type;
}

ccl_device_forceinline void integrator_state_read_shadow_isect(
    ConstIntegratorShadowState state,
    ccl_private Intersection *ccl_restrict isect,
    const int index)
{
  isect->prim = INTEGRATOR_STATE_ARRAY(state, shadow_isect, index, prim);
  isect->object = INTEGRATOR_STATE_ARRAY(state, shadow_isect, index, object);
  isect->type = INTEGRATOR_STATE_ARRAY(state, shadow_isect, index, type);
  isect->u = INTEGRATOR_STATE_ARRAY(state, shadow_isect, index, u);
  isect->v = INTEGRATOR_STATE_ARRAY(state, shadow_isect, index, v);
  isect->t = INTEGRATOR_STATE_ARRAY(state, shadow_isect, index, t);
}

#if defined(__KERNEL_GPU__)
ccl_device_inline void integrator_state_copy_only(KernelGlobals kg,
                                                  ConstIntegratorState to_state,
                                                  ConstIntegratorState state)
{
  int index;

  /* Rely on the compiler to optimize out unused assignments and `while(false)`'s. */

#  define KERNEL_STRUCT_BEGIN(name) \
    index = 0; \
    do {

#  define KERNEL_STRUCT_MEMBER(parent_struct, type, name, feature) \
    if (kernel_integrator_state.parent_struct.name != nullptr) { \
      kernel_integrator_state.parent_struct.name[to_state] = \
          kernel_integrator_state.parent_struct.name[state]; \
    }

#  ifdef __INTEGRATOR_GPU_PACKED_STATE__
#    define KERNEL_STRUCT_BEGIN_PACKED(parent_struct, feature) \
      KERNEL_STRUCT_BEGIN(parent_struct) \
      KERNEL_STRUCT_MEMBER(parent_struct, packed_##parent_struct, packed, feature)
#    define KERNEL_STRUCT_MEMBER_PACKED(parent_struct, type, name, feature)
#  else
#    define KERNEL_STRUCT_MEMBER_PACKED KERNEL_STRUCT_MEMBER
#    define KERNEL_STRUCT_BEGIN_PACKED(parent_struct, feature) KERNEL_STRUCT_BEGIN(parent_struct)
#  endif

#  define KERNEL_STRUCT_ARRAY_MEMBER(parent_struct, type, name, feature) \
    if (kernel_integrator_state.parent_struct[index].name != nullptr) { \
      kernel_integrator_state.parent_struct[index].name[to_state] = \
          kernel_integrator_state.parent_struct[index].name[state]; \
    }

#  define KERNEL_STRUCT_END(name) \
    } \
    while (false) \
      ;

#  define KERNEL_STRUCT_END_ARRAY(name, cpu_array_size, gpu_array_size) \
    ++index; \
    } \
    while (index < gpu_array_size) \
      ;

#  define KERNEL_STRUCT_VOLUME_STACK_SIZE kernel_data.volume_stack_size

#  include "kernel/integrator/state_template.h"

#  undef KERNEL_STRUCT_BEGIN
#  undef KERNEL_STRUCT_BEGIN_PACKED
#  undef KERNEL_STRUCT_MEMBER
#  undef KERNEL_STRUCT_MEMBER_PACKED
#  undef KERNEL_STRUCT_ARRAY_MEMBER
#  undef KERNEL_STRUCT_END
#  undef KERNEL_STRUCT_END_ARRAY
#  undef KERNEL_STRUCT_VOLUME_STACK_SIZE
}
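
/* Expansion sketch (illustrative, not part of the original header): with the
 * definitions above, a line from the state template such as
 * KERNEL_STRUCT_MEMBER(path, uint32_t, flag, KERNEL_FEATURE_PATH_TRACING)
 * (member and feature names here are examples) becomes a null-guarded SoA copy
 * of one member between the two path slots:
 *
 *   if (kernel_integrator_state.path.flag != nullptr) {
 *     kernel_integrator_state.path.flag[to_state] =
 *         kernel_integrator_state.path.flag[state];
 *   }
 */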

ccl_device_inline void integrator_state_move(KernelGlobals kg,
                                             ConstIntegratorState to_state,
                                             ConstIntegratorState state)
{
  integrator_state_copy_only(kg, to_state, state);

  INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = 0;
}

ccl_device_inline void integrator_shadow_state_copy_only(KernelGlobals kg,
                                                         ConstIntegratorShadowState to_state,
                                                         ConstIntegratorShadowState state)
{
  int index;

  /* Rely on the compiler to optimize out unused assignments and `while(false)`'s. */

#  define KERNEL_STRUCT_BEGIN(name) \
    index = 0; \
    do {

#  define KERNEL_STRUCT_MEMBER(parent_struct, type, name, feature) \
    if (kernel_integrator_state.parent_struct.name != nullptr) { \
      kernel_integrator_state.parent_struct.name[to_state] = \
          kernel_integrator_state.parent_struct.name[state]; \
    }

#  ifdef __INTEGRATOR_GPU_PACKED_STATE__
#    define KERNEL_STRUCT_BEGIN_PACKED(parent_struct, feature) \
      KERNEL_STRUCT_BEGIN(parent_struct) \
      KERNEL_STRUCT_MEMBER(parent_struct, type, packed, feature)
#    define KERNEL_STRUCT_MEMBER_PACKED(parent_struct, type, name, feature)
#  else
#    define KERNEL_STRUCT_MEMBER_PACKED KERNEL_STRUCT_MEMBER
#    define KERNEL_STRUCT_BEGIN_PACKED(parent_struct, feature) KERNEL_STRUCT_BEGIN(parent_struct)
#  endif

#  define KERNEL_STRUCT_ARRAY_MEMBER(parent_struct, type, name, feature) \
    if (kernel_integrator_state.parent_struct[index].name != nullptr) { \
      kernel_integrator_state.parent_struct[index].name[to_state] = \
          kernel_integrator_state.parent_struct[index].name[state]; \
    }

#  define KERNEL_STRUCT_END(name) \
    } \
    while (false) \
      ;

#  define KERNEL_STRUCT_END_ARRAY(name, cpu_array_size, gpu_array_size) \
    ++index; \
    } \
    while (index < gpu_array_size) \
      ;

#  define KERNEL_STRUCT_VOLUME_STACK_SIZE kernel_data.volume_stack_size

#  include "kernel/integrator/shadow_state_template.h"

#  undef KERNEL_STRUCT_BEGIN
#  undef KERNEL_STRUCT_BEGIN_PACKED
#  undef KERNEL_STRUCT_MEMBER
#  undef KERNEL_STRUCT_MEMBER_PACKED
#  undef KERNEL_STRUCT_ARRAY_MEMBER
#  undef KERNEL_STRUCT_END
#  undef KERNEL_STRUCT_END_ARRAY
#  undef KERNEL_STRUCT_VOLUME_STACK_SIZE
}

ccl_device_inline void integrator_shadow_state_move(KernelGlobals kg,
                                                    ConstIntegratorState to_state,
                                                    ConstIntegratorShadowState state)
{
  integrator_shadow_state_copy_only(kg, to_state, state);

  INTEGRATOR_STATE_WRITE(state, shadow_path, queued_kernel) = 0;
}

#endif

/* NOTE: Leaves kernel scheduling information untouched. Use INIT semantic for one of the paths
 * after this function. */
ccl_device_inline IntegratorState integrator_state_shadow_catcher_split(KernelGlobals kg,
                                                                        IntegratorState state)
{
#if defined(__KERNEL_GPU__)
  ConstIntegratorState to_state = atomic_fetch_and_add_uint32(
      &kernel_integrator_state.next_main_path_index[0], 1);

  integrator_state_copy_only(kg, to_state, state);
#else
  IntegratorStateCPU *ccl_restrict to_state = state + 1;

  /* Only copy the required subset for performance. */
  to_state->path = state->path;
  to_state->ray = state->ray;
  to_state->isect = state->isect;
#  ifdef __VOLUME__
  integrator_state_copy_volume_stack(kg, to_state, state);
#  endif
#endif

  return to_state;
}
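
/* Usage sketch (illustrative, not part of the original header): splitting the
 * path at a shadow catcher. On CPU the split lands in the adjacent state slot
 * (state + 1); on GPU a fresh slot index is claimed atomically. Per the NOTE
 * above, the caller still re-initializes scheduling for one of the two paths:
 *
 *   IntegratorState split_state = integrator_state_shadow_catcher_split(kg, state);
 *   // ... queue the next kernel for split_state with INIT semantics ...
 */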

#ifndef __KERNEL_GPU__
ccl_device_inline int integrator_state_bounce(ConstIntegratorState state, const int)
{
  return INTEGRATOR_STATE(state, path, bounce);
}

ccl_device_inline int integrator_state_bounce(ConstIntegratorShadowState state, const int)
{
  return INTEGRATOR_STATE(state, shadow_path, bounce);
}

ccl_device_inline int integrator_state_diffuse_bounce(ConstIntegratorState state, const int)
{
  return INTEGRATOR_STATE(state, path, diffuse_bounce);
}

ccl_device_inline int integrator_state_diffuse_bounce(ConstIntegratorShadowState state, const int)
{
  return INTEGRATOR_STATE(state, shadow_path, diffuse_bounce);
}

ccl_device_inline int integrator_state_glossy_bounce(ConstIntegratorState state, const int)
{
  return INTEGRATOR_STATE(state, path, glossy_bounce);
}

ccl_device_inline int integrator_state_glossy_bounce(ConstIntegratorShadowState state, const int)
{
  return INTEGRATOR_STATE(state, shadow_path, glossy_bounce);
}

ccl_device_inline int integrator_state_transmission_bounce(ConstIntegratorState state, const int)
{
  return INTEGRATOR_STATE(state, path, transmission_bounce);
}

ccl_device_inline int integrator_state_transmission_bounce(ConstIntegratorShadowState state,
                                                           const int)
{
  return INTEGRATOR_STATE(state, shadow_path, transmission_bounce);
}

ccl_device_inline int integrator_state_transparent_bounce(ConstIntegratorState state, const int)
{
  return INTEGRATOR_STATE(state, path, transparent_bounce);
}

ccl_device_inline int integrator_state_transparent_bounce(ConstIntegratorShadowState state,
                                                           const int)
{
  return INTEGRATOR_STATE(state, shadow_path, transparent_bounce);
}
#else
ccl_device_inline int integrator_state_bounce(ConstIntegratorState state,
                                              const uint32_t path_flag)
{
  return (path_flag & PATH_RAY_SHADOW) ? INTEGRATOR_STATE(state, shadow_path, bounce) :
                                         INTEGRATOR_STATE(state, path, bounce);
}

ccl_device_inline int integrator_state_diffuse_bounce(ConstIntegratorState state,
                                                      const uint32_t path_flag)
{
  return (path_flag & PATH_RAY_SHADOW) ? INTEGRATOR_STATE(state, shadow_path, diffuse_bounce) :
                                         INTEGRATOR_STATE(state, path, diffuse_bounce);
}

ccl_device_inline int integrator_state_glossy_bounce(ConstIntegratorState state,
                                                     const uint32_t path_flag)
{
  return (path_flag & PATH_RAY_SHADOW) ? INTEGRATOR_STATE(state, shadow_path, glossy_bounce) :
                                         INTEGRATOR_STATE(state, path, glossy_bounce);
}

ccl_device_inline int integrator_state_transmission_bounce(ConstIntegratorState state,
                                                           const uint32_t path_flag)
{
  return (path_flag & PATH_RAY_SHADOW) ?
             INTEGRATOR_STATE(state, shadow_path, transmission_bounce) :
             INTEGRATOR_STATE(state, path, transmission_bounce);
}

ccl_device_inline int integrator_state_transparent_bounce(ConstIntegratorState state,
                                                          const uint32_t path_flag)
{
  return (path_flag & PATH_RAY_SHADOW) ? INTEGRATOR_STATE(state, shadow_path, transparent_bounce) :
                                         INTEGRATOR_STATE(state, path, transparent_bounce);
}
#endif
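
/* Usage sketch (illustrative, not part of the original header): these accessors
 * let shared shading code query bounce counts without knowing whether it runs
 * on a main or a shadow path. On CPU the overload is selected by the state type
 * and the second argument is ignored; on GPU both paths share one state type,
 * so path_flag selects the source:
 *
 *   const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
 *   const int bounce = integrator_state_bounce(state, path_flag);
 */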

CCL_NAMESPACE_END