Blender V5.0
light_passes.h
Go to the documentation of this file.
/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
 *
 * SPDX-License-Identifier: Apache-2.0 */

#pragma once

#include "kernel/film/write.h"

#include "kernel/integrator/shadow_catcher.h"

#include "util/atomic.h"
15
16/* --------------------------------------------------------------------
17 * BSDF Evaluation
18 *
19 * BSDF evaluation result, split between diffuse and glossy. This is used to
20 * accumulate render passes separately. Note that reflection, transmission
21 * and volume scattering are written to different render passes, but we assume
22 * that only one of those can happen at a bounce, and so do not need to accumulate
23 * them separately. */
24
26 const ccl_private ShaderClosure *sc,
27 const float3 wo,
28 Spectrum value)
29{
30 eval->diffuse = zero_spectrum();
31 eval->glossy = zero_spectrum();
32
33 if (CLOSURE_IS_BSDF_DIFFUSE(sc->type)) {
34 eval->diffuse = value;
35 }
36 else if (CLOSURE_IS_BSDF_GLOSSY(sc->type)) {
37 eval->glossy = value;
38 }
39 else if (CLOSURE_IS_GLASS(sc->type)) {
40 /* Glass can count as glossy or transmission, depending on which side we end up on. */
41 if (dot(sc->N, wo) > 0.0f) {
42 eval->glossy = value;
43 }
44 }
45
46 eval->sum = value;
47}
48
50{
51 eval->diffuse = zero_spectrum();
52 eval->glossy = zero_spectrum();
53 eval->sum = value;
54}
55
57 const ccl_private ShaderClosure *sc,
58 const float3 wo,
59 Spectrum value)
60{
61 if (CLOSURE_IS_BSDF_DIFFUSE(sc->type)) {
62 eval->diffuse += value;
63 }
64 else if (CLOSURE_IS_BSDF_GLOSSY(sc->type)) {
65 eval->glossy += value;
66 }
67 else if (CLOSURE_IS_GLASS(sc->type)) {
68 if (dot(sc->N, wo) > 0.0f) {
69 eval->glossy += value;
70 }
71 }
72
73 eval->sum += value;
74}
75
77{
78 eval->sum += value;
79}
80
82{
83 return is_zero(eval->sum);
84}
85
87{
88 eval->diffuse *= value;
89 eval->glossy *= value;
90 eval->sum *= value;
91}
92
94{
95 eval->diffuse *= value;
96 eval->glossy *= value;
97 eval->sum *= value;
98}
99
101{
102 return eval->sum;
103}
104
106{
107 /* Ratio of diffuse weight to recover proportions for writing to render pass.
108 * We assume reflection, transmission and volume scatter to be exclusive. */
109 return safe_divide(eval->diffuse, eval->sum);
110}
111
113{
114 /* Ratio of glossy weight to recover proportions for writing to render pass.
115 * We assume reflection, transmission and volume scatter to be exclusive. */
116 return safe_divide(eval->glossy, eval->sum);
117}
118
119/* --------------------------------------------------------------------
120 * Clamping
121 *
122 * Clamping is done on a per-contribution basis so that we can write directly
123 * to render buffers instead of using per-thread memory, and to avoid the
124 * impact of clamping on other contributions. */
125
128 const int bounce)
129{
130#ifdef __KERNEL_DEBUG_NAN__
131 if (!isfinite_safe(*L)) {
132 kernel_assert(!"Cycles sample with non-finite value detected");
133 }
134#endif
135 /* Make sure all components are finite, allowing the contribution to be usable by adaptive
136 * sampling convergence check, but also to make it so render result never causes issues with
137 * post-processing. */
138 *L = ensure_finite(*L);
139
140#ifdef __CLAMP_SAMPLE__
141 const float limit = (bounce > 0) ? kernel_data.integrator.sample_clamp_indirect :
142 kernel_data.integrator.sample_clamp_direct;
143 const float sum = reduce_add(fabs(*L));
144 if (sum > limit) {
145 *L *= limit / sum;
146 }
147#endif
148}
149
150/* --------------------------------------------------------------------
151 * Pass accumulation utilities.
152 */
153
154/* --------------------------------------------------------------------
155 * Adaptive sampling.
156 */
157
161 const int sample,
162 const int sample_offset)
163{
164 if (kernel_data.film.pass_sample_count == PASS_UNUSED) {
165 return sample;
166 }
167
169
171 (ccl_global uint *)(buffer) + kernel_data.film.pass_sample_count, 1) +
172 sample_offset;
173}
174
176 const int sample,
177 const Spectrum contribution,
178 ccl_global float *ccl_restrict buffer)
179{
180 /* Adaptive Sampling. Fill the additional buffer with only one half of the samples and
181 * calculate our stopping criteria. This is the heuristic from "A hierarchical automatic
182 * stopping condition for Monte Carlo global illumination" except that here it is applied
183 * per pixel and not in hierarchical tiles. */
184
185 if (kernel_data.film.pass_adaptive_aux_buffer == PASS_UNUSED) {
186 return;
187 }
188
189 if (sample_is_class_A(kernel_data.integrator.sampling_pattern, sample)) {
190 const float3 contribution_rgb = spectrum_to_rgb(contribution);
191
192 film_write_pass_float4(buffer + kernel_data.film.pass_adaptive_aux_buffer,
193 make_float4(contribution_rgb.x * 2.0f,
194 contribution_rgb.y * 2.0f,
195 contribution_rgb.z * 2.0f,
196 0.0f));
197 }
198}
199
200/* Write the volume and surface contribution for volume scattering probability guiding. */
203 buffer,
204 const uint32_t path_flag,
205 const Spectrum contribution)
206{
207 int pass_offset = PASS_UNUSED;
208 if (path_flag & PATH_RAY_VOLUME_PRIMARY_TRANSMIT) {
209 pass_offset = kernel_data.film.pass_volume_transmit;
210 }
211 else if (path_flag & PATH_RAY_VOLUME_SCATTER) {
212 pass_offset = kernel_data.film.pass_volume_scatter;
213 }
214
215 if (pass_offset != PASS_UNUSED) {
216 film_write_pass_spectrum(buffer + pass_offset, contribution);
217 }
218}
219
220/* --------------------------------------------------------------------
221 * Shadow catcher.
222 */
223
224#ifdef __SHADOW_CATCHER__
225
226/* Accumulate contribution to the Shadow Catcher pass.
227 *
228 * Returns truth if the contribution is fully handled here and is not to be added to the other
229 * passes (like combined, adaptive sampling). */
230
231ccl_device bool film_write_shadow_catcher(KernelGlobals kg,
232 const uint32_t path_flag,
233 const Spectrum contribution,
234 ccl_global float *ccl_restrict buffer)
235{
236 if (!kernel_data.integrator.has_shadow_catcher) {
237 return false;
238 }
239
240 kernel_assert(kernel_data.film.pass_shadow_catcher != PASS_UNUSED);
241 kernel_assert(kernel_data.film.pass_shadow_catcher_matte != PASS_UNUSED);
242
243 /* Matte pass. */
244 if (kernel_shadow_catcher_is_matte_path(path_flag)) {
245 film_write_pass_spectrum(buffer + kernel_data.film.pass_shadow_catcher_matte, contribution);
246 /* NOTE: Accumulate the combined pass and to the samples count pass, so that the adaptive
247 * sampling is based on how noisy the combined pass is as if there were no catchers in the
248 * scene. */
249 }
250
251 /* Shadow catcher pass. */
252 if (kernel_shadow_catcher_is_object_pass(path_flag)) {
253 film_write_pass_spectrum(buffer + kernel_data.film.pass_shadow_catcher, contribution);
254 return true;
255 }
256
257 return false;
258}
259
260ccl_device bool film_write_shadow_catcher_transparent(KernelGlobals kg,
261 const uint32_t path_flag,
262 const Spectrum contribution,
263 const float transparent,
264 ccl_global float *ccl_restrict buffer)
265{
266 if (!kernel_data.integrator.has_shadow_catcher) {
267 return false;
268 }
269
270 kernel_assert(kernel_data.film.pass_shadow_catcher != PASS_UNUSED);
271 kernel_assert(kernel_data.film.pass_shadow_catcher_matte != PASS_UNUSED);
272
273 if (path_flag & PATH_RAY_SHADOW_CATCHER_BACKGROUND) {
274 return true;
275 }
276
277 /* Matte pass. */
278 if (kernel_shadow_catcher_is_matte_path(path_flag)) {
279 const float3 contribution_rgb = spectrum_to_rgb(contribution);
280
281 film_write_pass_float4(buffer + kernel_data.film.pass_shadow_catcher_matte,
282 make_float4(contribution_rgb, transparent));
283 /* NOTE: Accumulate the combined pass and to the samples count pass, so that the adaptive
284 * sampling is based on how noisy the combined pass is as if there were no catchers in the
285 * scene. */
286 }
287
288 /* Shadow catcher pass. */
289 if (kernel_shadow_catcher_is_object_pass(path_flag)) {
290 /* NOTE: The transparency of the shadow catcher pass is ignored. It is not needed for the
291 * calculation and the alpha channel of the pass contains numbers of samples contributed to a
292 * pixel of the pass. */
293 film_write_pass_spectrum(buffer + kernel_data.film.pass_shadow_catcher, contribution);
294 return true;
295 }
296
297 return false;
298}
299
300ccl_device void film_write_shadow_catcher_transparent_only(KernelGlobals kg,
301 const uint32_t path_flag,
302 const float transparent,
303 ccl_global float *ccl_restrict buffer)
304{
305 if (!kernel_data.integrator.has_shadow_catcher) {
306 return;
307 }
308
309 kernel_assert(kernel_data.film.pass_shadow_catcher_matte != PASS_UNUSED);
310
311 /* Matte pass. */
312 if (kernel_shadow_catcher_is_matte_path(path_flag)) {
313 film_write_pass_float(buffer + kernel_data.film.pass_shadow_catcher_matte + 3, transparent);
314 }
315}
316
317/* Write shadow catcher passes on a bounce from the shadow catcher object. */
318ccl_device_forceinline void film_write_shadow_catcher_bounce_data(
320{
321 kernel_assert(kernel_data.film.pass_shadow_catcher_sample_count != PASS_UNUSED);
322 kernel_assert(kernel_data.film.pass_shadow_catcher_matte != PASS_UNUSED);
323
325
326 /* Count sample for the shadow catcher object. */
327 film_write_pass_float(buffer + kernel_data.film.pass_shadow_catcher_sample_count, 1.0f);
328
329 /* Since the split is done, the sample does not contribute to the matte, so accumulate it as
330 * transparency to the matte. */
331 const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput);
332 film_write_pass_float(buffer + kernel_data.film.pass_shadow_catcher_matte + 3,
333 average(throughput));
334}
335
336#endif /* __SHADOW_CATCHER__ */
337
338/* --------------------------------------------------------------------
339 * Render passes.
340 */
341
342/* Write combined pass. */
344 const uint32_t path_flag,
345 const int sample,
346 const Spectrum contribution,
347 ccl_global float *ccl_restrict buffer)
348{
349#ifdef __SHADOW_CATCHER__
350 if (film_write_shadow_catcher(kg, path_flag, contribution, buffer)) {
351 return;
352 }
353#endif
354
355 if (kernel_data.film.light_pass_flag & PASSMASK(COMBINED)) {
356 film_write_pass_spectrum(buffer + kernel_data.film.pass_combined, contribution);
357 }
358
359 film_write_adaptive_buffer(kg, sample, contribution, buffer);
360 film_write_volume_scattering_guiding_pass(kg, buffer, path_flag, contribution);
361}
362
363/* Write combined pass with transparency. */
365 const uint32_t path_flag,
366 const int sample,
367 const Spectrum contribution,
368 const float transparent,
369 ccl_global float *ccl_restrict buffer)
370{
371#ifdef __SHADOW_CATCHER__
372 if (film_write_shadow_catcher_transparent(kg, path_flag, contribution, transparent, buffer)) {
373 return;
374 }
375#endif
376
377 if (kernel_data.film.light_pass_flag & PASSMASK(COMBINED)) {
378 const float3 contribution_rgb = spectrum_to_rgb(contribution);
379
380 film_write_pass_float4(buffer + kernel_data.film.pass_combined,
381 make_float4(contribution_rgb, transparent));
382 }
383
384 film_write_adaptive_buffer(kg, sample, contribution, buffer);
385 film_write_volume_scattering_guiding_pass(kg, buffer, path_flag, contribution);
386}
387
388/* Write background or emission to appropriate pass. */
390 KernelGlobals kg,
392 Spectrum contribution,
393 ccl_global float *ccl_restrict buffer,
394 const int pass,
395 const int lightgroup = LIGHTGROUP_NONE)
396{
397 if (!(kernel_data.film.light_pass_flag & PASS_ANY)) {
398 return;
399 }
400
401#ifdef __PASSES__
402 const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
403 int pass_offset = PASS_UNUSED;
404
405 /* Denoising albedo. */
406# ifdef __DENOISING_FEATURES__
407 if (path_flag & PATH_RAY_DENOISING_FEATURES) {
408 if (kernel_data.film.pass_denoising_albedo != PASS_UNUSED) {
409 const Spectrum denoising_feature_throughput = INTEGRATOR_STATE(
410 state, path, denoising_feature_throughput);
411 const Spectrum denoising_albedo = denoising_feature_throughput * contribution;
412 film_write_pass_spectrum(buffer + kernel_data.film.pass_denoising_albedo, denoising_albedo);
413 }
414 }
415# endif /* __DENOISING_FEATURES__ */
416
417 const bool is_shadowcatcher = (path_flag & PATH_RAY_SHADOW_CATCHER_HIT) != 0;
418 if (!is_shadowcatcher && lightgroup != LIGHTGROUP_NONE &&
419 kernel_data.film.pass_lightgroup != PASS_UNUSED)
420 {
421 film_write_pass_spectrum(buffer + kernel_data.film.pass_lightgroup + 3 * lightgroup,
422 contribution);
423 }
424
425 if (!(path_flag & PATH_RAY_ANY_PASS)) {
426 /* Directly visible, write to emission or background pass. */
427 pass_offset = pass;
428 }
429 else if (is_shadowcatcher) {
430 /* Don't write any light passes for shadow catcher, for easier
431 * compositing back together of the combined pass. */
432 return;
433 }
434 else if (kernel_data.kernel_features & KERNEL_FEATURE_LIGHT_PASSES) {
435 if (path_flag & PATH_RAY_SURFACE_PASS) {
436 /* Indirectly visible through reflection. */
437 const Spectrum diffuse_weight = INTEGRATOR_STATE(state, path, pass_diffuse_weight);
438 const Spectrum glossy_weight = INTEGRATOR_STATE(state, path, pass_glossy_weight);
439
440 /* Glossy */
441 const int glossy_pass_offset = ((INTEGRATOR_STATE(state, path, bounce) == 1) ?
442 kernel_data.film.pass_glossy_direct :
443 kernel_data.film.pass_glossy_indirect);
444 if (glossy_pass_offset != PASS_UNUSED) {
445 film_write_pass_spectrum(buffer + glossy_pass_offset, glossy_weight * contribution);
446 }
447
448 /* Transmission */
449 const int transmission_pass_offset = ((INTEGRATOR_STATE(state, path, bounce) == 1) ?
450 kernel_data.film.pass_transmission_direct :
451 kernel_data.film.pass_transmission_indirect);
452
453 if (transmission_pass_offset != PASS_UNUSED) {
454 /* Transmission is what remains if not diffuse and glossy, not stored explicitly to save
455 * GPU memory. */
456 const Spectrum transmission_weight = one_spectrum() - diffuse_weight - glossy_weight;
457 film_write_pass_spectrum(buffer + transmission_pass_offset,
458 transmission_weight * contribution);
459 }
460
461 /* Reconstruct diffuse subset of throughput. */
462 pass_offset = (INTEGRATOR_STATE(state, path, bounce) == 1) ?
463 kernel_data.film.pass_diffuse_direct :
464 kernel_data.film.pass_diffuse_indirect;
465 if (pass_offset != PASS_UNUSED) {
466 contribution *= diffuse_weight;
467 }
468 }
469 else if (path_flag & PATH_RAY_VOLUME_PASS) {
470 /* Indirectly visible through volume. */
471 pass_offset = (INTEGRATOR_STATE(state, path, bounce) == 1) ?
472 kernel_data.film.pass_volume_direct :
473 kernel_data.film.pass_volume_indirect;
474 }
475 }
476
477 /* Single write call for GPU coherence. */
478 if (pass_offset != PASS_UNUSED) {
479 film_write_pass_spectrum(buffer + pass_offset, contribution);
480 }
481#endif /* __PASSES__ */
482}
483
484/* Write light contribution to render buffer. */
488{
489 /* The throughput for shadow paths already contains the light shader evaluation. */
490 Spectrum contribution = INTEGRATOR_STATE(state, shadow_path, throughput);
491 film_clamp_light(kg, &contribution, INTEGRATOR_STATE(state, shadow_path, bounce));
492
494
495 const uint32_t path_flag = INTEGRATOR_STATE(state, shadow_path, flag);
496 const int sample = INTEGRATOR_STATE(state, shadow_path, sample);
497
498 /* Ambient occlusion. */
499 if (path_flag & PATH_RAY_SHADOW_FOR_AO) {
500 if ((kernel_data.kernel_features & KERNEL_FEATURE_AO_PASS) && (path_flag & PATH_RAY_CAMERA)) {
501 film_write_pass_spectrum(buffer + kernel_data.film.pass_ao, contribution);
502 }
503 if (kernel_data.kernel_features & KERNEL_FEATURE_AO_ADDITIVE) {
504 const Spectrum ao_weight = INTEGRATOR_STATE(state, shadow_path, unshadowed_throughput);
505 film_write_combined_pass(kg, path_flag, sample, contribution * ao_weight, buffer);
506 }
507 return;
508 }
509
510 /* Direct light shadow. */
511 film_write_combined_pass(kg, path_flag, sample, contribution, buffer);
512
513#ifdef __PASSES__
514 if (kernel_data.film.light_pass_flag & PASS_ANY) {
515 const uint32_t path_flag = INTEGRATOR_STATE(state, shadow_path, flag);
516
517 /* Don't write any light passes for shadow catcher, for easier
518 * compositing back together of the combined pass. */
519 if (path_flag & PATH_RAY_SHADOW_CATCHER_HIT) {
520 return;
521 }
522
523 /* Write lightgroup pass. LIGHTGROUP_NONE is ~0 so decode from unsigned to signed */
524 const int lightgroup = (int)(INTEGRATOR_STATE(state, shadow_path, lightgroup)) - 1;
525 if (lightgroup != LIGHTGROUP_NONE && kernel_data.film.pass_lightgroup != PASS_UNUSED) {
526 film_write_pass_spectrum(buffer + kernel_data.film.pass_lightgroup + 3 * lightgroup,
527 contribution);
528 }
529
530 if (kernel_data.kernel_features & KERNEL_FEATURE_LIGHT_PASSES) {
531 int pass_offset = PASS_UNUSED;
532
533 if (path_flag & PATH_RAY_SURFACE_PASS) {
534 /* Indirectly visible through reflection. */
535 const Spectrum diffuse_weight = INTEGRATOR_STATE(state, shadow_path, pass_diffuse_weight);
536 const Spectrum glossy_weight = INTEGRATOR_STATE(state, shadow_path, pass_glossy_weight);
537
538 /* Glossy */
539 const int glossy_pass_offset = ((INTEGRATOR_STATE(state, shadow_path, bounce) == 0) ?
540 kernel_data.film.pass_glossy_direct :
541 kernel_data.film.pass_glossy_indirect);
542 if (glossy_pass_offset != PASS_UNUSED) {
543 film_write_pass_spectrum(buffer + glossy_pass_offset, glossy_weight * contribution);
544 }
545
546 /* Transmission */
547 const int transmission_pass_offset = ((INTEGRATOR_STATE(state, shadow_path, bounce) == 0) ?
548 kernel_data.film.pass_transmission_direct :
549 kernel_data.film.pass_transmission_indirect);
550
551 if (transmission_pass_offset != PASS_UNUSED) {
552 /* Transmission is what remains if not diffuse and glossy, not stored explicitly to save
553 * GPU memory. */
554 const Spectrum transmission_weight = one_spectrum() - diffuse_weight - glossy_weight;
555 film_write_pass_spectrum(buffer + transmission_pass_offset,
556 transmission_weight * contribution);
557 }
558
559 /* Reconstruct diffuse subset of throughput. */
560 pass_offset = (INTEGRATOR_STATE(state, shadow_path, bounce) == 0) ?
561 kernel_data.film.pass_diffuse_direct :
562 kernel_data.film.pass_diffuse_indirect;
563 if (pass_offset != PASS_UNUSED) {
564 contribution *= diffuse_weight;
565 }
566 }
567 else if (path_flag & PATH_RAY_VOLUME_PASS) {
568 /* Indirectly visible through volume. */
569 pass_offset = (INTEGRATOR_STATE(state, shadow_path, bounce) == 0) ?
570 kernel_data.film.pass_volume_direct :
571 kernel_data.film.pass_volume_indirect;
572 }
573
574 /* Single write call for GPU coherence. */
575 if (pass_offset != PASS_UNUSED) {
576 film_write_pass_spectrum(buffer + pass_offset, contribution);
577 }
578 }
579 }
580#endif
581}
582
583/* Write transparency to render buffer.
584 *
585 * Note that we accumulate transparency = 1 - alpha in the render buffer.
586 * Otherwise we'd have to write alpha on path termination, which happens
587 * in many places. */
589 const uint32_t path_flag,
590 const float transparent,
591 ccl_global float *ccl_restrict buffer)
592{
593 if (kernel_data.film.light_pass_flag & PASSMASK(COMBINED)) {
594 film_write_pass_float(buffer + kernel_data.film.pass_combined + 3, transparent);
595 }
596
597#ifdef __SHADOW_CATCHER__
598 film_write_shadow_catcher_transparent_only(kg, path_flag, transparent, buffer);
599#endif
600
601 if (path_flag & PATH_RAY_VOLUME_PRIMARY_TRANSMIT) {
602 kernel_assert(kernel_data.film.pass_volume_transmit != PASS_UNUSED);
603 film_write_pass_spectrum(buffer + kernel_data.film.pass_volume_transmit,
604 make_spectrum(transparent));
605 }
606}
607
608/* Write holdout to render buffer. */
611 const uint32_t path_flag,
612 const float transparent,
614{
616 film_write_transparent(kg, path_flag, transparent, buffer);
617}
618
619/* Write background contribution to render buffer.
620 *
621 * Includes transparency, matching film_write_transparent. */
624 const Spectrum L,
625 const float transparent,
626 const bool is_transparent_background_ray,
628{
629 Spectrum contribution = INTEGRATOR_STATE(state, path, throughput) * L;
630 film_clamp_light(kg, &contribution, INTEGRATOR_STATE(state, path, bounce) - 1);
631
633 const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
634
635 if (is_transparent_background_ray) {
636 film_write_transparent(kg, path_flag, transparent, buffer);
637 }
638 else {
639 const int sample = INTEGRATOR_STATE(state, path, sample);
640 film_write_combined_transparent_pass(kg, path_flag, sample, contribution, transparent, buffer);
641 }
643 state,
644 contribution,
645 buffer,
646 kernel_data.film.pass_background,
647 kernel_data.background.lightgroup);
648}
649
650/* Write emission to render buffer. */
653 const Spectrum L,
655 const int lightgroup = LIGHTGROUP_NONE)
656{
657 Spectrum contribution = L;
658 film_clamp_light(kg, &contribution, INTEGRATOR_STATE(state, path, bounce) - 1);
659
661 const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
662 const int sample = INTEGRATOR_STATE(state, path, sample);
663
664 film_write_combined_pass(kg, path_flag, sample, contribution, buffer);
666 kg, state, contribution, buffer, kernel_data.film.pass_emission, lightgroup);
667}
668
671 const Spectrum L,
672 const float mis_weight,
674 const int lightgroup = LIGHTGROUP_NONE)
675{
676 Spectrum contribution = INTEGRATOR_STATE(state, path, throughput) * L * mis_weight;
677 film_clamp_light(kg, &contribution, INTEGRATOR_STATE(state, path, bounce) - 1);
678
680 const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
681 const int sample = INTEGRATOR_STATE(state, path, sample);
682
683 film_write_combined_pass(kg, path_flag, sample, contribution, buffer);
685 kg, state, contribution, buffer, kernel_data.film.pass_emission, lightgroup);
686}
687
MINLINE float safe_divide(float a, float b)
unsigned int uint
ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x)
static T sum(const btAlignedObjectArray< T > &items)
dot(value.rgb, luminance_coefficients)") DEFINE_VALUE("REDUCE(lhs
#define kernel_assert(cond)
#define CLOSURE_IS_GLASS(type)
#define kernel_data
#define PASS_UNUSED
#define ccl_restrict
#define ccl_device_forceinline
#define one_spectrum
#define zero_spectrum
#define make_spectrum(f)
#define CLOSURE_IS_BSDF_GLOSSY(type)
#define ccl_private
const ThreadKernelGlobalsCPU * KernelGlobals
#define ccl_device_inline
#define KERNEL_FEATURE_LIGHT_PASSES
#define KERNEL_FEATURE_AO_PASS
#define LIGHTGROUP_NONE
#define CLOSURE_IS_BSDF_DIFFUSE(type)
#define PASSMASK(pass)
#define ccl_global
#define PASS_ANY
#define KERNEL_FEATURE_AO_ADDITIVE
#define CCL_NAMESPACE_END
ccl_gpu_kernel_postfix ccl_global KernelWorkTile const int ccl_global float * render_buffer
@ PATH_RAY_SHADOW_FOR_AO
@ PATH_RAY_SHADOW_CATCHER_HIT
@ PATH_RAY_VOLUME_PASS
@ PATH_RAY_VOLUME_SCATTER
@ PATH_RAY_DENOISING_FEATURES
@ PATH_RAY_SURFACE_PASS
@ PATH_RAY_SHADOW_CATCHER_BACKGROUND
@ PATH_RAY_CAMERA
@ PATH_RAY_ANY_PASS
@ PATH_RAY_VOLUME_PRIMARY_TRANSMIT
ccl_device_inline float3 spectrum_to_rgb(Spectrum s)
ccl_device_inline void film_write_background(KernelGlobals kg, ConstIntegratorState state, const Spectrum L, const float transparent, const bool is_transparent_background_ray, ccl_global float *ccl_restrict render_buffer)
ccl_device_inline void film_write_surface_emission(KernelGlobals kg, ConstIntegratorState state, const Spectrum L, const float mis_weight, ccl_global float *ccl_restrict render_buffer, const int lightgroup=LIGHTGROUP_NONE)
ccl_device_inline void film_write_direct_light(KernelGlobals kg, ConstIntegratorShadowState state, ccl_global float *ccl_restrict render_buffer)
ccl_device void film_write_adaptive_buffer(KernelGlobals kg, const int sample, const Spectrum contribution, ccl_global float *ccl_restrict buffer)
ccl_device_inline void film_write_holdout(KernelGlobals kg, ConstIntegratorState state, const uint32_t path_flag, const float transparent, ccl_global float *ccl_restrict render_buffer)
ccl_device_inline bool bsdf_eval_is_zero(ccl_private BsdfEval *eval)
ccl_device_inline void film_write_volume_emission(KernelGlobals kg, ConstIntegratorState state, const Spectrum L, ccl_global float *ccl_restrict render_buffer, const int lightgroup=LIGHTGROUP_NONE)
CCL_NAMESPACE_BEGIN ccl_device_inline void bsdf_eval_init(ccl_private BsdfEval *eval, const ccl_private ShaderClosure *sc, const float3 wo, Spectrum value)
ccl_device_inline void film_write_combined_pass(KernelGlobals kg, const uint32_t path_flag, const int sample, const Spectrum contribution, ccl_global float *ccl_restrict buffer)
ccl_device_inline Spectrum bsdf_eval_pass_glossy_weight(const ccl_private BsdfEval *eval)
ccl_device_inline void bsdf_eval_accum(ccl_private BsdfEval *eval, const ccl_private ShaderClosure *sc, const float3 wo, Spectrum value)
ccl_device_inline void bsdf_eval_mul(ccl_private BsdfEval *eval, const float value)
ccl_device_inline int film_write_sample(KernelGlobals kg, ConstIntegratorState state, ccl_global float *ccl_restrict render_buffer, const int sample, const int sample_offset)
ccl_device_inline Spectrum bsdf_eval_sum(const ccl_private BsdfEval *eval)
ccl_device_inline void film_write_emission_or_background_pass(KernelGlobals kg, ConstIntegratorState state, Spectrum contribution, ccl_global float *ccl_restrict buffer, const int pass, const int lightgroup=LIGHTGROUP_NONE)
ccl_device_forceinline void film_clamp_light(KernelGlobals kg, ccl_private Spectrum *L, const int bounce)
ccl_device_inline Spectrum bsdf_eval_pass_diffuse_weight(const ccl_private BsdfEval *eval)
ccl_device_inline void film_write_transparent(KernelGlobals kg, const uint32_t path_flag, const float transparent, ccl_global float *ccl_restrict buffer)
ccl_device_inline void film_write_volume_scattering_guiding_pass(KernelGlobals kg, ccl_global float *ccl_restrict buffer, const uint32_t path_flag, const Spectrum contribution)
ccl_device_inline void film_write_combined_transparent_pass(KernelGlobals kg, const uint32_t path_flag, const int sample, const Spectrum contribution, const float transparent, ccl_global float *ccl_restrict buffer)
ccl_device_inline float ensure_finite(const float v)
Definition math_base.h:356
ccl_device_inline bool isfinite_safe(const float f)
Definition math_base.h:348
ccl_device_inline dual1 reduce_add(const dual< T > a)
Definition math_dual.h:49
ccl_device_inline bool is_zero(const float2 a)
ccl_device_inline float2 fabs(const float2 a)
static ulong state[N]
#define L
float average(point a)
Definition node_math.h:144
ccl_device_inline bool sample_is_class_A(const int pattern, const int sample)
Definition pattern.h:163
#define ccl_device
#define make_float4
#define INTEGRATOR_STATE(state, nested_struct, member)
Definition state.h:235
const IntegratorShadowStateCPU * ConstIntegratorShadowState
Definition state.h:231
IntegratorStateCPU * IntegratorState
Definition state.h:228
const IntegratorStateCPU * ConstIntegratorState
Definition state.h:229
float z
Definition sky_math.h:136
float y
Definition sky_math.h:136
float x
Definition sky_math.h:136
float3 Spectrum
uint8_t flag
Definition wm_window.cc:145
ccl_device_inline void film_write_pass_float4(ccl_global float *ccl_restrict buffer, const float4 value)
Definition write.h:92
ccl_device_inline void film_write_pass_spectrum(ccl_global float *ccl_restrict buffer, Spectrum value)
Definition write.h:86
ccl_device_forceinline ccl_global float * film_pass_pixel_render_buffer_shadow(KernelGlobals kg, ConstIntegratorShadowState state, ccl_global float *ccl_restrict render_buffer)
Definition write.h:33
ccl_device_inline void film_write_pass_float(ccl_global float *ccl_restrict buffer, const float value)
Definition write.h:58
CCL_NAMESPACE_BEGIN ccl_device_forceinline ccl_global float * film_pass_pixel_render_buffer(KernelGlobals kg, ConstIntegratorState state, ccl_global float *ccl_restrict render_buffer)
Definition write.h:24