Blender V5.0
kernel/camera/camera.h
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
2 *
3 * SPDX-License-Identifier: Apache-2.0 */
4
5#pragma once
6
7#include "kernel/globals.h"
8
13
14#ifdef WITH_OSL
15# include "kernel/osl/camera.h"
16#endif
17
CCL_NAMESPACE_BEGIN

/* Perspective Camera */

/* Sample a 2D point on the camera aperture, for depth of field.
 *
 * With zero blades the aperture is a uniform disk; otherwise a regular
 * polygon with the configured number of blades and rotation. The x axis is
 * scaled to account for anamorphic (non-circular) bokeh. */
ccl_device float2 camera_sample_aperture(ccl_constant KernelCamera *cam, const float2 rand)
{
  const float blades = cam->blades;
  float2 bokeh;

  if (blades == 0.0f) {
    /* Sample uniform disk. */
    bokeh = sample_uniform_disk(rand);
  }
  else {
    /* Sample regular polygon matching the aperture blades. */
    const float rotation = cam->bladesrotation;
    bokeh = regular_polygon_sample(blades, rotation, rand);
  }

  /* Anamorphic lens bokeh: squash/stretch along x. */
  bokeh.x *= cam->inv_aperture_ratio;

  return bokeh;
}
42
/* Generate a perspective camera ray for the given raster position.
 *
 * Applies perspective motion interpolation, depth of field, camera motion
 * blur, optional spherical stereo, ray differentials and near/far clipping.
 * Returns the camera sensitivity (always one for this model). */
ccl_device Spectrum camera_sample_perspective(KernelGlobals kg,
                                              const float2 raster_xy,
                                              const float2 rand_lens,
                                              ccl_private Ray *ray)
{
  /* Create ray from raster position. */
  const ProjectionTransform rastertocamera = kernel_data.cam.rastertocamera;
  const float3 raster = make_float3(raster_xy);
  float3 Pcamera = transform_perspective(&rastertocamera, raster);

  if (kernel_data.cam.have_perspective_motion) {
    /* TODO(sergey): Currently we interpolate projected coordinate which
     * gives nice looking result and which is simple, but is in fact a bit
     * different comparing to constructing projective matrix from an
     * interpolated field of view.
     */
    if (ray->time < 0.5f) {
      const ProjectionTransform rastertocamera_pre = kernel_data.cam.perspective_pre;
      const float3 Pcamera_pre = transform_perspective(&rastertocamera_pre, raster);
      Pcamera = interp(Pcamera_pre, Pcamera, ray->time * 2.0f);
    }
    else {
      const ProjectionTransform rastertocamera_post = kernel_data.cam.perspective_post;
      const float3 Pcamera_post = transform_perspective(&rastertocamera_post, raster);
      Pcamera = interp(Pcamera, Pcamera_post, (ray->time - 0.5f) * 2.0f);
    }
  }

  /* Pinhole ray: origin at the camera, direction through the raster point. */
  float3 P = zero_float3();
  float3 D = Pcamera;

  /* Modify ray for depth of field. */
  const float aperturesize = kernel_data.cam.aperturesize;

  if (aperturesize > 0.0f) {
    /* Sample point on aperture. */
    const float2 lens_uv = camera_sample_aperture(&kernel_data.cam, rand_lens) * aperturesize;

    /* Compute point on plane of focus. */
    const float ft = kernel_data.cam.focaldistance / D.z;
    const float3 Pfocus = D * ft;

    /* Update ray for effect of lens: shift origin onto the aperture and
     * aim at the in-focus point. */
    P = make_float3(lens_uv);
    D = normalize(Pfocus - P);
  }

  /* Transform ray from camera to world. */
  Transform cameratoworld = kernel_data.cam.cameratoworld;

  if (kernel_data.cam.num_motion_steps) {
    transform_motion_array_interpolate(&cameratoworld,
                                       kernel_data_array(camera_motion),
                                       kernel_data.cam.num_motion_steps,
                                       ray->time);
  }

  P = transform_point(&cameratoworld, P);
  D = normalize(transform_direction(&cameratoworld, D));

  const bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
  if (!use_stereo) {
    /* No stereo. */
    ray->P = P;
    ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
    const float3 Dcenter = transform_direction(&cameratoworld, Pcamera);
    const float3 Dcenter_normalized = normalize(Dcenter);

    /* TODO: can this be optimized to give compact differentials directly? */
    ray->dP = differential_zero_compact();
    differential3 dD;
    dD.dx = normalize(Dcenter + make_float3(kernel_data.cam.dx)) - Dcenter_normalized;
    dD.dy = normalize(Dcenter + make_float3(kernel_data.cam.dy)) - Dcenter_normalized;
    ray->dD = differential_make_compact(dD);
#endif
  }
  else {
    /* Spherical stereo. */
    spherical_stereo_transform(&kernel_data.cam, &P, &D);
    ray->P = P;
    ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
    /* Ray differentials, computed from scratch using the raster coordinates
     * because we don't want to be affected by depth of field. We compute
     * ray origin and direction for the center and two neighboring pixels
     * and simply take their differences. */
    const float3 Pnostereo = transform_point(&cameratoworld, zero_float3());

    float3 Pcenter = Pnostereo;
    float3 Dcenter = Pcamera;
    Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
    spherical_stereo_transform(&kernel_data.cam, &Pcenter, &Dcenter);

    float3 Px = Pnostereo;
    float3 Dx = transform_perspective(&rastertocamera,
                                      make_float3(raster.x + 1.0f, raster.y, 0.0f));
    Dx = normalize(transform_direction(&cameratoworld, Dx));
    spherical_stereo_transform(&kernel_data.cam, &Px, &Dx);

    differential3 dP;
    differential3 dD;

    dP.dx = Px - Pcenter;
    dD.dx = Dx - Dcenter;

    float3 Py = Pnostereo;
    float3 Dy = transform_perspective(&rastertocamera,
                                      make_float3(raster.x, raster.y + 1.0f, 0.0f));
    Dy = normalize(transform_direction(&cameratoworld, Dy));
    spherical_stereo_transform(&kernel_data.cam, &Py, &Dy);

    dP.dy = Py - Pcenter;
    dD.dy = Dy - Dcenter;
    ray->dD = differential_make_compact(dD);
    ray->dP = differential_make_compact(dP);
#endif
  }

  /* Clipping: advance the origin to the near clip plane, scaled so the
   * clip distances are measured along the camera z axis. */
  const float z_inv = 1.0f / normalize(Pcamera).z;
  const float nearclip = kernel_data.cam.nearclip * z_inv;
  ray->P += nearclip * ray->D;
  ray->dP += nearclip * ray->dD;
  ray->tmin = 0.0f;
  ray->tmax = kernel_data.cam.cliplength * z_inv;

  return one_spectrum();
}
174
/* Orthographic Camera */

/* Generate an orthographic camera ray for the given raster position.
 *
 * All rays share the camera-space direction (0, 0, 1); depth of field tilts
 * the direction towards the focal plane. Returns the camera sensitivity
 * (always one for this model). */
ccl_device Spectrum camera_sample_orthographic(KernelGlobals kg,
                                               const float2 raster_xy,
                                               const float2 rand_lens,
                                               ccl_private Ray *ray)
{
  /* Create ray from raster position. */
  const ProjectionTransform rastertocamera = kernel_data.cam.rastertocamera;
  const float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_xy));

  float3 P;
  float3 D = make_float3(0.0f, 0.0f, 1.0f);

  /* Modify ray for depth of field. */
  const float aperturesize = kernel_data.cam.aperturesize;

  if (aperturesize > 0.0f) {
    /* Sample point on aperture. */
    const float2 lens_uv = camera_sample_aperture(&kernel_data.cam, rand_lens) * aperturesize;

    /* Compute point on plane of focus. */
    const float3 Pfocus = D * kernel_data.cam.focaldistance;

    /* Update ray for effect of lens. */
    const float3 lens_uvw = make_float3(lens_uv);

    D = normalize(Pfocus - lens_uvw);
    /* Compute position the ray will be if it traveled until it intersected the near clip plane.
     * This allows for correct DOF while allowing near clipping. */
    P = Pcamera + lens_uvw + (D * (kernel_data.cam.nearclip / D.z));
  }
  else {
    P = Pcamera + make_float3(0.0f, 0.0f, kernel_data.cam.nearclip);
  }
  /* Transform ray from camera to world. */
  Transform cameratoworld = kernel_data.cam.cameratoworld;

  if (kernel_data.cam.num_motion_steps) {
    transform_motion_array_interpolate(&cameratoworld,
                                       kernel_data_array(camera_motion),
                                       kernel_data.cam.num_motion_steps,
                                       ray->time);
  }

  ray->P = transform_point(&cameratoworld, P);
  ray->D = normalize(transform_direction(&cameratoworld, D));

#ifdef __RAY_DIFFERENTIALS__
  /* Ray differential. Direction is constant, so only the origin differs
   * between neighboring pixels. */
  differential3 dP;
  dP.dx = make_float3(kernel_data.cam.dx);
  /* Fix: the y differential must use the y pixel delta; the original read
   * `cam.dx` here as well, which would collapse dP.dy onto dP.dx. */
  dP.dy = make_float3(kernel_data.cam.dy);

  ray->dP = differential_make_compact(dP);
  ray->dD = differential_zero_compact();
#endif

  /* Clipping. */
  ray->tmin = 0.0f;
  ray->tmax = kernel_data.cam.cliplength;

  return one_spectrum();
}
238
/* Custom Camera */

/* Convert a camera-space ray (with optional differentials for the center and
 * the +1 pixel neighbors in x and y) into the final world-space ray.
 *
 * Applies camera motion blur, optional spherical stereo, and near/far
 * clipping; writes the result into `ray`. */
ccl_device_inline void camera_sample_to_ray(ccl_constant KernelCamera *cam,
                                            const ccl_global DecomposedTransform *cam_motion,
                                            float3 P,
                                            float3 D,
#ifdef __RAY_DIFFERENTIALS__
                                            float3 Pcenter,
                                            float3 Dcenter,
                                            float3 Px,
                                            float3 Dx,
                                            float3 Py,
                                            float3 Dy,
#endif
                                            ccl_private Ray *ray)
{
  /* Transform the ray from camera to world. */
  Transform cameratoworld = cam->cameratoworld;

  if (cam->num_motion_steps) {
    transform_motion_array_interpolate(
        &cameratoworld, cam_motion, cam->num_motion_steps, ray->time);
  }

  /* Stereo transform. */
  const bool use_stereo = cam->interocular_offset != 0.0f;
  if (use_stereo) {
    spherical_stereo_transform(cam, &P, &D);
  }

  P = transform_point(&cameratoworld, P);
  D = normalize(transform_direction(&cameratoworld, D));

  ray->P = P;
  ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
  if (use_stereo) {
    spherical_stereo_transform(cam, &Pcenter, &Dcenter);
    spherical_stereo_transform(cam, &Px, &Dx);
    spherical_stereo_transform(cam, &Py, &Dy);

    differential3 dP;
    Pcenter = transform_point(&cameratoworld, Pcenter);
    dP.dx = transform_point(&cameratoworld, Px) - Pcenter;
    dP.dy = transform_point(&cameratoworld, Py) - Pcenter;
    ray->dP = differential_make_compact(dP);
  }
  else {
    /* Without stereo the origin differential is not needed here. */
    ray->dP = differential_zero_compact();
  }

  differential3 dD;
  Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
  dD.dx = normalize(transform_direction(&cameratoworld, Dx)) - Dcenter;
  dD.dy = normalize(transform_direction(&cameratoworld, Dy)) - Dcenter;
  ray->dD = differential_make_compact(dD);
#endif

  /* Clipping. */
  const float nearclip = cam->nearclip;
  ray->P += nearclip * ray->D;
  ray->dP += nearclip * ray->dD;
  ray->tmin = 0.0f;
  ray->tmax = cam->cliplength;
}
305
/* Generate a camera ray using a custom OSL camera shader.
 *
 * Returns the shader's transmission as the camera sensitivity; zero
 * indicates failed sampling (or that OSL support is compiled out). */
ccl_device_inline Spectrum camera_sample_custom(KernelGlobals kg,
                                                ccl_constant KernelCamera *cam,
                                                const ccl_global DecomposedTransform *cam_motion,
                                                const float2 raster,
                                                const float2 rand_lens,
                                                ccl_private Ray *ray)
{
#ifdef WITH_OSL
  /* Transform raster position to camera space. */
  const ProjectionTransform rastertocamera = cam->rastertocamera;
  float3 sensor = transform_perspective(&rastertocamera, make_float3(raster.x, raster.y, 0.0f));
  float3 dSdx = transform_perspective_direction(&rastertocamera, make_float3(1.0f, 0.0f, 0.0f));
  float3 dSdy = transform_perspective_direction(&rastertocamera, make_float3(0.0f, 1.0f, 0.0f));
  /* Execute OSL shader to sample position, direction and transmission. */
  packed_float3 P, dPdx, dPdy, D, dDdx, dDdy, throughput;
  throughput = osl_eval_camera(kg, sensor, dSdx, dSdy, rand_lens, P, dPdx, dPdy, D, dDdx, dDdy);
  /* Zero throughput indicates failed sampling. */
  if (is_zero(throughput)) {
    return zero_spectrum();
  }

  camera_sample_to_ray(cam,
                       cam_motion,
                       P,
                       D,
# ifdef __RAY_DIFFERENTIALS__
                       P,
                       D,
                       P + dPdx,
                       D + dDdx,
                       P + dPdy,
                       D + dDdy,
# endif
                       ray);

  return throughput;
#else
  (void)kg;
  (void)cam;
  (void)cam_motion;
  (void)raster;
  (void)rand_lens;
  (void)ray;
  return zero_spectrum();
#endif
}
352
/* Panorama Camera */

/* Map a raster position to a camera-space view direction for the panorama
 * camera model. */
ccl_device_inline float3 camera_panorama_direction(ccl_constant KernelCamera *cam,
                                                   const float x,
                                                   const float y)
{
  const ProjectionTransform rastertocamera = cam->rastertocamera;
  const float3 Pcamera = transform_perspective(&rastertocamera, make_float3(x, y, 0.0f));
  return panorama_to_direction(cam, Pcamera.x, Pcamera.y);
}
363
/* Generate a panorama camera ray for the given raster position.
 *
 * Returns zero sensitivity when the raster position maps to no valid
 * direction (e.g. outside the fisheye lens), one otherwise. */
ccl_device_inline Spectrum camera_sample_panorama(ccl_constant KernelCamera *cam,
                                                  const ccl_global DecomposedTransform *cam_motion,
                                                  const float2 raster,
                                                  const float2 rand_lens,
                                                  ccl_private Ray *ray)
{
  /* Create ray from raster position. */
  float3 P = zero_float3();
  float3 D = camera_panorama_direction(cam, raster.x, raster.y);

#ifdef __RAY_DIFFERENTIALS__
  /* Ray differentials, computed from scratch using the raster coordinates
   * because we don't want to be affected by depth of field. We compute
   * ray origin and direction for the center and two neighboring pixels
   * and simply take their differences. */
  float3 Dcenter = D;
  float3 Dx = camera_panorama_direction(cam, raster.x + 1.0f, raster.y);
  float3 Dy = camera_panorama_direction(cam, raster.x, raster.y + 1.0f);
#endif

  /* Here, zero indicates failed sampling, e.g. when the raster position is outside
   * the fisheye lens. */
  if (is_zero(D)) {
    return zero_spectrum();
  }

  /* Perform depth-of-field sampling. */
  const float aperturesize = cam->aperturesize;

  if (aperturesize > 0.0f) {
    /* Sample a point on the aperture. */
    const float2 lens_uv = camera_sample_aperture(cam, rand_lens) * aperturesize;

    /* Compute the intersection of the original ray with the focal plane. */
    const float3 Dfocus = normalize(D);
    const float3 Pfocus = Dfocus * cam->focaldistance;

    /* Calculate orthonormal coordinate system perpendicular to Dfocus. */
    const float3 U = normalize(make_float3(1.0f, 0.0f, 0.0f) - Dfocus.x * Dfocus);
    const float3 V = normalize(cross(Dfocus, U));

    /* Compute new ray by shifting its origin (to account for aperture position) and
     * setting its direction to meet the original ray at the focal plane. */
    P = U * lens_uv.x + V * lens_uv.y;
    D = normalize(Pfocus - P);
  }

  camera_sample_to_ray(cam,
                       cam_motion,
                       P,
                       D,
#ifdef __RAY_DIFFERENTIALS__
                       zero_float3(),
                       Dcenter,
                       zero_float3(),
                       Dx,
                       zero_float3(),
                       Dy,
#endif
                       ray);

  return one_spectrum();
}
427
/* Common */

/* Generates an outgoing camera ray for the given raster position and random inputs.
 * Applies pixel filtering, shutter-time sampling (including rolling shutter),
 * then dispatches to the camera-model-specific sampler.
 * Returns camera sensitivity (used to initialize path throughput). */
ccl_device_inline Spectrum camera_sample(KernelGlobals kg,
                                         const int x,
                                         const int y,
                                         const float2 filter_uv,
                                         const float time,
                                         const float2 lens_uv,
                                         ccl_private Ray *ray)
{
  /* Pixel filter: jitter the raster position through the importance table. */
  const int filter_table_offset = kernel_data.tables.filter_table_offset;
  const float2 raster = make_float2(
      x + lookup_table_read(kg, filter_uv.x, filter_table_offset, FILTER_TABLE_SIZE),
      y + lookup_table_read(kg, filter_uv.y, filter_table_offset, FILTER_TABLE_SIZE));

  /* Motion blur: -1 shutter time means motion blur is disabled. */
  if (kernel_data.cam.shuttertime == -1.0f) {
    ray->time = 0.5f;
  }
  else {
    /* TODO(sergey): Such lookup is unneeded when there's rolling shutter
     * effect in use but rolling shutter duration is set to 0.0.
     */
    const int shutter_table_offset = kernel_data.cam.shutter_table_offset;
    ray->time = lookup_table_read(kg, time, shutter_table_offset, SHUTTER_TABLE_SIZE);
    /* TODO(sergey): Currently single rolling shutter effect type only
     * where scan-lines are acquired from top to bottom and whole scan-line
     * is acquired at once (no delay in acquisition happens between pixels
     * of single scan-line).
     *
     * Might want to support more models in the future.
     */
    if (kernel_data.cam.rolling_shutter_type) {
      /* Time corresponding to a fully rolling shutter only effect:
       * top of the frame is time 0.0, bottom of the frame is time 1.0.
       */
      const float time = 1.0f - (float)y / kernel_data.cam.height;
      const float duration = kernel_data.cam.rolling_shutter_duration;
      if (duration != 0.0f) {
        /* This isn't fully physical correct, but lets us to have simple
         * controls in the interface. The idea here is basically sort of
         * linear interpolation between how much rolling shutter effect
         * exist on the frame and how much of it is a motion blur effect.
         */
        ray->time = (ray->time - 0.5f) * duration;
        ray->time += (time - 0.5f) * (1.0f - duration) + 0.5f;
      }
      else {
        ray->time = time;
      }
    }
  }

  /* Sample ray for the active camera model. */
  if (kernel_data.cam.type == CAMERA_PERSPECTIVE) {
    return camera_sample_perspective(kg, raster, lens_uv, ray);
  }
  if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
    return camera_sample_orthographic(kg, raster, lens_uv, ray);
  }
  if (kernel_data.cam.type == CAMERA_PANORAMA) {
    const ccl_global DecomposedTransform *cam_motion = kernel_data_array(camera_motion);
    return camera_sample_panorama(&kernel_data.cam, cam_motion, raster, lens_uv, ray);
  }
  if (kernel_data.cam.type == CAMERA_CUSTOM) {
    const ccl_global DecomposedTransform *cam_motion = kernel_data_array(camera_motion);
    return camera_sample_custom(kg, &kernel_data.cam, cam_motion, raster, lens_uv, ray);
  }
  kernel_assert(false);
  return zero_spectrum();
}
502
/* Utilities */

/* Return the camera position in world space (translation column of the
 * camera-to-world transform). */
ccl_device_inline float3 camera_position(KernelGlobals kg)
{
  const Transform cameratoworld = kernel_data.cam.cameratoworld;
  return make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
}
510
/* Distance from the camera to world-space point P.
 *
 * For orthographic cameras this is the distance along the view direction;
 * for all other models the Euclidean distance to the camera position. */
ccl_device_inline float camera_distance(KernelGlobals kg, const float3 P)
{
  const Transform cameratoworld = kernel_data.cam.cameratoworld;
  const float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);

  if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
    const float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
    return fabsf(dot((P - camP), camD));
  }
  return len(P - camP);
}
522
/* Z depth of world-space point P as seen from the camera.
 *
 * Perspective/orthographic cameras use the camera-space z coordinate;
 * other models fall back to the Euclidean distance. */
ccl_device_inline float camera_z_depth(KernelGlobals kg, const float3 P)
{
  if (kernel_data.cam.type == CAMERA_PERSPECTIVE || kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
    const Transform worldtocamera = kernel_data.cam.worldtocamera;
    return transform_point(&worldtocamera, P).z;
  }
  const Transform cameratoworld = kernel_data.cam.cameratoworld;
  const float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
  return len(P - camP);
}
533
/* Normalized direction from world-space point P towards the camera.
 *
 * For orthographic cameras all such directions are the negated view
 * direction; otherwise point towards the camera position. */
ccl_device_inline float3 camera_direction_from_point(KernelGlobals kg, const float3 P)
{
  const Transform cameratoworld = kernel_data.cam.cameratoworld;

  if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
    const float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
    return -camD;
  }
  const float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
  return normalize(camP - P);
}
545
/* Project world-space point P into normalized device coordinates for the
 * active camera model.
 *
 * NOTE(review): two statements in this body (the direction-normalizing else
 * branch and the panorama return) were reconstructed from context — verify
 * against the upstream source. */
ccl_device_inline float3 camera_world_to_ndc(KernelGlobals kg,
                                             ccl_private ShaderData *sd,
                                             float3 P)
{
  if (kernel_data.cam.type == CAMERA_PERSPECTIVE || kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
    /* perspective / ortho */
    if (sd->object == PRIM_NONE && kernel_data.cam.type == CAMERA_PERSPECTIVE) {
      P += camera_position(kg);
    }

    const ProjectionTransform tfm = kernel_data.cam.worldtondc;
    return transform_perspective(&tfm, P);
  }
  /* panorama or custom */
  const Transform tfm = kernel_data.cam.worldtocamera;

  if (sd->object != OBJECT_NONE) {
    P = normalize(transform_point(&tfm, P));
  }
  else {
    /* Background points have no object: treat P as a direction. */
    P = normalize(transform_direction(&tfm, P));
  }

  if (kernel_data.cam.type == CAMERA_PANORAMA) {
    return make_float3(direction_to_panorama(&kernel_data.cam, P));
  }
  /* TODO: Fall back to camera coordinates until we have inverse mappings for custom cameras. */
  return P;
}
575
#define D
#define U
nullptr float
dot(value.rgb, luminance_coefficients)") DEFINE_VALUE("REDUCE(lhs
ccl_device_inline float2 direction_to_panorama(ccl_constant KernelCamera *cam, const float3 dir)
ccl_device_inline void spherical_stereo_transform(ccl_constant KernelCamera *cam, ccl_private float3 *P, ccl_private float3 *D)
ccl_device_inline float3 panorama_to_direction(ccl_constant KernelCamera *cam, const float u, float v)
ccl_device_forceinline float3 dPdx(const ccl_private ShaderData *sd)
ccl_device_forceinline float3 dPdy(const ccl_private ShaderData *sd)
ccl_device_inline float3 transform_perspective_direction(const ccl_private ProjectionTransform *t, const float3 a)
CCL_NAMESPACE_END CCL_NAMESPACE_BEGIN ccl_device_inline float3 transform_perspective(const ccl_private ProjectionTransform *t, const float3 a)
#define kernel_assert(cond)
#define kernel_data
#define one_spectrum
#define FILTER_TABLE_SIZE
#define PRIM_NONE
#define ccl_constant
#define zero_spectrum
#define OBJECT_NONE
#define ccl_private
const ThreadKernelGlobalsCPU * KernelGlobals
#define ccl_device_inline
#define __RAY_DIFFERENTIALS__
#define SHUTTER_TABLE_SIZE
#define ccl_global
#define kernel_data_array(name)
#define CCL_NAMESPACE_END
ccl_device_forceinline float3 make_float3(const float x, const float y, const float z)
ccl_device_forceinline float differential_make_compact(const float dD)
ccl_device_forceinline float differential_zero_compact()
VecBase< float, D > normalize(VecOp< float, D >) RET
VecBase< float, 3 > cross(VecOp< float, 3 >, VecOp< float, 3 >) RET
ccl_device_inline void camera_sample_to_ray(ccl_constant KernelCamera *cam, const ccl_global DecomposedTransform *cam_motion, float3 P, float3 D, ccl_private Ray *ray)
CCL_NAMESPACE_BEGIN ccl_device float2 camera_sample_aperture(ccl_constant KernelCamera *cam, const float2 rand)
ccl_device Spectrum camera_sample_perspective(KernelGlobals kg, const float2 raster_xy, const float2 rand_lens, ccl_private Ray *ray)
ccl_device_inline float3 camera_position(KernelGlobals kg)
ccl_device_inline Spectrum camera_sample_custom(KernelGlobals kg, ccl_constant KernelCamera *cam, const ccl_global DecomposedTransform *cam_motion, const float2 raster, const float2 rand_lens, ccl_private Ray *ray)
ccl_device_inline float3 camera_panorama_direction(ccl_constant KernelCamera *cam, const float x, const float y)
ccl_device_inline Spectrum camera_sample_panorama(ccl_constant KernelCamera *cam, const ccl_global DecomposedTransform *cam_motion, const float2 raster, const float2 rand_lens, ccl_private Ray *ray)
ccl_device_inline float camera_z_depth(KernelGlobals kg, const float3 P)
ccl_device_inline float3 camera_world_to_ndc(KernelGlobals kg, ccl_private ShaderData *sd, float3 P)
ccl_device_inline float camera_distance(KernelGlobals kg, const float3 P)
ccl_device Spectrum camera_sample_orthographic(KernelGlobals kg, const float2 raster_xy, const float2 rand_lens, ccl_private Ray *ray)
ccl_device_inline Spectrum camera_sample(KernelGlobals kg, const int x, const int y, const float2 filter_uv, const float time, const float2 lens_uv, ccl_private Ray *ray)
ccl_device_inline float3 camera_direction_from_point(KernelGlobals kg, const float3 P)
packed_float3 osl_eval_camera(KernelGlobals kg, const packed_float3 sensor, const packed_float3 dSdx, const packed_float3 dSdy, const float2 rand_lens, packed_float3 &P, packed_float3 &dPdx, packed_float3 &dPdy, packed_float3 &D, packed_float3 &dDdx, packed_float3 &dDdy)
@ CAMERA_PERSPECTIVE
@ CAMERA_PANORAMA
@ CAMERA_CUSTOM
@ CAMERA_ORTHOGRAPHIC
CCL_NAMESPACE_BEGIN ccl_device float lookup_table_read(KernelGlobals kg, float x, const int offset, const int size)
ccl_device_inline float interp(const float a, const float b, const float t)
Definition math_base.h:502
ccl_device_inline bool is_zero(const float2 a)
CCL_NAMESPACE_BEGIN ccl_device_inline float3 zero_float3()
Definition math_float3.h:17
#define fabsf
#define ccl_device
#define make_float2
CCL_NAMESPACE_BEGIN ccl_device float2 sample_uniform_disk(const float2 rand)
ccl_device float2 regular_polygon_sample(const float corners, float rotation, const float2 rand)
float4 y
Definition transform.h:23
float4 x
Definition transform.h:23
float4 z
Definition transform.h:23
float x
float y
float z
Definition sky_math.h:136
float y
Definition sky_math.h:136
float x
Definition sky_math.h:136
float z
Definition sky_math.h:225
float w
Definition sky_math.h:225
ccl_device void transform_motion_array_interpolate(ccl_private Transform *tfm, const ccl_global DecomposedTransform *motion, const uint numsteps, const float time)
Definition transform.h:591
ccl_device_inline float3 transform_direction(const ccl_private Transform *t, const float3 a)
Definition transform.h:127
ccl_device_inline float3 transform_point(const ccl_private Transform *t, const float3 a)
Definition transform.h:56
float3 Spectrum
uint len
CCL_NAMESPACE_BEGIN struct Window V