Blender V4.3
scene/camera.cpp
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
2 *
3 * SPDX-License-Identifier: Apache-2.0 */
4
5#include "scene/camera.h"
6#include "scene/mesh.h"
7#include "scene/object.h"
8#include "scene/scene.h"
9#include "scene/stats.h"
10#include "scene/tables.h"
11
12#include "device/device.h"
13
14#include "util/foreach.h"
15#include "util/function.h"
16#include "util/log.h"
17#include "util/math_cdf.h"
18#include "util/task.h"
19#include "util/time.h"
20#include "util/vector.h"
21
22/* needed for calculating differentials */
25
27
29
30static float shutter_curve_eval(float x, array<float> &shutter_curve)
31{
32 if (shutter_curve.size() == 0) {
33 return 1.0f;
34 }
35
36 x = saturatef(x) * shutter_curve.size() - 1;
37 int index = (int)x;
38 float frac = x - index;
39 if (index < shutter_curve.size() - 1) {
40 return mix(shutter_curve[index], shutter_curve[index + 1], frac);
41 }
42 else {
43 return shutter_curve[shutter_curve.size() - 1];
44 }
45}
46
/* NOTE(review): the `NODE_DEFINE(Camera)` macro header that should precede this
 * brace appears to have been lost in extraction (the `NODE_DEFINE(structname)`
 * macro is referenced by this file) — confirm against upstream. */
{
  /* Declare the camera node type and all of its sockets (public properties). */
  NodeType *type = NodeType::add("camera", create);

  /* Shutter / motion blur configuration. */
  SOCKET_FLOAT(shuttertime, "Shutter Time", 1.0f);

  static NodeEnum motion_position_enum;
  motion_position_enum.insert("start", MOTION_POSITION_START);
  motion_position_enum.insert("center", MOTION_POSITION_CENTER);
  motion_position_enum.insert("end", MOTION_POSITION_END);
  SOCKET_ENUM(motion_position, "Motion Position", motion_position_enum, MOTION_POSITION_CENTER);

  static NodeEnum rolling_shutter_type_enum;
  rolling_shutter_type_enum.insert("none", ROLLING_SHUTTER_NONE);
  rolling_shutter_type_enum.insert("top", ROLLING_SHUTTER_TOP);
  SOCKET_ENUM(rolling_shutter_type,
              "Rolling Shutter Type",
              rolling_shutter_type_enum,
              ROLLING_SHUTTER_NONE);
  SOCKET_FLOAT(rolling_shutter_duration, "Rolling Shutter Duration", 0.1f);

  SOCKET_FLOAT_ARRAY(shutter_curve, "Shutter Curve", array<float>());

  /* Depth of field. */
  SOCKET_FLOAT(aperturesize, "Aperture Size", 0.0f);
  SOCKET_FLOAT(focaldistance, "Focal Distance", 10.0f);
  SOCKET_UINT(blades, "Blades", 0);
  SOCKET_FLOAT(bladesrotation, "Blades Rotation", 0.0f);

  /* Camera transform and per-step motion transforms. */
  SOCKET_TRANSFORM(matrix, "Matrix", transform_identity());
  SOCKET_TRANSFORM_ARRAY(motion, "Motion", array<Transform>());

  SOCKET_FLOAT(aperture_ratio, "Aperture Ratio", 1.0f);

  static NodeEnum type_enum;
  type_enum.insert("perspective", CAMERA_PERSPECTIVE);
  /* NOTE(review): "orthograph" (not "orthographic") is the enum key as written
   * here; renaming it would break any caller addressing the value by name, so
   * it is kept byte-identical — verify against upstream before changing. */
  type_enum.insert("orthograph", CAMERA_ORTHOGRAPHIC);
  type_enum.insert("panorama", CAMERA_PANORAMA);
  SOCKET_ENUM(camera_type, "Type", type_enum, CAMERA_PERSPECTIVE);

  /* Panorama projection sub-types. */
  static NodeEnum panorama_type_enum;
  panorama_type_enum.insert("equirectangular", PANORAMA_EQUIRECTANGULAR);
  panorama_type_enum.insert("equiangular_cubemap_face", PANORAMA_EQUIANGULAR_CUBEMAP_FACE);
  panorama_type_enum.insert("mirrorball", PANORAMA_MIRRORBALL);
  panorama_type_enum.insert("fisheye_equidistant", PANORAMA_FISHEYE_EQUIDISTANT);
  panorama_type_enum.insert("fisheye_equisolid", PANORAMA_FISHEYE_EQUISOLID);
  panorama_type_enum.insert("fisheye_lens_polynomial", PANORAMA_FISHEYE_LENS_POLYNOMIAL);
  panorama_type_enum.insert("panorama_central_cylindrical", PANORAMA_CENTRAL_CYLINDRICAL);
  SOCKET_ENUM(panorama_type, "Panorama Type", panorama_type_enum, PANORAMA_EQUIRECTANGULAR);

  /* Angles in radians. */
  SOCKET_FLOAT(fisheye_fov, "Fisheye FOV", M_PI_F);
  SOCKET_FLOAT(fisheye_lens, "Fisheye Lens", 10.5f);
  SOCKET_FLOAT(latitude_min, "Latitude Min", -M_PI_2_F);
  SOCKET_FLOAT(latitude_max, "Latitude Max", M_PI_2_F);
  SOCKET_FLOAT(longitude_min, "Longitude Min", -M_PI_F);
  SOCKET_FLOAT(longitude_max, "Longitude Max", M_PI_F);
  SOCKET_FLOAT(fov, "FOV", M_PI_4_F);
  /* FOV at the start/end of the shutter interval, for perspective motion. */
  SOCKET_FLOAT(fov_pre, "FOV Pre", M_PI_4_F);
  SOCKET_FLOAT(fov_post, "FOV Post", M_PI_4_F);

  /* Fisheye lens polynomial coefficients. */
  SOCKET_FLOAT(fisheye_polynomial_k0, "Fisheye Polynomial K0", 0.0f);
  SOCKET_FLOAT(fisheye_polynomial_k1, "Fisheye Polynomial K1", 0.0f);
  SOCKET_FLOAT(fisheye_polynomial_k2, "Fisheye Polynomial K2", 0.0f);
  SOCKET_FLOAT(fisheye_polynomial_k3, "Fisheye Polynomial K3", 0.0f);
  SOCKET_FLOAT(fisheye_polynomial_k4, "Fisheye Polynomial K4", 0.0f);

  SOCKET_FLOAT(central_cylindrical_range_u_min, "Central Cylindrical Range U Min", -M_PI_F);
  SOCKET_FLOAT(central_cylindrical_range_u_max, "Central Cylindrical Range U Max", M_PI_F);
  SOCKET_FLOAT(central_cylindrical_range_v_min, "Central Cylindrical Range V Min", -1.0f);
  SOCKET_FLOAT(central_cylindrical_range_v_max, "Central Cylindrical Range V Max", 1.0f);

  /* Stereoscopy. */
  static NodeEnum stereo_eye_enum;
  stereo_eye_enum.insert("none", STEREO_NONE);
  stereo_eye_enum.insert("left", STEREO_LEFT);
  stereo_eye_enum.insert("right", STEREO_RIGHT);
  SOCKET_ENUM(stereo_eye, "Stereo Eye", stereo_eye_enum, STEREO_NONE);

  SOCKET_BOOLEAN(use_spherical_stereo, "Use Spherical Stereo", false);

  SOCKET_FLOAT(interocular_distance, "Interocular Distance", 0.065f);
  SOCKET_FLOAT(convergence_distance, "Convergence Distance", 30.0f * 0.065f);

  SOCKET_BOOLEAN(use_pole_merge, "Use Pole Merge", false);
  SOCKET_FLOAT(pole_merge_angle_from, "Pole Merge Angle From", 60.0f * M_PI_F / 180.0f);
  SOCKET_FLOAT(pole_merge_angle_to, "Pole Merge Angle To", 75.0f * M_PI_F / 180.0f);

  /* Sensor size in meters (36x24mm full frame default). */
  SOCKET_FLOAT(sensorwidth, "Sensor Width", 0.036f);
  SOCKET_FLOAT(sensorheight, "Sensor Height", 0.024f);

  SOCKET_FLOAT(nearclip, "Near Clip", 1e-5f);
  SOCKET_FLOAT(farclip, "Far Clip", 1e5f);

  /* View plane and render-border rectangles. */
  SOCKET_FLOAT(viewplane.left, "Viewplane Left", 0);
  SOCKET_FLOAT(viewplane.right, "Viewplane Right", 0);
  SOCKET_FLOAT(viewplane.bottom, "Viewplane Bottom", 0);
  SOCKET_FLOAT(viewplane.top, "Viewplane Top", 0);

  SOCKET_FLOAT(border.left, "Border Left", 0);
  SOCKET_FLOAT(border.right, "Border Right", 0);
  SOCKET_FLOAT(border.bottom, "Border Bottom", 0);
  SOCKET_FLOAT(border.top, "Border Top", 0);

  SOCKET_FLOAT(viewport_camera_border.left, "Viewport Border Left", 0);
  SOCKET_FLOAT(viewport_camera_border.right, "Viewport Border Right", 0);
  SOCKET_FLOAT(viewport_camera_border.bottom, "Viewport Border Bottom", 0);
  SOCKET_FLOAT(viewport_camera_border.top, "Viewport Border Top", 0);

  SOCKET_FLOAT(offscreen_dicing_scale, "Offscreen Dicing Scale", 1.0f);

  SOCKET_INT(full_width, "Full Width", 1024);
  SOCKET_INT(full_height, "Full Height", 512);

  SOCKET_BOOLEAN(use_perspective_motion, "Use Perspective Motion", false);

  return type;
}
162
/* Constructor: initialize runtime (non-socket) state to sane defaults.
 * Socket-backed members get their defaults from the node type. */
Camera::Camera() : Node(get_node_type())
{
  /* NOTE(review): several initialization lines in this constructor appear to
   * have been elided by extraction (at least one right after the opening
   * brace, a run between the shutter-curve loop and the differentials, and
   * one before the memset) — confirm against upstream. */

  width = 1024;
  height = 512;

  use_perspective_motion = false;

  /* Default shutter curve: constant 1.0, i.e. a fully open shutter. */
  shutter_curve.resize(RAMP_TABLE_SIZE);
  for (int i = 0; i < shutter_curve.size(); ++i) {
    shutter_curve[i] = 1.0f;
  }

  /* Raster differentials start at zero until update() computes them. */
  dx = zero_float3();
  dy = zero_float3();

  /* Force a full device update on first use. */
  need_device_update = true;
  need_flags_update = true;

  /* Kernel camera data is plain-old-data; zero it until update() fills it. */
  memset((void *)&kernel_camera, 0, sizeof(kernel_camera));
}
197
199
/* NOTE(review): the signature line for this body appears to have been lost in
 * extraction; the class member index lists `void compute_auto_viewplane()`. */
{
  /* Derive the view plane rectangle from the render resolution. */
  if (camera_type == CAMERA_PANORAMA) {
    /* Panorama uses normalized [0, 1] coordinates regardless of aspect. */
    viewplane.left = 0.0f;
    viewplane.right = 1.0f;
    viewplane.bottom = 0.0f;
    viewplane.top = 1.0f;
  }
  else {
    /* Fit a symmetric view plane to the image aspect ratio: the shorter
     * axis spans [-1, 1], the longer axis is scaled by the aspect. */
    float aspect = (float)full_width / (float)full_height;
    if (full_width >= full_height) {
      viewplane.left = -aspect;
      viewplane.right = aspect;
      viewplane.bottom = -1.0f;
      viewplane.top = 1.0f;
    }
    else {
      viewplane.left = -1.0f;
      viewplane.right = 1.0f;
      viewplane.bottom = -1.0f / aspect;
      viewplane.top = 1.0f / aspect;
    }
  }
}
224
/* NOTE(review): the signature line for this body appears to have been lost in
 * extraction; the class member index lists `void update(Scene *scene)`.
 *
 * Recomputes all derived camera state (transform chain, differentials,
 * frustum normals) and fills the kernel camera struct; actual device upload
 * happens later in device_update(). */
{
  Scene::MotionType need_motion = scene->need_motion();

  if (previous_need_motion != need_motion) {
    /* scene's motion model could have been changed since previous device
     * camera update this could happen for example in case when one render
     * layer has got motion pass and another not */
    need_device_update = true;
  }

  /* Nothing to recompute if no socket changed since the last update. */
  if (!is_modified()) {
    return;
  }

  /* Record the time spent here into the scene update statistics. */
  scoped_callback_timer timer([scene](double time) {
    if (scene->update_stats) {
      scene->update_stats->camera.times.add_entry({"update", time});
    }
  });

  /* Full viewport to camera border in the viewport. */
  Transform fulltoborder = transform_from_viewplane(viewport_camera_border);
  Transform bordertofull = transform_inverse(fulltoborder);

  /* NDC to raster. */
  Transform ndctoraster = transform_scale(width, height, 1.0f) * bordertofull;
  Transform full_ndctoraster = transform_scale(full_width, full_height, 1.0f) * bordertofull;

  /* Raster to screen. */
  Transform screentondc = fulltoborder * transform_from_viewplane(viewplane);

  Transform screentoraster = ndctoraster * screentondc;
  Transform rastertoscreen = transform_inverse(screentoraster);
  Transform full_screentoraster = full_ndctoraster * screentondc;
  Transform full_rastertoscreen = transform_inverse(full_screentoraster);

  /* Screen to camera. */
  ProjectionTransform cameratoscreen;
  if (camera_type == CAMERA_PERSPECTIVE) {
    cameratoscreen = projection_perspective(fov, nearclip, farclip);
  }
  else if (camera_type == CAMERA_ORTHOGRAPHIC) {
    cameratoscreen = projection_orthographic(nearclip, farclip);
  }
  else {
    /* Panorama: ray generation happens procedurally in the kernel. */
    cameratoscreen = projection_identity();
  }

  ProjectionTransform screentocamera = projection_inverse(cameratoscreen);

  rastertocamera = screentocamera * rastertoscreen;
  full_rastertocamera = screentocamera * full_rastertoscreen;

  cameratoworld = matrix;
  screentoworld = cameratoworld * screentocamera;
  rastertoworld = cameratoworld * rastertocamera;
  ndctoworld = rastertoworld * ndctoraster;

  /* note we recompose matrices instead of taking inverses of the above, this
   * is needed to avoid inverting near degenerate matrices that happen due to
   * precision issues with large scenes */
  worldtocamera = transform_inverse(matrix);
  worldtoscreen = cameratoscreen * worldtocamera;
  worldtondc = screentondc * worldtoscreen;
  worldtoraster = ndctoraster * worldtondc;

  /* differentials */
  if (camera_type == CAMERA_ORTHOGRAPHIC) {
    /* Orthographic rays are parallel: one-pixel steps map to fixed offsets. */
    dx = transform_perspective_direction(&rastertocamera, make_float3(1, 0, 0));
    dy = transform_perspective_direction(&rastertocamera, make_float3(0, 1, 0));
    full_dx = transform_perspective_direction(&full_rastertocamera, make_float3(1, 0, 0));
    full_dy = transform_perspective_direction(&full_rastertocamera, make_float3(0, 1, 0));
  }
  else if (camera_type == CAMERA_PERSPECTIVE) {
    /* Difference between projecting a one-pixel offset and the origin. */
    dx = transform_perspective(&rastertocamera, make_float3(1, 0, 0)) -
         transform_perspective(&rastertocamera, make_float3(0, 0, 0));
    dy = transform_perspective(&rastertocamera, make_float3(0, 1, 0)) -
         transform_perspective(&rastertocamera, make_float3(0, 0, 0));
    full_dx = transform_perspective(&full_rastertocamera, make_float3(1, 0, 0)) -
              transform_perspective(&full_rastertocamera, make_float3(0, 0, 0));
    full_dy = transform_perspective(&full_rastertocamera, make_float3(0, 1, 0)) -
              transform_perspective(&full_rastertocamera, make_float3(0, 0, 0));
  }
  else {
    dx = zero_float3();
    dy = zero_float3();
  }

  /* Bring differentials into world space. */
  dx = transform_direction(&cameratoworld, dx);
  dy = transform_direction(&cameratoworld, dy);
  full_dx = transform_direction(&cameratoworld, full_dx);
  full_dy = transform_direction(&cameratoworld, full_dy);

  if (camera_type == CAMERA_PERSPECTIVE) {
    /* Camera-space frustum side-plane normals, used by world_to_raster_size()
     * for the offscreen dicing scale. */
    float3 v = transform_perspective(&full_rastertocamera,
                                     make_float3(full_width, full_height, 1.0f));
    frustum_right_normal = normalize(make_float3(v.z, 0.0f, -v.x));
    frustum_top_normal = normalize(make_float3(0.0f, v.z, -v.y));

    v = transform_perspective(&full_rastertocamera, make_float3(0.0f, 0.0f, 1.0f));
    frustum_left_normal = normalize(make_float3(-v.z, 0.0f, v.x));
    frustum_bottom_normal = normalize(make_float3(0.0f, -v.z, v.y));
  }

  /* Compute kernel camera data. */
  KernelCamera *kcam = &kernel_camera;

  /* store matrices */
  kcam->screentoworld = screentoworld;
  kcam->rastertoworld = rastertoworld;
  kcam->rastertocamera = rastertocamera;
  kcam->cameratoworld = cameratoworld;
  kcam->worldtocamera = worldtocamera;
  kcam->worldtoscreen = worldtoscreen;
  kcam->worldtoraster = worldtoraster;
  kcam->worldtondc = worldtondc;
  kcam->ndctoworld = ndctoworld;

  /* camera motion */
  kcam->num_motion_steps = 0;
  kcam->have_perspective_motion = 0;
  kernel_camera_motion.clear();

  /* Test if any of the transforms are actually different. */
  bool have_motion = false;
  for (size_t i = 0; i < motion.size(); i++) {
    have_motion = have_motion || motion[i] != matrix;
  }

  if (need_motion == Scene::MOTION_PASS) {
    if (camera_type == CAMERA_PANORAMA) {
      if (have_motion) {
        kcam->motion_pass_pre = transform_inverse(motion[0]);
        kcam->motion_pass_post = transform_inverse(motion[motion.size() - 1]);
      }
      else {
        kcam->motion_pass_pre = kcam->worldtocamera;
        kcam->motion_pass_post = kcam->worldtocamera;
      }
    }
    else {
      if (have_motion || fov != fov_pre || fov != fov_post) {
        /* Note the values for perspective_pre/perspective_post calculated for MOTION_PASS are
         * different to those calculated for MOTION_BLUR below, so the code has not been combined.
         */
        ProjectionTransform cameratoscreen_pre = projection_perspective(
            fov_pre, nearclip, farclip);
        ProjectionTransform cameratoscreen_post = projection_perspective(
            fov_post, nearclip, farclip);
        ProjectionTransform cameratoraster_pre = screentoraster * cameratoscreen_pre;
        ProjectionTransform cameratoraster_post = screentoraster * cameratoscreen_post;
        kcam->perspective_pre = cameratoraster_pre * transform_inverse(motion[0]);
        kcam->perspective_post = cameratoraster_post *
                                 transform_inverse(motion[motion.size() - 1]);
      }
      else {
        kcam->perspective_pre = worldtoraster;
        kcam->perspective_post = worldtoraster;
      }
    }
  }
  else if (need_motion == Scene::MOTION_BLUR) {
    if (have_motion) {
      /* Decompose per-step transforms for interpolation in the kernel. */
      kernel_camera_motion.resize(motion.size());
      transform_motion_decompose(kernel_camera_motion.data(), motion.data(), motion.size());
      kcam->num_motion_steps = motion.size();
    }

    /* TODO(sergey): Support other types of camera. */
    if (use_perspective_motion && camera_type == CAMERA_PERSPECTIVE) {
      ProjectionTransform screentocamera_pre = projection_inverse(
          projection_perspective(fov_pre, nearclip, farclip));
      ProjectionTransform screentocamera_post = projection_inverse(
          projection_perspective(fov_post, nearclip, farclip));

      kcam->perspective_pre = screentocamera_pre * rastertoscreen;
      kcam->perspective_post = screentocamera_post * rastertoscreen;
      kcam->have_perspective_motion = 1;
    }
  }

  /* depth of field */
  kcam->aperturesize = aperturesize;
  kcam->focaldistance = max(focaldistance, 1e-5f);
  /* Fewer than 3 blades cannot form a polygon: fall back to a round bokeh. */
  kcam->blades = (blades < 3) ? 0.0f : blades;
  kcam->bladesrotation = bladesrotation;

  /* motion blur; -1 signals "no motion blur" to the kernel. */
  kcam->shuttertime = (need_motion == Scene::MOTION_BLUR) ? shuttertime : -1.0f;
  kcam->motion_position = motion_position;

  /* type */
  kcam->type = camera_type;

  /* anamorphic lens bokeh */
  kcam->inv_aperture_ratio = 1.0f / aperture_ratio;

  /* panorama */
  kcam->panorama_type = panorama_type;
  kcam->fisheye_fov = fisheye_fov;
  kcam->fisheye_lens = fisheye_lens;
  kcam->equirectangular_range = make_float4(longitude_min - longitude_max,
                                            -longitude_min,
                                            latitude_min - latitude_max,
                                            -latitude_min + M_PI_2_F);
  kcam->fisheye_lens_polynomial_bias = fisheye_polynomial_k0;
  /* NOTE(review): the head of this statement (presumably
   * `kcam->fisheye_lens_polynomial_coefficients = make_float4(` given the
   * KernelCamera member of that name) appears elided by extraction; the
   * dangling argument line below belongs to it. */
      fisheye_polynomial_k1, fisheye_polynomial_k2, fisheye_polynomial_k3, fisheye_polynomial_k4);
  kcam->central_cylindrical_range = make_float4(-central_cylindrical_range_u_min,
                                                -central_cylindrical_range_u_max,
                                                central_cylindrical_range_v_min,
                                                central_cylindrical_range_v_max);

  /* Stereo eye offset along the interocular axis. */
  switch (stereo_eye) {
    case STEREO_LEFT:
      kcam->interocular_offset = -interocular_distance * 0.5f;
      break;
    case STEREO_RIGHT:
      kcam->interocular_offset = interocular_distance * 0.5f;
      break;
    case STEREO_NONE:
    default:
      kcam->interocular_offset = 0.0f;
      break;
  }

  kcam->convergence_distance = convergence_distance;
  /* Negative angles disable pole merging in the kernel. */
  if (use_pole_merge) {
    kcam->pole_merge_angle_from = pole_merge_angle_from;
    kcam->pole_merge_angle_to = pole_merge_angle_to;
  }
  else {
    kcam->pole_merge_angle_from = -1.0f;
    kcam->pole_merge_angle_to = -1.0f;
  }

  /* sensor size */
  kcam->sensorwidth = sensorwidth;
  kcam->sensorheight = sensorheight;

  /* render size */
  kcam->width = width;
  kcam->height = height;

  /* store differentials */
  kcam->dx = float3_to_float4(dx);
  kcam->dy = float3_to_float4(dy);

  /* clipping; FLT_MAX far clip means an unbounded clip length. */
  kcam->nearclip = nearclip;
  kcam->cliplength = (farclip == FLT_MAX) ? FLT_MAX : farclip - nearclip;

  /* Camera in volume. */
  kcam->is_inside_volume = 0;

  /* Rolling shutter effect */
  kcam->rolling_shutter_type = rolling_shutter_type;
  kcam->rolling_shutter_duration = rolling_shutter_duration;

  /* Set further update flags */
  clear_modified();
  need_device_update = true;
  need_flags_update = true;
  previous_need_motion = need_motion;
}
491
/* Upload the camera data computed by update() to the device: the kernel
 * camera struct, the optional shutter-curve CDF lookup table, and the
 * decomposed motion transforms. */
void Camera::device_update(Device * /*device*/, DeviceScene *dscene, Scene *scene)
{
  update(scene);

  if (!need_device_update) {
    return;
  }

  /* Record the time spent here into the scene update statistics. */
  scoped_callback_timer timer([scene](double time) {
    if (scene->update_stats) {
      scene->update_stats->camera.times.add_entry({"device_update", time});
    }
  });

  /* Drop any previous shutter table before (re)adding one. */
  scene->lookup_tables->remove_table(&shutter_table_offset);
  /* shuttertime == -1 means motion blur is disabled (see update()). */
  if (kernel_camera.shuttertime != -1.0f) {
    vector<float> shutter_table;
    /* NOTE(review): the call head of this statement (presumably
     * `util_cdf_inverted(SHUTTER_TABLE_SIZE,` — that helper and constant are
     * referenced by this file) appears elided by extraction; the dangling
     * argument lines below belong to it. It builds the inverted CDF of the
     * shutter curve for importance sampling of time. */
        0.0f,
        1.0f,
        function_bind(shutter_curve_eval, _1, shutter_curve),
        false,
        shutter_table);
    shutter_table_offset = scene->lookup_tables->add_table(dscene, shutter_table);
    kernel_camera.shutter_table_offset = (int)shutter_table_offset;
  }

  dscene->data.cam = kernel_camera;

  /* Upload decomposed motion transforms, or free the buffer if none. */
  size_t num_motion_steps = kernel_camera_motion.size();
  if (num_motion_steps) {
    DecomposedTransform *camera_motion = dscene->camera_motion.alloc(num_motion_steps);
    memcpy(camera_motion, kernel_camera_motion.data(), sizeof(*camera_motion) * num_motion_steps);
    dscene->camera_motion.copy_to_device();
  }
  else {
    dscene->camera_motion.free();
  }
}
531
/* Detect whether the camera starts inside a volume object, so the kernel can
 * initialize the volume stack accordingly. */
void Camera::device_update_volume(Device * /*device*/, DeviceScene *dscene, Scene *scene)
{
  /* NOTE(review): the early-out guard condition heading this return appears
   * elided by extraction (presumably a check of need_device_update /
   * need_flags_update, which are reset at the end of this function); the
   * dangling return/brace below belong to it. */
  return;
  }

  KernelIntegrator *kintegrator = &dscene->data.integrator;
  if (kintegrator->use_volumes) {
    KernelCamera *kcam = &dscene->data.cam;
    BoundBox viewplane_boundbox = viewplane_bounds_get();

    /* Parallel object update, with grain size to avoid too much threading overhead
     * for individual objects. */
    static const int OBJECTS_PER_TASK = 32;
    parallel_for(blocked_range<size_t>(0, scene->objects.size(), OBJECTS_PER_TASK),
                 [&](const blocked_range<size_t> &r) {
                   for (size_t i = r.begin(); i != r.end(); i++) {
                     Object *object = scene->objects[i];
                     /* A volume object whose bounds overlap the near-plane
                      * bounds may contain the camera. */
                     if (object->get_geometry()->has_volume &&
                         viewplane_boundbox.intersects(object->bounds)) {
                       /* TODO(sergey): Consider adding more grained check. */
                       VLOG_INFO << "Detected camera inside volume.";
                       kcam->is_inside_volume = 1;
                       /* One hit is enough; stop the remaining tasks. */
                       parallel_for_cancel();
                       break;
                     }
                   }
                 });

    if (!kcam->is_inside_volume) {
      VLOG_INFO << "Camera is outside of the volume.";
    }
  }

  need_device_update = false;
  need_flags_update = false;
}
569
570void Camera::device_free(Device * /*device*/, DeviceScene *dscene, Scene *scene)
571{
572 scene->lookup_tables->remove_table(&shutter_table_offset);
573 dscene->camera_motion.free();
574}
575
/* Map a raster (pixel) coordinate to a world-space position on the near clip
 * plane; used by viewplane_bounds_get() to bound the visible region. */
float3 Camera::transform_raster_to_world(float raster_x, float raster_y)
{
  float3 D, P;
  if (camera_type == CAMERA_PERSPECTIVE) {
    /* Ray direction in camera space for this pixel. */
    D = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
    float3 Pclip = normalize(D);
    P = zero_float3();
    /* TODO(sergey): Aperture support? */
    /* NOTE(review): one or two lines appear elided here by extraction —
     * presumably the camera-to-world transforms of P and D (P is used in
     * world space by the caller) — confirm against upstream. */
    /* TODO(sergey): Clipping is conditional in kernel, and hence it could
     * be mistakes in here, currently leading to wrong camera-in-volume
     * detection.
     */
    /* Advance to the near clip plane along the (unnormalized) direction. */
    P += nearclip * D / Pclip.z;
  }
  else if (camera_type == CAMERA_ORTHOGRAPHIC) {
    /* Orthographic rays all share the camera-space forward direction. */
    D = make_float3(0.0f, 0.0f, 1.0f);
    /* TODO(sergey): Aperture support? */
    P = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
    /* NOTE(review): lines appear elided here by extraction — presumably the
     * camera-to-world transforms of P and D — confirm against upstream. */
  }
  else {
    assert(!"unsupported camera type");
  }
  return P;
}
604
606{
607 /* TODO(sergey): This is all rather stupid, but is there a way to perform
608 * checks we need in a more clear and smart fashion? */
610
611 const float max_aperture_size = aperture_ratio < 1.0f ? aperturesize / aperture_ratio :
612 aperturesize;
613
614 if (camera_type == CAMERA_PANORAMA) {
615 const float extend = max_aperture_size + nearclip;
616 if (use_spherical_stereo == false) {
618 }
619 else {
620 float half_eye_distance = interocular_distance * 0.5f;
621
622 bounds.grow(
623 make_float3(cameratoworld.x.w + half_eye_distance, cameratoworld.y.w, cameratoworld.z.w),
624 extend);
625
626 bounds.grow(
627 make_float3(cameratoworld.z.w, cameratoworld.y.w + half_eye_distance, cameratoworld.z.w),
628 extend);
629
630 bounds.grow(
631 make_float3(cameratoworld.x.w - half_eye_distance, cameratoworld.y.w, cameratoworld.z.w),
632 extend);
633
634 bounds.grow(
635 make_float3(cameratoworld.x.w, cameratoworld.y.w - half_eye_distance, cameratoworld.z.w),
636 extend);
637 }
638 }
639 else {
640 /* max_aperture_size = Max horizontal distance a ray travels from aperture edge to focus point.
641 * Scale that value based on the ratio between focaldistance and nearclip to figure out the
642 * horizontal distance the DOF ray will travel before reaching the nearclip plane, where it
643 * will start rendering from.
644 * In some cases (focus distance is close to camera, and nearclip plane is far from camera),
645 * this scaled value is larger than nearclip, in which case we add it to `extend` to extend the
646 * bounding box to account for these rays.
647 *
648 * ----------------- nearclip plane
649 * / scaled_horz_dof_ray, nearclip
650 * /
651 * /
652 * / max_aperture_size, focaldistance
653 * /|
654 * / |
655 * / |
656 * / |
657 * ------ max_aperture_size, 0
658 * 0, 0
659 */
660
661 const float scaled_horz_dof_ray = (max_aperture_size > 0.0f) ?
662 max_aperture_size * (nearclip / focaldistance) :
663 0.0f;
664 const float extend = max_aperture_size + max(nearclip, scaled_horz_dof_ray);
665
666 bounds.grow(transform_raster_to_world(0.0f, 0.0f), extend);
667 bounds.grow(transform_raster_to_world(0.0f, (float)height), extend);
668 bounds.grow(transform_raster_to_world((float)width, (float)height), extend);
669 bounds.grow(transform_raster_to_world((float)width, 0.0f), extend);
670 if (camera_type == CAMERA_PERSPECTIVE) {
671 /* Center point has the most distance in local Z axis,
672 * use it to construct bounding box/
673 */
674 bounds.grow(transform_raster_to_world(0.5f * width, 0.5f * height), extend);
675 }
676 }
677 return bounds;
678}
679
/* NOTE(review): the signature line for this body appears to have been lost in
 * extraction; the class member index lists `float world_to_raster_size(float3 P)`.
 *
 * Estimate how large one raster pixel is at world-space point P, scaled up
 * for points outside the view frustum by offscreen_dicing_scale; used to
 * drive adaptive subdivision (dicing). */
{
  float res = 1.0f;

  if (camera_type == CAMERA_ORTHOGRAPHIC) {
    /* Parallel projection: pixel size is constant across the image. */
    res = min(len(full_dx), len(full_dy));

    if (offscreen_dicing_scale > 1.0f) {
      /* NOTE(review): two lines appear elided here by extraction — presumably
       * the camera-space point `p` and the frustum corner `v1` (both used
       * below); the dangling make_float3 argument belongs to the elided
       * transform_perspective call. Confirm against upstream. */
      make_float3(full_width, full_height, 0.0f));
      /* NOTE(review): the declaration of `v2` (the opposite frustum corner,
       * used below) also appears elided here. */

      /* Create point clamped to frustum */
      float3 c;
      c.x = max(v2.x, min(v1.x, p.x));
      c.y = max(v2.y, min(v1.y, p.y));
      c.z = max(0.0f, p.z);

      /* Check right side */
      float f_dist = len(p - c) / sqrtf((v1.x * v1.x + v1.y * v1.y) * 0.5f);
      /* NOTE(review): len() is non-negative, so this branch looks dead —
       * kept as-is; confirm intent against upstream. */
      if (f_dist < 0.0f) {
        /* Check left side */
        f_dist = len(p - c) / sqrtf((v2.x * v2.x + v2.y * v2.y) * 0.5f);
      }
      if (f_dist > 0.0f) {
        /* Linearly blow up the pixel size with distance from the frustum. */
        res += res * f_dist * (offscreen_dicing_scale - 1.0f);
      }
    }
  }
  else if (camera_type == CAMERA_PERSPECTIVE) {
    /* Calculate as if point is directly ahead of the camera. */
    float3 raster = make_float3(0.5f * full_width, 0.5f * full_height, 0.0f);
    /* NOTE(review): a line appears elided here by extraction — presumably
     * `Pcamera`, the camera-space projection of `raster` (used just below). */

    /* dDdx */
    float3 Ddiff = transform_direction(&cameratoworld, Pcamera);
    /* NOTE(review): a line appears elided here by extraction — presumably the
     * selection of the smaller of full_dx/full_dy into `dx` (len_squared is
     * referenced by this file). */
    float3 dDdx = normalize(Ddiff + dx) - normalize(Ddiff);

    /* dPdx */
    float dist = len(transform_point(&worldtocamera, P));
    float3 D = normalize(Ddiff);
    /* Project the direction differential onto the plane perpendicular to D
     * and scale by distance to get the world-space pixel footprint. */
    res = len(dist * dDdx - dot(dist * dDdx, D) * D);

    /* Decent approx distance to frustum
     * (doesn't handle corners correctly, but not that big of a deal) */
    float f_dist = 0.0f;

    if (offscreen_dicing_scale > 1.0f) {
      /* NOTE(review): a line appears elided here by extraction — presumably
       * `p`, the camera-space position of P (used below). */

      /* Distance from the four planes */
      float r = dot(p, frustum_right_normal);
      float t = dot(p, frustum_top_normal);
      float l = dot(p, frustum_left_normal);
      float b = dot(p, frustum_bottom_normal);

      if (r <= 0.0f && l <= 0.0f && t <= 0.0f && b <= 0.0f) {
        /* Point is inside frustum */
        f_dist = 0.0f;
      }
      else if (r > 0.0f && l > 0.0f && t > 0.0f && b > 0.0f) {
        /* Point is behind frustum */
        f_dist = len(p);
      }
      else {
        /* Point may be behind or off to the side, need to check */
        /* NOTE(review): several lines appear elided here by extraction —
         * presumably the `along_right/along_left/along_top/along_bottom`
         * edge-direction vectors used in the arrays below. */

        float dist[] = {r, l, t, b};
        float3 along[] = {along_right, along_left, along_top, along_bottom};

        bool test_o = false;

        float *d = dist;
        float3 *a = along;
        for (int i = 0; i < 4; i++, d++, a++) {
          /* Test if we should check this side at all */
          if (*d > 0.0f) {
            if (dot(p, *a) >= 0.0f) {
              /* We are in front of the back edge of this side of the frustum */
              f_dist = max(f_dist, *d);
            }
            else {
              /* Possibly far enough behind the frustum to use distance to origin instead of edge
               */
              test_o = true;
            }
          }
        }

        if (test_o) {
          f_dist = (f_dist > 0) ? min(f_dist, len(p)) : len(p);
        }
      }

      if (f_dist > 0.0f) {
        res += len(dDdx - dot(dDdx, D) * D) * f_dist * (offscreen_dicing_scale - 1.0f);
      }
    }
  }
  else if (camera_type == CAMERA_PANORAMA) {
    /* NOTE(review): a line appears elided here by extraction — presumably the
     * camera-space vector `D` toward P (its length is taken just below). */
    float dist = len(D);

    Ray ray;
    memset(&ray, 0, sizeof(ray));

    /* Distortion can become so great that the results become meaningless, there
     * may be a better way to do this, but calculating differentials from the
     * point directly ahead seems to produce good enough results. */
#if 0
    /* NOTE(review): a line appears elided here by extraction — presumably the
     * direction_to_panorama() call producing `dir` (that helper is referenced
     * by this file). */
    float3 raster = transform_perspective(&full_cameratoraster, make_float3(dir.x, dir.y, 0.0f));

    ray.t = 1.0f;
    /* NOTE(review): the camera_sample_panorama() call head appears elided
     * here; the dangling argument line below belongs to it. */
    &kernel_camera, kernel_camera_motion.data(), raster.x, raster.y, 0.0f, 0.0f, &ray);
    if (ray.t == 0.0f) {
      /* No differentials, just use from directly ahead. */
      /* NOTE(review): the camera_sample_panorama() call head appears elided
       * here as well; the dangling argument lines below belong to it. */
          0.5f * make_float2(full_width, full_height),
          zero_float2(),
          &ray);
    }
#else
    /* NOTE(review): the camera_sample_panorama() call head appears elided
     * here; the dangling argument lines below belong to it. */
        0.5f * make_float2(full_width, full_height),
        zero_float2(),
        &ray);
#endif

    /* TODO: would it help to use more accurate differentials here? */
    return differential_transfer_compact(ray.dP, ray.D, ray.dD, dist);
  }

  return res;
}
824
/* NOTE(review): the signature line for this body appears to have been lost in
 * extraction; the class member index lists `bool use_motion() const`.
 *
 * Motion blur/pass needs at least two motion transforms to interpolate. */
{
  return motion.size() > 1;
}
829
830void Camera::set_screen_size(int width_, int height_)
831{
832 if (width_ != width || height_ != height) {
833 width = width_;
834 height = height_;
835 tag_modified();
836 }
837}
838
839float Camera::motion_time(int step) const
840{
841 return (use_motion()) ? 2.0f * step / (motion.size() - 1) - 1.0f : 0.0f;
842}
843
844int Camera::motion_step(float time) const
845{
846 if (use_motion()) {
847 for (int step = 0; step < motion.size(); step++) {
848 if (time == motion_time(step)) {
849 return step;
850 }
851 }
852 }
853
854 return -1;
855}
856
#define D
ATTR_WARN_UNUSED_RESULT const BMVert * v2
ATTR_WARN_UNUSED_RESULT const BMLoop * l
ATTR_WARN_UNUSED_RESULT const BMVert * v
static btDbvtVolume bounds(btDbvtNode **leaves, int count)
Definition btDbvt.cpp:299
SIMD_FORCE_INLINE btVector3 & normalize()
Normalize this vector x^2 + y^2 + z^2 = 1.
Definition btVector3.h:303
float bottom
Definition boundbox.h:194
float top
Definition boundbox.h:195
float right
Definition boundbox.h:193
float left
Definition boundbox.h:192
device_vector< DecomposedTransform > camera_motion
Definition devicescene.h:55
KernelData data
Definition devicescene.h:95
size_t size() const
local_group_size(16, 16) .push_constant(Type b
additional_info("compositor_sum_squared_difference_float_shared") .push_constant(Type output_img float dot(value.rgb, luminance_coefficients)") .define("LOAD(value)"
ccl_device_inline float2 direction_to_panorama(ccl_constant KernelCamera *cam, float3 dir)
CCL_NAMESPACE_BEGIN ccl_device_inline float frac(float x, ccl_private int *ix)
ccl_device_inline float3 transform_perspective(ccl_private const ProjectionTransform *t, const float3 a)
ccl_device_inline ProjectionTransform projection_perspective(float fov, float n, float f)
ccl_device_inline float3 transform_perspective_direction(ccl_private const ProjectionTransform *t, const float3 a)
ccl_device_inline ProjectionTransform projection_identity()
ProjectionTransform projection_inverse(const ProjectionTransform &a)
Definition transform.cpp:98
ccl_device_inline ProjectionTransform projection_orthographic(float znear, float zfar)
#define function_bind
#define CCL_NAMESPACE_END
ccl_device_forceinline float4 make_float4(const float x, const float y, const float z, const float w)
#define saturatef(x)
ccl_device_forceinline float3 make_float3(const float x, const float y, const float z)
ccl_device_forceinline float2 make_float2(const float x, const float y)
#define sqrtf(x)
ccl_device_forceinline float differential_transfer_compact(const float ray_dP, const float3, const float ray_dD, const float ray_t)
int len
draw_view in_light_buf[] float
draw_view push_constant(Type::INT, "radiance_src") .push_constant(Type capture_info_buf storage_buf(1, Qualifier::READ, "ObjectBounds", "bounds_buf[]") .push_constant(Type draw_view int
#define mix(a, b, c)
Definition hash.h:36
ccl_device_inline void camera_sample_panorama(ccl_constant KernelCamera *cam, ccl_global const DecomposedTransform *cam_motion, const float2 raster, const float2 rand_lens, ccl_private Ray *ray)
@ MOTION_POSITION_END
@ MOTION_POSITION_START
@ MOTION_POSITION_CENTER
#define SHUTTER_TABLE_SIZE
#define RAMP_TABLE_SIZE
@ PANORAMA_MIRRORBALL
@ PANORAMA_FISHEYE_EQUISOLID
@ PANORAMA_CENTRAL_CYLINDRICAL
@ PANORAMA_EQUIANGULAR_CUBEMAP_FACE
@ PANORAMA_FISHEYE_EQUIDISTANT
@ PANORAMA_FISHEYE_LENS_POLYNOMIAL
@ PANORAMA_EQUIRECTANGULAR
@ CAMERA_PERSPECTIVE
@ CAMERA_PANORAMA
@ CAMERA_ORTHOGRAPHIC
#define VLOG_INFO
Definition log.h:72
void util_cdf_inverted(const int resolution, const float from, const float to, Functor functor, const bool make_symmetric, vector< float > &inv_cdf)
Definition math_cdf.h:47
CCL_NAMESPACE_BEGIN ccl_device_inline float2 zero_float2()
Definition math_float2.h:14
ccl_device_inline float len_squared(const float2 a)
CCL_NAMESPACE_BEGIN ccl_device_inline float3 zero_float3()
Definition math_float3.h:15
#define M_PI_F
Definition mikk_util.hh:15
#define SOCKET_FLOAT(name, ui_name, default_value,...)
Definition node_type.h:200
#define SOCKET_INT(name, ui_name, default_value,...)
Definition node_type.h:194
#define SOCKET_FLOAT_ARRAY(name, ui_name, default_value,...)
Definition node_type.h:248
#define SOCKET_TRANSFORM(name, ui_name, default_value,...)
Definition node_type.h:214
#define SOCKET_UINT(name, ui_name, default_value,...)
Definition node_type.h:196
#define NODE_DEFINE(structname)
Definition node_type.h:148
#define SOCKET_TRANSFORM_ARRAY(name, ui_name, default_value,...)
Definition node_type.h:269
#define SOCKET_BOOLEAN(name, ui_name, default_value,...)
Definition node_type.h:192
#define SOCKET_ENUM(name, ui_name, values, default_value,...)
Definition node_type.h:216
static CCL_NAMESPACE_BEGIN float shutter_curve_eval(float x, array< float > &shutter_curve)
@ TABLE_OFFSET_INVALID
#define M_PI_2_F
Definition sky_float3.h:20
#define min(a, b)
Definition sort.c:32
#define FLT_MAX
Definition stdcycles.h:14
bool need_device_update
ProjectionTransform worldtoraster
float3 frustum_right_normal
BoundBox2D viewplane
void set_screen_size(int width_, int height_)
int previous_need_motion
float3 full_dy
bool need_flags_update
float3 full_dx
ProjectionTransform rastertoworld
BoundBox viewplane_bounds_get()
Transform worldtocamera
float motion_time(int step) const
void update(Scene *scene)
ProjectionTransform rastertocamera
ProjectionTransform screentoworld
bool use_motion() const
size_t shutter_table_offset
void device_update_volume(Device *device, DeviceScene *dscene, Scene *scene)
void device_free(Device *device, DeviceScene *dscene, Scene *scene)
ProjectionTransform ndctoworld
float3 dx
void device_update(Device *device, DeviceScene *dscene, Scene *scene)
float3 frustum_left_normal
float world_to_raster_size(float3 P)
Transform cameratoworld
int motion_step(float time) const
void compute_auto_viewplane()
float3 frustum_top_normal
ProjectionTransform full_rastertocamera
float3 frustum_bottom_normal
float3 dy
KernelCamera kernel_camera
array< DecomposedTransform > kernel_camera_motion
ProjectionTransform ndctoworld
float pole_merge_angle_to
int have_perspective_motion
ProjectionTransform rastertocamera
float pole_merge_angle_from
float4 central_cylindrical_range
ProjectionTransform worldtoraster
float fisheye_lens_polynomial_bias
Transform motion_pass_post
Transform cameratoworld
float4 equirectangular_range
float interocular_offset
Transform worldtocamera
ProjectionTransform screentoworld
float inv_aperture_ratio
ProjectionTransform perspective_post
float rolling_shutter_duration
ProjectionTransform perspective_pre
ProjectionTransform worldtoscreen
Transform motion_pass_pre
ProjectionTransform worldtondc
float convergence_distance
ProjectionTransform rastertoworld
float4 fisheye_lens_polynomial_coefficients
void insert(const char *x, int y)
Definition node_enum.h:21
static NodeType * add(const char *name, CreateFunc create, Type type=NONE, const NodeType *base=NULL)
void tag_modified()
bool is_modified() const
MotionType
Definition scene.h:177
@ MOTION_PASS
Definition scene.h:177
@ MOTION_BLUR
Definition scene.h:177
float4 y
Definition transform.h:24
float4 x
Definition transform.h:24
float4 z
Definition transform.h:24
float x
float y
float z
Definition sky_float3.h:27
float y
Definition sky_float3.h:27
float x
Definition sky_float3.h:27
void transform_motion_decompose(DecomposedTransform *decomp, const Transform *motion, size_t size)
Transform transform_from_viewplane(BoundBox2D &viewplane)
ccl_device_inline Transform transform_identity()
Definition transform.h:296
ccl_device_inline Transform transform_inverse(const Transform tfm)
Definition transform.h:423
ccl_device_inline float3 transform_direction(ccl_private const Transform *t, const float3 a)
Definition transform.h:94
ccl_device_inline Transform transform_scale(float3 s)
Definition transform.h:254
CCL_NAMESPACE_END CCL_NAMESPACE_BEGIN ccl_device_inline float3 transform_point(ccl_private const Transform *t, const float3 a)
Definition transform.h:63
float max
ccl_device_inline float4 float3_to_float4(const float3 a)
Definition util/math.h:540
wmTimer * timer