Blender V5.0
scene/camera.cpp
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
2 *
3 * SPDX-License-Identifier: Apache-2.0 */
4
5#include <algorithm>
6
7#include "scene/camera.h"
8#include "scene/mesh.h"
9#include "scene/object.h"
10#include "scene/osl.h"
11#include "scene/scene.h"
12#include "scene/stats.h"
13#include "scene/tables.h"
14
15#include "device/device.h"
16
17#include "util/log.h"
18#include "util/math_cdf.h"
19#include "util/tbb.h"
20#include "util/time.h"
21#include "util/vector.h"
22
24
25/* Custom cameras don't work with adaptive subdivision currently, and it's a bit tricky
26 * to fix for the OptiX case as there is no OSL shader compiled for the CPU. This is a temporary
27 * workaround to fall back to a perspective camera for that case. */
28#define FIX_CUSTOM_CAMERA_CRASH
29
31
32static float shutter_curve_eval(float x, array<float> &shutter_curve)
33{
34 if (shutter_curve.size() == 0) {
35 return 1.0f;
36 }
37
38 x = saturatef(x) * shutter_curve.size() - 1;
39 const int index = (int)x;
40 const float frac = x - index;
41 if (index < shutter_curve.size() - 1) {
42 return mix(shutter_curve[index], shutter_curve[index + 1], frac);
43 }
44 return shutter_curve[shutter_curve.size() - 1];
45}
46
48{
49 NodeType *type = NodeType::add("camera", create);
50
51 SOCKET_FLOAT(shuttertime, "Shutter Time", 1.0f);
52
53 static NodeEnum motion_position_enum;
54 motion_position_enum.insert("start", MOTION_POSITION_START);
55 motion_position_enum.insert("center", MOTION_POSITION_CENTER);
56 motion_position_enum.insert("end", MOTION_POSITION_END);
57 SOCKET_ENUM(motion_position, "Motion Position", motion_position_enum, MOTION_POSITION_CENTER);
58
59 static NodeEnum rolling_shutter_type_enum;
60 rolling_shutter_type_enum.insert("none", ROLLING_SHUTTER_NONE);
61 rolling_shutter_type_enum.insert("top", ROLLING_SHUTTER_TOP);
62 SOCKET_ENUM(rolling_shutter_type,
63 "Rolling Shutter Type",
64 rolling_shutter_type_enum,
65 ROLLING_SHUTTER_NONE);
66 SOCKET_FLOAT(rolling_shutter_duration, "Rolling Shutter Duration", 0.1f);
67
68 SOCKET_FLOAT_ARRAY(shutter_curve, "Shutter Curve", array<float>());
69
70 SOCKET_FLOAT(aperturesize, "Aperture Size", 0.0f);
71 SOCKET_FLOAT(focaldistance, "Focal Distance", 10.0f);
72 SOCKET_UINT(blades, "Blades", 0);
73 SOCKET_FLOAT(bladesrotation, "Blades Rotation", 0.0f);
74
75 SOCKET_TRANSFORM(matrix, "Matrix", transform_identity());
76 SOCKET_TRANSFORM_ARRAY(motion, "Motion", array<Transform>());
77
78 SOCKET_FLOAT(aperture_ratio, "Aperture Ratio", 1.0f);
79
80 static NodeEnum type_enum;
81 type_enum.insert("perspective", CAMERA_PERSPECTIVE);
82 type_enum.insert("orthograph", CAMERA_ORTHOGRAPHIC);
83 type_enum.insert("panorama", CAMERA_PANORAMA);
84 type_enum.insert("custom", CAMERA_CUSTOM);
85 SOCKET_ENUM(camera_type, "Type", type_enum, CAMERA_PERSPECTIVE);
86
87 static NodeEnum panorama_type_enum;
88 panorama_type_enum.insert("equirectangular", PANORAMA_EQUIRECTANGULAR);
89 panorama_type_enum.insert("equiangular_cubemap_face", PANORAMA_EQUIANGULAR_CUBEMAP_FACE);
90 panorama_type_enum.insert("mirrorball", PANORAMA_MIRRORBALL);
91 panorama_type_enum.insert("fisheye_equidistant", PANORAMA_FISHEYE_EQUIDISTANT);
92 panorama_type_enum.insert("fisheye_equisolid", PANORAMA_FISHEYE_EQUISOLID);
93 panorama_type_enum.insert("fisheye_lens_polynomial", PANORAMA_FISHEYE_LENS_POLYNOMIAL);
94 panorama_type_enum.insert("panorama_central_cylindrical", PANORAMA_CENTRAL_CYLINDRICAL);
95 SOCKET_ENUM(panorama_type, "Panorama Type", panorama_type_enum, PANORAMA_EQUIRECTANGULAR);
96
97 SOCKET_FLOAT(fisheye_fov, "Fisheye FOV", M_PI_F);
98 SOCKET_FLOAT(fisheye_lens, "Fisheye Lens", 10.5f);
99 SOCKET_FLOAT(latitude_min, "Latitude Min", -M_PI_2_F);
100 SOCKET_FLOAT(latitude_max, "Latitude Max", M_PI_2_F);
101 SOCKET_FLOAT(longitude_min, "Longitude Min", -M_PI_F);
102 SOCKET_FLOAT(longitude_max, "Longitude Max", M_PI_F);
103 SOCKET_FLOAT(fov, "FOV", M_PI_4_F);
104 SOCKET_FLOAT(fov_pre, "FOV Pre", M_PI_4_F);
105 SOCKET_FLOAT(fov_post, "FOV Post", M_PI_4_F);
106
107 SOCKET_FLOAT(fisheye_polynomial_k0, "Fisheye Polynomial K0", 0.0f);
108 SOCKET_FLOAT(fisheye_polynomial_k1, "Fisheye Polynomial K1", 0.0f);
109 SOCKET_FLOAT(fisheye_polynomial_k2, "Fisheye Polynomial K2", 0.0f);
110 SOCKET_FLOAT(fisheye_polynomial_k3, "Fisheye Polynomial K3", 0.0f);
111 SOCKET_FLOAT(fisheye_polynomial_k4, "Fisheye Polynomial K4", 0.0f);
112
113 SOCKET_FLOAT(central_cylindrical_range_u_min, "Central Cylindrical Range U Min", -M_PI_F);
114 SOCKET_FLOAT(central_cylindrical_range_u_max, "Central Cylindrical Range U Max", M_PI_F);
115 SOCKET_FLOAT(central_cylindrical_range_v_min, "Central Cylindrical Range V Min", -1.0f);
116 SOCKET_FLOAT(central_cylindrical_range_v_max, "Central Cylindrical Range V Max", 1.0f);
117
118 static NodeEnum stereo_eye_enum;
119 stereo_eye_enum.insert("none", STEREO_NONE);
120 stereo_eye_enum.insert("left", STEREO_LEFT);
121 stereo_eye_enum.insert("right", STEREO_RIGHT);
122 SOCKET_ENUM(stereo_eye, "Stereo Eye", stereo_eye_enum, STEREO_NONE);
123
124 SOCKET_BOOLEAN(use_spherical_stereo, "Use Spherical Stereo", false);
125
126 SOCKET_FLOAT(interocular_distance, "Interocular Distance", 0.065f);
127 SOCKET_FLOAT(convergence_distance, "Convergence Distance", 30.0f * 0.065f);
128
129 SOCKET_BOOLEAN(use_pole_merge, "Use Pole Merge", false);
130 SOCKET_FLOAT(pole_merge_angle_from, "Pole Merge Angle From", 60.0f * M_PI_F / 180.0f);
131 SOCKET_FLOAT(pole_merge_angle_to, "Pole Merge Angle To", 75.0f * M_PI_F / 180.0f);
132
133 SOCKET_FLOAT(sensorwidth, "Sensor Width", 0.036f);
134 SOCKET_FLOAT(sensorheight, "Sensor Height", 0.024f);
135
136 SOCKET_FLOAT(nearclip, "Near Clip", 1e-5f);
137 SOCKET_FLOAT(farclip, "Far Clip", 1e5f);
138
139 SOCKET_FLOAT(viewplane.left, "Viewplane Left", 0);
140 SOCKET_FLOAT(viewplane.right, "Viewplane Right", 0);
141 SOCKET_FLOAT(viewplane.bottom, "Viewplane Bottom", 0);
142 SOCKET_FLOAT(viewplane.top, "Viewplane Top", 0);
143
144 SOCKET_FLOAT(border.left, "Border Left", 0);
145 SOCKET_FLOAT(border.right, "Border Right", 0);
146 SOCKET_FLOAT(border.bottom, "Border Bottom", 0);
147 SOCKET_FLOAT(border.top, "Border Top", 0);
148
149 SOCKET_FLOAT(viewport_camera_border.left, "Viewport Border Left", 0);
150 SOCKET_FLOAT(viewport_camera_border.right, "Viewport Border Right", 0);
151 SOCKET_FLOAT(viewport_camera_border.bottom, "Viewport Border Bottom", 0);
152 SOCKET_FLOAT(viewport_camera_border.top, "Viewport Border Top", 0);
153
154 SOCKET_FLOAT(offscreen_dicing_scale, "Offscreen Dicing Scale", 1.0f);
155
156 SOCKET_INT(full_width, "Full Width", 1024);
157 SOCKET_INT(full_height, "Full Height", 512);
158
159 SOCKET_BOOLEAN(use_perspective_motion, "Use Perspective Motion", false);
160
161 return type;
162}
163
/* Construct a camera with library defaults: 1024x512 render size, a flat
 * (all-ones) shutter curve, zeroed differentials and zeroed kernel data.
 * NOTE(review): some initializer lines were lost in extraction (gaps in the
 * original numbering) — compare with upstream before relying on this listing. */
Camera::Camera() : Node(get_node_type())
{

  width = 1024;
  height = 512;

  use_perspective_motion = false;

  /* Default shutter curve: constant 1.0, i.e. the shutter is fully open over
   * the whole shutter interval. */
  shutter_curve.resize(RAMP_TABLE_SIZE);
  for (int i = 0; i < shutter_curve.size(); ++i) {
    shutter_curve[i] = 1.0f;
  }



  /* Raster-space differentials start at zero; recomputed in update(). */
  dx = zero_float3();
  dy = zero_float3();

  /* Force a full device upload on first use. */
  need_device_update = true;
  need_flags_update = true;

  /* Kernel camera struct is plain data shared with the render kernels;
   * start from all-zeros. */
  memset((void *)&kernel_camera, 0, sizeof(kernel_camera));
}
198
/* Defaulted destructor: members (arrays, kernel data) clean up themselves. */
Camera::~Camera() = default;
200
/* Compute a viewplane matching the full-resolution aspect ratio.
 * NOTE(review): the function signature line and the body of the
 * panorama/custom branch were lost in extraction — verify upstream. */
{
  if (camera_type == CAMERA_PANORAMA || camera_type == CAMERA_CUSTOM) {
  }
  else {
    /* Keep the shorter image edge at unit length and stretch the longer
     * edge by the aspect ratio. */
    const float aspect = (float)full_width / (float)full_height;
    if (full_width >= full_height) {
      viewplane = BoundBox2D(make_float2(aspect, 1.0f));
    }
    else {
      viewplane = BoundBox2D(make_float2(1.0f, 1.0f / aspect));
    }
  }
}
216
/* Recompute all derived camera transforms and fill the kernel camera data.
 * Early-outs when no socket was modified. Runs on the host, before upload in
 * device_update().
 * NOTE(review): several statements were lost in HTML extraction (gaps in the
 * original numbering, e.g. the fulltoborder/worldtocamera assignments and the
 * differential computations) — compare against upstream scene/camera.cpp. */
{
  const Scene::MotionType need_motion = scene->need_motion();

  if (previous_need_motion != need_motion) {
    /* scene's motion model could have been changed since previous device
     * camera update this could happen for example in case when one render
     * layer has got motion pass and another not */
    need_device_update = true;
  }

  if (!is_modified()) {
    return;
  }

  /* Record time spent here into the scene update statistics. */
  const scoped_callback_timer timer([scene](double time) {
    if (scene->update_stats) {
      scene->update_stats->camera.times.add_entry({"update", time});
    }
  });

  /* Full viewport to camera border in the viewport. */
  const Transform bordertofull = transform_inverse(fulltoborder);

  /* NDC to raster. */
  const Transform ndctoraster = transform_scale(width, height, 1.0f) * bordertofull;
  const Transform full_ndctoraster = transform_scale(full_width, full_height, 1.0f) * bordertofull;

  /* Raster to screen. */
  const Transform screentondc = fulltoborder * transform_from_viewplane(viewplane);

  const Transform screentoraster = ndctoraster * screentondc;
  const Transform rastertoscreen = transform_inverse(screentoraster);
  const Transform full_screentoraster = full_ndctoraster * screentondc;
  const Transform full_rastertoscreen = transform_inverse(full_screentoraster);

  /* Screen to camera. */
  ProjectionTransform cameratoscreen;
  if (camera_type == CAMERA_PERSPECTIVE) {
    cameratoscreen = projection_perspective(fov, nearclip, farclip);
  }
  else if (camera_type == CAMERA_ORTHOGRAPHIC) {
    cameratoscreen = projection_orthographic(nearclip, farclip);
  }
  else {
    /* Panorama/custom cameras have no projection matrix. */
    cameratoscreen = projection_identity();
  }

  const ProjectionTransform screentocamera = projection_inverse(cameratoscreen);

  rastertocamera = screentocamera * rastertoscreen;
  full_rastertocamera = screentocamera * full_rastertoscreen;

#ifdef FIX_CUSTOM_CAMERA_CRASH
  /* Custom cameras fall back to a perspective full_rastertocamera, see the
   * comment at the FIX_CUSTOM_CAMERA_CRASH definition. */
  if (camera_type == CAMERA_CUSTOM) {
    const ProjectionTransform full_cameratoscreen = projection_perspective(fov, nearclip, farclip);
    const ProjectionTransform full_screentocamera = projection_inverse(full_cameratoscreen);
    full_rastertocamera = full_screentocamera * full_rastertoscreen;
  }
#endif

  cameratoworld = matrix;
  screentoworld = cameratoworld * screentocamera;
  ndctoworld = rastertoworld * ndctoraster;

  /* note we recompose matrices instead of taking inverses of the above, this
   * is needed to avoid inverting near degenerate matrices that happen due to
   * precision issues with large scenes */
  worldtoscreen = cameratoscreen * worldtocamera;
  worldtondc = screentondc * worldtoscreen;
  worldtoraster = ndctoraster * worldtondc;

  /* differentials */
  if (camera_type == CAMERA_ORTHOGRAPHIC) {
  }
  else if (camera_type == CAMERA_PERSPECTIVE) {
  }
  else {
#ifdef FIX_CUSTOM_CAMERA_CRASH
    if (camera_type == CAMERA_CUSTOM) {
    }
#endif
    dx = zero_float3();
    dy = zero_float3();
  }


#ifdef FIX_CUSTOM_CAMERA_CRASH
  if (camera_type == CAMERA_PERSPECTIVE || camera_type == CAMERA_CUSTOM) {
#else
  if (camera_type == CAMERA_PERSPECTIVE) {
#endif
        make_float3(full_width, full_height, 0.0f));

  }

  /* Compute kernel camera data. */

  /* store matrices */
  kcam->worldtondc = worldtondc;
  kcam->ndctoworld = ndctoworld;

  /* camera motion */
  kcam->num_motion_steps = 0;
  kcam->have_perspective_motion = 0;
  kernel_camera_motion.clear();

  /* Test if any of the transforms are actually different. */
  bool have_motion = false;
  for (size_t i = 0; i < motion.size(); i++) {
    have_motion = have_motion || motion[i] != matrix;
  }

  if (need_motion == Scene::MOTION_PASS) {
    if (camera_type == CAMERA_PANORAMA || camera_type == CAMERA_CUSTOM) {
      if (have_motion) {
        kcam->motion_pass_pre = transform_inverse(motion[0]);
        kcam->motion_pass_post = transform_inverse(motion[motion.size() - 1]);
      }
      else {
        kcam->motion_pass_pre = kcam->worldtocamera;
        kcam->motion_pass_post = kcam->worldtocamera;
      }
    }
    else {
      if (have_motion || fov != fov_pre || fov != fov_post) {
        /* Note the values for perspective_pre/perspective_post calculated for MOTION_PASS are
         * different to those calculated for MOTION_BLUR below, so the code has not been combined.
         */
        const ProjectionTransform cameratoscreen_pre = projection_perspective(
            fov_pre, nearclip, farclip);
        const ProjectionTransform cameratoscreen_post = projection_perspective(
            fov_post, nearclip, farclip);
        const ProjectionTransform cameratoraster_pre = screentoraster * cameratoscreen_pre;
        const ProjectionTransform cameratoraster_post = screentoraster * cameratoscreen_post;
        kcam->perspective_pre = cameratoraster_pre * transform_inverse(motion[0]);
        kcam->perspective_post = cameratoraster_post *
                                 transform_inverse(motion[motion.size() - 1]);
      }
      else {
      }
    }
  }
  else if (need_motion == Scene::MOTION_BLUR) {
    if (have_motion) {
      /* Decompose per-step transforms for interpolation in the kernel. */
      kernel_camera_motion.resize(motion.size());
      transform_motion_decompose(kernel_camera_motion.data(), motion.data(), motion.size());
      kcam->num_motion_steps = motion.size();
    }

    /* TODO(sergey): Support other types of camera. */
    if (use_perspective_motion && camera_type == CAMERA_PERSPECTIVE) {
      const ProjectionTransform screentocamera_pre = projection_inverse(
          projection_perspective(fov_pre, nearclip, farclip));
      const ProjectionTransform screentocamera_post = projection_inverse(
          projection_perspective(fov_post, nearclip, farclip));

      kcam->perspective_pre = screentocamera_pre * rastertoscreen;
      kcam->perspective_post = screentocamera_post * rastertoscreen;
      kcam->have_perspective_motion = 1;
    }
  }

  /* depth of field */
  kcam->aperturesize = aperturesize;
  kcam->focaldistance = max(focaldistance, 1e-5f);
  /* Fewer than 3 blades means a circular aperture. */
  kcam->blades = (blades < 3) ? 0.0f : blades;
  kcam->bladesrotation = bladesrotation;

  /* motion blur */
  kcam->shuttertime = (need_motion == Scene::MOTION_BLUR) ? shuttertime : -1.0f;
  kcam->motion_position = motion_position;

  /* type */
  kcam->type = camera_type;

  /* anamorphic lens bokeh */
  kcam->inv_aperture_ratio = 1.0f / aperture_ratio;

  /* panorama */
  kcam->fisheye_fov = fisheye_fov;

  switch (stereo_eye) {
    case STEREO_LEFT:
      kcam->interocular_offset = -interocular_distance * 0.5f;
      break;
    case STEREO_RIGHT:
      kcam->interocular_offset = interocular_distance * 0.5f;
      break;
    case STEREO_NONE:
    default:
      kcam->interocular_offset = 0.0f;
      break;
  }

  kcam->convergence_distance = convergence_distance;
  if (use_pole_merge) {
    kcam->pole_merge_angle_from = pole_merge_angle_from;
    kcam->pole_merge_angle_to = pole_merge_angle_to;
  }
  else {
    /* Negative angles disable pole merging in the kernel. */
    kcam->pole_merge_angle_from = -1.0f;
    kcam->pole_merge_angle_to = -1.0f;
  }

  /* sensor size */
  kcam->sensorwidth = sensorwidth;
  kcam->sensorheight = sensorheight;

  /* render size */
  kcam->width = width;
  kcam->height = height;

  /* store differentials */
  kcam->dx = make_float4(dx);
  kcam->dy = make_float4(dy);

  /* clipping */
  kcam->nearclip = nearclip;
  kcam->cliplength = (farclip == FLT_MAX) ? FLT_MAX : farclip - nearclip;

  /* Rolling shutter effect */
  kcam->rolling_shutter_type = rolling_shutter_type;
  kcam->rolling_shutter_duration = rolling_shutter_duration;

  /* Set further update flags */
  need_device_update = true;
  need_flags_update = true;
  previous_need_motion = need_motion;
}
500
/* Upload camera data to the device scene: recompute host-side data via
 * update(), build the inverted-CDF shutter lookup table (when motion blur is
 * enabled, i.e. shuttertime != -1), and copy kernel camera + decomposed
 * motion transforms.
 * NOTE(review): the util_cdf_inverted() call head and the
 * camera_motion.copy_to_device() line were lost in extraction. */
void Camera::device_update(Device * /*device*/, DeviceScene *dscene, Scene *scene)
{
  update(scene);

  if (!need_device_update) {
    return;
  }

  const scoped_callback_timer timer([scene](double time) {
    if (scene->update_stats) {
      scene->update_stats->camera.times.add_entry({"device_update", time});
    }
  });

  /* Old table slot is released before (re)adding, so repeated updates do not
   * leak lookup-table space. */
  scene->lookup_tables->remove_table(&shutter_table_offset);
  if (kernel_camera.shuttertime != -1.0f) {
    vector<float> shutter_table;
                      0.0f,
                      1.0f,
                      [this](const float x) { return shutter_curve_eval(x, shutter_curve); },
                      false,
                      shutter_table);
    shutter_table_offset = scene->lookup_tables->add_table(dscene, shutter_table);
    kernel_camera.shutter_table_offset = (int)shutter_table_offset;
  }

  dscene->data.cam = kernel_camera;

  const size_t num_motion_steps = kernel_camera_motion.size();
  if (num_motion_steps) {
    DecomposedTransform *camera_motion = dscene->camera_motion.alloc(num_motion_steps);
    std::copy_n(kernel_camera_motion.data(), num_motion_steps, camera_motion);
  }
  else {
    dscene->camera_motion.free();
  }
}
541
/* Determine whether the camera starts inside a volume and push the flag to
 * the device. Custom cameras are conservatively considered inside; otherwise
 * the viewplane bounding box is tested against all volume objects in
 * parallel.
 * NOTE(review): the guard condition on the early return below was lost in
 * extraction (presumably `if (!need_flags_update) {`) — verify upstream. */
void Camera::device_update_volume(Device * /*device*/, DeviceScene *dscene, Scene *scene)
{
    return;
  }

  kernel_camera.is_inside_volume = 0;

  KernelIntegrator *kintegrator = &dscene->data.integrator;
  if (kintegrator->use_volumes) {
    if (camera_type == CAMERA_CUSTOM) {
      /* Cannot reason about the ray origin of a custom (OSL) camera, so be
       * conservative. */
      kernel_camera.is_inside_volume = 1;
      LOG_INFO << "Considering custom camera to be inside volume.";
    }
    else {
      BoundBox viewplane_boundbox = viewplane_bounds_get();

      /* Parallel object update, with grain size to avoid too much threading overhead
       * for individual objects. */
      static const int OBJECTS_PER_TASK = 32;
      parallel_for(blocked_range<size_t>(0, scene->objects.size(), OBJECTS_PER_TASK),
                   [&](const blocked_range<size_t> &r) {
                     for (size_t i = r.begin(); i != r.end(); i++) {
                       Object *object = scene->objects[i];
                       if (object->get_geometry()->has_volume &&
                           viewplane_boundbox.intersects(object->bounds)) {
                         /* TODO(sergey): Consider adding more grained check. */
                         LOG_INFO << "Detected camera inside volume.";
                         kernel_camera.is_inside_volume = 1;
                         /* One hit is enough; cancel remaining tasks. */
                         parallel_for_cancel();
                         break;
                       }
                     }
                   });

      if (!kernel_camera.is_inside_volume) {
        LOG_INFO << "Camera is outside of the volume.";
      }
    }
  }

  dscene->data.cam.is_inside_volume = kernel_camera.is_inside_volume;

  need_device_update = false;
  need_flags_update = false;
}
588
/* Release device resources owned by the camera: the shutter-curve slot in the
 * shared lookup tables and the decomposed motion-transform buffer. */
void Camera::device_free(Device * /*device*/, DeviceScene *dscene, Scene *scene)
{
  scene->lookup_tables->remove_table(&shutter_table_offset);
  dscene->camera_motion.free();
}
594
/* Map a full-resolution raster coordinate to a world-space position on (or
 * near) the near clip plane; used by viewplane_bounds_get() for the
 * camera-in-volume test. Only perspective and orthographic cameras are
 * supported (asserts otherwise).
 * NOTE(review): the world-space transform lines in both branches were lost in
 * extraction (gaps at original lines 604-605 and 616-617) — verify upstream. */
float3 Camera::transform_full_raster_to_world(const float raster_x, const float raster_y)
{
  float3 D;
  float3 P;
  if (camera_type == CAMERA_PERSPECTIVE) {
    D = transform_perspective(&full_rastertocamera, make_float3(raster_x, raster_y, 0.0f));
    const float3 Pclip = normalize(D);
    P = zero_float3();
    /* TODO(sergey): Aperture support? */
    /* TODO(sergey): Clipping is conditional in kernel, and hence it could
     * be mistakes in here, currently leading to wrong camera-in-volume
     * detection.
     */
    P += nearclip * D / Pclip.z;
  }
  else if (camera_type == CAMERA_ORTHOGRAPHIC) {
    D = make_float3(0.0f, 0.0f, 1.0f);
    /* TODO(sergey): Aperture support? */
    P = transform_perspective(&full_rastertocamera, make_float3(raster_x, raster_y, 0.0f));
  }
  else {
    assert(!"unsupported camera type");
  }
  return P;
}
624
/* Build a world-space bounding box around the camera's viewplane, padded by
 * aperture size and near clip, for the camera-in-volume test.
 * NOTE(review): the function signature and the `bounds` declaration were lost
 * in extraction — verify against upstream. */
{
  /* TODO(sergey): This is all rather stupid, but is there a way to perform
   * checks we need in a more clear and smart fashion? */

  const float max_aperture_size = aperture_ratio < 1.0f ? aperturesize / aperture_ratio :
                                                          aperturesize;

  if (camera_type == CAMERA_PANORAMA || camera_type == CAMERA_CUSTOM) {
    const float extend = max_aperture_size + nearclip;
    if (use_spherical_stereo == false) {
      bounds.grow(make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w), extend);
    }
    else {
      const float half_eye_distance = interocular_distance * 0.5f;

      bounds.grow(
          make_float3(cameratoworld.x.w + half_eye_distance, cameratoworld.y.w, cameratoworld.z.w),
          extend);

      /* NOTE(review): the x component below reads cameratoworld.z.w while the
       * three sibling grow() calls use cameratoworld.x.w — this looks like a
       * typo; verify against upstream scene/camera.cpp. */
      bounds.grow(
          make_float3(cameratoworld.z.w, cameratoworld.y.w + half_eye_distance, cameratoworld.z.w),
          extend);

      bounds.grow(
          make_float3(cameratoworld.x.w - half_eye_distance, cameratoworld.y.w, cameratoworld.z.w),
          extend);

      bounds.grow(
          make_float3(cameratoworld.x.w, cameratoworld.y.w - half_eye_distance, cameratoworld.z.w),
          extend);
    }
  }
  else {
    /* max_aperture_size = Max horizontal distance a ray travels from aperture edge to focus point.
     * Scale that value based on the ratio between focaldistance and nearclip to figure out the
     * horizontal distance the DOF ray will travel before reaching the nearclip plane, where it
     * will start rendering from.
     * In some cases (focus distance is close to camera, and nearclip plane is far from camera),
     * this scaled value is larger than nearclip, in which case we add it to `extend` to extend the
     * bounding box to account for these rays.
     *
     *                     ----------------- nearclip plane
     *                    / scaled_horz_dof_ray, nearclip
     *                   /
     *                  /
     *                 / max_aperture_size, focaldistance
     *                /|
     *               / |
     *              /  |
     *             /   |
     *      ------ max_aperture_size, 0
     *      0, 0
     */

    const float scaled_horz_dof_ray = (max_aperture_size > 0.0f) ?
                                          max_aperture_size * (nearclip / focaldistance) :
                                          0.0f;
    const float extend = max_aperture_size + max(nearclip, scaled_horz_dof_ray);

    /* Grow by the four viewplane corners projected to world space. */
    bounds.grow(transform_full_raster_to_world(0.0f, 0.0f), extend);
    bounds.grow(transform_full_raster_to_world(0.0f, (float)full_height), extend);
    bounds.grow(transform_full_raster_to_world((float)full_width, (float)full_height), extend);
    bounds.grow(transform_full_raster_to_world((float)full_width, 0.0f), extend);
    if (camera_type == CAMERA_PERSPECTIVE) {
      /* Center point has the most distance in local Z axis,
       * use it to construct bounding box/
       */
      bounds.grow(transform_full_raster_to_world(0.5f * full_width, 0.5f * full_height), extend);
    }
  }
  return bounds;
}
699
/* Estimate how large one raster pixel is at world-space point P, used to
 * drive adaptive subdivision dicing. Grows the result for points outside the
 * view frustum when offscreen_dicing_scale > 1.
 * NOTE(review): the function signature and a number of interior statements
 * were lost in extraction (gaps in the original numbering) — verify against
 * upstream scene/camera.cpp before relying on this listing. */
{
  float res = 1.0f;

  if (camera_type == CAMERA_ORTHOGRAPHIC) {
    /* Orthographic: pixel size is constant across the image. */
    res = min(len(full_dx), len(full_dy));

    if (offscreen_dicing_scale > 1.0f) {
                                          make_float3(full_width, full_height, 0.0f));

      /* Create point clamped to frustum */
      float3 c;
      c.x = max(v2.x, min(v1.x, p.x));
      c.y = max(v2.y, min(v1.y, p.y));
      c.z = max(0.0f, p.z);

      /* Check right side */
      float f_dist = len(p - c) / sqrtf((v1.x * v1.x + v1.y * v1.y) * 0.5f);
      if (f_dist < 0.0f) {
        /* Check left side */
        f_dist = len(p - c) / sqrtf((v2.x * v2.x + v2.y * v2.y) * 0.5f);
      }
      if (f_dist > 0.0f) {
        res += res * f_dist * (offscreen_dicing_scale - 1.0f);
      }
    }
  }
#ifdef FIX_CUSTOM_CAMERA_CRASH
  else if (camera_type == CAMERA_PERSPECTIVE || camera_type == CAMERA_CUSTOM) {
#else
  else if (camera_type == CAMERA_PERSPECTIVE) {
#endif
    /* Calculate as if point is directly ahead of the camera. */
    const float3 raster = make_float3(0.5f * full_width, 0.5f * full_height, 0.0f);
    const float3 Pcamera = transform_perspective(&full_rastertocamera, raster);

    /* dDdx */
    const float3 Ddiff = transform_direction(&cameratoworld, Pcamera);
    const float3 dDdx = normalize(Ddiff + dx) - normalize(Ddiff);

    /* dPdx */
    const float dist = len(transform_point(&worldtocamera, P));
    const float3 D = normalize(Ddiff);
    res = len(dist * dDdx - dot(dist * dDdx, D) * D);

    /* Decent approx distance to frustum
     * (doesn't handle corners correctly, but not that big of a deal) */
    float f_dist = 0.0f;

    if (offscreen_dicing_scale > 1.0f) {

      /* Distance from the four planes */
      const float r = dot(p, frustum_right_normal);
      const float t = dot(p, frustum_top_normal);
      const float l = dot(p, frustum_left_normal);
      const float b = dot(p, frustum_bottom_normal);

      if (r <= 0.0f && l <= 0.0f && t <= 0.0f && b <= 0.0f) {
        /* Point is inside frustum */
        f_dist = 0.0f;
      }
      else if (r > 0.0f && l > 0.0f && t > 0.0f && b > 0.0f) {
        /* Point is behind frustum */
        f_dist = len(p);
      }
      else {
        /* Point may be behind or off to the side, need to check */
        const float3 along_right = make_float3(
        const float3 along_left = make_float3(frustum_left_normal.z, 0.0f, -frustum_left_normal.x);
        const float3 along_top = make_float3(0.0f, -frustum_top_normal.z, frustum_top_normal.y);
        const float3 along_bottom = make_float3(

        float dist[] = {r, l, t, b};
        float3 along[] = {along_right, along_left, along_top, along_bottom};

        bool test_o = false;

        float *d = dist;
        float3 *a = along;
        for (int i = 0; i < 4; i++, d++, a++) {
          /* Test if we should check this side at all */
          if (*d > 0.0f) {
            if (dot(p, *a) >= 0.0f) {
              /* We are in front of the back edge of this side of the frustum */
              f_dist = max(f_dist, *d);
            }
            else {
              /* Possibly far enough behind the frustum to use distance to origin instead of edge
               */
              test_o = true;
            }
          }
        }

        if (test_o) {
          f_dist = (f_dist > 0) ? min(f_dist, len(p)) : len(p);
        }
      }

      if (f_dist > 0.0f) {
        res += len(dDdx - dot(dDdx, D) * D) * f_dist * (offscreen_dicing_scale - 1.0f);
      }
    }
  }
  else if (camera_type == CAMERA_PANORAMA || camera_type == CAMERA_CUSTOM) {
    const float dist = len(D);

    Ray ray = {};

    /* Distortion can become so great that the results become meaningless, there
     * may be a better way to do this, but calculating differentials from the
     * point directly ahead seems to produce good enough results. */
    if (camera_type == CAMERA_CUSTOM) {
      camera_sample_custom(nullptr,
                           0.5f * make_float2(full_width, full_height),
                           zero_float2(),
                           &ray);
    }
    else {
#if 0
      float3 raster = transform_perspective(&full_cameratoraster, make_float3(dir.x, dir.y, 0.0f));

      ray.t = 1.0f;
          &kernel_camera, kernel_camera_motion.data(), raster.x, raster.y, 0.0f, 0.0f, &ray);
      if (ray.t == 0.0f) {
        /* No differentials, just use from directly ahead. */
                               0.5f * make_float2(full_width, full_height),
                               zero_float2(),
                               &ray);
      }
#else
                             0.5f * make_float2(full_width, full_height),
                             zero_float2(),
                             &ray);
#endif
    }

    /* TODO: would it help to use more accurate differentials here? */
    return differential_transfer_compact(ray.dP, ray.D, ray.dD, dist);
  }

  return res;
}
859
/* True when camera motion blur applies: more than one motion transform is
 * set. NOTE(review): the signature line was lost in extraction (presumably
 * `bool Camera::use_motion() const`). */
{
  return motion.size() > 1;
}
864
865bool Camera::set_screen_size(const int width_, int height_)
866{
867 if (width_ != width || height_ != height) {
868 width = width_;
869 height = height_;
870 tag_modified();
871 return true;
872 }
873
874 return false;
875}
876
877float Camera::motion_time(const int step) const
878{
879 return (use_motion()) ? 2.0f * step / (motion.size() - 1) - 1.0f : 0.0f;
880}
881
882int Camera::motion_step(const float time) const
883{
884 if (use_motion()) {
885 for (int step = 0; step < motion.size(); step++) {
886 if (time == motion_time(step)) {
887 return step;
888 }
889 }
890 }
891
892 return -1;
893}
894
                               /* NOTE(review): the function name and first parameter line were lost
                                * in extraction; this is the custom (OSL) camera shader setter. */
                               const std::string &filepath,
                               const std::string &bytecode_hash,
                               const std::string &bytecode)
{
#ifdef WITH_OSL
  /* Ensure shading system exists before we try to load a shader. */
  scene->osl_manager->shading_system_init(scene->shader_manager->get_scene_linear_space());

  /* Load the shader. */
  const char *hash;

  if (!filepath.empty()) {
    hash = scene->osl_manager->shader_load_filepath(filepath);
  }
  else {
    /* Reuse already-loaded bytecode when possible. */
    hash = scene->osl_manager->shader_test_loaded(bytecode_hash);
    if (!hash) {
      hash = scene->osl_manager->shader_load_bytecode(bytecode_hash, bytecode);
    }
  }

  bool changed = false;

  if (!hash) {
    /* Load failed: clear any previously set script. */
    changed = (!script_name.empty() || !script_params.empty());
    script_name = "";
    script_params.clear();
  }
  else {
    changed = (script_name != hash);

    OSLShaderInfo *info = scene->osl_manager->shader_loaded_info(hash);

    /* Fetch parameter values.
     * NOTE(review): the `data` vector declarations for the INT and FLOAT
     * branches below (and the `script_name = hash;` assignment) were lost in
     * extraction — verify against upstream. */
    std::set<ustring> used_params;
    for (int i = 0; i < info->query.nparams(); i++) {
      const OSL::OSLQuery::Parameter *param = info->query.getparam(i);

      /* Skip unsupported types. */
      if (param->varlenarray || param->isstruct || param->type.arraylen > 1 || param->isoutput ||
          param->isclosure)
        continue;

      vector<uint8_t> raw_data;
      int vec_size = (int)param->type.aggregate;
      if (param->type.basetype == TypeDesc::INT) {
        if (!params.get_int(param->name, data) || data.size() != vec_size) {
          continue;
        }
        raw_data.resize(sizeof(int) * vec_size);
        memcpy(raw_data.data(), data.data(), sizeof(int) * vec_size);
      }
      else if (param->type.basetype == TypeDesc::FLOAT) {
        if (!params.get_float(param->name, data) || data.size() != vec_size) {
          continue;
        }
        raw_data.resize(sizeof(float) * vec_size);
        memcpy(raw_data.data(), data.data(), sizeof(float) * vec_size);
      }
      else if (param->type.basetype == TypeDesc::STRING) {
        string data;
        if (!params.get_string(param->name, data)) {
          continue;
        }
        /* Store string including its NUL terminator. */
        raw_data.resize(data.length() + 1);
        memcpy(raw_data.data(), data.c_str(), data.length() + 1);
      }
      else
        continue;

      /* Insert or update the parameter; flag change only when the value
       * actually differs. */
      auto entry = std::make_pair(raw_data, param->type);
      auto it = script_params.find(param->name);
      if (it == script_params.end()) {
        script_params[param->name] = entry;
        changed = true;
      }
      else if (it->second != entry) {
        it->second = entry;
        changed = true;
      }

      used_params.insert(param->name);
    }

    /* Remove unused parameters. */
    for (auto it = script_params.begin(); it != script_params.end();) {
      if (used_params.count(it->first)) {
        it++;
      }
      else {
        it = script_params.erase(it);
        changed = true;
      }
    }
  }

  if (changed) {
    tag_modified();
    scene->osl_manager->tag_update();
  }
#else
  (void)scene;
  (void)params;
  (void)filepath;
  (void)bytecode_hash;
  (void)bytecode;
#endif
}
1008
/* Clear any custom (OSL) camera shader and its parameters; no-op without
 * WITH_OSL or when no script is set.
 * NOTE(review): the signature line was lost in extraction. */
{
#ifdef WITH_OSL
  if (script_name == "") {
    return;
  }

  script_name = "";
  script_params.clear();

  scene->osl_manager->tag_update();
#else
  (void)scene;
#endif
}
1024
/* Report kernel features this camera requires: KERNEL_FEATURE_OSL_CAMERA when
 * a custom camera script is set, nothing otherwise.
 * NOTE(review): the signature line was lost in extraction. */
{
  uint kernel_features = 0;

  if (!script_name.empty()) {
    kernel_features |= KERNEL_FEATURE_OSL_CAMERA;
  }

  return kernel_features;
}
1035
#define D
unsigned int uint
BMesh const char void * data
ATTR_WARN_UNUSED_RESULT const BMVert * v2
ATTR_WARN_UNUSED_RESULT const BMLoop * l
ATTR_WARN_UNUSED_RESULT const BMVert * v
static btDbvtVolume bounds(btDbvtNode **leaves, int count)
Definition btDbvt.cpp:299
device_vector< DecomposedTransform > camera_motion
Definition devicescene.h:48
KernelData data
Definition devicescene.h:94
size_t size() const
T * alloc(const size_t width, const size_t height=0)
size_t size() const
nullptr float
dot(value.rgb, luminance_coefficients)") DEFINE_VALUE("REDUCE(lhs
ccl_device_inline float2 direction_to_panorama(ccl_constant KernelCamera *cam, const float3 dir)
CCL_NAMESPACE_BEGIN ccl_device_inline float frac(const float x, ccl_private int *ix)
ccl_device_inline float3 transform_perspective_direction(const ccl_private ProjectionTransform *t, const float3 a)
CCL_NAMESPACE_END CCL_NAMESPACE_BEGIN ccl_device_inline float3 transform_perspective(const ccl_private ProjectionTransform *t, const float3 a)
ccl_device_inline ProjectionTransform projection_identity()
ccl_device_inline ProjectionTransform projection_inverse(const ProjectionTransform tfm)
ccl_device_inline ProjectionTransform projection_perspective(const float fov, const float n, float f)
ccl_device_inline ProjectionTransform projection_orthographic(const float znear, const float zfar)
#define M_PI_2_F
#define M_PI_4_F
#define SHUTTER_TABLE_SIZE
#define KERNEL_FEATURE_OSL_CAMERA
#define RAMP_TABLE_SIZE
#define CCL_NAMESPACE_END
#define saturatef(x)
ccl_device_forceinline float3 make_float3(const float x, const float y, const float z)
ccl_device_forceinline float differential_transfer_compact(const float ray_dP, const float3, const float ray_dD, const float ray_t)
#define assert(assertion)
VecBase< float, D > normalize(VecOp< float, D >) RET
VecBase< float, D > step(VecOp< float, D >, VecOp< float, D >) RET
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
ccl_device_inline Spectrum camera_sample_custom(KernelGlobals kg, ccl_constant KernelCamera *cam, const ccl_global DecomposedTransform *cam_motion, const float2 raster, const float2 rand_lens, ccl_private Ray *ray)
ccl_device_inline Spectrum camera_sample_panorama(ccl_constant KernelCamera *cam, const ccl_global DecomposedTransform *cam_motion, const float2 raster, const float2 rand_lens, ccl_private Ray *ray)
@ MOTION_POSITION_END
@ MOTION_POSITION_START
@ MOTION_POSITION_CENTER
@ PANORAMA_MIRRORBALL
@ PANORAMA_FISHEYE_EQUISOLID
@ PANORAMA_CENTRAL_CYLINDRICAL
@ PANORAMA_EQUIANGULAR_CUBEMAP_FACE
@ PANORAMA_FISHEYE_EQUIDISTANT
@ PANORAMA_FISHEYE_LENS_POLYNOMIAL
@ PANORAMA_EQUIRECTANGULAR
@ CAMERA_PERSPECTIVE
@ CAMERA_PANORAMA
@ CAMERA_CUSTOM
@ CAMERA_ORTHOGRAPHIC
#define LOG_INFO
Definition log.h:106
void util_cdf_inverted(const int resolution, const float from, const float to, Functor functor, const bool make_symmetric, vector< float > &inv_cdf)
Definition math_cdf.h:45
CCL_NAMESPACE_BEGIN ccl_device_inline float2 zero_float2()
Definition math_float2.h:13
ccl_device_inline float len_squared(const float2 a)
CCL_NAMESPACE_BEGIN ccl_device_inline float3 zero_float3()
Definition math_float3.h:17
#define SOCKET_FLOAT(name, ui_name, default_value,...)
Definition node_type.h:211
#define SOCKET_INT(name, ui_name, default_value,...)
Definition node_type.h:205
#define SOCKET_FLOAT_ARRAY(name, ui_name, default_value,...)
Definition node_type.h:259
#define SOCKET_TRANSFORM(name, ui_name, default_value,...)
Definition node_type.h:225
#define SOCKET_UINT(name, ui_name, default_value,...)
Definition node_type.h:207
#define NODE_DEFINE(structname)
Definition node_type.h:152
#define SOCKET_TRANSFORM_ARRAY(name, ui_name, default_value,...)
Definition node_type.h:280
#define SOCKET_BOOLEAN(name, ui_name, default_value,...)
Definition node_type.h:203
#define SOCKET_ENUM(name, ui_name, values, default_value,...)
Definition node_type.h:227
#define mix
#define sqrtf
#define make_float2
#define make_float4
#define M_PI_F
static CCL_NAMESPACE_BEGIN float shutter_curve_eval(float x, array< float > &shutter_curve)
@ TABLE_OFFSET_INVALID
#define min(a, b)
Definition sort.cc:36
#define FLT_MAX
Definition stdcycles.h:14
bool need_device_update
float world_to_raster_size(const float3 P)
char panorama_type
ProjectionTransform worldtoraster
float3 frustum_right_normal
float motion_time(const int step) const
BoundBox2D viewplane
int previous_need_motion
float3 full_dy
bool need_flags_update
float latitude_max
float3 full_dx
ProjectionTransform rastertoworld
BoundBox viewplane_bounds_get()
float central_cylindrical_range_v_min
Transform worldtocamera
uint get_kernel_features() const
float fisheye_polynomial_k3
ProjectionTransform worldtoscreen
ProjectionTransform worldtondc
float longitude_max
float central_cylindrical_range_u_min
void update(Scene *scene)
ProjectionTransform rastertocamera
float fisheye_polynomial_k1
float central_cylindrical_range_u_max
float latitude_min
ProjectionTransform screentoworld
float fisheye_polynomial_k2
bool use_motion() const
size_t shutter_table_offset
void device_update_volume(Device *device, DeviceScene *dscene, Scene *scene)
void device_free(Device *device, DeviceScene *dscene, Scene *scene)
int motion_step(const float time) const
ProjectionTransform ndctoworld
~Camera() override
float3 dx
std::string script_name
void device_update(Device *device, DeviceScene *dscene, Scene *scene)
bool set_screen_size(const int width_, int height_)
float3 frustum_left_normal
void clear_osl_camera(Scene *scene)
float fisheye_fov
Transform cameratoworld
float fisheye_polynomial_k0
void compute_auto_viewplane()
float3 frustum_top_normal
float fisheye_polynomial_k4
ProjectionTransform full_rastertocamera
float3 frustum_bottom_normal
float fisheye_lens
float3 dy
void set_osl_camera(Scene *scene, OSLCameraParamQuery &params, const std::string &filepath, const std::string &bytecode_hash="", const std::string &bytecode="")
@ STEREO_LEFT
@ STEREO_NONE
@ STEREO_RIGHT
BoundBox2D viewport_camera_border
KernelCamera kernel_camera
float central_cylindrical_range_v_max
map< ustring, pair< vector< uint8_t >, TypeDesc > > script_params
float longitude_min
array< DecomposedTransform > kernel_camera_motion
ProjectionTransform ndctoworld
float pole_merge_angle_to
int have_perspective_motion
ProjectionTransform rastertocamera
float pole_merge_angle_from
float4 central_cylindrical_range
ProjectionTransform worldtoraster
float fisheye_lens_polynomial_bias
Transform motion_pass_post
Transform cameratoworld
float4 equirectangular_range
float interocular_offset
Transform worldtocamera
ProjectionTransform screentoworld
float inv_aperture_ratio
ProjectionTransform perspective_post
float rolling_shutter_duration
ProjectionTransform perspective_pre
ProjectionTransform worldtoscreen
Transform motion_pass_pre
ProjectionTransform worldtondc
float convergence_distance
ProjectionTransform rastertoworld
float4 fisheye_lens_polynomial_coefficients
void insert(const char *x, const int y)
Definition node_enum.h:21
static NodeType * add(const char *name, CreateFunc create, Type type=NONE, const NodeType *base=nullptr)
void clear_modified()
void hash(MD5Hash &md5)
void tag_modified()
bool is_modified() const
Node(const NodeType *type, ustring name=ustring())
float dD
float dP
float3 D
MotionType need_motion() const
Definition scene.cpp:410
unique_ptr< SceneUpdateStats > update_stats
Definition scene.h:175
MotionType
Definition scene.h:185
@ MOTION_PASS
Definition scene.h:185
@ MOTION_BLUR
Definition scene.h:185
unique_ptr< ShaderManager > shader_manager
Definition scene.h:148
unique_ptr< OSLManager > osl_manager
Definition scene.h:147
unique_ptr_vector< Object > objects
Definition scene.h:141
unique_ptr< LookupTables > lookup_tables
Definition scene.h:124
float x
float y
float z
Definition sky_math.h:136
float y
Definition sky_math.h:136
float x
Definition sky_math.h:136
i
Definition text_draw.cc:230
max
Definition text_draw.cc:251
void transform_motion_decompose(DecomposedTransform *decomp, const Transform *motion, const size_t size)
Transform transform_from_viewplane(BoundBox2D &viewplane)
ccl_device_inline Transform transform_identity()
Definition transform.h:322
ccl_device_inline Transform transform_scale(const float3 s)
Definition transform.h:280
ccl_device_inline Transform transform_inverse(const Transform tfm)
Definition transform.h:525
ccl_device_inline float3 transform_direction(const ccl_private Transform *t, const float3 a)
Definition transform.h:127
ccl_device_inline float3 transform_point(const ccl_private Transform *t, const float3 a)
Definition transform.h:56
wmTimer * timer
uint len