Blender V4.5
scene/object.cpp
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
2 *
3 * SPDX-License-Identifier: Apache-2.0 */
4
5#include "scene/object.h"
6
7#include "device/device.h"
8#include "scene/camera.h"
9#include "scene/curves.h"
10#include "scene/hair.h"
11#include "scene/integrator.h"
12#include "scene/light.h"
13#include "scene/mesh.h"
14#include "scene/particles.h"
15#include "scene/pointcloud.h"
16#include "scene/scene.h"
17#include "scene/stats.h"
18#include "scene/volume.h"
19
20#include "util/log.h"
21#include "util/map.h"
22#include "util/murmurhash.h"
23#include "util/progress.h"
24#include "util/set.h"
25#include "util/tbb.h"
26#include "util/vector.h"
27
29
30/* Global state of object transform update. */
31
33 /* Global state used by device_update_object_transform().
34 * Common for both threaded and non-threaded update.
35 */
36
37 /* Type of the motion required by the scene settings. */
39
40 /* Mapping from particle system to a index in packed particle array.
41 * Only used for read.
42 */
43 map<ParticleSystem *, int> particle_offset;
44
45 /* Motion offsets for each object. */
47
48 /* Packed object arrays. Those will be filled in. */
55
56 /* Flags which will be synchronized to Integrator. */
61
62 /* ** Scheduling queue. ** */
64
65 /* First unused object index in the queue. */
67};
68
69/* Object */
70
/* Define the "object" node type: register every socket (parameter) an Object
 * exposes to the scene graph, with its UI name and default value.
 * NOTE(review): the defining signature line (NODE_DEFINE-style) is not
 * visible in this extract. */
{
  NodeType *type = NodeType::add("object", create);

  /* Core object state: geometry link, transform and render visibility. */
  SOCKET_NODE(geometry, "Geometry", Geometry::get_node_base_type());
  SOCKET_TRANSFORM(tfm, "Transform", transform_identity());
  SOCKET_UINT(visibility, "Visibility", ~0);
  SOCKET_COLOR(color, "Color", zero_float3());
  SOCKET_FLOAT(alpha, "Alpha", 0.0f);
  SOCKET_UINT(random_id, "Random ID", 0);
  SOCKET_INT(pass_id, "Pass ID", 0);
  SOCKET_BOOLEAN(use_holdout, "Use Holdout", false);
  SOCKET_BOOLEAN(hide_on_missing_motion, "Hide on Missing Motion", false);
  /* Duplicator-provided texture coordinates. */
  SOCKET_POINT(dupli_generated, "Dupli Generated", zero_float3());
  SOCKET_POINT2(dupli_uv, "Dupli UV", zero_float2());
  /* Motion blur transform steps; empty array means no object motion. */
  SOCKET_TRANSFORM_ARRAY(motion, "Motion", array<Transform>());
  SOCKET_FLOAT(shadow_terminator_shading_offset, "Shadow Terminator Shading Offset", 0.0f);
  SOCKET_FLOAT(shadow_terminator_geometry_offset, "Shadow Terminator Geometry Offset", 0.1f);
  SOCKET_STRING(asset_name, "Asset Name", ustring());

  SOCKET_BOOLEAN(is_shadow_catcher, "Shadow Catcher", false);

  SOCKET_BOOLEAN(is_caustics_caster, "Cast Shadow Caustics", false);
  SOCKET_BOOLEAN(is_caustics_receiver, "Receive Shadow Caustics", false);

  SOCKET_BOOLEAN(is_bake_target, "Bake Target", false);

  /* Particle system association for per-particle attributes. */
  SOCKET_NODE(particle_system, "Particle System", ParticleSystem::get_node_type());
  SOCKET_INT(particle_index, "Particle Index", 0);

  SOCKET_FLOAT(ao_distance, "AO Distance", 0.0f);

  /* Light group and light/shadow linking memberships. */
  SOCKET_STRING(lightgroup, "Light Group", ustring());
  SOCKET_UINT(receiver_light_set, "Light Set Index", 0);
  SOCKET_UINT64(light_set_membership, "Light Set Membership", LIGHT_LINK_MASK_ALL);
  SOCKET_UINT(blocker_shadow_set, "Shadow Set Index", 0);
  SOCKET_UINT64(shadow_set_membership, "Shadow Set Membership", LIGHT_LINK_MASK_ALL);

  return type;
}
111
/* Construct with node-type socket defaults; reset non-socket runtime state
 * that is filled in later during device update. */
Object::Object() : Node(get_node_type())
{
  particle_system = nullptr;
  particle_index = 0;
  attr_map_offset = 0;
  intersects_volume = false;
}
120
/* No custom cleanup needed; members release their own resources. */
Object::~Object() = default;
122
/* Validate the motion transform array before device update: fill missing
 * (empty) steps from the center transform, optionally hide the object when
 * hide_on_missing_motion is set, and clear the array when no step actually
 * differs from the center transform. */
{
  if (!use_motion()) {
    return;
  }

  bool have_motion = false;

  for (size_t i = 0; i < motion.size(); i++) {
    if (motion[i] == transform_empty()) {
      if (hide_on_missing_motion) {
        /* Hide objects that have no valid previous or next
         * transform, for example particles that stop existing. It
         * would be better to handle this in the kernel and make
         * objects invisible outside certain motion steps. */
        tfm = transform_empty();
        motion.clear();
        return;
      }
      /* Otherwise just copy center motion. */
      motion[i] = tfm;
    }

    /* Test if any of the transforms are actually different. */
    have_motion = have_motion || motion[i] != tfm;
  }

  /* Clear motion array if there is no actual motion. */
  if (!have_motion) {
    motion.clear();
  }
}
155
/* Compute this object's bounds from its geometry bounds, sampling the motion
 * transforms across the shutter interval when motion blur applies. */
void Object::compute_bounds(bool motion_blur)
{
  const BoundBox mbounds = geometry->bounds;

  if (motion_blur && use_motion()) {
    /* Decompose the motion transforms so they can be interpolated in time. */
    array<DecomposedTransform> decomp(motion.size());
    transform_motion_decompose(decomp.data(), motion.data(), motion.size());

    /* TODO: this is really terrible. according to PBRT there is a better
     * way to find this iteratively, but did not find implementation yet
     * or try to implement myself */
    for (float t = 0.0f; t < 1.0f; t += (1.0f / 128.0f)) {
      Transform ttfm;

      /* Grow bounds by the transformed geometry bounds at 128 time samples. */
      transform_motion_array_interpolate(&ttfm, decomp.data(), motion.size(), t);
      bounds.grow(mbounds.transformed(&ttfm));
    }
  }
  else {
    /* No motion blur case. */
    if (geometry->transform_applied) {
      /* Transform already baked into the geometry; use its bounds directly. */
      bounds = mbounds;
    }
    else {
      bounds = mbounds.transformed(&tfm);
    }
  }
}
186
187void Object::apply_transform(bool apply_to_motion)
188{
189 if (!geometry || tfm == transform_identity()) {
190 return;
191 }
192
193 geometry->apply_transform(tfm, apply_to_motion);
194
195 /* we keep normals pointing in same direction on negative scale, notify
196 * geometry about this in it (re)calculates normals */
197 if (transform_negative_scale(tfm)) {
198 geometry->transform_negative_scaled = true;
199 }
200
201 if (bounds.valid()) {
202 geometry->compute_bounds();
203 compute_bounds(false);
204 }
205
206 /* tfm is not reset to identity, all code that uses it needs to check the
207 * transform_applied boolean */
208}
209
/* Tag this object as modified: translate which sockets changed into update
 * flags for the object, geometry, light and camera managers.
 * NOTE(review): several branches below are visibly empty in this extract —
 * they appear intended to accumulate bits into `flag`; confirm against the
 * upstream source. */
{
  if (is_modified()) {

    if (use_holdout_is_modified()) {
    }

    if (is_shadow_catcher_is_modified()) {
    }
  }

  if (geometry) {
    if (tfm_is_modified() || motion_is_modified()) {
    }

    if (visibility_is_modified()) {
    }

    for (Node *node : geometry->get_used_shaders()) {
      Shader *shader = static_cast<Shader *>(node);
      /* NOTE(review): presumably guarded by an emission check on `shader` —
       * confirm; as written it tags for every used shader. */
      scene->light_manager->tag_update(scene, LightManager::EMISSIVE_MESH_MODIFIED);
    }
  }

  /* Camera flags depend on object state (e.g. for visibility). */
  scene->camera->need_flags_update = true;
  scene->object_manager->tag_update(scene, flag);
}
247
/* Motion blur needs at least two transform steps; a single entry carries
 * no motion. */
{
  return (motion.size() > 1);
}
252
253float Object::motion_time(const int step) const
254{
255 return (use_motion()) ? 2.0f * step / (motion.size() - 1) - 1.0f : 0.0f;
256}
257
258int Object::motion_step(const float time) const
259{
260 if (use_motion()) {
261 for (size_t step = 0; step < motion.size(); step++) {
262 if (time == motion_time(step)) {
263 return step;
264 }
265 }
266 }
267
268 return -1;
269}
270
/* Whether rays can intersect this object at all. */
{
  /* Not supported for lights yet. */
  if (geometry->is_light()) {
    return false;
  }
  /* Mesh itself can be empty; we can skip all such objects. */
  if (!bounds.valid() || bounds.size() == zero_float3()) {
    return false;
  }
  /* TODO(sergey): Check for mesh vertices/curves. visibility flags. */
  return true;
}
284
/* Ray visibility mask used for tracing, with shadow-catcher adjustment
 * applied by the dedicated macro. */
{
  return SHADOW_CATCHER_OBJECT_VISIBILITY(is_shadow_catcher, visibility & PATH_RAY_ALL_VISIBILITY);
}
289
/* Compute the world-space volume ray-marching step size for this object by
 * combining the shader step rate with voxel grid resolution; falls back to
 * a fraction of the bounds for procedural volumes. Returns FLT_MAX when the
 * object carries no volume that needs stepping.
 * NOTE(review): a few condition lines are missing from this extract (the
 * shader step-rate guard and the NanoVDB type checks); confirm upstream. */
{
  /* Only mesh and volume geometry can carry volume shaders. */
  if (geometry->geometry_type != Geometry::MESH && geometry->geometry_type != Geometry::VOLUME) {
    return FLT_MAX;
  }

  Mesh *mesh = static_cast<Mesh *>(geometry);

  if (!mesh->has_volume) {
    return FLT_MAX;
  }

  /* Compute step rate from shaders. */
  float step_rate = FLT_MAX;

  for (Node *node : mesh->get_used_shaders()) {
    Shader *shader = static_cast<Shader *>(node);
    if (shader->has_volume) {
      if ((shader->get_heterogeneous_volume() && shader->has_volume_spatial_varying) ||
      {
        /* Keep the smallest step rate among all heterogeneous volume shaders. */
        step_rate = fminf(shader->get_volume_step_rate(), step_rate);
      }
    }
  }

  if (step_rate == FLT_MAX) {
    return FLT_MAX;
  }

  /* Compute step size from voxel grids. */
  float step_size = FLT_MAX;

  if (geometry->is_volume()) {
    Volume *volume = static_cast<Volume *>(geometry);

    for (Attribute &attr : volume->attributes.attributes) {
      if (attr.element == ATTR_ELEMENT_VOXEL) {
        ImageHandle &handle = attr.data_voxel();
        const ImageMetaData &metadata = handle.metadata();
        /* Skip grids with no voxel data loaded. */
        if (metadata.width == 0 || metadata.height == 0 || metadata.depth == 0) {
          continue;
        }

        /* User specified step size. */
        float voxel_step_size = volume->get_step_size();

        if (voxel_step_size == 0.0f) {
          /* Auto detect step size. */
#ifdef WITH_NANOVDB
          /* Dimensions were not applied to image transform with NanoVDB (see image_vdb.cpp) */
          if (metadata.type != IMAGE_DATA_TYPE_NANOVDB_FLOAT &&
              metadata.type != IMAGE_DATA_TYPE_NANOVDB_FPN &&
#endif
          {
            size /= make_float3(metadata.width, metadata.height, metadata.depth);
          }

          /* Step size is transformed from voxel to world space. */
          Transform voxel_tfm = tfm;
          if (metadata.use_transform_3d) {
            voxel_tfm = tfm * transform_inverse(metadata.transform_3d);
          }
          voxel_step_size = reduce_min(fabs(transform_direction(&voxel_tfm, size)));
        }
        else if (volume->get_object_space()) {
          /* User specified step size in object space. */
          const float3 size = make_float3(voxel_step_size, voxel_step_size, voxel_step_size);
          voxel_step_size = reduce_min(fabs(transform_direction(&tfm, size)));
        }

        /* Keep the smallest positive step size among all voxel grids. */
        if (voxel_step_size > 0.0f) {
          step_size = fminf(voxel_step_size, step_size);
        }
      }
    }
  }

  if (step_size == FLT_MAX) {
    /* Fall back to 1/10th of bounds for procedural volumes. */
    assert(bounds.valid());
    step_size = 0.1f * average(bounds.size());
  }

  step_size *= step_rate;

  return step_size;
}
381
/* Index of this object in the device-side packed object arrays. */
{
  return index;
}
386
/* Whether this object can contribute emission: mesh/volume geometry that is
 * traceable and (per the visible guard) visible to the relevant ray types,
 * with at least one qualifying shader.
 * NOTE(review): the visibility mask operand and the per-shader emission
 * guard are missing from this extract; confirm upstream. */
{
  Geometry *geom = get_geometry();
  if (!geom->is_mesh() && !geom->is_volume()) {
    return false;
  }
  /* Skip non-traceable objects. */
  if (!is_traceable()) {
    return false;
  }
  /* Skip if we are not visible for BSDFs. */
  if (!(get_visibility() &
  {
    return false;
  }
  /* Skip if we have no emission shaders. */
  /* TODO(sergey): Ideally we want to avoid such duplicated loop, since it'll
   * iterate all geometry shaders twice (when counting and when calculating
   * triangle area).
   */
  for (Node *node : geom->get_used_shaders()) {
    Shader *shader = static_cast<Shader *>(node);
    return true;
  }
  }
  return false;
}
416
/* Whether any light linking feature is active on this object: a non-default
 * receiver light set, or an emitter membership mask that excludes some sets. */
{
  if (get_receiver_light_set()) {
    return true;
  }

  if (get_light_set_membership() != LIGHT_LINK_MASK_ALL) {
    return true;
  }

  return false;
}
429
/* Whether any shadow linking feature is active on this object: a non-default
 * blocker shadow set, or a shadow membership mask that excludes some sets. */
{
  if (get_blocker_shadow_set()) {
    return true;
  }

  if (get_shadow_set_membership() != LIGHT_LINK_MASK_ALL) {
    return true;
  }

  return false;
}
442
443/* Object Manager */
444
/* Start with everything tagged for update so the first device sync uploads
 * all object data. */
{
  update_flags = UPDATE_ALL;
  need_flags_update = true;
}
450
452
453static float object_volume_density(const Transform &tfm, Geometry *geom)
454{
455 if (geom->is_volume()) {
456 /* Volume density automatically adjust to object scale. */
457 if (static_cast<Volume *>(geom)->get_object_space()) {
458 const float3 unit = normalize(one_float3());
459 return 1.0f / len(transform_direction(&tfm, unit));
460 }
461 }
462
463 return 1.0f;
464}
465
/* Number of motion "vertices" for the geometry type: mesh/volume vertices,
 * hair curve keys or point cloud points; 0 for anything else. */
{
  return (geom->is_mesh() || geom->is_volume()) ? static_cast<Mesh *>(geom)->get_verts().size() :
         geom->is_hair()                        ? static_cast<Hair *>(geom)->get_curve_keys().size() :
         geom->is_pointcloud()                  ? static_cast<PointCloud *>(geom)->num_points() :
                                                  0;
}
473
/* Fill the device-side KernelObject entry for one object: transforms,
 * shading parameters, motion data, light linking, cryptomatte hashes and
 * per-object flags. Called in parallel from device_update_transforms().
 * NOTE(review): the leading signature line and several `flag |= ...`
 * statements inside the empty branches below are missing from this
 * extract; the guards are preserved as visible — confirm upstream. */
    Object *ob,
    bool update_all,
    const Scene *scene)
{
  KernelObject &kobject = state->objects[ob->index];
  Transform *object_motion_pass = state->object_motion_pass;

  Geometry *geom = ob->geometry;
  uint flag = 0;

  /* Compute transformations. */
  const Transform tfm = ob->tfm;
  const Transform itfm = transform_inverse(tfm);

  const float3 color = ob->color;
  const float pass_id = ob->pass_id;
  /* Map the 32-bit random id to a float in [0, 1]. */
  const float random_number = (float)ob->random_id * (1.0f / (float)0xFFFFFFFF);
  /* Offset into the packed global particle array; 0 is the dummy particle. */
  const int particle_index = (ob->particle_system) ?
                                 ob->particle_index + state->particle_offset[ob->particle_system] :
                                 0;

  kobject.tfm = tfm;
  kobject.itfm = itfm;
  kobject.volume_density = object_volume_density(tfm, geom);
  kobject.color[0] = color.x;
  kobject.color[1] = color.y;
  kobject.color[2] = color.z;
  kobject.alpha = ob->alpha;
  kobject.pass_id = pass_id;
  kobject.random_number = random_number;
  kobject.motion_offset = 0;
  kobject.ao_distance = ob->ao_distance;
  /* Out-of-range light/shadow set indices fall back to the default set 0. */
  kobject.receiver_light_set = ob->receiver_light_set >= LIGHT_LINK_SET_MAX ?
                                   0 :
                                   ob->receiver_light_set;
  kobject.light_set_membership = ob->light_set_membership;
  kobject.blocker_shadow_set = ob->blocker_shadow_set >= LIGHT_LINK_SET_MAX ?
                                   0 :
                                   ob->blocker_shadow_set;
  kobject.shadow_set_membership = ob->shadow_set_membership;

  if (geom->get_use_motion_blur()) {
    state->have_motion = true;
  }

  if (transform_negative_scale(tfm)) {
  }

  /* TODO: why not check hair? */
  if (geom->is_pointcloud()) {
  }
  }
  else if (geom->is_mesh()) {
    Mesh *mesh = static_cast<Mesh *>(geom);
    (mesh->get_subdivision_type() != Mesh::SUBDIVISION_NONE &&
    {
    }
  }
  else if (geom->is_volume()) {
    Volume *volume = static_cast<Volume *>(geom);
    /* Velocity-based volume motion blur. */
    if (volume->attributes.find(ATTR_STD_VOLUME_VELOCITY) && volume->get_velocity_scale() != 0.0f)
    {
      kobject.velocity_scale = volume->get_velocity_scale();
    }
  }

  if (state->need_motion == Scene::MOTION_PASS) {
    /* Clear motion array if there is no actual motion. */
    ob->update_motion();

    /* Compute motion transforms. */
    Transform tfm_pre;
    Transform tfm_post;
    if (ob->use_motion()) {
      tfm_pre = ob->motion[0];
      tfm_post = ob->motion[ob->motion.size() - 1];
    }
    else {
      tfm_pre = tfm;
      tfm_post = tfm;
    }

    /* Motion transformations, is world/object space depending if mesh
     * comes with deformed position in object space, or if we transform
     * the shading point in world space. */
    tfm_pre = tfm_pre * itfm;
    tfm_post = tfm_post * itfm;
    }

    const int motion_pass_offset = ob->index * OBJECT_MOTION_PASS_SIZE;
    object_motion_pass[motion_pass_offset + 0] = tfm_pre;
    object_motion_pass[motion_pass_offset + 1] = tfm_post;
  }
  else if (state->need_motion == Scene::MOTION_BLUR) {
    if (ob->use_motion()) {
      kobject.motion_offset = state->motion_offset[ob->index];

      /* Decompose transforms for interpolation. */
      if (ob->tfm_is_modified() || ob->motion_is_modified() || update_all) {
        DecomposedTransform *decomp = state->object_motion + kobject.motion_offset;
        transform_motion_decompose(decomp, ob->motion.data(), ob->motion.size());
      }

      state->have_motion = true;
    }
  }

  /* Dupli object coords and motion info. */
  kobject.dupli_generated[0] = ob->dupli_generated[0];
  kobject.dupli_generated[1] = ob->dupli_generated[1];
  kobject.dupli_generated[2] = ob->dupli_generated[2];
  kobject.dupli_uv[0] = ob->dupli_uv[0];
  kobject.dupli_uv[1] = ob->dupli_uv[1];
  kobject.num_geom_steps = (geom->get_motion_steps() - 1) / 2;
  kobject.num_tfm_steps = ob->motion.size();
  kobject.numverts = object_num_motion_verts(geom);
  kobject.attribute_map_offset = 0;

  /* Cryptomatte hashes, recomputed only when the names changed. */
  if (ob->asset_name_is_modified() || update_all) {
    const uint32_t hash_name = util_murmur_hash3(ob->name.c_str(), ob->name.length(), 0);
    const uint32_t hash_asset = util_murmur_hash3(
        ob->asset_name.c_str(), ob->asset_name.length(), 0);
    kobject.cryptomatte_object = util_hash_to_float(hash_name);
    kobject.cryptomatte_asset = util_hash_to_float(hash_asset);
  }

  kobject.shadow_terminator_shading_offset = 1.0f /
                                             (1.0f - 0.5f * ob->shadow_terminator_shading_offset);

  kobject.visibility = ob->visibility_for_tracing();
  kobject.primitive_type = geom->primitive_type();

  /* Object shadow caustics flag */
  if (ob->is_caustics_caster) {
  }
  if (ob->is_caustics_receiver) {
  }

  /* Object flag. */
  if (ob->use_holdout) {
  }
  state->object_flag[ob->index] = flag;
  state->object_volume_step[ob->index] = FLT_MAX;

  /* Have curves. */
  if (geom->is_hair()) {
    state->have_curves = true;
  }
  if (geom->is_pointcloud()) {
    state->have_points = true;
  }
  if (geom->is_volume()) {
    state->have_volumes = true;
  }

  /* Light group. */
  auto it = scene->lightgroups.find(ob->lightgroup);
  if (it != scene->lightgroups.end()) {
    kobject.lightgroup = it->second;
  }
  else {
    kobject.lightgroup = LIGHTGROUP_NONE;
  }
}
653
/* Build the per-object primitive offset array. Needed when the light tree is
 * used, or on BVH layouts (Metal, HIP-RT) that cannot bake primitive offsets
 * at build time. NOTE(review): the final upload of the array is not visible
 * in this extract. */
{
  if (!scene->integrator->get_use_light_tree()) {
    const BVHLayoutMask layout_mask = device->get_bvh_layout_mask(dscene->data.kernel_features);
    /* Skip for layouts that already bake the offsets into the BVH. */
    if (layout_mask != BVH_LAYOUT_METAL && layout_mask != BVH_LAYOUT_MULTI_METAL &&
        layout_mask != BVH_LAYOUT_MULTI_METAL_EMBREE && layout_mask != BVH_LAYOUT_HIPRT &&
        layout_mask != BVH_LAYOUT_MULTI_HIPRT && layout_mask != BVH_LAYOUT_MULTI_HIPRT_EMBREE)
    {
      return;
    }
  }

  /* On MetalRT, primitive / curve segment offsets can't be baked at BVH build time. Intersection
   * handlers need to apply the offset manually. */
  uint *object_prim_offset = dscene->object_prim_offset.alloc(scene->objects.size());
  for (Object *ob : scene->objects) {
    uint32_t prim_offset = 0;
    if (Geometry *const geom = ob->geometry) {
      /* Hair indexes by curve segment, all other geometry by primitive. */
      if (geom->is_hair()) {
        prim_offset = ((Hair *const)geom)->curve_segment_offset;
      }
      else {
        prim_offset = geom->prim_offset;
      }
    }
    const uint obj_index = ob->get_device_index();
    object_prim_offset[obj_index] = prim_offset;
  }

}
686
/* Allocate and fill all per-object device arrays (objects, flags, volume
 * steps, motion data), updating objects in parallel, then upload them.
 * NOTE(review): the declaration of `state` (UpdateObjectTransformState) is
 * not visible in this extract. */
{
  state.need_motion = scene->need_motion();
  state.have_motion = false;
  state.have_curves = false;
  state.have_points = false;
  state.have_volumes = false;
  state.scene = scene;
  state.queue_start_object = 0;

  state.objects = dscene->objects.alloc(scene->objects.size());
  state.object_flag = dscene->object_flag.alloc(scene->objects.size());
  state.object_volume_step = dscene->object_volume_step.alloc(scene->objects.size());
  state.object_motion = nullptr;
  state.object_motion_pass = nullptr;

  if (state.need_motion == Scene::MOTION_PASS) {
    /* Two transforms (pre/post) per object for the motion pass. */
    state.object_motion_pass = dscene->object_motion_pass.alloc(OBJECT_MOTION_PASS_SIZE *
                                                                scene->objects.size());
  }
  else if (state.need_motion == Scene::MOTION_BLUR) {
    /* Set object offsets into global object motion array. */
    uint *motion_offsets = state.motion_offset.resize(scene->objects.size());
    uint motion_offset = 0;

    for (Object *ob : scene->objects) {
      *motion_offsets = motion_offset;
      motion_offsets++;

      /* Clear motion array if there is no actual motion. */
      ob->update_motion();
      motion_offset += ob->motion.size();
    }

    state.object_motion = dscene->object_motion.alloc(motion_offset);
  }

  /* Particle system device offsets
   * 0 is dummy particle, index starts at 1.
   */
  int numparticles = 1;
  for (ParticleSystem *psys : scene->particle_systems) {
    state.particle_offset[psys] = numparticles;
    numparticles += psys->particles.size();
  }

  /* as all the arrays are the same size, checking only dscene.objects is sufficient */
  const bool update_all = dscene->objects.need_realloc();

  /* Parallel object update, with grain size to avoid too much threading overhead
   * for individual objects. */
  static const int OBJECTS_PER_TASK = 32;
  parallel_for(blocked_range<size_t>(0, scene->objects.size(), OBJECTS_PER_TASK),
               [&](const blocked_range<size_t> &r) {
                 for (size_t i = r.begin(); i != r.end(); i++) {
                   Object *ob = state.scene->objects[i];
                   device_update_object_transform(&state, ob, update_all, scene);
                 }
               });

  if (progress.get_cancel()) {
    return;
  }

  /* Upload whichever arrays were (re)built. */
  dscene->objects.copy_to_device_if_modified();
  if (state.need_motion == Scene::MOTION_PASS) {
    dscene->object_motion_pass.copy_to_device();
  }
  else if (state.need_motion == Scene::MOTION_BLUR) {
    dscene->object_motion.copy_to_device();
  }

  /* Propagate feature usage to the kernel BVH data. */
  dscene->data.bvh.have_motion = state.have_motion;
  dscene->data.bvh.have_curves = state.have_curves;
  dscene->data.bvh.have_points = state.have_points;
  dscene->data.bvh.have_volumes = state.have_volumes;

  dscene->objects.clear_modified();
  dscene->object_motion_pass.clear_modified();
  dscene->object_motion.clear_modified();
}
769
/* Main sync entry point: when tagged, (re)assign object indices, rebuild the
 * per-object device arrays and clear modification state.
 * NOTE(review): the leading signature line and the Progress parameter line
 * are not visible in this extract. */
    DeviceScene *dscene,
    Scene *scene,
{
  if (!need_update()) {
    return;
  }

  /* Added/removed objects force a reallocation of the device arrays. */
  if (update_flags & (OBJECT_ADDED | OBJECT_REMOVED)) {
    dscene->objects.tag_realloc();
    dscene->object_motion.tag_realloc();
    dscene->object_flag.tag_realloc();
  }

  if (update_flags & HOLDOUT_MODIFIED) {
    dscene->object_flag.tag_modified();
  }

  if (update_flags & PARTICLE_MODIFIED) {
    dscene->objects.tag_modified();
  }

  VLOG_INFO << "Total " << scene->objects.size() << " objects.";

  /* Free buffers tagged for reallocation before re-filling them. */
  device_free(device, dscene, false);

  if (scene->objects.empty()) {
    return;
  }

  {
    /* Assign object IDs. */
    const scoped_callback_timer timer([scene](double time) {
      if (scene->update_stats) {
        scene->update_stats->object.times.add_entry({"device_update (assign index)", time});
      }
    });

    int index = 0;
    for (Object *object : scene->objects) {
      object->index = index++;

      /* this is a bit too broad, however a bigger refactor might be needed to properly separate
       * update each type of data (transform, flags, etc.) */
      if (object->is_modified()) {
        dscene->objects.tag_modified();
        dscene->object_motion.tag_modified();
        dscene->object_flag.tag_modified();
      }
    }
  }

  {
    /* set object transform matrices, before applying static transforms */
    const scoped_callback_timer timer([scene](double time) {
      if (scene->update_stats) {
        scene->update_stats->object.times.add_entry(
            {"device_update (copy objects to device)", time});
      }
    });

    progress.set_status("Updating Objects", "Copying Transformations to device");
    device_update_transforms(dscene, scene, progress);
  }

  /* All pending per-object changes have been synced. */
  for (Object *object : scene->objects) {
    object->clear_modified();
  }
}
844
/* Update per-object device flags (volume, shadow catcher, volume
 * intersection) and volume step sizes. When bounds are not yet valid,
 * volume-dependent values are deferred and the update flags are kept so a
 * later pass recomputes them. NOTE(review): the leading signature line is
 * not visible in this extract. */
    DeviceScene *dscene,
    Scene *scene,
    Progress & /*progress*/,
    bool bounds_valid)
{
  if (!need_update() && !need_flags_update) {
    return;
  }

  const scoped_callback_timer timer([scene](double time) {
    if (scene->update_stats) {
      scene->update_stats->object.times.add_entry({"device_update_flags", time});
    }
  });

  if (bounds_valid) {
    /* Object flags and calculations related to volume depend on proper bounds calculated, which
     * might not be available yet when object flags are updated for displacement or hair
     * transparency calculation. In this case do not clear the need_flags_update, so that these
     * values which depend on bounds are re-calculated when the device_update process comes back
     * here from the "Updating Objects Flags" stage. */
    update_flags = UPDATE_NONE;
    need_flags_update = false;
  }

  if (scene->objects.empty()) {
    return;
  }

  /* Object info flag. */
  uint *object_flag = dscene->object_flag.data();
  float *object_volume_step = dscene->object_volume_step.data();

  /* Object volume intersection. */
  vector<Object *> volume_objects;
  bool has_volume_objects = false;
  for (Object *object : scene->objects) {
    if (object->geometry->has_volume) {
      /* If the bounds are not valid it is not always possible to calculate the volume step, and
       * the step size is not needed for the displacement. So, delay calculation of the volume
       * step size until the final bounds are known. */
      if (bounds_valid) {
        volume_objects.push_back(object);
        object_volume_step[object->index] = object->compute_volume_step_size();
      }
      else {
        object_volume_step[object->index] = FLT_MAX;
      }
      has_volume_objects = true;
    }
    else {
      object_volume_step[object->index] = FLT_MAX;
    }
  }

  for (Object *object : scene->objects) {
    if (object->geometry->has_volume) {
      object_flag[object->index] |= SD_OBJECT_HAS_VOLUME;
      object_flag[object->index] &= ~SD_OBJECT_HAS_VOLUME_ATTRIBUTES;

      /* Voxel attributes require extra kernel handling. */
      for (const Attribute &attr : object->geometry->attributes.attributes) {
        if (attr.element == ATTR_ELEMENT_VOXEL) {
          object_flag[object->index] |= SD_OBJECT_HAS_VOLUME_ATTRIBUTES;
        }
      }
    }
    else {
      object_flag[object->index] &= ~(SD_OBJECT_HAS_VOLUME | SD_OBJECT_HAS_VOLUME_ATTRIBUTES);
    }

    if (object->is_shadow_catcher) {
      object_flag[object->index] |= SD_OBJECT_SHADOW_CATCHER;
    }
    else {
      object_flag[object->index] &= ~SD_OBJECT_SHADOW_CATCHER;
    }

    if (bounds_valid) {
      /* O(n * num_volumes) overlap test against all volume objects. */
      object->intersects_volume = false;
      for (Object *volume_object : volume_objects) {
        if (object == volume_object) {
          continue;
        }
        if (object->bounds.intersects(volume_object->bounds)) {
          object_flag[object->index] |= SD_OBJECT_INTERSECTS_VOLUME;
          object->intersects_volume = true;
          break;
        }
      }
    }
    else if (has_volume_objects) {
      /* Not really valid, but can't make more reliable in the case
       * of bounds not being up to date.
       */
      object_flag[object->index] |= SD_OBJECT_INTERSECTS_VOLUME;
    }
  }

  /* Copy object flag. */
  dscene->object_flag.copy_to_device();

  dscene->object_flag.clear_modified();
}
951
/* Refresh per-object attribute map offsets and motion vertex counts after
 * geometry updates, re-uploading the object array only if something changed.
 * NOTE(review): the leading signature line is not visible in this extract. */
    DeviceScene *dscene,
    Scene *scene)
{
  if (dscene->objects.size() == 0) {
    return;
  }

  KernelObject *kobjects = dscene->objects.data();

  bool update = false;

  for (Object *object : scene->objects) {
    Geometry *geom = object->geometry;

    size_t attr_map_offset = object->attr_map_offset;

    /* An object attribute map cannot have a zero offset because mesh maps come first. */
    if (attr_map_offset == 0) {
      attr_map_offset = geom->attr_map_offset;
    }

    KernelObject &kobject = kobjects[object->index];

    if (kobject.attribute_map_offset != attr_map_offset) {
      kobject.attribute_map_offset = attr_map_offset;
      update = true;
    }

    const int numverts = object_num_motion_verts(geom);
    if (kobject.numverts != numverts) {
      kobject.numverts = numverts;
      update = true;
    }
  }

  if (update) {
    dscene->objects.copy_to_device();
  }
}
992
993void ObjectManager::device_free(Device * /*unused*/, DeviceScene *dscene, bool force_free)
994{
995 dscene->objects.free_if_need_realloc(force_free);
996 dscene->object_motion_pass.free_if_need_realloc(force_free);
997 dscene->object_motion.free_if_need_realloc(force_free);
998 dscene->object_flag.free_if_need_realloc(force_free);
999 dscene->object_volume_step.free_if_need_realloc(force_free);
1000 dscene->object_prim_offset.free_if_need_realloc(force_free);
1001}
1002
/* Bake object transforms into geometry for geometry with a single user (not
 * instanced), so the kernel can treat it as being in world space and skip
 * the object transform. */
{
  /* todo: normals and displacement should be done before applying transform! */
  /* todo: create objects/geometry in right order! */

  /* counter geometry users */
  map<Geometry *, int> geometry_users;
  const Scene::MotionType need_motion = scene->need_motion();
  const bool motion_blur = need_motion == Scene::MOTION_BLUR;
  const bool apply_to_motion = need_motion != Scene::MOTION_PASS;
  int i = 0;

  for (Object *object : scene->objects) {
    const map<Geometry *, int>::iterator it = geometry_users.find(object->geometry);

    if (it == geometry_users.end()) {
      geometry_users[object->geometry] = 1;
    }
    else {
      it->second++;
    }
  }

  if (progress.get_cancel()) {
    return;
  }

  uint *object_flag = dscene->object_flag.data();

  /* apply transforms for objects with single user geometry */
  for (Object *object : scene->objects) {
    /* Annoying feedback loop here: we can't use is_instanced() because
     * it'll use uninitialized transform_applied flag.
     *
     * Could be solved by moving reference counter to Geometry.
     */
    Geometry *geom = object->geometry;
    /* BSSRDF and true displacement need the original object space. */
    bool apply = (geometry_users[geom] == 1) && !geom->has_surface_bssrdf &&
                 !geom->has_true_displacement();

    if (geom->is_mesh()) {
      /* Subdivision meshes are evaluated later; keep their transform. */
      Mesh *mesh = static_cast<Mesh *>(geom);
      apply = apply && mesh->get_subdivision_type() == Mesh::SUBDIVISION_NONE;
    }
    else if (geom->is_hair() || geom->is_pointcloud()) {
      /* Can't apply non-uniform scale to curves and points, this can't be
       * represented by control points and radius alone. */
      float scale;
      apply = apply && transform_uniform_scale(object->tfm, scale);
    }

    if (apply) {
      /* Motion-blurred objects keep their transform for interpolation. */
      if (!(motion_blur && object->use_motion())) {
        if (!geom->transform_applied) {
          object->apply_transform(apply_to_motion);
          geom->transform_applied = true;

          if (progress.get_cancel()) {
            return;
          }
        }

        object_flag[i] |= SD_OBJECT_TRANSFORM_APPLIED;
      }
    }

    i++;
  }
}
1072
/* Accumulate update flags and propagate matching tags to the geometry and
 * light managers. NOTE(review): the body of the OBJECT_ADDED/REMOVED branch
 * and the trailing integrator-related branch are missing from this extract —
 * confirm upstream; an orphan closing brace is preserved as found. */
void ObjectManager::tag_update(Scene *scene, const uint32_t flag)
{
  update_flags |= flag;

  /* avoid infinite loops if the geometry manager tagged us for an update */
  if ((flag & GEOMETRY_MANAGER) == 0) {
    uint32_t geometry_flag = GeometryManager::OBJECT_MANAGER;

    /* Also notify in case added or removed objects were instances, as no Geometry might have been
     * added or removed, but the BVH still needs to be updated. */
    if ((flag & (OBJECT_ADDED | OBJECT_REMOVED)) != 0) {
    }

    if ((flag & TRANSFORM_MODIFIED) != 0) {
      geometry_flag |= GeometryManager::TRANSFORM_MODIFIED;
    }

    if ((flag & VISIBILITY_MODIFIED) != 0) {
      geometry_flag |= GeometryManager::VISIBILITY_MODIFIED;
    }

    scene->geometry_manager->tag_update(scene, geometry_flag);
  }

  scene->light_manager->tag_update(scene, LightManager::OBJECT_MANAGER);

  /* Integrator's shadow catcher settings depends on object visibility settings. */
  }
}
1105
/* True while any update flag is pending. */
{
  return update_flags != UPDATE_NONE;
}
1110
/* Build the cryptomatte manifest JSON mapping each unique object name to its
 * murmur3 hash, e.g. {"name":"0a1b2c3d",...}. */
{
  string manifest = "{";

  unordered_set<ustring> objects;
  for (Object *object : scene->objects) {
    /* Each name appears only once in the manifest. */
    if (objects.count(object->name)) {
      continue;
    }
    objects.insert(object->name);
    const uint32_t hash_name = util_murmur_hash3(object->name.c_str(), object->name.length(), 0);
    manifest += string_printf("\"%s\":\"%08x\",", object->name.c_str(), hash_name);
  }
  /* Replace the trailing comma with the closing brace.
   * NOTE(review): with zero objects this overwrites the opening brace,
   * yielding "}" — confirm callers never hit the empty case. */
  manifest[manifest.size() - 1] = '}';
  return manifest;
}
1127
/* Build the cryptomatte manifest JSON mapping each unique asset name to its
 * murmur3 hash, mirroring the per-object manifest format. */
{
  string manifest = "{";
  unordered_set<ustring> assets;
  for (Object *ob : scene->objects) {
    /* Each asset name appears only once in the manifest. */
    if (assets.count(ob->asset_name)) {
      continue;
    }
    assets.insert(ob->asset_name);
    const uint32_t hash_asset = util_murmur_hash3(
        ob->asset_name.c_str(), ob->asset_name.length(), 0);
    manifest += string_printf("\"%s\":\"%08x\",", ob->asset_name.c_str(), hash_asset);
  }
  /* Replace the trailing comma with the closing brace.
   * NOTE(review): with zero objects this overwrites the opening brace,
   * yielding "}" — confirm callers never hit the empty case. */
  manifest[manifest.size() - 1] = '}';
  return manifest;
}
1144
unsigned int uint
float progress
Definition WM_types.hh:1019
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
list< Attribute > attributes
Attribute * find(ustring name) const
device_vector< DecomposedTransform > object_motion
Definition devicescene.h:43
device_vector< Transform > object_motion_pass
Definition devicescene.h:42
device_vector< float > object_volume_step
Definition devicescene.h:45
device_vector< uint > object_prim_offset
Definition devicescene.h:46
device_vector< uint > object_flag
Definition devicescene.h:44
KernelData data
Definition devicescene.h:89
device_vector< KernelObject > objects
Definition devicescene.h:41
virtual BVHLayoutMask get_bvh_layout_mask(const uint kernel_features) const =0
bool transform_applied
bool has_true_displacement() const
bool is_volume() const
bool is_pointcloud() const
bool is_hair() const
bool has_surface_bssrdf
virtual PrimitiveType primitive_type() const =0
size_t attr_map_offset
AttributeSet attributes
bool is_mesh() const
Definition hair.h:13
ImageMetaData metadata()
ImageDataType type
void tag_update(Scene *scene, const uint32_t flag)
@ EMISSIVE_MESH_MODIFIED
Definition scene/light.h:89
void tag_update(Scene *scene, const uint32_t flag)
string get_cryptomatte_objects(Scene *scene)
void device_update(Device *device, DeviceScene *dscene, Scene *scene, Progress &progress)
void device_update_object_transform(UpdateObjectTransformState *state, Object *ob, bool update_all, const Scene *scene)
void device_free(Device *device, DeviceScene *dscene, bool force_free)
string get_cryptomatte_assets(Scene *scene)
bool need_update() const
void device_update_geom_offsets(Device *device, DeviceScene *dscene, Scene *scene)
void device_update_prim_offsets(Device *device, DeviceScene *dscene, Scene *scene)
void device_update_transforms(DeviceScene *dscene, Scene *scene, Progress &progress)
void apply_static_transforms(DeviceScene *dscene, Scene *scene, Progress &progress)
void device_update_flags(Device *device, DeviceScene *dscene, Scene *scene, Progress &progress, bool bounds_valid=true)
bool has_volume_attribute_dependency
bool has_volume
EmissionSampling emission_sampling
bool has_volume_spatial_varying
size_t size() const
void free_if_need_realloc(bool force_free)
T * alloc(const size_t width, const size_t height=0, const size_t depth=0)
size_t size() const
#define LIGHT_LINK_SET_MAX
#define OBJECT_MOTION_PASS_SIZE
#define LIGHTGROUP_NONE
#define LIGHT_LINK_MASK_ALL
#define SHADOW_CATCHER_OBJECT_VISIBILITY(is_shadow_catcher, visibility)
#define CCL_NAMESPACE_END
ccl_device_forceinline float3 make_float3(const float x, const float y, const float z)
#define fminf(x, y)
VecBase< float, D > normalize(VecOp< float, D >) RET
#define assert(assertion)
VecBase< float, D > step(VecOp< float, D >, VecOp< float, D >) RET
ccl_device_inline float object_volume_density(KernelGlobals kg, const int object)
ccl_device_inline uint particle_index(KernelGlobals kg, const int particle)
@ ATTR_STD_VOLUME_VELOCITY
@ ATTR_STD_MOTION_VERTEX_POSITION
@ PATH_RAY_TRANSMIT
@ PATH_RAY_VOLUME_SCATTER
@ PATH_RAY_GLOSSY
@ PATH_RAY_ALL_VISIBILITY
@ PATH_RAY_DIFFUSE
@ EMISSION_SAMPLING_NONE
@ SD_OBJECT_MOTION
@ SD_OBJECT_HAS_VOLUME_ATTRIBUTES
@ SD_OBJECT_HAS_VOLUME
@ SD_OBJECT_INTERSECTS_VOLUME
@ SD_OBJECT_NEGATIVE_SCALE
@ SD_OBJECT_HOLDOUT_MASK
@ SD_OBJECT_HAS_VOLUME_MOTION
@ SD_OBJECT_CAUSTICS_RECEIVER
@ SD_OBJECT_SHADOW_CATCHER
@ SD_OBJECT_TRANSFORM_APPLIED
@ SD_OBJECT_HAS_VERTEX_MOTION
@ SD_OBJECT_CAUSTICS_CASTER
@ BVH_LAYOUT_MULTI_HIPRT_EMBREE
@ BVH_LAYOUT_METAL
@ BVH_LAYOUT_MULTI_HIPRT
@ BVH_LAYOUT_HIPRT
@ BVH_LAYOUT_MULTI_METAL
@ BVH_LAYOUT_MULTI_METAL_EMBREE
@ ATTR_ELEMENT_VOXEL
#define VLOG_INFO
Definition log.h:71
CCL_NAMESPACE_BEGIN ccl_device_inline float2 zero_float2()
Definition math_float2.h:13
ccl_device_inline float average(const float2 a)
ccl_device_inline float reduce_min(const float2 a)
ccl_device_inline float2 fabs(const float2 a)
ccl_device_inline float3 one_float3()
Definition math_float3.h:24
CCL_NAMESPACE_BEGIN ccl_device_inline float3 zero_float3()
Definition math_float3.h:15
static ulong state[N]
uint32_t util_murmur_hash3(const void *key, const int len, const uint32_t seed)
float util_hash_to_float(const uint32_t hash)
static void update(bNodeTree *ntree)
#define SOCKET_POINT(name, ui_name, default_value,...)
Definition node_type.h:206
#define SOCKET_FLOAT(name, ui_name, default_value,...)
Definition node_type.h:200
#define SOCKET_INT(name, ui_name, default_value,...)
Definition node_type.h:194
#define SOCKET_NODE(name, ui_name, node_type,...)
Definition node_type.h:229
#define SOCKET_TRANSFORM(name, ui_name, default_value,...)
Definition node_type.h:214
#define SOCKET_UINT(name, ui_name, default_value,...)
Definition node_type.h:196
#define NODE_DEFINE(structname)
Definition node_type.h:148
#define SOCKET_COLOR(name, ui_name, default_value,...)
Definition node_type.h:202
#define SOCKET_TRANSFORM_ARRAY(name, ui_name, default_value,...)
Definition node_type.h:269
#define SOCKET_BOOLEAN(name, ui_name, default_value,...)
Definition node_type.h:192
#define SOCKET_POINT2(name, ui_name, default_value,...)
Definition node_type.h:210
#define SOCKET_STRING(name, ui_name, default_value,...)
Definition node_type.h:212
#define SOCKET_UINT64(name, ui_name, default_value,...)
Definition node_type.h:198
int BVHLayoutMask
Definition params.h:50
static float object_volume_density(const Transform &tfm, Geometry *geom)
static int object_num_motion_verts(Geometry *geom)
#define FLT_MAX
Definition stdcycles.h:14
CCL_NAMESPACE_BEGIN string string_printf(const char *format,...)
Definition string.cpp:23
AttributeElement element
ImageHandle & data_voxel()
BoundBox transformed(const Transform *tfm) const
Definition boundbox.h:134
__forceinline bool intersects(const BoundBox &other)
Definition boundbox.h:157
Transform tfm
uint64_t shadow_set_membership
Transform itfm
float dupli_uv[2]
float shadow_terminator_geometry_offset
float cryptomatte_asset
uint blocker_shadow_set
uint attribute_map_offset
uint receiver_light_set
uint64_t light_set_membership
float shadow_terminator_shading_offset
float dupli_generated[3]
float cryptomatte_object
AttributeSet subd_attributes
Definition scene/mesh.h:168
@ SUBDIVISION_NONE
Definition scene/mesh.h:118
static NodeType * add(const char *name, CreateFunc create, Type type=NONE, const NodeType *base=nullptr)
ustring name
Definition graph/node.h:177
bool is_modified() const
Node(const NodeType *type, ustring name=ustring())
void compute_bounds(bool motion_blur)
bool use_motion() const
NODE_DECLARE BoundBox bounds
size_t attr_map_offset
bool usable_as_light() const
float shadow_terminator_shading_offset
vector< ParamValue > attributes
void update_motion()
int get_device_index() const
int motion_step(const float time) const
void apply_transform(bool apply_to_motion)
float compute_volume_step_size() const
bool has_light_linking() const
float shadow_terminator_geometry_offset
bool is_traceable() const
uint visibility_for_tracing() const
void tag_update(Scene *scene)
float color[4]
~Object() override
float motion_time(const int step) const
bool has_shadow_linking() const
struct LightgroupMembership * lightgroup
bool intersects_volume
ParticleData * particles
unique_ptr< ObjectManager > object_manager
Definition scene.h:150
MotionType need_motion() const
Definition scene.cpp:399
unique_ptr< LightManager > light_manager
Definition scene.h:146
unique_ptr< SceneUpdateStats > update_stats
Definition scene.h:174
void tag_shadow_catcher_modified()
Definition scene.cpp:772
MotionType
Definition scene.h:184
@ MOTION_PASS
Definition scene.h:184
@ MOTION_BLUR
Definition scene.h:184
unique_ptr_vector< ParticleSystem > particle_systems
Definition scene.h:139
unique_ptr< GeometryManager > geometry_manager
Definition scene.h:149
unique_ptr_vector< Object > objects
Definition scene.h:141
Integrator * integrator
Definition scene.h:130
struct Object * camera
map< ustring, int > lightgroups
Definition scene.h:120
map< ParticleSystem *, int > particle_offset
DecomposedTransform * object_motion
Scene::MotionType need_motion
float z
Definition sky_float3.h:27
float y
Definition sky_float3.h:27
float x
Definition sky_float3.h:27
i
Definition text_draw.cc:230
void transform_motion_decompose(DecomposedTransform *decomp, const Transform *motion, const size_t size)
ccl_device_inline Transform transform_identity()
Definition transform.h:289
ccl_device_inline Transform transform_empty()
Definition transform.h:378
ccl_device_inline bool transform_negative_scale(const Transform &tfm)
Definition transform.h:358
ccl_device void transform_motion_array_interpolate(ccl_private Transform *tfm, const ccl_global DecomposedTransform *motion, const uint numsteps, const float time)
Definition transform.h:558
ccl_device_inline Transform transform_inverse(const Transform tfm)
Definition transform.h:492
ccl_device_inline bool transform_uniform_scale(const Transform &tfm, float &scale)
Definition transform.h:335
ccl_device_inline float3 transform_direction(const ccl_private Transform *t, const float3 a)
Definition transform.h:87
@ IMAGE_DATA_TYPE_NANOVDB_FP16
@ IMAGE_DATA_TYPE_NANOVDB_FLOAT
@ IMAGE_DATA_TYPE_NANOVDB_FLOAT3
@ IMAGE_DATA_TYPE_NANOVDB_FPN
wmTimer * timer
uint len
uint8_t flag
Definition wm_window.cc:139