Blender V4.3
instances.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
5#include "BLI_array_utils.hh"
6#include "BLI_listbase.h"
7#include "BLI_rand.hh"
8#include "BLI_task.hh"
9
11#include "DNA_object_types.h"
12
13#include "BKE_customdata.hh"
14#include "BKE_geometry_set.hh"
16#include "BKE_instances.hh"
17
18#include "BLT_translation.hh"
19
20namespace blender::bke {
21
23 : type_(Type::GeometrySet),
24 geometry_set_(std::make_unique<GeometrySet>(std::move(geometry_set)))
25{
26}
27
29 : type_(other.type_), data_(other.data_)
30{
31 if (other.geometry_set_) {
32 geometry_set_ = std::make_unique<GeometrySet>(*other.geometry_set_);
33 }
34}
35
37{
38 if (type_ != Type::GeometrySet) {
39 return;
40 }
41 geometry_set_->ensure_owns_direct_data();
42}
43
45{
46 if (type_ != Type::GeometrySet) {
47 /* The object and collection instances are not direct data. */
48 return true;
49 }
50 return geometry_set_->owns_direct_data();
51}
52
54{
55 switch (type_) {
56 case Type::GeometrySet: {
57 geometry_set_->count_memory(memory);
58 }
59 default: {
60 break;
61 }
62 }
63}
64
65static void convert_collection_to_instances(const Collection &collection,
66 bke::Instances &instances)
67{
68 LISTBASE_FOREACH (CollectionChild *, collection_child, &collection.children) {
69 float4x4 transform = float4x4::identity();
70 transform.location() += float3(collection_child->collection->instance_offset);
71 transform.location() -= float3(collection.instance_offset);
72 const int handle = instances.add_reference(*collection_child->collection);
73 instances.add_instance(handle, transform);
74 }
75
76 LISTBASE_FOREACH (CollectionObject *, collection_object, &collection.gobject) {
77 float4x4 transform = float4x4::identity();
78 transform.location() -= float3(collection.instance_offset);
79 transform *= (collection_object->ob)->object_to_world();
80 const int handle = instances.add_reference(*collection_object->ob);
81 instances.add_instance(handle, transform);
82 }
83}
84
86{
87 r_geometry_set.clear();
88 switch (type_) {
89 case Type::Object: {
90 const Object &object = this->object();
91 r_geometry_set = bke::object_get_evaluated_geometry_set(object);
92 break;
93 }
94 case Type::Collection: {
95 const Collection &collection = this->collection();
96 std::unique_ptr<bke::Instances> instances_ptr = std::make_unique<bke::Instances>();
97 convert_collection_to_instances(collection, *instances_ptr);
98 r_geometry_set.replace_instances(instances_ptr.release());
99 break;
100 }
101 case Type::GeometrySet: {
102 r_geometry_set = this->geometry_set();
103 break;
104 }
105 case Type::None: {
106 break;
107 }
108 }
109}
110
112{
113 switch (type_) {
114 case Type::Object:
115 return this->object().id.name + 2;
116 case Type::Collection:
117 return this->collection().id.name + 2;
119 return this->geometry_set().name;
120 case Type::None:
121 break;
122 }
123 return "";
124}
125
127{
128 if (a.geometry_set_ && b.geometry_set_) {
129 return *a.geometry_set_ == *b.geometry_set_;
130 }
131 return a.type_ == b.type_ && a.data_ == b.data_;
132}
133
135{
136 const uint64_t geometry_hash = geometry_set_ ? geometry_set_->hash() : 0;
137 return get_default_hash(geometry_hash, type_, data_);
138}
139
141{
142 CustomData_reset(&attributes_);
143}
144
146 : references_(std::move(other.references_)),
147 instances_num_(other.instances_num_),
148 attributes_(other.attributes_),
149 reference_user_counts_(std::move(other.reference_user_counts_)),
150 almost_unique_ids_cache_(std::move(other.almost_unique_ids_cache_))
151{
152 CustomData_reset(&other.attributes_);
153}
154
156 : references_(other.references_),
157 instances_num_(other.instances_num_),
158 reference_user_counts_(other.reference_user_counts_),
159 almost_unique_ids_cache_(other.almost_unique_ids_cache_)
160{
161 CustomData_init_from(&other.attributes_, &attributes_, CD_MASK_ALL, other.instances_num_);
162}
163
165{
166 CustomData_free(&attributes_, instances_num_);
167}
168
170{
171 if (this == &other) {
172 return *this;
173 }
174 std::destroy_at(this);
175 new (this) Instances(other);
176 return *this;
177}
178
180{
181 if (this == &other) {
182 return *this;
183 }
184 std::destroy_at(this);
185 new (this) Instances(std::move(other));
186 return *this;
187}
188
189void Instances::resize(int capacity)
190{
191 CustomData_realloc(&attributes_, instances_num_, capacity, CD_SET_DEFAULT);
192 instances_num_ = capacity;
193}
194
/* Append a single instance that uses the reference identified by
 * \a instance_handle (a handle previously returned by #add_reference) and is
 * placed at \a transform. Grows every attribute layer by one element. */
void Instances::add_instance(const int instance_handle, const float4x4 &transform)
{
  BLI_assert(instance_handle >= 0);
  BLI_assert(instance_handle < references_.size());
  /* Grow all attribute layers by one element; the new element is written below. */
  const int old_size = instances_num_;
  instances_num_++;
  CustomData_realloc(&attributes_, old_size, instances_num_);
  /* The `_for_write` accessors create the layers on demand if they do not exist yet. */
  this->reference_handles_for_write().last() = instance_handle;
  this->transforms_for_write().last() = transform;
  /* NOTE(review): this extraction appears to have dropped a line here
   * (original line 204) -- presumably a cache-invalidation call; verify
   * against the upstream file. */
}
206
208{
209 return {static_cast<const int *>(
210 CustomData_get_layer_named(&attributes_, CD_PROP_INT32, ".reference_index")),
211 instances_num_};
212}
213
215{
216 int *data = static_cast<int *>(CustomData_get_layer_named_for_write(
217 &attributes_, CD_PROP_INT32, ".reference_index", instances_num_));
218 if (!data) {
219 data = static_cast<int *>(CustomData_add_layer_named(
220 &attributes_, CD_PROP_INT32, CD_SET_DEFAULT, instances_num_, ".reference_index"));
221 }
222 return {data, instances_num_};
223}
224
226{
227 return {static_cast<const float4x4 *>(
228 CustomData_get_layer_named(&attributes_, CD_PROP_FLOAT4X4, "instance_transform")),
229 instances_num_};
230}
231
233{
235 &attributes_, CD_PROP_FLOAT4X4, "instance_transform", instances_num_));
236 if (!data) {
237 data = static_cast<float4x4 *>(CustomData_add_layer_named(
238 &attributes_, CD_PROP_FLOAT4X4, CD_SET_DEFAULT, instances_num_, "instance_transform"));
239 }
240 return {data, instances_num_};
241}
242
244{
245 /* If this assert fails, it means #ensure_geometry_instances must be called first or that the
246 * reference can't be converted to a geometry set. */
247 BLI_assert(references_[reference_index].type() == InstanceReference::Type::GeometrySet);
248
249 return references_[reference_index].geometry_set();
250}
251
253{
254 for (const int i : references_.index_range()) {
255 const InstanceReference &reference = references_[i];
256 if (reference == query) {
257 return i;
258 }
259 }
260 return std::nullopt;
261}
262
264{
265 if (std::optional<int> handle = this->find_reference_handle(reference)) {
266 return *handle;
267 }
268 return this->add_new_reference(reference);
269}
270
272{
274 return references_.append_and_get_index(reference);
275}
276
278{
279 return references_;
280}
281
282void Instances::remove(const IndexMask &mask, const AttributeFilter &attribute_filter)
283{
284 const std::optional<IndexRange> masked_range = mask.to_range();
285 if (masked_range.has_value() && masked_range->start() == 0) {
286 /* Deleting from the end of the array can be much faster since no data has to be shifted. */
287 this->resize(mask.size());
289 return;
290 }
291
292 Instances new_instances;
293 new_instances.references_ = std::move(references_);
294 new_instances.instances_num_ = mask.size();
295
299 attribute_filter,
300 mask,
301 new_instances.attributes_for_write());
302
303 *this = std::move(new_instances);
304
306}
307
309{
310 const int tot_instances = instances_num_;
311 const int tot_references_before = references_.size();
312
313 if (tot_instances == 0) {
314 /* If there are no instances, no reference is needed. */
315 references_.clear();
316 return;
317 }
318 if (tot_references_before == 1) {
319 /* There is only one reference and at least one instance. So the only existing reference is
320 * used. Nothing to do here. */
321 return;
322 }
323
325
326 Array<bool> usage_by_handle(tot_references_before, false);
327 std::mutex mutex;
328
329 /* Loop over all instances to see which references are used. */
330 threading::parallel_for(IndexRange(tot_instances), 1000, [&](IndexRange range) {
331 /* Use local counter to avoid lock contention. */
332 Array<bool> local_usage_by_handle(tot_references_before, false);
333
334 for (const int i : range) {
335 const int handle = reference_handles[i];
336 BLI_assert(handle >= 0 && handle < tot_references_before);
337 local_usage_by_handle[handle] = true;
338 }
339
340 std::lock_guard lock{mutex};
341 for (const int i : IndexRange(tot_references_before)) {
342 usage_by_handle[i] |= local_usage_by_handle[i];
343 }
344 });
345
346 if (!usage_by_handle.as_span().contains(false)) {
347 /* All references are used. */
348 return;
349 }
350
351 /* Create new references and a mapping for the handles. */
352 Vector<int> handle_mapping;
353 Vector<InstanceReference> new_references;
354 int next_new_handle = 0;
355 bool handles_have_to_be_updated = false;
356 for (const int old_handle : IndexRange(tot_references_before)) {
357 if (!usage_by_handle[old_handle]) {
358 /* Add some dummy value. It won't be read again. */
359 handle_mapping.append(-1);
360 }
361 else {
362 const InstanceReference &reference = references_[old_handle];
363 handle_mapping.append(next_new_handle);
364 new_references.append(reference);
365 if (old_handle != next_new_handle) {
366 handles_have_to_be_updated = true;
367 }
368 next_new_handle++;
369 }
370 }
371 references_ = new_references;
372
373 if (!handles_have_to_be_updated) {
374 /* All remaining handles are the same as before, so they don't have to be updated. This happens
375 * when unused handles are only at the end. */
376 return;
377 }
378
379 /* Update handles of instances. */
380 {
382 threading::parallel_for(IndexRange(tot_instances), 1000, [&](IndexRange range) {
383 for (const int i : range) {
384 reference_handles[i] = handle_mapping[reference_handles[i]];
385 }
386 });
387 }
388}
389
391{
392 return this->instances_num_;
393}
394
396{
397 return references_.size();
398}
399
401{
402 for (const InstanceReference &reference : references_) {
403 if (!reference.owns_direct_data()) {
404 return false;
405 }
406 }
407 return true;
408}
409
411{
412 for (const InstanceReference &const_reference : references_) {
413 /* `const` cast is fine because we are not changing anything that would change the hash of the
414 * reference. */
415 InstanceReference &reference = const_cast<InstanceReference &>(const_reference);
416 reference.ensure_owns_direct_data();
417 }
418}
419
421{
422 CustomData_count_memory(attributes_, instances_num_, memory);
423 for (const InstanceReference &reference : references_) {
424 reference.count_memory(memory);
425 }
426}
427
429{
430 Array<int> unique_ids(original_ids.size());
431
432 Set<int> used_unique_ids;
433 used_unique_ids.reserve(original_ids.size());
434 Vector<int> instances_with_id_collision;
435 for (const int instance_index : original_ids.index_range()) {
436 const int original_id = original_ids[instance_index];
437 if (used_unique_ids.add(original_id)) {
438 /* The original id has not been used by another instance yet. */
439 unique_ids[instance_index] = original_id;
440 }
441 else {
442 /* The original id of this instance collided with a previous instance, it needs to be looked
443 * at again in a second pass. Don't generate a new random id here, because this might collide
444 * with other existing ids. */
445 instances_with_id_collision.append(instance_index);
446 }
447 }
448
449 Map<int, RandomNumberGenerator> generator_by_original_id;
450 for (const int instance_index : instances_with_id_collision) {
451 const int original_id = original_ids[instance_index];
452 RandomNumberGenerator &rng = generator_by_original_id.lookup_or_add_cb(original_id, [&]() {
454 rng.seed_random(original_id);
455 return rng;
456 });
457
458 const int max_iteration = 100;
459 for (int iteration = 0;; iteration++) {
460 /* Try generating random numbers until an unused one has been found. */
461 const int random_id = rng.get_int32();
462 if (used_unique_ids.add(random_id)) {
463 /* This random id is not used by another instance. */
464 unique_ids[instance_index] = random_id;
465 break;
466 }
467 if (iteration == max_iteration) {
468 /* It seems to be very unlikely that we ever run into this case (assuming there are less
469 * than 2^30 instances). However, if that happens, it's better to use an id that is not
470 * unique than to be stuck in an infinite loop. */
471 unique_ids[instance_index] = original_id;
472 break;
473 }
474 }
475 }
476
477 return unique_ids;
478}
479
481{
482 reference_user_counts_.ensure([&](Array<int> &r_data) {
483 const int references_num = references_.size();
485 r_data.fill(0);
486
487 const Span<int> handles = this->reference_handles();
488 for (const int handle : handles) {
489 if (handle >= 0 && handle < references_num) {
490 r_data[handle]++;
491 }
492 }
493 });
494 return reference_user_counts_.data();
495}
496
498{
499 almost_unique_ids_cache_.ensure([&](Array<int> &r_data) {
500 bke::AttributeReader<int> instance_ids_attribute = this->attributes().lookup<int>("id");
501 if (instance_ids_attribute) {
502 Span<int> instance_ids = instance_ids_attribute.varray.get_internal_span();
503 if (r_data.size() != instance_ids.size()) {
504 r_data = generate_unique_instance_ids(instance_ids);
505 }
506 }
507 else {
508 r_data.reinitialize(instances_num_);
510 }
511 });
512 return almost_unique_ids_cache_.data();
513}
514
515static float3 get_transform_position(const float4x4 &transform)
516{
517 return transform.location();
518}
519
/* Overwrite the translation component of \a transform with \a position.
 * The rest of the matrix is left untouched. */
static void set_transform_position(float4x4 &transform, const float3 position)
{
  /* `location()` yields an assignable accessor into the matrix (see its use
   * as an lvalue elsewhere in this file). */
  transform.location() = position;
}
524
529
536
537} // namespace blender::bke
CustomData interface, see also DNA_customdata_types.h.
void CustomData_count_memory(const CustomData &data, int totelem, blender::MemoryCounter &memory)
void CustomData_realloc(CustomData *data, int old_size, int new_size, eCDAllocType alloctype=CD_CONSTRUCT)
void * CustomData_get_layer_named_for_write(CustomData *data, eCustomDataType type, blender::StringRef name, int totelem)
@ CD_SET_DEFAULT
const void * CustomData_get_layer_named(const CustomData *data, eCustomDataType type, blender::StringRef name)
void * CustomData_add_layer_named(CustomData *data, eCustomDataType type, eCDAllocType alloctype, int totelem, blender::StringRef name)
void CustomData_reset(CustomData *data)
void CustomData_init_from(const CustomData *source, CustomData *dest, eCustomDataMask mask, int totelem)
void CustomData_free(CustomData *data, int totelem)
#define BLI_assert(a)
Definition BLI_assert.h:50
#define LISTBASE_FOREACH(type, var, list)
ThreadMutex mutex
Object groups, one object can be in many groups at once.
#define CD_MASK_ALL
@ CD_PROP_INT32
@ CD_PROP_FLOAT4X4
Object is a sort of wrapper for general info.
volatile int lock
SIMD_FORCE_INLINE btVector3 transform(const btVector3 &point) const
int64_t size() const
Definition BLI_array.hh:245
Span< T > as_span() const
Definition BLI_array.hh:232
MutableSpan< T > as_mutable_span()
Definition BLI_array.hh:237
void fill(const T &value) const
Definition BLI_array.hh:261
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:388
Value & lookup_or_add_cb(const Key &key, const CreateValueF &create_value)
Definition BLI_map.hh:582
constexpr T & last(const int64_t n=0) const
Definition BLI_span.hh:690
void seed_random(uint32_t seed)
Definition rand.cc:374
void reserve(const int64_t n)
Definition BLI_set.hh:614
bool add(const Key &key)
Definition BLI_set.hh:248
constexpr int64_t size() const
Definition BLI_span.hh:253
constexpr IndexRange index_range() const
Definition BLI_span.hh:402
static VArray ForDerivedSpan(Span< StructT > values)
static VMutableArray ForDerivedSpan(MutableSpan< StructT > values)
void append(const T &value)
GAttributeReader lookup(const StringRef attribute_id) const
void count_memory(MemoryCounter &memory) const
Definition instances.cc:53
void to_geometry_set(GeometrySet &r_geometry_set) const
Definition instances.cc:85
StringRefNull name() const
Definition instances.cc:111
Collection & collection() const
Span< int > reference_handles() const
Definition instances.cc:207
MutableSpan< int > reference_handles_for_write()
Definition instances.cc:214
int add_reference(const InstanceReference &reference)
Definition instances.cc:263
void remove(const IndexMask &mask, const AttributeFilter &attribute_filter)
Definition instances.cc:282
int add_new_reference(const InstanceReference &reference)
Definition instances.cc:271
GeometrySet & geometry_set_from_reference(int reference_index)
Definition instances.cc:243
Span< float4x4 > transforms() const
Definition instances.cc:225
void add_instance(int instance_handle, const float4x4 &transform)
Definition instances.cc:195
Span< InstanceReference > references() const
Definition instances.cc:277
Span< int > reference_user_counts() const
Definition instances.cc:480
Instances & operator=(const Instances &other)
Definition instances.cc:169
void resize(int capacity)
Definition instances.cc:189
int references_num() const
Definition instances.cc:395
bke::MutableAttributeAccessor attributes_for_write()
bke::AttributeAccessor attributes() const
int instances_num() const
Definition instances.cc:390
void count_memory(MemoryCounter &memory) const
Definition instances.cc:420
Span< int > almost_unique_ids() const
Definition instances.cc:497
bool owns_direct_data() const
Definition instances.cc:400
std::optional< int > find_reference_handle(const InstanceReference &query)
Definition instances.cc:252
MutableSpan< float4x4 > transforms_for_write()
Definition instances.cc:232
local_group_size(16, 16) .push_constant(Type b
IndexRange range
void fill_index_range(MutableSpan< T > span, const T start=0)
static void convert_collection_to_instances(const Collection &collection, bke::Instances &instances)
Definition instances.cc:65
bool operator==(const InstanceReference &a, const InstanceReference &b)
Definition instances.cc:126
void gather_attributes(AttributeAccessor src_attributes, AttrDomain src_domain, AttrDomain dst_domain, const AttributeFilter &attribute_filter, const IndexMask &selection, MutableAttributeAccessor dst_attributes)
GeometrySet object_get_evaluated_geometry_set(const Object &object)
static float3 get_transform_position(const float4x4 &transform)
Definition instances.cc:515
VArray< float3 > instance_position_varray(const Instances &instances)
Definition instances.cc:525
static Array< int > generate_unique_instance_ids(Span< int > original_ids)
Definition instances.cc:428
VMutableArray< float3 > instance_position_varray_for_write(Instances &instances)
Definition instances.cc:530
static void set_transform_position(float4x4 &transform, const float3 position)
Definition instances.cc:520
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:95
uint64_t get_default_hash(const T &v)
Definition BLI_hash.hh:219
VecBase< float, 3 > float3
unsigned __int64 uint64_t
Definition stdint.h:90
char name[66]
Definition DNA_ID.h:425
void replace_instances(Instances *instances, GeometryOwnershipType ownership=GeometryOwnershipType::Owned)
ParamHandle ** handles