Blender V5.0
instances.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
5#include "BLI_array_utils.hh"
6#include "BLI_listbase.h"
7#include "BLI_rand.hh"
8#include "BLI_task.hh"
9
11#include "DNA_object_types.h"
12
13#include "BKE_geometry_set.hh"
15#include "BKE_instances.hh"
16
18
19namespace blender::bke {
20
22 : type_(Type::GeometrySet),
23 geometry_set_(std::make_unique<GeometrySet>(std::move(geometry_set)))
24{
25}
26
28 : type_(other.type_), data_(other.data_)
29{
30 if (other.geometry_set_) {
31 geometry_set_ = std::make_unique<GeometrySet>(*other.geometry_set_);
32 }
33}
34
36{
37 if (type_ != Type::GeometrySet) {
38 return;
39 }
40 geometry_set_->ensure_owns_direct_data();
41}
42
44{
45 if (type_ != Type::GeometrySet) {
46 /* The object and collection instances are not direct data. */
47 return true;
48 }
49 return geometry_set_->owns_direct_data();
50}
51
53{
54 switch (type_) {
55 case Type::GeometrySet: {
56 geometry_set_->count_memory(memory);
57 }
58 default: {
59 break;
60 }
61 }
62}
63
68
73
74static void convert_collection_to_instances(const Collection &collection,
75 bke::Instances &instances)
76{
77 LISTBASE_FOREACH (CollectionChild *, collection_child, &collection.children) {
79 transform.location() += float3(collection_child->collection->instance_offset);
80 transform.location() -= float3(collection.instance_offset);
81 const int handle = instances.add_reference(*collection_child->collection);
82 instances.add_instance(handle, transform);
83 }
84
85 LISTBASE_FOREACH (CollectionObject *, collection_object, &collection.gobject) {
87 transform.location() -= float3(collection.instance_offset);
88 transform *= (collection_object->ob)->object_to_world();
89 const int handle = instances.add_reference(*collection_object->ob);
90 instances.add_instance(handle, transform);
91 }
92}
93
95{
96 r_geometry_set.clear();
97 switch (type_) {
98 case Type::Object: {
99 const Object &object = this->object();
100 r_geometry_set = bke::object_get_evaluated_geometry_set(object);
101 break;
102 }
103 case Type::Collection: {
104 const Collection &collection = this->collection();
105 std::unique_ptr<bke::Instances> instances_ptr = std::make_unique<bke::Instances>();
107 r_geometry_set.replace_instances(instances_ptr.release());
108 break;
109 }
110 case Type::GeometrySet: {
111 r_geometry_set = this->geometry_set();
112 break;
113 }
114 case Type::None: {
115 break;
116 }
117 }
118}
119
121{
122 switch (type_) {
123 case Type::Object:
124 return this->object().id.name + 2;
125 case Type::Collection:
126 return this->collection().id.name + 2;
128 return this->geometry_set().name;
129 case Type::None:
130 break;
131 }
132 return "";
133}
134
136{
137 if (a.geometry_set_ && b.geometry_set_) {
138 return *a.geometry_set_ == *b.geometry_set_;
139 }
140 return a.type_ == b.type_ && a.data_ == b.data_;
141}
142
144{
145 const uint64_t geometry_hash = geometry_set_ ? geometry_set_->hash() : 0;
146 return get_default_hash(geometry_hash, type_, data_);
147}
148
/* Default construction: no references and no instances. */
Instances::Instances() = default;
150
152 : references_(std::move(other.references_)),
153 instances_num_(other.instances_num_),
154 attributes_(std::move(other.attributes_)),
155 reference_user_counts_(std::move(other.reference_user_counts_)),
156 unique_ids_cache_(std::move(other.unique_ids_cache_))
157{
158}
159
161 : references_(other.references_),
162 instances_num_(other.instances_num_),
163 attributes_(other.attributes_),
164 reference_user_counts_(other.reference_user_counts_),
165 unique_ids_cache_(other.unique_ids_cache_)
166{
167}
168
/* Defaulted: all members release their data through their own destructors. */
Instances::~Instances() = default;
170
172{
173 if (this == &other) {
174 return *this;
175 }
176 std::destroy_at(this);
177 new (this) Instances(other);
178 return *this;
179}
180
182{
183 if (this == &other) {
184 return *this;
185 }
186 std::destroy_at(this);
187 new (this) Instances(std::move(other));
188 return *this;
189}
190
191void Instances::resize(int capacity)
192{
193 const int old_size = this->instances_num();
194 attributes_.resize(AttrDomain::Instance, capacity);
195 instances_num_ = capacity;
196 if (capacity > old_size) {
199 {},
200 IndexRange::from_begin_end(old_size, capacity));
201 }
202}
203
204void Instances::add_instance(const int instance_handle, const float4x4 &transform)
205{
206 BLI_assert(instance_handle >= 0);
207 BLI_assert(instance_handle < references_.size());
208 instances_num_++;
209 attributes_.resize(AttrDomain::Instance, instances_num_);
210 this->reference_handles_for_write().last() = instance_handle;
211 this->transforms_for_write().last() = transform;
213}
214
216{
218 attributes_, AttrDomain::Instance, ".reference_index", instances_num_)
219 .value_or(Span<int>());
220}
221
223{
225 attributes_, AttrDomain::Instance, ".reference_index", instances_num_);
226}
227
229{
231 attributes_, AttrDomain::Instance, "instance_transform", instances_num_)
232 .value_or(Span<float4x4>());
233}
234
236{
238 attributes_, AttrDomain::Instance, "instance_transform", instances_num_);
239}
240
242{
243 /* If this assert fails, it means #ensure_geometry_instances must be called first or that the
244 * reference can't be converted to a geometry set. */
245 BLI_assert(references_[reference_index].type() == InstanceReference::Type::GeometrySet);
246
247 return references_[reference_index].geometry_set();
248}
249
251{
252 for (const int i : references_.index_range()) {
253 const InstanceReference &reference = references_[i];
254 if (reference == query) {
255 return i;
256 }
257 }
258 return std::nullopt;
259}
260
262{
263 if (std::optional<int> handle = this->find_reference_handle(reference)) {
264 return *handle;
265 }
266 return this->add_new_reference(reference);
267}
268
270{
272 return references_.append_and_get_index(reference);
273}
274
276{
277 return references_;
278}
279
284
285void Instances::remove(const IndexMask &mask, const AttributeFilter &attribute_filter)
286{
287 const std::optional<IndexRange> masked_range = mask.to_range();
288 if (masked_range.has_value() && masked_range->start() == 0) {
289 /* Deleting from the end of the array can be much faster since no data has to be shifted. */
290 this->resize(mask.size());
292 return;
293 }
294
295 Instances new_instances;
296 new_instances.references_ = std::move(references_);
297 new_instances.instances_num_ = mask.size();
298
302 attribute_filter,
303 mask,
304 new_instances.attributes_for_write());
305
306 *this = std::move(new_instances);
307
309}
310
312{
313 const int tot_instances = instances_num_;
314 const int tot_references_before = references_.size();
315
316 if (tot_instances == 0) {
317 /* If there are no instances, no reference is needed. */
318 references_.clear();
319 return;
320 }
321 if (tot_references_before == 1) {
322 /* There is only one reference and at least one instance. So the only existing reference is
323 * used. Nothing to do here. */
324 return;
325 }
326
328
329 Array<bool> usage_by_handle(tot_references_before, false);
330 Mutex mutex;
331
332 /* Loop over all instances to see which references are used. */
333 threading::parallel_for(IndexRange(tot_instances), 1000, [&](IndexRange range) {
334 /* Use local counter to avoid lock contention. */
335 Array<bool> local_usage_by_handle(tot_references_before, false);
336
337 for (const int i : range) {
338 const int handle = reference_handles[i];
339 BLI_assert(handle >= 0 && handle < tot_references_before);
340 local_usage_by_handle[handle] = true;
341 }
342
343 std::lock_guard lock{mutex};
344 for (const int i : IndexRange(tot_references_before)) {
345 usage_by_handle[i] |= local_usage_by_handle[i];
346 }
347 });
348
349 if (!usage_by_handle.as_span().contains(false)) {
350 /* All references are used. */
351 return;
352 }
353
354 /* Create new references and a mapping for the handles. */
355 Vector<int> handle_mapping;
356 Vector<InstanceReference> new_references;
357 int next_new_handle = 0;
358 bool handles_have_to_be_updated = false;
359 for (const int old_handle : IndexRange(tot_references_before)) {
360 if (!usage_by_handle[old_handle]) {
361 /* Add some dummy value. It won't be read again. */
362 handle_mapping.append(-1);
363 }
364 else {
365 const InstanceReference &reference = references_[old_handle];
366 handle_mapping.append(next_new_handle);
367 new_references.append(reference);
368 if (old_handle != next_new_handle) {
369 handles_have_to_be_updated = true;
370 }
371 next_new_handle++;
372 }
373 }
374 references_ = new_references;
375
376 if (!handles_have_to_be_updated) {
377 /* All remaining handles are the same as before, so they don't have to be updated. This happens
378 * when unused handles are only at the end. */
379 return;
380 }
381
382 /* Update handles of instances. */
383 {
385 threading::parallel_for(IndexRange(tot_instances), 1000, [&](IndexRange range) {
386 for (const int i : range) {
387 reference_handles[i] = handle_mapping[reference_handles[i]];
388 }
389 });
390 }
391}
392
394{
395 return this->instances_num_;
396}
397
399{
400 return references_.size();
401}
402
404{
405 for (const InstanceReference &reference : references_) {
406 if (!reference.owns_direct_data()) {
407 return false;
408 }
409 }
410 return true;
411}
412
414{
415 for (const InstanceReference &const_reference : references_) {
416 /* `const` cast is fine because we are not changing anything that would change the hash of the
417 * reference. */
418 InstanceReference &reference = const_cast<InstanceReference &>(const_reference);
419 reference.ensure_owns_direct_data();
420 }
421}
422
424{
425 attributes_.count_memory(memory);
426 for (const InstanceReference &reference : references_) {
427 reference.count_memory(memory);
428 }
429}
430
432{
433 Array<int> unique_ids(original_ids.size());
434
435 Set<int> used_unique_ids;
436 used_unique_ids.reserve(original_ids.size());
437 Vector<int> instances_with_id_collision;
438 for (const int instance_index : original_ids.index_range()) {
439 const int original_id = original_ids[instance_index];
440 if (used_unique_ids.add(original_id)) {
441 /* The original id has not been used by another instance yet. */
442 unique_ids[instance_index] = original_id;
443 }
444 else {
445 /* The original id of this instance collided with a previous instance, it needs to be looked
446 * at again in a second pass. Don't generate a new random id here, because this might collide
447 * with other existing ids. */
448 instances_with_id_collision.append(instance_index);
449 }
450 }
451
452 Map<int, RandomNumberGenerator> generator_by_original_id;
453 for (const int instance_index : instances_with_id_collision) {
454 const int original_id = original_ids[instance_index];
455 RandomNumberGenerator &rng = generator_by_original_id.lookup_or_add_cb(original_id, [&]() {
457 rng.seed_random(original_id);
458 return rng;
459 });
460
461 const int max_iteration = 100;
462 for (int iteration = 0;; iteration++) {
463 /* Try generating random numbers until an unused one has been found. */
464 const int random_id = rng.get_int32();
465 if (used_unique_ids.add(random_id)) {
466 /* This random id is not used by another instance. */
467 unique_ids[instance_index] = random_id;
468 break;
469 }
470 if (iteration == max_iteration) {
471 /* The likelihood of running into this case is very low even if there is a huge number of
472 * instances. For correctness, it's still good to systematically find an unused id instead
473 * of purely relying on randomness. */
474 for (const int generated_id : IndexRange(INT32_MAX)) {
475 if (used_unique_ids.add(generated_id)) {
476 unique_ids[instance_index] = generated_id;
477 break;
478 }
479 }
480 break;
481 }
482 }
483 }
484
485 return unique_ids;
486}
487
489{
490 reference_user_counts_.ensure([&](Array<int> &r_data) {
491 const int references_num = references_.size();
493 r_data.fill(0);
494
495 const Span<int> handles = this->reference_handles();
496 for (const int handle : handles) {
497 if (handle >= 0 && handle < references_num) {
498 r_data[handle]++;
499 }
500 }
501 });
502 return reference_user_counts_.data();
503}
504
506{
507 unique_ids_cache_.ensure([&](Array<int> &r_data) {
508 const VArraySpan<int> instance_ids = *this->attributes().lookup<int>("id");
509 if (instance_ids.is_empty()) {
510 r_data.reinitialize(instances_num_);
512 return;
513 }
514 r_data = generate_unique_instance_ids(instance_ids);
515 });
516 return unique_ids_cache_.data();
517}
518
520{
521 return transform.location();
522}
523
/* Write #position into the translation component of an instance transform. */
static void set_transform_position(float4x4 &transform, const float3 position)
{
  transform.location() = position;
}
528
534
541
542} // namespace blender::bke
#define BLI_assert(a)
Definition BLI_assert.h:46
#define LISTBASE_FOREACH(type, var, list)
Object groups, one object can be in many groups at once.
Object is a sort of wrapper for general info.
volatile int lock
SIMD_FORCE_INLINE btVector3 transform(const btVector3 &point) const
unsigned long long int uint64_t
constexpr T & last(const int64_t n=0) const
Definition BLI_span.hh:689
constexpr const T * data() const
Definition BLI_span.hh:215
Span< T > as_span() const
Definition BLI_array.hh:243
MutableSpan< T > as_mutable_span()
Definition BLI_array.hh:248
void fill(const T &value) const
Definition BLI_array.hh:272
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:419
static constexpr IndexRange from_begin_end(const int64_t begin, const int64_t end)
Value & lookup_or_add_cb(const Key &key, const CreateValueF &create_value)
Definition BLI_map.hh:620
void seed_random(uint32_t seed)
Definition rand.cc:297
void reserve(const int64_t n)
Definition BLI_set.hh:637
bool add(const Key &key)
Definition BLI_set.hh:248
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr IndexRange index_range() const
Definition BLI_span.hh:401
constexpr bool is_empty() const
Definition BLI_span.hh:260
static VArray from_derived_span(Span< StructT > values)
static VMutableArray from_derived_span(MutableSpan< StructT > values)
int64_t size() const
void append(const T &value)
GAttributeReader lookup(const StringRef attribute_id) const
void count_memory(MemoryCounter &memory) const
Definition instances.cc:52
void to_geometry_set(GeometrySet &r_geometry_set) const
Definition instances.cc:94
StringRefNull name() const
Definition instances.cc:120
Collection & collection() const
Span< int > reference_handles() const
Definition instances.cc:215
MutableSpan< int > reference_handles_for_write()
Definition instances.cc:222
int add_reference(const InstanceReference &reference)
Definition instances.cc:261
void remove(const IndexMask &mask, const AttributeFilter &attribute_filter)
Definition instances.cc:285
int add_new_reference(const InstanceReference &reference)
Definition instances.cc:269
GeometrySet & geometry_set_from_reference(int reference_index)
Definition instances.cc:241
Span< float4x4 > transforms() const
Definition instances.cc:228
void add_instance(int instance_handle, const float4x4 &transform)
Definition instances.cc:204
Span< InstanceReference > references() const
Definition instances.cc:275
Span< int > reference_user_counts() const
Definition instances.cc:488
Instances & operator=(const Instances &other)
Definition instances.cc:171
void resize(int capacity)
Definition instances.cc:191
int references_num() const
Definition instances.cc:398
bke::MutableAttributeAccessor attributes_for_write()
Definition instances.cc:69
bke::AttributeAccessor attributes() const
Definition instances.cc:64
MutableSpan< InstanceReference > references_for_write()
Definition instances.cc:280
int instances_num() const
Definition instances.cc:393
void count_memory(MemoryCounter &memory) const
Definition instances.cc:423
bool owns_direct_data() const
Definition instances.cc:403
std::optional< int > find_reference_handle(const InstanceReference &query)
Definition instances.cc:250
MutableSpan< float4x4 > transforms_for_write()
Definition instances.cc:235
Span< int > unique_ids() const
Definition instances.cc:505
#define INT32_MAX
ThreadMutex mutex
ccl_device_inline float2 mask(const MaskType mask, const float2 a)
void fill_index_range(MutableSpan< T > span, const T start=0)
static void convert_collection_to_instances(const Collection &collection, bke::Instances &instances)
Definition instances.cc:74
GMutableSpan get_mutable_attribute(AttributeStorage &storage, const AttrDomain domain, const CPPType &cpp_type, const StringRef name, const int64_t domain_size, const void *custom_default_value)
void fill_attribute_range_default(MutableAttributeAccessor dst_attributes, AttrDomain domain, const AttributeFilter &attribute_filter, IndexRange range)
std::optional< GSpan > get_span_attribute(const AttributeStorage &storage, const AttrDomain domain, const CPPType &cpp_type, const StringRef name, const int64_t domain_size)
bool operator==(const InstanceReference &a, const InstanceReference &b)
Definition instances.cc:135
void gather_attributes(AttributeAccessor src_attributes, AttrDomain src_domain, AttrDomain dst_domain, const AttributeFilter &attribute_filter, const IndexMask &selection, MutableAttributeAccessor dst_attributes)
static float3 get_transform_position(const float4x4 &transform)
Definition instances.cc:519
GeometrySet object_get_evaluated_geometry_set(const Object &object, bool apply_subdiv=true)
VArray< float3 > instance_position_varray(const Instances &instances)
Definition instances.cc:529
static Array< int > generate_unique_instance_ids(Span< int > original_ids)
Definition instances.cc:431
VMutableArray< float3 > instance_position_varray_for_write(Instances &instances)
Definition instances.cc:535
static void set_transform_position(float4x4 &transform, const float3 position)
Definition instances.cc:524
const AttributeAccessorFunctions & instance_attribute_accessor_functions()
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
MatBase< float, 4, 4 > float4x4
uint64_t get_default_hash(const T &v, const Args &...args)
Definition BLI_hash.hh:233
std::mutex Mutex
Definition BLI_mutex.hh:47
VecBase< float, 3 > float3
char name[258]
Definition DNA_ID.h:432
void replace_instances(Instances *instances, GeometryOwnershipType ownership=GeometryOwnershipType::Owned)
i
Definition text_draw.cc:230
ParamHandle ** handles