/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup bli
 */

#include <fmt/format.h>
#include <iostream>

#include "BLI_array.hh"
#include "BLI_array_utils.hh"
#include "BLI_binary_search.hh"
#include "BLI_bit_span_ops.hh"
#include "BLI_bit_span_to_index_ranges.hh"
#include "BLI_bit_vector.hh"
#include "BLI_enumerable_thread_specific.hh"
#include "BLI_index_mask.hh"
#include "BLI_index_mask_expression.hh"
#include "BLI_index_ranges_builder.hh"
#include "BLI_math_base.hh"
#include "BLI_rand.hh"
#include "BLI_set.hh"
#include "BLI_sort.hh"
#include "BLI_task.hh"
#include "BLI_threads.h"
#include "BLI_virtual_array.hh"

#include "BLI_strict_flags.h" /* IWYU pragma: keep. Keep last. */

namespace blender::index_mask {

template<typename T> void build_reverse_map(const IndexMask &mask, MutableSpan<T> r_map)
{
#ifndef NDEBUG
  /* Catch errors with asserts in debug builds. */
  r_map.fill(-1);
#endif
  BLI_assert(r_map.size() >= mask.min_array_size());
  mask.foreach_index_optimized<T>(GrainSize(4096),
                                  [&](const T src, const T dst) { r_map[src] = dst; });
}

template void build_reverse_map<int>(const IndexMask &mask, MutableSpan<int> r_map);
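
/* For example, for a mask containing the indices {5, 7, 8}, the map is filled so that
 * r_map[5] = 0, r_map[7] = 1 and r_map[8] = 2, while all other entries stay -1 in debug
 * builds. */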

std::array<int16_t, max_segment_size> build_static_indices_array()
{
  std::array<int16_t, max_segment_size> data;
  for (int16_t i = 0; i < max_segment_size; i++) {
    data[size_t(i)] = i;
  }
  return data;
}
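
/* The array built above is simply {0, 1, 2, ..., max_segment_size - 1}. Segments that encode a
 * contiguous range can reference a slice of this shared array (combined with their segment
 * offset), so ranges do not need any per-index storage of their own. */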

const IndexMask &get_static_index_mask_for_min_size(const int64_t min_size)
{
  static constexpr int64_t size_shift = 31;
  static constexpr int64_t max_size = (int64_t(1) << size_shift);      /* 2'147'483'648 */
  static constexpr int64_t segments_num = max_size / max_segment_size; /* 131'072 */

  /* Make sure we are never requesting a size that's larger than what was statically allocated.
   * If that's ever needed, we can either increase #size_shift or dynamically allocate an even
   * larger mask. */
  BLI_assert(min_size <= max_size);
  UNUSED_VARS_NDEBUG(min_size);

  static IndexMask static_mask = []() {
    static Array<const int16_t *> indices_by_segment(segments_num);
    /* The offsets and cumulative segment sizes array contain the same values here, so just use a
     * single array for both. */
    static Array<int64_t> segment_offsets(segments_num + 1);

    static const int16_t *static_offsets = get_static_indices_array().data();

    /* Isolate because the mutex protecting the initialization of #static_mask is locked. */
    threading::isolate_task([&]() {
      threading::parallel_for(IndexRange(segments_num), 1024, [&](const IndexRange range) {
        for (const int64_t segment_i : range) {
          indices_by_segment[segment_i] = static_offsets;
          segment_offsets[segment_i] = segment_i * max_segment_size;
        }
      });
    });
    segment_offsets.last() = max_size;

    IndexMask mask;
    IndexMaskData &data = mask.data_for_inplace_construction();
    data.indices_num_ = max_size;
    data.segments_num_ = segments_num;
    data.indices_by_segment_ = indices_by_segment.data();
    data.segment_offsets_ = segment_offsets.data();
    data.cumulative_segment_sizes_ = segment_offsets.data();
    data.begin_index_in_segment_ = 0;
    data.end_index_in_segment_ = max_segment_size;

    return mask;
  }();
  return static_mask;
}

std::ostream &operator<<(std::ostream &stream, const IndexMask &mask)
{
  Array<int64_t> indices(mask.size());
  mask.to_indices<int64_t>(indices);
  Vector<std::variant<IndexRange, Span<int64_t>>> segments;
  unique_sorted_indices::split_to_ranges_and_spans<int64_t>(indices.as_span(), 16, segments);
  Vector<std::string> parts;
  for (const std::variant<IndexRange, Span<int64_t>> &segment : segments) {
    if (std::holds_alternative<IndexRange>(segment)) {
      const IndexRange range = std::get<IndexRange>(segment);
      parts.append(fmt::format("{}-{}", range.first(), range.last()));
    }
    else {
      const Span<int64_t> segment_indices = std::get<Span<int64_t>>(segment);
      parts.append(fmt::format("{}", fmt::join(segment_indices, ", ")));
    }
  }
  stream << fmt::format("(Size: {} | {})", mask.size(), fmt::join(parts, ", "));
  return stream;
}
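
/* Illustrative output: a mask containing the indices 0 to 99 and 200 would print as
 * "(Size: 101 | 0-99, 200)". */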

IndexMask IndexMask::slice(const int64_t start, const int64_t size) const
{
  if (size == 0) {
    return {};
  }
  const RawMaskIterator first_it = this->index_to_iterator(start);
  const RawMaskIterator last_it = this->index_to_iterator(start + size - 1);

  IndexMask sliced = *this;
  sliced.indices_num_ = size;
  sliced.segments_num_ = last_it.segment_i - first_it.segment_i + 1;
  sliced.indices_by_segment_ += first_it.segment_i;
  sliced.segment_offsets_ += first_it.segment_i;
  sliced.cumulative_segment_sizes_ += first_it.segment_i;
  sliced.begin_index_in_segment_ = first_it.index_in_segment;
  sliced.end_index_in_segment_ = last_it.index_in_segment + 1;
  return sliced;
}

IndexMask IndexMask::slice(const RawMaskIterator first_it,
                           const RawMaskIterator last_it,
                           const int64_t size) const
{
  BLI_assert(this->iterator_to_index(last_it) - this->iterator_to_index(first_it) + 1 == size);
  IndexMask sliced = *this;
  sliced.indices_num_ = size;
  sliced.segments_num_ = last_it.segment_i - first_it.segment_i + 1;
  sliced.indices_by_segment_ += first_it.segment_i;
  sliced.segment_offsets_ += first_it.segment_i;
  sliced.cumulative_segment_sizes_ += first_it.segment_i;
  sliced.begin_index_in_segment_ = first_it.index_in_segment;
  sliced.end_index_in_segment_ = last_it.index_in_segment + 1;
  return sliced;
}

IndexMask IndexMask::slice_content(const IndexRange range) const
{
  return this->slice_content(range.start(), range.size());
}

IndexMask IndexMask::slice_content(const int64_t start, const int64_t size) const
{
  if (size <= 0) {
    return {};
  }
  const std::optional<RawMaskIterator> first_it = this->find_larger_equal(start);
  const std::optional<RawMaskIterator> last_it = this->find_smaller_equal(start + size - 1);
  if (!first_it || !last_it) {
    return {};
  }
  const int64_t first_index = this->iterator_to_index(*first_it);
  const int64_t last_index = this->iterator_to_index(*last_it);
  if (last_index < first_index) {
    return {};
  }
  const int64_t sliced_mask_size = last_index - first_index + 1;
  return this->slice(*first_it, *last_it, sliced_mask_size);
}
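
/* Note the difference between the slicing functions above: #slice selects by position within
 * the mask (the i-th up to the j-th stored index), while #slice_content selects by index value,
 * keeping only those indices whose value lies in the given range. */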

IndexMask IndexMask::slice_and_shift(const IndexRange range,
                                     const int64_t offset,
                                     IndexMaskMemory &memory) const
{
  return this->slice_and_shift(range.start(), range.size(), offset, memory);
}

IndexMask IndexMask::slice_and_shift(const int64_t start,
                                     const int64_t size,
                                     const int64_t offset,
                                     IndexMaskMemory &memory) const
{
  if (size == 0) {
    return {};
  }
  if (std::optional<IndexRange> range = this->to_range()) {
    return range->slice(start, size).shift(offset);
  }
  return this->slice(start, size).shift(offset, memory);
}

IndexMask IndexMask::shift(const int64_t offset, IndexMaskMemory &memory) const
{
  if (indices_num_ == 0) {
    return {};
  }
  BLI_assert(this->first() + offset >= 0);
  if (offset == 0) {
    return *this;
  }
  if (std::optional<IndexRange> range = this->to_range()) {
    return range->shift(offset);
  }
  IndexMask shifted_mask = *this;
  MutableSpan<int64_t> new_segment_offsets = memory.allocate_array<int64_t>(segments_num_);
  for (const int64_t i : IndexRange(segments_num_)) {
    new_segment_offsets[i] = segment_offsets_[i] + offset;
  }
  shifted_mask.segment_offsets_ = new_segment_offsets.data();
  return shifted_mask;
}
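
/* Illustrative example: shifting the mask {5, 6, 10} by -5 gives {0, 1, 5}. Only the
 * per-segment offsets are rewritten; the underlying int16_t index arrays are reused as-is. */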

int64_t consolidate_index_mask_segments(MutableSpan<IndexMaskSegment> segments,
                                        IndexMaskMemory & /*memory*/)
{
  if (segments.is_empty()) {
    return 0;
  }

  const Span<int16_t> static_indices = get_static_indices_array();

  /* TODO: Support merging non-range segments in some cases as well. */
  int64_t group_start_segment_i = 0;
  int64_t group_first = segments[0][0];
  int64_t group_last = segments[0].last();
  bool group_as_range = unique_sorted_indices::non_empty_is_range(segments[0].base_span());

  auto finish_group = [&](const int64_t last_segment_i) {
    if (group_start_segment_i == last_segment_i) {
      return;
    }
    /* Join multiple ranges together into a bigger range. */
    const IndexRange range = IndexRange::from_begin_end_inclusive(group_first, group_last);
    segments[group_start_segment_i] = IndexMaskSegment(range[0],
                                                       static_indices.take_front(range.size()));
    for (int64_t i = group_start_segment_i + 1; i <= last_segment_i; i++) {
      segments[i] = {};
    }
  };

  for (const int64_t segment_i : segments.index_range().drop_front(1)) {
    const IndexMaskSegment segment = segments[segment_i];
    const std::optional<IndexRange> segment_base_range =
        unique_sorted_indices::non_empty_as_range_try(segment.base_span());
    const bool segment_is_range = segment_base_range.has_value();

    if (group_as_range && segment_is_range) {
      if (group_last + 1 == segment[0]) {
        if (segment.last() - group_first + 1 < max_segment_size) {
          /* Can combine previous and current range. */
          group_last = segment.last();
          continue;
        }
      }
    }
    finish_group(segment_i - 1);

    group_start_segment_i = segment_i;
    group_first = segment[0];
    group_last = segment.last();
    group_as_range = segment_is_range;
  }
  finish_group(segments.size() - 1);

  /* Remove all segments that have been merged into previous segments. */
  const int64_t new_segments_num = std::remove_if(segments.begin(),
                                                  segments.end(),
                                                  [](const IndexMaskSegment segment) {
                                                    return segment.is_empty();
                                                  }) -
                                   segments.begin();
  return new_segments_num;
}
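
/* Illustrative example: the adjacent range segments [0, 100) and [100, 200) are merged into a
 * single range segment [0, 200), while [0, 16384) and [16384, 20000) stay separate because the
 * merged segment would exceed #max_segment_size. */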

IndexMask IndexMask::from_segments(const Span<IndexMaskSegment> segments, IndexMaskMemory &memory)
{
  if (segments.is_empty()) {
    return {};
  }
#ifndef NDEBUG
  {
    int64_t last_index = segments[0].last();
    for (const IndexMaskSegment &segment : segments.drop_front(1)) {
      BLI_assert(std::is_sorted(segment.base_span().begin(), segment.base_span().end()));
      BLI_assert(last_index < segment[0]);
      last_index = segment.last();
    }
  }
#endif
  const int64_t segments_num = segments.size();

  /* Allocate buffers for the mask. */
  MutableSpan<const int16_t *> indices_by_segment = memory.allocate_array<const int16_t *>(
      segments_num);
  MutableSpan<int64_t> segment_offsets = memory.allocate_array<int64_t>(segments_num);
  MutableSpan<int64_t> cumulative_segment_sizes = memory.allocate_array<int64_t>(segments_num + 1);

  /* Fill buffers. */
  cumulative_segment_sizes[0] = 0;
  for (const int64_t segment_i : segments.index_range()) {
    const IndexMaskSegment segment = segments[segment_i];
    indices_by_segment[segment_i] = segment.base_span().data();
    segment_offsets[segment_i] = segment.offset();
    cumulative_segment_sizes[segment_i + 1] = cumulative_segment_sizes[segment_i] + segment.size();
  }

  /* Initialize mask. */
  IndexMask mask;
  IndexMaskData &data = mask.data_for_inplace_construction();
  data.indices_num_ = cumulative_segment_sizes.last();
  data.segments_num_ = segments_num;
  data.indices_by_segment_ = indices_by_segment.data();
  data.segment_offsets_ = segment_offsets.data();
  data.cumulative_segment_sizes_ = cumulative_segment_sizes.data();
  data.begin_index_in_segment_ = 0;
  data.end_index_in_segment_ = segments.last().size();
  return mask;
}

/**
 * Splits the given unique sorted indices into segments of at most #max_segment_size indices.
 * The referenced index arrays are either statically allocated or owned by the allocator.
 */
template<typename T, int64_t InlineBufferSize>
static void segments_from_indices(const Span<T> indices,
                                  LinearAllocator<> &allocator,
                                  Vector<IndexMaskSegment, InlineBufferSize> &r_segments)
{
  Vector<std::variant<IndexRange, Span<T>>, 16> segments;

  for (int64_t start = 0; start < indices.size(); start += max_segment_size) {
    /* Slice to make sure that each segment is no longer than #max_segment_size. */
    const Span<T> indices_slice = indices.slice_safe(start, max_segment_size);
    unique_sorted_indices::split_to_ranges_and_spans<T>(indices_slice, 64, segments);
  }

  const Span<int16_t> static_indices = get_static_indices_array();
  for (const auto &segment : segments) {
    if (std::holds_alternative<IndexRange>(segment)) {
      const IndexRange segment_range = std::get<IndexRange>(segment);
      r_segments.append_as(segment_range.start(), static_indices.take_front(segment_range.size()));
    }
    else {
      Span<T> segment_indices = std::get<Span<T>>(segment);
      MutableSpan<int16_t> offset_indices = allocator.allocate_array<int16_t>(
          segment_indices.size());
      while (!segment_indices.is_empty()) {
        const int64_t offset = segment_indices[0];
        const int64_t next_segment_size = binary_search::first_if(
            segment_indices.take_front(max_segment_size),
            [&](const T value) { return value - offset >= max_segment_size; });
        for (const int64_t i : IndexRange(next_segment_size)) {
          const int64_t offset_index = segment_indices[i] - offset;
          BLI_assert(offset_index < max_segment_size);
          offset_indices[i] = int16_t(offset_index);
        }
        r_segments.append_as(offset, offset_indices.take_front(next_segment_size));
        segment_indices = segment_indices.drop_front(next_segment_size);
        offset_indices = offset_indices.drop_front(next_segment_size);
      }
    }
  }
}
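
/* Illustrative example: the indices {0, 1, ..., 999, 50000, 50002} become two segments: a range
 * segment referencing the static indices array, and a two-element segment with offset 50000 and
 * the local indices {0, 2}. */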

/**
 * Gathers index mask segments that are created by multiple threads, so that they can be merged
 * into a single vector in the end.
 */
struct ParallelSegmentsCollector {
  struct LocalData {
    LinearAllocator<> allocator;
    Vector<IndexMaskSegment, 16> segments;
  };

  threading::EnumerableThreadSpecific<LocalData> data_by_thread;

  /**
   * Moves the segments from all threads into #main_segments (sorted by their first index) and
   * makes sure that the memory is owned by #main_allocator afterwards.
   */
  void reduce(LinearAllocator<> &main_allocator, Vector<IndexMaskSegment, 16> &main_segments)
  {
    for (LocalData &data : this->data_by_thread) {
      main_allocator.transfer_ownership_from(data.allocator);
      main_segments.extend(data.segments);
    }
    parallel_sort(main_segments.begin(),
                  main_segments.end(),
                  [](const IndexMaskSegment a, const IndexMaskSegment b) { return a[0] < b[0]; });
  }
};

IndexMask IndexMask::complement(const IndexMask &universe, IndexMaskMemory &memory) const
{
  ExprBuilder builder;
  const Expr &expr = builder.subtract(&universe, {this});
  return evaluate_expression(expr, memory);
}

template<typename T>
IndexMask IndexMask::from_indices(const Span<T> indices, IndexMaskMemory &memory)
{
  if (indices.is_empty()) {
    return {};
  }
  if (const std::optional<IndexRange> range = unique_sorted_indices::non_empty_as_range_try(
          indices))
  {
    /* Fast case when the indices encode a single range. */
    return *range;
  }

  Vector<IndexMaskSegment, 16> segments;

  constexpr int64_t min_grain_size = 4096;
  constexpr int64_t max_grain_size = max_segment_size;
  if (indices.size() <= min_grain_size) {
    segments_from_indices(indices, memory, segments);
  }
  else {
    const int64_t threads_num = BLI_system_thread_count();
    /* Can be faster with a larger grain size, but only when there are enough indices. */
    const int64_t grain_size = std::clamp(
        indices.size() / (threads_num * 4), min_grain_size, max_grain_size);

    ParallelSegmentsCollector segments_collector;
    threading::parallel_for(indices.index_range(), grain_size, [&](const IndexRange range) {
      ParallelSegmentsCollector::LocalData &local_data = segments_collector.data_by_thread.local();
      segments_from_indices(indices.slice(range), local_data.allocator, local_data.segments);
    });
    segments_collector.reduce(memory, segments);
  }
  const int64_t consolidated_segments_num = consolidate_index_mask_segments(segments, memory);
  segments.resize(consolidated_segments_num);
  return IndexMask::from_segments(segments, memory);
}
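
/* A minimal usage sketch (hypothetical values):
 *   IndexMaskMemory memory;
 *   const IndexMask mask = IndexMask::from_indices<int>({4, 6, 7, 8}, memory);
 * The given indices must be unique and sorted. */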

IndexMask IndexMask::from_bits(const BitSpan bits, IndexMaskMemory &memory)
{
  return IndexMask::from_bits(bits.index_range(), bits, memory);
}

static int64_t from_bits_batch_predicate(const IndexMaskSegment universe_segment,
                                         IndexRangesBuilder<int16_t> &builder,
                                         const BitSpan bits_slice)
{
  const int64_t segment_start = universe_segment[0];
  if (unique_sorted_indices::non_empty_is_range(universe_segment.base_span())) {
    bits::bits_to_index_ranges<int16_t>(bits_slice, builder);
  }
  else {
    /* If the universe is not a range, we need to create a new bit span first. In it, bits
     * that are not part of the universe are set to 0. */
    const int64_t segment_end = universe_segment.last() + 1;
    BitVector<max_segment_size> local_bits(segment_end - segment_start, false);
    for (const int64_t i : universe_segment.index_range()) {
      const int64_t global_index = universe_segment[i];
      const int64_t local_index = global_index - segment_start;
      BLI_assert(local_index < max_segment_size);
      /* It's not great to handle each index separately instead of working with bigger
       * chunks, but that works well enough for now. */
      if (bits_slice[local_index]) {
        local_bits[local_index].set();
      }
    }
    bits::bits_to_index_ranges<int16_t>(local_bits, builder);
  }
  return segment_start;
}

IndexMask IndexMask::from_bits(const IndexMask &universe,
                               const BitSpan bits,
                               IndexMaskMemory &memory)
{
  BLI_assert(bits.size() >= universe.min_array_size());
  /* Use #from_batch_predicate because we can process many bits at once. */
  return IndexMask::from_batch_predicate(
      universe,
      GrainSize(max_segment_size),
      memory,
      [&](const IndexMaskSegment universe_segment, IndexRangesBuilder<int16_t> &builder) {
        const IndexRange slice = IndexRange::from_begin_end_inclusive(universe_segment[0],
                                                                      universe_segment.last());
        return from_bits_batch_predicate(universe_segment, builder, bits.slice(slice));
      });
}
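
/* Illustrative example: for the bits 1011 (bits 0, 1 and 3 set) and the universe {0, ..., 3},
 * the resulting mask is {0, 1, 3}. */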

static void segments_from_batch_predicate(
    const IndexMaskSegment universe_segment,
    LinearAllocator<> &allocator,
    const FunctionRef<int64_t(const IndexMaskSegment &universe_segment,
                              IndexRangesBuilder<int16_t> &builder)> batch_predicate,
    Vector<IndexMaskSegment, 16> &r_segments)
{
  IndexRangesBuilderBuffer<int16_t, max_segment_size> builder_buffer;
  IndexRangesBuilder<int16_t> builder{builder_buffer};
  const int64_t segment_shift = batch_predicate(universe_segment, builder);
  if (builder.is_empty()) {
    return;
  }
  const Span<int16_t> static_indices = get_static_indices_array();

  /* This threshold trades off the number of segments and the number of ranges. In some cases,
   * masks with fewer segments can be built more efficiently, but when iterating over a mask it
   * may be beneficial to have more ranges, because ranges can be processed more efficiently.
   * This could be exposed to the caller in the future. */
  constexpr int64_t threshold = 64;
  int64_t next_range_to_process = 0;
  int64_t skipped_indices_num = 0;

  /* Builds an index mask segment from a bunch of smaller ranges (which could be individual
   * indices). */
  auto consolidate_skipped_ranges = [&](int64_t end_range_i) {
    if (skipped_indices_num == 0) {
      return;
    }
    MutableSpan<int16_t> indices = allocator.allocate_array<int16_t>(skipped_indices_num);
    int64_t counter = 0;
    for (const int64_t i : IndexRange::from_begin_end(next_range_to_process, end_range_i)) {
      const IndexRange range = builder[i];
      array_utils::fill_index_range(indices.slice(counter, range.size()), int16_t(range.first()));
      counter += range.size();
    }
    r_segments.append(IndexMaskSegment{segment_shift, indices});
  };

  for (const int64_t i : builder.index_range()) {
    const IndexRange range = builder[i];
    if (range.size() > threshold || builder.size() == 1) {
      consolidate_skipped_ranges(i);
      r_segments.append(IndexMaskSegment{segment_shift, static_indices.slice(range)});
      next_range_to_process = i + 1;
      skipped_indices_num = 0;
    }
    else {
      skipped_indices_num += range.size();
    }
  }
  consolidate_skipped_ranges(builder.size());
}

IndexMask IndexMask::from_batch_predicate(
    const IndexMask &universe,
    GrainSize grain_size,
    IndexMaskMemory &memory,
    const FunctionRef<int64_t(const IndexMaskSegment &universe_segment,
                              IndexRangesBuilder<int16_t> &builder)> batch_predicate)
{
  if (universe.is_empty()) {
    return {};
  }

  Vector<IndexMaskSegment, 16> segments;
  if (universe.size() <= grain_size.value) {
    for (const int64_t segment_i : IndexRange(universe.segments_num())) {
      const IndexMaskSegment universe_segment = universe.segment(segment_i);
      segments_from_batch_predicate(universe_segment, memory, batch_predicate, segments);
    }
  }
  else {
    ParallelSegmentsCollector segments_collector;
    universe.foreach_segment(grain_size, [&](const IndexMaskSegment universe_segment) {
      ParallelSegmentsCollector::LocalData &data = segments_collector.data_by_thread.local();
      segments_from_batch_predicate(
          universe_segment, data.allocator, batch_predicate, data.segments);
    });
    segments_collector.reduce(memory, segments);
  }

  return IndexMask::from_segments(segments, memory);
}

IndexMask IndexMask::from_bools(const Span<bool> bools, IndexMaskMemory &memory)
{
  return IndexMask::from_bools(bools.index_range(), bools, memory);
}

IndexMask IndexMask::from_bools(const VArray<bool> &bools, IndexMaskMemory &memory)
{
  return IndexMask::from_bools(bools.index_range(), bools, memory);
}

IndexMask IndexMask::from_bools_inverse(const VArray<bool> &bools, IndexMaskMemory &memory)
{
  return IndexMask::from_bools_inverse(bools.index_range(), bools, memory);
}

IndexMask IndexMask::from_bools(const IndexMask &universe,
                                Span<bool> bools,
                                IndexMaskMemory &memory)
{
  BLI_assert(bools.size() >= universe.min_array_size());
  return IndexMask::from_batch_predicate(
      universe,
      GrainSize(max_segment_size),
      memory,
      [&](const IndexMaskSegment universe_segment, IndexRangesBuilder<int16_t> &builder) {
        const IndexRange slice = IndexRange::from_begin_end_inclusive(universe_segment[0],
                                                                      universe_segment.last());
        /* +16 to allow for some overshoot when converting bools to bits. */
        BitVector<max_segment_size + 16> bits;
        bits.resize(slice.size(), false);
        const int64_t allowed_overshoot = std::min<int64_t>(bits.capacity() - slice.size(),
                                                            bools.size() - slice.one_after_last());
        const bool any_true = bits::or_bools_into_bits(
            bools.slice(slice), bits, allowed_overshoot);
        if (!any_true) {
          return int64_t(0);
        }
        return from_bits_batch_predicate(universe_segment, builder, bits);
      });
}

IndexMask IndexMask::from_bools_inverse(const IndexMask &universe,
                                        Span<bool> bools,
                                        IndexMaskMemory &memory)
{
  BitVector bits(bools);
  bits::invert(bits);
  return IndexMask::from_bits(universe, bits, memory);
}

IndexMask IndexMask::from_bools(const IndexMask &universe,
                                const VArray<bool> &bools,
                                IndexMaskMemory &memory)
{
  const CommonVArrayInfo info = bools.common_info();
  if (info.type == CommonVArrayInfo::Type::Single) {
    return *static_cast<const bool *>(info.data) ? universe : IndexMask();
  }
  if (info.type == CommonVArrayInfo::Type::Span) {
    const Span<bool> span(static_cast<const bool *>(info.data), bools.size());
    return IndexMask::from_bools(universe, span, memory);
  }
  return IndexMask::from_predicate(
      universe, GrainSize(512), memory, [&](const int64_t index) { return bools[index]; });
}

IndexMask IndexMask::from_bools_inverse(const IndexMask &universe,
                                        const VArray<bool> &bools,
                                        IndexMaskMemory &memory)
{
  const CommonVArrayInfo info = bools.common_info();
  if (info.type == CommonVArrayInfo::Type::Single) {
    return *static_cast<const bool *>(info.data) ? IndexMask() : universe;
  }
  if (info.type == CommonVArrayInfo::Type::Span) {
    const Span<bool> span(static_cast<const bool *>(info.data), bools.size());
    return IndexMask::from_bools_inverse(universe, span, memory);
  }
  return IndexMask::from_predicate(
      universe, GrainSize(512), memory, [&](const int64_t index) { return !bools[index]; });
}
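
/* Illustrative example: from_bools with the bools {true, false, true, true} selects {0, 2, 3};
 * the *_inverse variants select the positions containing false instead. */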

template<typename T>
IndexMask IndexMask::from_ranges(const OffsetIndices<T> offsets,
                                 const IndexMask &mask,
                                 IndexMaskMemory &memory)
{
  Vector<IndexMaskSegment, 16> segments;
  mask.foreach_range([&](const IndexRange mask_range) {
    const IndexRange range = offsets[mask_range];
    index_range_to_mask_segments(range, segments);
  });
  return IndexMask::from_segments(segments, memory);
}
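
/* Illustrative example: with the offsets {0, 3, 7, 9} (i.e. the ranges [0, 3), [3, 7) and
 * [7, 9)) and the mask {0, 2}, the result contains {0, 1, 2, 7, 8}. */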

IndexMask IndexMask::from_union(const IndexMask &mask_a,
                                const IndexMask &mask_b,
                                IndexMaskMemory &memory)
{
  return IndexMask::from_union({mask_a, mask_b}, memory);
}

IndexMask IndexMask::from_union(const Span<IndexMask> masks, IndexMaskMemory &memory)
{
  ExprBuilder builder;
  Vector<ExprBuilder::Term> terms;
  for (const IndexMask &mask : masks) {
    terms.append(&mask);
  }
  const Expr &expr = builder.merge(terms);
  return evaluate_expression(expr, memory);
}

IndexMask IndexMask::from_difference(const IndexMask &mask_a,
                                     const IndexMask &mask_b,
                                     IndexMaskMemory &memory)
{
  ExprBuilder builder;
  const Expr &expr = builder.subtract({&mask_a}, {&mask_b});
  return evaluate_expression(expr, memory);
}

IndexMask IndexMask::from_intersection(const IndexMask &mask_a,
                                       const IndexMask &mask_b,
                                       IndexMaskMemory &memory)
{
  ExprBuilder builder;
  const Expr &expr = builder.intersect({&mask_a, &mask_b});
  return evaluate_expression(expr, memory);
}

IndexMask IndexMask::from_initializers(const Span<Initializer> initializers,
                                       IndexMaskMemory &memory)
{
  Set<int64_t> values;
  for (const Initializer &item : initializers) {
    if (const auto *range = std::get_if<IndexRange>(&item)) {
      for (const int64_t i : *range) {
        values.add(i);
      }
    }
    else if (const auto *span_i64 = std::get_if<Span<int64_t>>(&item)) {
      for (const int64_t i : *span_i64) {
        values.add(i);
      }
    }
    else if (const auto *span_i32 = std::get_if<Span<int>>(&item)) {
      for (const int i : *span_i32) {
        values.add(i);
      }
    }
    else if (const auto *index = std::get_if<int64_t>(&item)) {
      values.add(*index);
    }
  }
  Vector<int64_t> values_vec;
  values_vec.extend(values.begin(), values.end());
  std::sort(values_vec.begin(), values_vec.end());
  return IndexMask::from_indices(values_vec.as_span(), memory);
}

template<typename T> void IndexMask::to_indices(MutableSpan<T> r_indices) const
{
  BLI_assert(this->size() == r_indices.size());
  this->foreach_index_optimized<int64_t>(
      GrainSize(1024), [r_indices = r_indices.data()](const int64_t i, const int64_t pos) {
        r_indices[pos] = T(i);
      });
}

void IndexMask::set_bits(MutableBitSpan r_bits, const int64_t offset) const
{
  BLI_assert(r_bits.size() >= this->min_array_size() + offset);
  this->foreach_segment_optimized([&](const auto segment) {
    if constexpr (std::is_same_v<std::decay_t<decltype(segment)>, IndexRange>) {
      const IndexRange range = segment;
      const IndexRange shifted_range = range.shift(offset);
      r_bits.slice(shifted_range).set_all();
    }
    else {
      const IndexMaskSegment indices = segment;
      const IndexMaskSegment shifted_indices = indices.shift(offset);
      for (const int64_t i : shifted_indices) {
        r_bits[i].set();
      }
    }
  });
}

void IndexMask::to_bits(MutableBitSpan r_bits, const int64_t offset) const
{
  BLI_assert(r_bits.size() >= this->min_array_size() + offset);
  r_bits.reset_all();
  this->set_bits(r_bits, offset);
}

void IndexMask::to_bools(MutableSpan<bool> r_bools) const
{
  BLI_assert(r_bools.size() >= this->min_array_size());
  r_bools.fill(false);
  this->foreach_index_optimized<int64_t>(
      [&](const int64_t i) { r_bools[i] = true; });
}

Vector<IndexRange> IndexMask::to_ranges() const
{
  Vector<IndexRange> ranges;
  this->foreach_range([&](const IndexRange range) { ranges.append(range); });
  return ranges;
}

Vector<IndexRange> IndexMask::to_ranges_invert(const IndexRange universe) const
{
  IndexMaskMemory memory;
  return this->complement(universe, memory).to_ranges();
}

namespace detail {

static void segments_from_predicate_filter(
    const IndexMaskSegment universe_segment,
    LinearAllocator<> &allocator,
    const FunctionRef<int64_t(IndexMaskSegment indices, int16_t *r_true_indices)> filter_indices,
    Vector<IndexMaskSegment, 16> &r_segments)
{
  std::array<int16_t, max_segment_size> indices_array;
  const int64_t true_indices_num = filter_indices(universe_segment, indices_array.data());
  if (true_indices_num == 0) {
    return;
  }
  const Span<int16_t> true_indices{indices_array.data(), true_indices_num};
  Vector<std::variant<IndexRange, Span<int16_t>>, 16> true_segments;
  unique_sorted_indices::split_to_ranges_and_spans<int16_t>(true_indices, 64, true_segments);

  const Span<int16_t> static_indices = get_static_indices_array();

  for (const auto &true_segment : true_segments) {
    if (std::holds_alternative<IndexRange>(true_segment)) {
      const IndexRange segment_range = std::get<IndexRange>(true_segment);
      r_segments.append_as(universe_segment.offset(), static_indices.slice(segment_range));
    }
    else {
      const Span<int16_t> segment_indices = std::get<Span<int16_t>>(true_segment);
      r_segments.append_as(universe_segment.offset(),
                           allocator.construct_array_copy(segment_indices));
    }
  }
}

IndexMask from_predicate_impl(
    const IndexMask &universe,
    const GrainSize grain_size,
    IndexMaskMemory &memory,
    const FunctionRef<int64_t(IndexMaskSegment indices, int16_t *r_true_indices)> filter_indices)
{
  if (universe.is_empty()) {
    return {};
  }

  Vector<IndexMaskSegment, 16> segments;
  if (universe.size() <= grain_size.value) {
    for (const int64_t segment_i : IndexRange(universe.segments_num())) {
      const IndexMaskSegment universe_segment = universe.segment(segment_i);
      segments_from_predicate_filter(universe_segment, memory, filter_indices, segments);
    }
  }
  else {
    ParallelSegmentsCollector segments_collector;
    universe.foreach_segment(grain_size, [&](const IndexMaskSegment universe_segment) {
      ParallelSegmentsCollector::LocalData &data = segments_collector.data_by_thread.local();
      segments_from_predicate_filter(
          universe_segment, data.allocator, filter_indices, data.segments);
    });
    segments_collector.reduce(memory, segments);
  }

  const int64_t consolidated_segments_num = consolidate_index_mask_segments(segments, memory);
  segments.resize(consolidated_segments_num);
  return IndexMask::from_segments(segments, memory);
}
} // namespace detail
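
/* The functions above back the public #IndexMask::from_predicate. A usage sketch with a
 * hypothetical predicate:
 *   IndexMaskMemory memory;
 *   const IndexMask even_mask = IndexMask::from_predicate(
 *       universe, GrainSize(1024), memory, [](const int64_t i) { return i % 2 == 0; });
 */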

std::optional<RawMaskIterator> IndexMask::find(const int64_t query_index) const
{
  if (const std::optional<RawMaskIterator> it = this->find_larger_equal(query_index)) {
    if ((*this)[*it] == query_index) {
      return it;
    }
  }
  return std::nullopt;
}

std::optional<RawMaskIterator> IndexMask::find_larger_equal(const int64_t query_index) const
{
  const int64_t segment_i = binary_search::first_if(
      IndexRange(segments_num_),
      [&](const int64_t seg_i) { return this->segment(seg_i).last() >= query_index; });
  if (segment_i == segments_num_) {
    /* The query index is larger than the largest index in this mask. */
    return std::nullopt;
  }
  const IndexMaskSegment segment = this->segment(segment_i);
  const int64_t segment_begin_index = segment.base_span().data() - indices_by_segment_[segment_i];
  if (query_index < segment[0]) {
    /* The query index is smaller than the first element in this segment, so that element is the
     * first one that is larger or equal. */
    const int64_t index_in_segment = segment_begin_index;
    BLI_assert(index_in_segment < max_segment_size);
    return RawMaskIterator{segment_i, int16_t(index_in_segment)};
  }
  /* The query index is somewhere within this segment. */
  const int64_t local_index = query_index - segment.offset();
  const int64_t index_in_segment = binary_search::first_if(
      segment.base_span(), [&](const int16_t i) { return i >= local_index; });
  const int64_t actual_index_in_segment = index_in_segment + segment_begin_index;
  BLI_assert(actual_index_in_segment < max_segment_size);
  return RawMaskIterator{segment_i, int16_t(actual_index_in_segment)};
}

std::optional<RawMaskIterator> IndexMask::find_smaller_equal(const int64_t query_index) const
{
  if (indices_num_ == 0) {
    return std::nullopt;
  }
  const std::optional<RawMaskIterator> larger_equal_it = this->find_larger_equal(query_index);
  if (!larger_equal_it) {
    /* Return the last element. */
    return RawMaskIterator{segments_num_ - 1, int16_t(end_index_in_segment_ - 1)};
  }
  if ((*this)[*larger_equal_it] == query_index) {
    /* This is an exact hit. */
    return larger_equal_it;
  }
  if (larger_equal_it->segment_i > 0) {
    if (larger_equal_it->index_in_segment > 0) {
      /* Previous element in same segment. */
      return RawMaskIterator{larger_equal_it->segment_i,
                             int16_t(larger_equal_it->index_in_segment - 1)};
    }
    /* Last element in previous segment. */
    return RawMaskIterator{larger_equal_it->segment_i - 1,
                           int16_t(cumulative_segment_sizes_[larger_equal_it->segment_i] -
                                   cumulative_segment_sizes_[larger_equal_it->segment_i - 1] - 1)};
  }
  if (larger_equal_it->index_in_segment > begin_index_in_segment_) {
    /* Previous element in same segment. */
    return RawMaskIterator{0, int16_t(larger_equal_it->index_in_segment - 1)};
  }
  return std::nullopt;
}

bool IndexMask::contains(const int64_t query_index) const
{
  return this->find(query_index).has_value();
}

static Array<int16_t> build_every_nth_index_array(const int64_t n)
{
  Array<int16_t> data(max_segment_size / n);
  for (const int64_t i : data.index_range()) {
    const int64_t index = i * n;
    BLI_assert(index < max_segment_size);
    data[i] = int16_t(index);
  }
  return data;
}

/**
 * Returns a span containing the indices {0, n, 2n, ...} with #repetitions elements.
 */
static Span<int16_t> get_every_nth_index(const int64_t n,
                                         const int64_t repetitions,
                                         IndexMaskMemory &memory)
{
  BLI_assert(n >= 2);
  BLI_assert(n * repetitions <= max_segment_size);

  switch (n) {
    case 2: {
      static auto data = build_every_nth_index_array(2);
      return data.as_span().take_front(repetitions);
    }
    case 3: {
      static auto data = build_every_nth_index_array(3);
      return data.as_span().take_front(repetitions);
    }
    case 4: {
      static auto data = build_every_nth_index_array(4);
      return data.as_span().take_front(repetitions);
    }
    default: {
      MutableSpan<int16_t> data = memory.allocate_array<int16_t>(repetitions);
      for (const int64_t i : IndexRange(repetitions)) {
        const int64_t index = i * n;
        BLI_assert(index < max_segment_size);
        data[i] = int16_t(index);
      }
      return data;
    }
  }
}

IndexMask IndexMask::from_repeating(const IndexMask &mask_to_repeat,
                                    const int64_t repetitions,
                                    const int64_t stride,
                                    const int64_t initial_offset,
                                    IndexMaskMemory &memory)
{
  if (mask_to_repeat.is_empty()) {
    return {};
  }
  BLI_assert(mask_to_repeat.last() < stride);
  if (repetitions == 0) {
    return {};
  }
  if (repetitions == 1 && initial_offset == 0) {
    /* The output is the same as the input mask. */
    return mask_to_repeat;
  }
  const std::optional<IndexRange> range_to_repeat = mask_to_repeat.to_range();
  if (range_to_repeat && range_to_repeat->first() == 0 && range_to_repeat->size() == stride) {
    /* The output is a range. */
    return IndexRange(initial_offset, repetitions * stride);
  }
  const int64_t segments_num = mask_to_repeat.segments_num();
  const IndexRange bounds = mask_to_repeat.bounds();

  /* Avoid having many very small segments by creating a single segment that contains the input
   * multiple times already. This way, a lower total number of segments is necessary. */
  if (segments_num == 1 && stride <= max_segment_size / 2 && mask_to_repeat.size() <= 256) {
    const IndexMaskSegment src_segment = mask_to_repeat.segment(0);
    /* Number of repetitions that fit into a single segment. */
    const int64_t inline_repetitions_num = std::min(repetitions, max_segment_size / stride);
    Span<int16_t> repeated_indices;
    if (src_segment.size() == 1) {
      /* Optimize the case when a single index is repeated. */
      repeated_indices = get_every_nth_index(stride, inline_repetitions_num, memory);
    }
    else {
      /* More general case that repeats multiple indices. */
      MutableSpan<int16_t> repeated_indices_mut = memory.allocate_array<int16_t>(
          inline_repetitions_num * src_segment.size());
      for (const int64_t repetition : IndexRange(inline_repetitions_num)) {
        for (const int64_t i : src_segment.index_range()) {
          const int64_t index = src_segment[i] - src_segment[0] + repetition * stride;
          BLI_assert(index < max_segment_size);
          repeated_indices_mut[repetition * src_segment.size() + i] = int16_t(index);
        }
      }
      repeated_indices = repeated_indices_mut;
    }
    BLI_assert(repeated_indices[0] == 0);

    Vector<IndexMaskSegment, 16> repeated_segments;
    const int64_t result_segments_num = ceil_division(repetitions, inline_repetitions_num);
    for (const int64_t i : IndexRange(result_segments_num)) {
      const int64_t used_repetitions = std::min(inline_repetitions_num,
                                                repetitions - i * inline_repetitions_num);
      repeated_segments.append(
          IndexMaskSegment(initial_offset + bounds.first() + i * stride * inline_repetitions_num,
                           repeated_indices.take_front(used_repetitions * src_segment.size())));
    }
    return IndexMask::from_segments(repeated_segments, memory);
  }

  /* Simply repeat and offset the existing segments in the input mask. */
  Vector<IndexMaskSegment, 16> repeated_segments;
  for (const int64_t repetition : IndexRange(repetitions)) {
    for (const int64_t segment_i : IndexRange(segments_num)) {
      const IndexMaskSegment segment = mask_to_repeat.segment(segment_i);
      repeated_segments.append(IndexMaskSegment(
          segment.offset() + repetition * stride + initial_offset, segment.base_span()));
    }
  }
  return IndexMask::from_segments(repeated_segments, memory);
}
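
/* Illustrative example: repeating the mask {0, 2} three times with stride 5 and initial offset
 * 100 yields {100, 102, 105, 107, 110, 112}. */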

IndexMask IndexMask::from_every_nth(const int64_t n,
                                    const int64_t indices_num,
                                    const int64_t initial_offset,
                                    IndexMaskMemory &memory)
{
  BLI_assert(n >= 1);
  return IndexMask::from_repeating(IndexRange(1), indices_num, n, initial_offset, memory);
}
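
/* Illustrative example: from_every_nth(3, 4, 0, memory) yields the mask {0, 3, 6, 9}. */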

void IndexMask::foreach_segment_zipped(const Span<IndexMask> masks,
                                       const FunctionRef<bool(Span<IndexMaskSegment> segments)> fn)
{
  BLI_assert(!masks.is_empty());
  BLI_assert(std::all_of(masks.begin() + 1, masks.end(), [&](const IndexMask &mask) {
    return masks[0].size() == mask.size();
  }));

  Array<int64_t, 8> segment_iter(masks.size(), 0);
  Array<int16_t, 8> start_iter(masks.size(), 0);

  Array<IndexMaskSegment, 8> segments(masks.size());
  Array<IndexMaskSegment, 8> sequences(masks.size());

  /* This function only takes the positions of indices into account, not the index values
   * themselves. Since all masks have the same size, they are fragmented in the same position
   * space, so the last segments of all masks end at the same position and all segment iterators
   * run out of range at the same time. */
  while (segment_iter[0] != masks[0].segments_num()) {
    for (const int64_t mask_i : masks.index_range()) {
      if (start_iter[mask_i] == 0) {
        segments[mask_i] = masks[mask_i].segment(segment_iter[mask_i]);
      }
    }

    int16_t next_common_sequence_size = std::numeric_limits<int16_t>::max();
    for (const int64_t mask_i : masks.index_range()) {
      next_common_sequence_size = math::min(next_common_sequence_size,
                                            int16_t(segments[mask_i].size() - start_iter[mask_i]));
    }

    for (const int64_t mask_i : masks.index_range()) {
      sequences[mask_i] = segments[mask_i].slice(start_iter[mask_i], next_common_sequence_size);
    }

    if (!fn(sequences)) {
      break;
    }

    for (const int64_t mask_i : masks.index_range()) {
      if (segments[mask_i].size() - start_iter[mask_i] == next_common_sequence_size) {
        segment_iter[mask_i]++;
        start_iter[mask_i] = 0;
      }
      else {
        start_iter[mask_i] += next_common_sequence_size;
      }
    }
  }
}

static bool segments_is_equal(const IndexMaskSegment &a, const IndexMaskSegment &b)
{
  if (a.size() != b.size()) {
    return false;
  }
  if (a.is_empty()) {
    /* Both segments are empty. */
    return true;
  }
  if (a[0] != b[0]) {
    return false;
  }

  const bool a_is_range = unique_sorted_indices::non_empty_is_range(a.base_span());
  const bool b_is_range = unique_sorted_indices::non_empty_is_range(b.base_span());
  if (a_is_range || b_is_range) {
    return a_is_range && b_is_range;
  }

  const Span<int16_t> a_indices = a.base_span();
  [[maybe_unused]] const Span<int16_t> b_indices = b.base_span();

  const int64_t offset_difference = int16_t(b.offset() - a.offset());

  BLI_assert(a_indices[0] >= 0 && b_indices[0] >= 0);
  BLI_assert(b_indices[0] == a_indices[0] - offset_difference);

  return std::equal(a_indices.begin(),
                    a_indices.end(),
                    b.base_span().begin(),
                    [offset_difference](const int16_t a_index, const int16_t b_index) -> bool {
                      return a_index - offset_difference == b_index;
                    });
}

bool operator==(const IndexMask &a, const IndexMask &b)
{
  if (a.size() != b.size()) {
    return false;
  }

  const std::optional<IndexRange> a_as_range = a.to_range();
  const std::optional<IndexRange> b_as_range = b.to_range();
  if (a_as_range.has_value() || b_as_range.has_value()) {
    return a_as_range == b_as_range;
  }

  bool equals = true;
  IndexMask::foreach_segment_zipped({a, b}, [&](const Span<IndexMaskSegment> segments) {
    equals &= segments_is_equal(segments[0], segments[1]);
    return equals;
  });

  return equals;
}

Vector<IndexMask, 4> IndexMask::from_group_ids(const IndexMask &universe,
                                               const VArray<int> &group_ids,
                                               IndexMaskMemory &memory,
                                               VectorSet<int> &r_index_by_group_id)
{
  BLI_assert(group_ids.size() >= universe.min_array_size());
  Vector<IndexMask, 4> result_masks;
  if (const std::optional<int> single_group_id = group_ids.get_if_single()) {
    /* Optimize for the case when all group ids are the same. */
    const int64_t group_index = r_index_by_group_id.index_of_or_add(*single_group_id);
    const int64_t groups_num = r_index_by_group_id.size();
    result_masks.resize(groups_num);
    result_masks[group_index] = universe;
    return result_masks;
  }

  const VArraySpan<int> group_ids_span{group_ids};
  universe.foreach_index([&](const int64_t i) { r_index_by_group_id.add(group_ids_span[i]); });
  const int64_t groups_num = r_index_by_group_id.size();
  result_masks.resize(groups_num);
  IndexMask::from_groups<int>(
      universe,
      memory,
      [&](const int64_t i) {
        const int group_id = group_ids_span[i];
        return r_index_by_group_id.index_of(group_id);
      },
      result_masks);
  return result_masks;
}

Vector<IndexMask, 4> IndexMask::from_group_ids(const VArray<int> &group_ids,
                                               IndexMaskMemory &memory,
                                               VectorSet<int> &r_index_by_group_id)
{
  return IndexMask::from_group_ids(
      IndexMask(group_ids.size()), group_ids, memory, r_index_by_group_id);
}
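
/* Illustrative example: for the group ids {7, 7, 3, 7} this returns the masks {0, 1, 3} (for id
 * 7) and {2} (for id 3), in the order in which the ids are first encountered, and
 * r_index_by_group_id maps each id to the index of its mask. */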

template IndexMask IndexMask::from_indices(Span<int32_t>, IndexMaskMemory &);
template IndexMask IndexMask::from_indices(Span<int64_t>, IndexMaskMemory &);
template void IndexMask::to_indices(MutableSpan<int32_t>) const;
template void IndexMask::to_indices(MutableSpan<int64_t>) const;
template IndexMask IndexMask::from_ranges(OffsetIndices<int32_t>,
                                          const IndexMask &,
                                          IndexMaskMemory &);
template IndexMask IndexMask::from_ranges(OffsetIndices<int64_t>,
                                          const IndexMask &,
                                          IndexMaskMemory &);

IndexMask random_mask(const IndexMask &mask,
                      const int64_t universe_size,
                      const uint32_t random_seed,
                      const float probability,
                      IndexMaskMemory &memory)
{
  RandomNumberGenerator rng{random_seed};
  const auto next_bool_random_value = [&]() { return rng.get_float() <= probability; };

  Array<bool> random(universe_size, false);
  mask.foreach_index_optimized<int64_t>(
      [&](const int64_t i) { random[i] = next_bool_random_value(); });

  return IndexMask::from_bools(IndexRange(universe_size), random, memory);
}

IndexMask random_mask(const int64_t universe_size,
                      const uint32_t random_seed,
                      const float probability,
                      IndexMaskMemory &memory)
{
  const IndexRange selection(universe_size);
  return random_mask(selection, universe_size, random_seed, probability, memory);
}

} // namespace blender::index_mask