Blender V4.3
index_mask.cc
/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#include <fmt/format.h>
#include <iostream>
#include <mutex>

#include "BLI_array.hh"
#include "BLI_array_utils.hh"
#include "BLI_binary_search.hh"
#include "BLI_bit_span_ops.hh"
#include "BLI_bit_span_to_index_ranges.hh"
#include "BLI_bit_vector.hh"
#include "BLI_enumerable_thread_specific.hh"
#include "BLI_index_mask.hh"
#include "BLI_index_mask_expression.hh"
#include "BLI_index_ranges_builder.hh"
#include "BLI_math_base.hh"
#include "BLI_math_bits.h"
#include "BLI_set.hh"
#include "BLI_sort.hh"
#include "BLI_task.hh"
#include "BLI_threads.h"
#include "BLI_virtual_array.hh"

#include "BLI_strict_flags.h" /* Keep last. */

namespace blender::index_mask {

template<typename T> void build_reverse_map(const IndexMask &mask, MutableSpan<T> r_map)
{
#ifndef NDEBUG
  /* Catch errors with asserts in debug builds. */
  r_map.fill(-1);
#endif
  BLI_assert(r_map.size() >= mask.min_array_size());
  mask.foreach_index_optimized<T>(GrainSize(4096),
                                  [&](const T src, const T dst) { r_map[src] = dst; });
}
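
/* Usage sketch (hypothetical caller data): for a mask containing {0, 2, 5} and a map of size 6,
 * the call below stores the position of every index in the mask, so the map becomes
 * {0, -1, 1, -1, -1, 2} in a debug build (entries not covered by the mask stay -1):
 *
 *   IndexMaskMemory memory;
 *   const IndexMask mask = IndexMask::from_initializers({0, 2, 5}, memory);
 *   Array<int> map(mask.min_array_size());
 *   build_reverse_map<int>(mask, map);
 */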

template void build_reverse_map<int>(const IndexMask &mask, MutableSpan<int> r_map);

std::array<int16_t, max_segment_size> build_static_indices_array()
{
  std::array<int16_t, max_segment_size> data;
  for (int16_t i = 0; i < max_segment_size; i++) {
    data[size_t(i)] = i;
  }
  return data;
}

const IndexMask &get_static_index_mask_for_min_size(const int64_t min_size)
{
  static constexpr int64_t size_shift = 31;
  static constexpr int64_t max_size = (int64_t(1) << size_shift);      /* 2'147'483'648 */
  static constexpr int64_t segments_num = max_size / max_segment_size; /* 131'072 */

  /* Make sure we are never requesting a size that's larger than what was statically allocated.
   * If that's ever needed, we can either increase #size_shift or dynamically allocate an even
   * larger mask. */
  BLI_assert(min_size <= max_size);
  UNUSED_VARS_NDEBUG(min_size);

  static IndexMask static_mask = []() {
    static Array<const int16_t *> indices_by_segment(segments_num);
    /* The offsets and cumulative segment sizes array contain the same values here, so just use a
     * single array for both. */
    static Array<int64_t> segment_offsets(segments_num + 1);

    static const int16_t *static_offsets = get_static_indices_array().data();

    /* Isolate because the mutex protecting the initialization of #static_mask is locked. */
    threading::isolate_task([&]() {
      threading::parallel_for(IndexRange(segments_num), 1024, [&](const IndexRange range) {
        for (const int64_t segment_i : range) {
          indices_by_segment[segment_i] = static_offsets;
          segment_offsets[segment_i] = segment_i * max_segment_size;
        }
      });
    });
    segment_offsets.last() = max_size;

    IndexMask mask;
    IndexMaskData &data = mask.data_for_inplace_construction();
    data.indices_num_ = max_size;
    data.segments_num_ = segments_num;
    data.indices_by_segment_ = indices_by_segment.data();
    data.segment_offsets_ = segment_offsets.data();
    data.cumulative_segment_sizes_ = segment_offsets.data();
    data.begin_index_in_segment_ = 0;
    data.end_index_in_segment_ = max_segment_size;

    return mask;
  }();
  return static_mask;
}

std::ostream &operator<<(std::ostream &stream, const IndexMask &mask)
{
  Array<int64_t> indices(mask.size());
  mask.to_indices<int64_t>(indices);
  Vector<std::variant<IndexRange, Span<int64_t>>> segments;
  unique_sorted_indices::split_to_ranges_and_spans<int64_t>(indices, 8, segments);
  Vector<std::string> parts;
  for (const std::variant<IndexRange, Span<int64_t>> &segment : segments) {
    if (std::holds_alternative<IndexRange>(segment)) {
      const IndexRange range = std::get<IndexRange>(segment);
      parts.append(fmt::format("{}-{}", range.first(), range.last()));
    }
    else {
      const Span<int64_t> segment_indices = std::get<Span<int64_t>>(segment);
      parts.append(fmt::format("{}", fmt::join(segment_indices, ", ")));
    }
  }
  stream << fmt::format("(Size: {} | {})", mask.size(), fmt::join(parts, ", "));
  return stream;
}
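
/* Example output (a sketch; the exact grouping into ranges and spans depends on the threshold
 * passed to #split_to_ranges_and_spans above):
 *
 *   IndexMaskMemory memory;
 *   const IndexMask mask = IndexMask::from_initializers({IndexRange(100), 200, 300}, memory);
 *   std::cout << mask;  // Prints something like: (Size: 102 | 0-99, 200, 300)
 */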

IndexMask IndexMask::slice(const int64_t start, const int64_t size) const
{
  if (size == 0) {
    return {};
  }
  const RawMaskIterator first_it = this->index_to_iterator(start);
  const RawMaskIterator last_it = this->index_to_iterator(start + size - 1);

  IndexMask sliced = *this;
  sliced.indices_num_ = size;
  sliced.segments_num_ = last_it.segment_i - first_it.segment_i + 1;
  sliced.indices_by_segment_ += first_it.segment_i;
  sliced.segment_offsets_ += first_it.segment_i;
  sliced.cumulative_segment_sizes_ += first_it.segment_i;
  sliced.begin_index_in_segment_ = first_it.index_in_segment;
  sliced.end_index_in_segment_ = last_it.index_in_segment + 1;
  return sliced;
}

IndexMask IndexMask::slice(const RawMaskIterator first_it,
                           const RawMaskIterator last_it,
                           const int64_t size) const
{
  BLI_assert(this->iterator_to_index(last_it) - this->iterator_to_index(first_it) + 1 == size);
  IndexMask sliced = *this;
  sliced.indices_num_ = size;
  sliced.segments_num_ = last_it.segment_i - first_it.segment_i + 1;
  sliced.indices_by_segment_ += first_it.segment_i;
  sliced.segment_offsets_ += first_it.segment_i;
  sliced.cumulative_segment_sizes_ += first_it.segment_i;
  sliced.begin_index_in_segment_ = first_it.index_in_segment;
  sliced.end_index_in_segment_ = last_it.index_in_segment + 1;
  return sliced;
}

IndexMask IndexMask::slice_content(const IndexRange range) const
{
  return this->slice_content(range.start(), range.size());
}

IndexMask IndexMask::slice_content(const int64_t start, const int64_t size) const
{
  if (size <= 0) {
    return {};
  }
  const std::optional<RawMaskIterator> first_it = this->find_larger_equal(start);
  const std::optional<RawMaskIterator> last_it = this->find_smaller_equal(start + size - 1);
  if (!first_it || !last_it) {
    return {};
  }
  const int64_t first_index = this->iterator_to_index(*first_it);
  const int64_t last_index = this->iterator_to_index(*last_it);
  if (last_index < first_index) {
    return {};
  }
  const int64_t sliced_mask_size = last_index - first_index + 1;
  return this->slice(*first_it, *last_it, sliced_mask_size);
}
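
/* A sketch (with assumed values) of the difference between the two kinds of slicing: #slice
 * selects by position within the mask, while #slice_content selects by index value:
 *
 *   IndexMaskMemory memory;
 *   const IndexMask mask = IndexMask::from_initializers({5, 10, 15, 20}, memory);
 *   const IndexMask by_position = mask.slice(1, 2);  // Contains {10, 15}.
 *   const IndexMask by_value = mask.slice_content(IndexRange::from_begin_end(10, 16));
 *   // by_value also contains {10, 15}: all mask indices in [10, 16).
 */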

IndexMask IndexMask::slice_and_shift(const IndexRange range,
                                     const int64_t offset,
                                     IndexMaskMemory &memory) const
{
  return this->slice_and_shift(range.start(), range.size(), offset, memory);
}

IndexMask IndexMask::slice_and_shift(const int64_t start,
                                     const int64_t size,
                                     const int64_t offset,
                                     IndexMaskMemory &memory) const
{
  if (size == 0) {
    return {};
  }
  if (std::optional<IndexRange> range = this->to_range()) {
    return range->slice(start, size).shift(offset);
  }
  return this->slice(start, size).shift(offset, memory);
}

IndexMask IndexMask::shift(const int64_t offset, IndexMaskMemory &memory) const
{
  if (indices_num_ == 0) {
    return {};
  }
  BLI_assert(this->first() + offset >= 0);
  if (offset == 0) {
    return *this;
  }
  if (std::optional<IndexRange> range = this->to_range()) {
    return range->shift(offset);
  }
  IndexMask shifted_mask = *this;
  MutableSpan<int64_t> new_segment_offsets = memory.allocate_array<int64_t>(segments_num_);
  for (const int64_t i : IndexRange(segments_num_)) {
    new_segment_offsets[i] = segment_offsets_[i] + offset;
  }
  shifted_mask.segment_offsets_ = new_segment_offsets.data();
  return shifted_mask;
}
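
/* A small example (assumed values): shifting {3, 5, 100} by 10 yields {13, 15, 110}. Only the
 * segment offsets are copied and rewritten; the per-segment index arrays are shared with the
 * original mask:
 *
 *   IndexMaskMemory memory;
 *   const IndexMask mask = IndexMask::from_initializers({3, 5, 100}, memory);
 *   const IndexMask shifted = mask.shift(10, memory);  // Contains {13, 15, 110}.
 */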

int64_t consolidate_index_mask_segments(MutableSpan<IndexMaskSegment> segments,
                                        IndexMaskMemory & /*memory*/)
{
  if (segments.is_empty()) {
    return 0;
  }

  const Span<int16_t> static_indices = get_static_indices_array();

  /* TODO: Support merging non-range segments in some cases as well. */
  int64_t group_start_segment_i = 0;
  int64_t group_first = segments[0][0];
  int64_t group_last = segments[0].last();
  bool group_as_range = unique_sorted_indices::non_empty_is_range(segments[0].base_span());

  auto finish_group = [&](const int64_t last_segment_i) {
    if (group_start_segment_i == last_segment_i) {
      return;
    }
    /* Join multiple ranges together into a bigger range. */
    const IndexRange range = IndexRange::from_begin_end_inclusive(group_first, group_last);
    segments[group_start_segment_i] = IndexMaskSegment(range[0],
                                                       static_indices.take_front(range.size()));
    for (int64_t i = group_start_segment_i + 1; i <= last_segment_i; i++) {
      segments[i] = {};
    }
  };

  for (const int64_t segment_i : segments.index_range().drop_front(1)) {
    const IndexMaskSegment segment = segments[segment_i];
    const std::optional<IndexRange> segment_base_range =
        unique_sorted_indices::non_empty_as_range_try(segment.base_span());
    const bool segment_is_range = segment_base_range.has_value();

    if (group_as_range && segment_is_range) {
      if (group_last + 1 == segment[0]) {
        if (segment.last() - group_first + 1 < max_segment_size) {
          /* Can combine previous and current range. */
          group_last = segment.last();
          continue;
        }
      }
    }
    finish_group(segment_i - 1);

    group_start_segment_i = segment_i;
    group_first = segment[0];
    group_last = segment.last();
    group_as_range = segment_is_range;
  }
  finish_group(segments.size() - 1);

  /* Remove all segments that have been merged into previous segments. */
  const int64_t new_segments_num = std::remove_if(segments.begin(),
                                                  segments.end(),
                                                  [](const IndexMaskSegment segment) {
                                                    return segment.is_empty();
                                                  }) -
                                   segments.begin();
  return new_segments_num;
}

IndexMask IndexMask::from_segments(const Span<IndexMaskSegment> segments, IndexMaskMemory &memory)
{
  if (segments.is_empty()) {
    return {};
  }
#ifndef NDEBUG
  {
    int64_t last_index = segments[0].last();
    for (const IndexMaskSegment &segment : segments.drop_front(1)) {
      BLI_assert(std::is_sorted(segment.base_span().begin(), segment.base_span().end()));
      BLI_assert(last_index < segment[0]);
      last_index = segment.last();
    }
  }
#endif
  const int64_t segments_num = segments.size();

  /* Allocate buffers for the mask. */
  MutableSpan<const int16_t *> indices_by_segment = memory.allocate_array<const int16_t *>(
      segments_num);
  MutableSpan<int64_t> segment_offsets = memory.allocate_array<int64_t>(segments_num);
  MutableSpan<int64_t> cumulative_segment_sizes = memory.allocate_array<int64_t>(segments_num + 1);

  /* Fill buffers. */
  cumulative_segment_sizes[0] = 0;
  for (const int64_t segment_i : segments.index_range()) {
    const IndexMaskSegment segment = segments[segment_i];
    indices_by_segment[segment_i] = segment.base_span().data();
    segment_offsets[segment_i] = segment.offset();
    cumulative_segment_sizes[segment_i + 1] = cumulative_segment_sizes[segment_i] + segment.size();
  }

  /* Initialize mask. */
  IndexMask mask;
  IndexMaskData &data = mask.data_for_inplace_construction();
  data.indices_num_ = cumulative_segment_sizes.last();
  data.segments_num_ = segments_num;
  data.indices_by_segment_ = indices_by_segment.data();
  data.segment_offsets_ = segment_offsets.data();
  data.cumulative_segment_sizes_ = cumulative_segment_sizes.data();
  data.begin_index_in_segment_ = 0;
  data.end_index_in_segment_ = segments.last().size();
  return mask;
}

/**
 * Split the sorted, unique indices into segments and append them to #r_segments. The index data
 * for the new segments is either statically allocated or allocated with #allocator.
 */
template<typename T, int64_t InlineBufferSize>
static void segments_from_indices(const Span<T> indices,
                                  LinearAllocator<> &allocator,
                                  Vector<IndexMaskSegment, InlineBufferSize> &r_segments)
{
  Vector<std::variant<IndexRange, Span<T>>> segments;

  for (int64_t start = 0; start < indices.size(); start += max_segment_size) {
    /* Slice to make sure that each segment is no longer than #max_segment_size. */
    const Span<T> indices_slice = indices.slice_safe(start, max_segment_size);
    unique_sorted_indices::split_to_ranges_and_spans<T>(indices_slice, 64, segments);
  }

  const Span<int16_t> static_indices = get_static_indices_array();
  for (const auto &segment : segments) {
    if (std::holds_alternative<IndexRange>(segment)) {
      const IndexRange segment_range = std::get<IndexRange>(segment);
      r_segments.append_as(segment_range.start(), static_indices.take_front(segment_range.size()));
    }
    else {
      Span<T> segment_indices = std::get<Span<T>>(segment);
      MutableSpan<int16_t> offset_indices = allocator.allocate_array<int16_t>(
          segment_indices.size());
      while (!segment_indices.is_empty()) {
        const int64_t offset = segment_indices[0];
        const int64_t next_segment_size = binary_search::find_predicate_begin(
            segment_indices.take_front(max_segment_size),
            [&](const T value) { return value - offset >= max_segment_size; });
        for (const int64_t i : IndexRange(next_segment_size)) {
          const int64_t offset_index = segment_indices[i] - offset;
          BLI_assert(offset_index < max_segment_size);
          offset_indices[i] = int16_t(offset_index);
        }
        r_segments.append_as(offset, offset_indices.take_front(next_segment_size));
        segment_indices = segment_indices.drop_front(next_segment_size);
        offset_indices = offset_indices.drop_front(next_segment_size);
      }
    }
  }
}

/**
 * Helper to generate index mask segments on multiple threads and to combine them afterwards.
 */
struct ParallelSegmentsCollector {
  struct LocalData {
    LinearAllocator<> allocator;
    Vector<IndexMaskSegment, 16> segments;
  };

  threading::EnumerableThreadSpecific<LocalData> data_by_thread;

  /**
   * Move the segments from all threads into #main_segments, pass ownership of the allocated
   * memory to #main_allocator, and sort the collected segments by their first index.
   */
  void reduce(LinearAllocator<> &main_allocator, Vector<IndexMaskSegment, 16> &main_segments)
  {
    for (LocalData &data : this->data_by_thread) {
      main_allocator.transfer_ownership_from(data.allocator);
      main_segments.extend(data.segments);
    }
    parallel_sort(main_segments.begin(),
                  main_segments.end(),
                  [](const IndexMaskSegment a, const IndexMaskSegment b) { return a[0] < b[0]; });
  }
};

IndexMask IndexMask::complement(const IndexMask &universe, IndexMaskMemory &memory) const
{
  ExprBuilder builder;
  const Expr &expr = builder.subtract(&universe, {this});
  return evaluate_expression(expr, memory);
}

template<typename T>
IndexMask IndexMask::from_indices(const Span<T> indices, IndexMaskMemory &memory)
{
  if (indices.is_empty()) {
    return {};
  }
  if (const std::optional<IndexRange> range = unique_sorted_indices::non_empty_as_range_try(
          indices))
  {
    /* Fast case when the indices encode a single range. */
    return *range;
  }

  Vector<IndexMaskSegment, 16> segments;

  constexpr int64_t min_grain_size = 4096;
  constexpr int64_t max_grain_size = max_segment_size;
  if (indices.size() <= min_grain_size) {
    segments_from_indices(indices, memory, segments);
  }
  else {
    const int64_t threads_num = BLI_system_thread_count();
    /* Can be faster with a larger grain size, but only when there are enough indices. */
    const int64_t grain_size = std::clamp(
        indices.size() / (threads_num * 4), min_grain_size, max_grain_size);

    ParallelSegmentsCollector segments_collector;
    threading::parallel_for(indices.index_range(), grain_size, [&](const IndexRange range) {
      ParallelSegmentsCollector::LocalData &local_data = segments_collector.data_by_thread.local();
      segments_from_indices(indices.slice(range), local_data.allocator, local_data.segments);
    });
    segments_collector.reduce(memory, segments);
  }
  const int64_t consolidated_segments_num = consolidate_index_mask_segments(segments, memory);
  segments.resize(consolidated_segments_num);
  return IndexMask::from_segments(segments, memory);
}
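
/* Usage sketch: the input indices must be unique and sorted. Converting back with #to_indices
 * round-trips the data:
 *
 *   IndexMaskMemory memory;
 *   const Vector<int> indices = {2, 3, 4, 50, 60};
 *   const IndexMask mask = IndexMask::from_indices<int>(indices, memory);
 *   Array<int> round_trip(mask.size());
 *   mask.to_indices<int>(round_trip);  // {2, 3, 4, 50, 60} again.
 */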

IndexMask IndexMask::from_bits(const BitSpan bits, IndexMaskMemory &memory)
{
  return IndexMask::from_bits(bits.index_range(), bits, memory);
}

static int64_t from_bits_batch_predicate(const IndexMaskSegment universe_segment,
                                         IndexRangesBuilder<int16_t> &builder,
                                         const BitSpan bits_slice)
{
  const int64_t segment_start = universe_segment[0];
  if (unique_sorted_indices::non_empty_is_range(universe_segment.base_span())) {
    bits::bits_to_index_ranges<int16_t>(bits_slice, builder);
  }
  else {
    /* If the universe is not a range, we need to create a new bit span first. In it, bits
     * that are not part of the universe are set to 0. */
    const int64_t segment_end = universe_segment.last() + 1;
    BitVector<max_segment_size> local_bits(segment_end - segment_start, false);
    for (const int64_t i : universe_segment.index_range()) {
      const int64_t global_index = universe_segment[i];
      const int64_t local_index = global_index - segment_start;
      BLI_assert(local_index < max_segment_size);
      /* It's not great to handle each index separately instead of working with bigger
       * chunks, but that works well enough for now. */
      if (bits_slice[local_index]) {
        local_bits[local_index].set();
      }
    }
    bits::bits_to_index_ranges<int16_t>(local_bits, builder);
  }
  return segment_start;
}

IndexMask IndexMask::from_bits(const IndexMask &universe,
                               const BitSpan bits,
                               IndexMaskMemory &memory)
{
  BLI_assert(bits.size() >= universe.min_array_size());
  /* Use #from_batch_predicate because we can process many bits at once. */
  return IndexMask::from_batch_predicate(
      universe,
      GrainSize(1024),
      memory,
      [&](const IndexMaskSegment universe_segment, IndexRangesBuilder<int16_t> &builder) {
        const IndexRange slice = IndexRange::from_begin_end_inclusive(universe_segment[0],
                                                                      universe_segment.last());
        return from_bits_batch_predicate(universe_segment, builder, bits.slice(slice));
      });
}

static void segments_from_batch_predicate(
    const IndexMaskSegment universe_segment,
    LinearAllocator<> &allocator,
    const FunctionRef<int64_t(const IndexMaskSegment &universe_segment,
                              IndexRangesBuilder<int16_t> &builder)> batch_predicate,
    Vector<IndexMaskSegment, 16> &r_segments)
{
  IndexRangesBuilderBuffer<int16_t, max_segment_size> builder_buffer;
  IndexRangesBuilder<int16_t> builder{builder_buffer};
  const int64_t segment_shift = batch_predicate(universe_segment, builder);
  if (builder.is_empty()) {
    return;
  }
  const Span<int16_t> static_indices = get_static_indices_array();

  /* This threshold trades off the number of segments and the number of ranges. In some cases,
   * masks with fewer segments can be built more efficiently, but when iterating over a mask it
   * may be beneficial to have more range segments, because ranges can be processed more
   * efficiently. This could be exposed to the caller in the future. */
  constexpr int64_t threshold = 64;
  int64_t next_range_to_process = 0;
  int64_t skipped_indices_num = 0;

  /* Builds an index mask segment from a bunch of smaller ranges (which could be individual
   * indices). */
  auto consolidate_skipped_ranges = [&](int64_t end_range_i) {
    if (skipped_indices_num == 0) {
      return;
    }
    MutableSpan<int16_t> indices = allocator.allocate_array<int16_t>(skipped_indices_num);
    int64_t counter = 0;
    for (const int64_t i : IndexRange::from_begin_end(next_range_to_process, end_range_i)) {
      const IndexRange range = builder[i];
      array_utils::fill_index_range(indices.slice(counter, range.size()), int16_t(range.first()));
      counter += range.size();
    }
    r_segments.append(IndexMaskSegment{segment_shift, indices});
  };

  for (const int64_t i : builder.index_range()) {
    const IndexRange range = builder[i];
    if (range.size() > threshold || builder.size() == 1) {
      consolidate_skipped_ranges(i);
      r_segments.append(IndexMaskSegment{segment_shift, static_indices.slice(range)});
      next_range_to_process = i + 1;
      skipped_indices_num = 0;
    }
    else {
      skipped_indices_num += range.size();
    }
  }
  consolidate_skipped_ranges(builder.size());
}

IndexMask IndexMask::from_batch_predicate(
    const IndexMask &universe,
    GrainSize grain_size,
    IndexMaskMemory &memory,
    const FunctionRef<int64_t(const IndexMaskSegment &universe_segment,
                              IndexRangesBuilder<int16_t> &builder)> batch_predicate)
{
  if (universe.is_empty()) {
    return {};
  }

  Vector<IndexMaskSegment, 16> segments;
  if (universe.size() <= grain_size.value) {
    for (const int64_t segment_i : IndexRange(universe.segments_num())) {
      const IndexMaskSegment universe_segment = universe.segment(segment_i);
      segments_from_batch_predicate(universe_segment, memory, batch_predicate, segments);
    }
  }
  else {
    ParallelSegmentsCollector segments_collector;
    universe.foreach_segment(grain_size, [&](const IndexMaskSegment universe_segment) {
      ParallelSegmentsCollector::LocalData &data = segments_collector.data_by_thread.local();
      segments_from_batch_predicate(
          universe_segment, data.allocator, batch_predicate, data.segments);
    });
    segments_collector.reduce(memory, segments);
  }

  return IndexMask::from_segments(segments, memory);
}

IndexMask IndexMask::from_bools(const Span<bool> bools, IndexMaskMemory &memory)
{
  return IndexMask::from_bools(bools.index_range(), bools, memory);
}

IndexMask IndexMask::from_bools(const VArray<bool> &bools, IndexMaskMemory &memory)
{
  return IndexMask::from_bools(bools.index_range(), bools, memory);
}

IndexMask IndexMask::from_bools(const IndexMask &universe,
                                Span<bool> bools,
                                IndexMaskMemory &memory)
{
  BLI_assert(bools.size() >= universe.min_array_size());
  return IndexMask::from_batch_predicate(
      universe,
      GrainSize(1024),
      memory,
      [&](const IndexMaskSegment universe_segment,
          IndexRangesBuilder<int16_t> &builder) -> int64_t {
        const IndexRange slice = IndexRange::from_begin_end_inclusive(universe_segment[0],
                                                                      universe_segment.last());
        /* +16 to allow for some overshoot when converting bools to bits. */
        BitVector<max_segment_size + 16> bits;
        bits.resize(slice.size(), false);
        const int64_t allowed_overshoot = std::min<int64_t>(bits.capacity() - slice.size(),
                                                            bools.size() - slice.one_after_last());
        const bool any_true = bits::or_bools_into_bits(
            bools.slice(slice), bits, allowed_overshoot);
        if (!any_true) {
          return 0;
        }
        return from_bits_batch_predicate(universe_segment, builder, bits);
      });
}

IndexMask IndexMask::from_bools_inverse(const IndexMask &universe,
                                        Span<bool> bools,
                                        IndexMaskMemory &memory)
{
  BitVector bits(bools);
  bits::invert(bits);
  return IndexMask::from_bits(universe, bits, memory);
}
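
/* Usage sketch: building a mask from a selection array. Only the indices where the boolean is
 * true end up in the mask:
 *
 *   IndexMaskMemory memory;
 *   const Array<bool> selection = {false, true, true, false, true};
 *   const IndexMask mask = IndexMask::from_bools(selection, memory);  // Contains {1, 2, 4}.
 */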

IndexMask IndexMask::from_bools(const IndexMask &universe,
                                const VArray<bool> &bools,
                                IndexMaskMemory &memory)
{
  const CommonVArrayInfo info = bools.common_info();
  if (info.type == CommonVArrayInfo::Type::Single) {
    return *static_cast<const bool *>(info.data) ? universe : IndexMask();
  }
  if (info.type == CommonVArrayInfo::Type::Span) {
    const Span<bool> span(static_cast<const bool *>(info.data), bools.size());
    return IndexMask::from_bools(universe, span, memory);
  }
  return IndexMask::from_predicate(
      universe, GrainSize(512), memory, [&](const int64_t index) { return bools[index]; });
}

IndexMask IndexMask::from_union(const IndexMask &mask_a,
                                const IndexMask &mask_b,
                                IndexMaskMemory &memory)
{
  ExprBuilder builder;
  const Expr &expr = builder.merge({&mask_a, &mask_b});
  return evaluate_expression(expr, memory);
}

IndexMask IndexMask::from_difference(const IndexMask &mask_a,
                                     const IndexMask &mask_b,
                                     IndexMaskMemory &memory)
{
  ExprBuilder builder;
  const Expr &expr = builder.subtract({&mask_a}, {&mask_b});
  return evaluate_expression(expr, memory);
}

IndexMask IndexMask::from_intersection(const IndexMask &mask_a,
                                       const IndexMask &mask_b,
                                       IndexMaskMemory &memory)
{
  ExprBuilder builder;
  const Expr &expr = builder.intersect({&mask_a, &mask_b});
  return evaluate_expression(expr, memory);
}

IndexMask IndexMask::from_initializers(const Span<Initializer> initializers,
                                       IndexMaskMemory &memory)
{
  Set<int64_t> values;
  for (const Initializer &item : initializers) {
    if (const auto *range = std::get_if<IndexRange>(&item)) {
      for (const int64_t i : *range) {
        values.add(i);
      }
    }
    else if (const auto *span_i64 = std::get_if<Span<int64_t>>(&item)) {
      for (const int64_t i : *span_i64) {
        values.add(i);
      }
    }
    else if (const auto *span_i32 = std::get_if<Span<int>>(&item)) {
      for (const int i : *span_i32) {
        values.add(i);
      }
    }
    else if (const auto *index = std::get_if<int64_t>(&item)) {
      values.add(*index);
    }
  }
  Vector<int64_t> values_vec;
  values_vec.extend(values.begin(), values.end());
  std::sort(values_vec.begin(), values_vec.end());
  return IndexMask::from_indices(values_vec.as_span(), memory);
}
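
/* Usage sketch: initializers may mix ranges, spans and single indices; duplicates are removed
 * and the result is sorted:
 *
 *   IndexMaskMemory memory;
 *   const IndexMask mask = IndexMask::from_initializers({IndexRange(3), 10, 2}, memory);
 *   // Contains {0, 1, 2, 10}; the index 2 is only included once.
 */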

template<typename T> void IndexMask::to_indices(MutableSpan<T> r_indices) const
{
  BLI_assert(this->size() == r_indices.size());
  this->foreach_index_optimized<int64_t>(
      GrainSize(1024), [r_indices = r_indices.data()](const int64_t i, const int64_t pos) {
        r_indices[pos] = T(i);
      });
}

void IndexMask::set_bits(MutableBitSpan r_bits, const int64_t offset) const
{
  BLI_assert(r_bits.size() >= this->min_array_size() + offset);
  this->foreach_segment_optimized([&](const auto segment) {
    if constexpr (std::is_same_v<std::decay_t<decltype(segment)>, IndexRange>) {
      const IndexRange range = segment;
      const IndexRange shifted_range = range.shift(offset);
      r_bits.slice(shifted_range).set_all();
    }
    else {
      const IndexMaskSegment indices = segment;
      const IndexMaskSegment shifted_indices = indices.shift(offset);
      for (const int64_t i : shifted_indices) {
        r_bits[i].set();
      }
    }
  });
}

void IndexMask::to_bits(MutableBitSpan r_bits, const int64_t offset) const
{
  BLI_assert(r_bits.size() >= this->min_array_size() + offset);
  r_bits.reset_all();
  this->set_bits(r_bits, offset);
}

void IndexMask::to_bools(MutableSpan<bool> r_bools) const
{
  BLI_assert(r_bools.size() >= this->min_array_size());
  r_bools.fill(false);
  this->foreach_index_optimized<int64_t>(
      [&](const int64_t i) { r_bools[i] = true; });
}

Vector<IndexRange> IndexMask::to_ranges() const
{
  Vector<IndexRange> ranges;
  this->foreach_range([&](const IndexRange range) { ranges.append(range); });
  return ranges;
}

Vector<IndexRange> IndexMask::to_ranges_invert(const IndexRange universe) const
{
  IndexMaskMemory memory;
  return this->complement(universe, memory).to_ranges();
}

namespace detail {

/**
 * Filter the indices of #universe_segment with #filter_indices and append the resulting segments
 * to #r_segments.
 */
static void segments_from_predicate_filter(
    const IndexMaskSegment universe_segment,
    LinearAllocator<> &allocator,
    const FunctionRef<int64_t(IndexMaskSegment indices, int16_t *r_true_indices)> filter_indices,
    Vector<IndexMaskSegment, 16> &r_segments)
{
  std::array<int16_t, max_segment_size> indices_array;
  const int64_t true_indices_num = filter_indices(universe_segment, indices_array.data());
  if (true_indices_num == 0) {
    return;
  }
  const Span<int16_t> true_indices{indices_array.data(), true_indices_num};
  Vector<std::variant<IndexRange, Span<int16_t>>> true_segments;
  unique_sorted_indices::split_to_ranges_and_spans<int16_t>(true_indices, 64, true_segments);

  const Span<int16_t> static_indices = get_static_indices_array();

  for (const auto &true_segment : true_segments) {
    if (std::holds_alternative<IndexRange>(true_segment)) {
      const IndexRange segment_range = std::get<IndexRange>(true_segment);
      r_segments.append_as(universe_segment.offset(), static_indices.slice(segment_range));
    }
    else {
      const Span<int16_t> segment_indices = std::get<Span<int16_t>>(true_segment);
      r_segments.append_as(universe_segment.offset(),
                           allocator.construct_array_copy(segment_indices));
    }
  }
}

IndexMask from_predicate_impl(
    const IndexMask &universe,
    const GrainSize grain_size,
    IndexMaskMemory &memory,
    const FunctionRef<int64_t(IndexMaskSegment indices, int16_t *r_true_indices)> filter_indices)
{
  if (universe.is_empty()) {
    return {};
  }

  Vector<IndexMaskSegment, 16> segments;
  if (universe.size() <= grain_size.value) {
    for (const int64_t segment_i : IndexRange(universe.segments_num())) {
      const IndexMaskSegment universe_segment = universe.segment(segment_i);
      segments_from_predicate_filter(universe_segment, memory, filter_indices, segments);
    }
  }
  else {
    ParallelSegmentsCollector segments_collector;
    universe.foreach_segment(grain_size, [&](const IndexMaskSegment universe_segment) {
      ParallelSegmentsCollector::LocalData &data = segments_collector.data_by_thread.local();
      segments_from_predicate_filter(
          universe_segment, data.allocator, filter_indices, data.segments);
    });
    segments_collector.reduce(memory, segments);
  }

  const int64_t consolidated_segments_num = consolidate_index_mask_segments(segments, memory);
  segments.resize(consolidated_segments_num);
  return IndexMask::from_segments(segments, memory);
}
}  // namespace detail

std::optional<RawMaskIterator> IndexMask::find(const int64_t query_index) const
{
  if (const std::optional<RawMaskIterator> it = this->find_larger_equal(query_index)) {
    if ((*this)[*it] == query_index) {
      return it;
    }
  }
  return std::nullopt;
}

std::optional<RawMaskIterator> IndexMask::find_larger_equal(const int64_t query_index) const
{
  const int64_t segment_i = binary_search::find_predicate_begin(
      IndexRange(segments_num_),
      [&](const int64_t seg_i) { return this->segment(seg_i).last() >= query_index; });
  if (segment_i == segments_num_) {
    /* The query index is larger than the largest index in this mask. */
    return std::nullopt;
  }
  const IndexMaskSegment segment = this->segment(segment_i);
  const int64_t segment_begin_index = segment.base_span().data() - indices_by_segment_[segment_i];
  if (query_index < segment[0]) {
    /* The query index is the first element in this segment. */
    const int64_t index_in_segment = segment_begin_index;
    BLI_assert(index_in_segment < max_segment_size);
    return RawMaskIterator{segment_i, int16_t(index_in_segment)};
  }
  /* The query index is somewhere within this segment. */
  const int64_t local_index = query_index - segment.offset();
  const int64_t index_in_segment = binary_search::find_predicate_begin(
      segment.base_span(), [&](const int16_t i) { return i >= local_index; });
  const int64_t actual_index_in_segment = index_in_segment + segment_begin_index;
  BLI_assert(actual_index_in_segment < max_segment_size);
  return RawMaskIterator{segment_i, int16_t(actual_index_in_segment)};
}

std::optional<RawMaskIterator> IndexMask::find_smaller_equal(const int64_t query_index) const
{
  if (indices_num_ == 0) {
    return std::nullopt;
  }
  const std::optional<RawMaskIterator> larger_equal_it = this->find_larger_equal(query_index);
  if (!larger_equal_it) {
    /* Return the last element. */
    return this->index_to_iterator(indices_num_ - 1);
  }
  if ((*this)[*larger_equal_it] == query_index) {
    /* This is an exact hit. */
    return larger_equal_it;
  }
  if (larger_equal_it->segment_i > 0) {
    if (larger_equal_it->index_in_segment > 0) {
      /* Previous element in same segment. */
      return RawMaskIterator{larger_equal_it->segment_i,
                             int16_t(larger_equal_it->index_in_segment - 1)};
    }
    /* Last element in previous segment. */
    return RawMaskIterator{larger_equal_it->segment_i - 1,
                           int16_t(cumulative_segment_sizes_[larger_equal_it->segment_i] -
                                   cumulative_segment_sizes_[larger_equal_it->segment_i - 1] - 1)};
  }
  if (larger_equal_it->index_in_segment > begin_index_in_segment_) {
    /* Previous element in same segment. */
    return RawMaskIterator{0, int16_t(larger_equal_it->index_in_segment - 1)};
  }
  return std::nullopt;
}

bool IndexMask::contains(const int64_t query_index) const
{
  return this->find(query_index).has_value();
}
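
/* A small example (assumed values) for the lookup functions above, with a mask containing
 * {0, 2, 5}:
 *
 *   mask.contains(2);            // true
 *   mask.contains(3);            // false
 *   mask.find(3);                // std::nullopt
 *   mask.find_larger_equal(3);   // Iterator pointing at the 5.
 *   mask.find_smaller_equal(3);  // Iterator pointing at the 2.
 */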

static Array<int16_t> build_every_nth_index_array(const int64_t n)
{
  Array<int16_t> data(max_segment_size / n);
  for (const int64_t i : data.index_range()) {
    const int64_t index = i * n;
    BLI_assert(index < max_segment_size);
    data[i] = int16_t(index);
  }
  return data;
}

/**
 * Returns a span containing the first #repetitions multiples of #n (0, n, 2n, ...). For small
 * values of n, the data is computed only once and cached for reuse.
 */
static Span<int16_t> get_every_nth_index(const int64_t n,
                                         const int64_t repetitions,
                                         IndexMaskMemory &memory)
{
  BLI_assert(n >= 2);
  BLI_assert(n * repetitions <= max_segment_size);

  switch (n) {
    case 2: {
      static auto data = build_every_nth_index_array(2);
      return data.as_span().take_front(repetitions);
    }
    case 3: {
      static auto data = build_every_nth_index_array(3);
      return data.as_span().take_front(repetitions);
    }
    case 4: {
      static auto data = build_every_nth_index_array(4);
      return data.as_span().take_front(repetitions);
    }
    default: {
      MutableSpan<int16_t> data = memory.allocate_array<int16_t>(repetitions);
      for (const int64_t i : IndexRange(repetitions)) {
        const int64_t index = i * n;
        BLI_assert(index < max_segment_size);
        data[i] = int16_t(index);
      }
      return data;
    }
  }
}
947
949 const int64_t repetitions,
950 const int64_t stride,
951 const int64_t initial_offset,
952 IndexMaskMemory &memory)
953{
954 if (mask_to_repeat.is_empty()) {
955 return {};
956 }
957 BLI_assert(mask_to_repeat.last() < stride);
958 if (repetitions == 0) {
959 return {};
960 }
961 if (repetitions == 1 && initial_offset == 0) {
962 /* The output is the same as the input mask. */
963 return mask_to_repeat;
964 }
965 const std::optional<IndexRange> range_to_repeat = mask_to_repeat.to_range();
966 if (range_to_repeat && range_to_repeat->first() == 0 && range_to_repeat->size() == stride) {
967 /* The output is a range. */
968 return IndexRange(initial_offset, repetitions * stride);
969 }
970 const int64_t segments_num = mask_to_repeat.segments_num();
971 const IndexRange bounds = mask_to_repeat.bounds();
972
973 /* Avoid having many very small segments by creating a single segment that contains the input
974 * multiple times already. This way, a lower total number of segments is necessary. */
975 if (segments_num == 1 && stride <= max_segment_size / 2 && mask_to_repeat.size() <= 256) {
976 const IndexMaskSegment src_segment = mask_to_repeat.segment(0);
977 /* Number of repetitions that fit into a single segment. */
978 const int64_t inline_repetitions_num = std::min(repetitions, max_segment_size / stride);
979 Span<int16_t> repeated_indices;
980 if (src_segment.size() == 1) {
981 /* Optimize the case when a single index is repeated. */
982 repeated_indices = get_every_nth_index(stride, inline_repetitions_num, memory);
983 }
984 else {
985 /* More general case that repeats multiple indices. */
986 MutableSpan<int16_t> repeated_indices_mut = memory.allocate_array<int16_t>(
987 inline_repetitions_num * src_segment.size());
988 for (const int64_t repetition : IndexRange(inline_repetitions_num)) {
989 for (const int64_t i : src_segment.index_range()) {
990 const int64_t index = src_segment[i] - src_segment[0] + repetition * stride;
992 repeated_indices_mut[repetition * src_segment.size() + i] = int16_t(index);
993 }
994 }
995 repeated_indices = repeated_indices_mut;
996 }
997 BLI_assert(repeated_indices[0] == 0);
998
999 Vector<IndexMaskSegment, 16> repeated_segments;
1000 const int64_t result_segments_num = ceil_division(repetitions, inline_repetitions_num);
1001 for (const int64_t i : IndexRange(result_segments_num)) {
1002 const int64_t used_repetitions = std::min(inline_repetitions_num,
1003 repetitions - i * inline_repetitions_num);
1004 repeated_segments.append(
1005 IndexMaskSegment(initial_offset + bounds.first() + i * stride * inline_repetitions_num,
1006 repeated_indices.take_front(used_repetitions * src_segment.size())));
1007 }
1008 return IndexMask::from_segments(repeated_segments, memory);
1009 }
1010
1011 /* Simply repeat and offset the existing segments in the input mask. */
1012 Vector<IndexMaskSegment, 16> repeated_segments;
1013 for (const int64_t repetition : IndexRange(repetitions)) {
1014 for (const int64_t segment_i : IndexRange(segments_num)) {
1015 const IndexMaskSegment segment = mask_to_repeat.segment(segment_i);
1016 repeated_segments.append(IndexMaskSegment(
1017 segment.offset() + repetition * stride + initial_offset, segment.base_span()));
1018 }
1019 }
1020 return IndexMask::from_segments(repeated_segments, memory);
1021}
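
/* Usage sketch (assumed values): repeating the mask {0, 1} three times with a stride of 10 and
 * an initial offset of 100:
 *
 *   IndexMaskMemory memory;
 *   const IndexMask pattern = IndexMask::from_initializers({0, 1}, memory);
 *   const IndexMask repeated = IndexMask::from_repeating(pattern, 3, 10, 100, memory);
 *   // Contains {100, 101, 110, 111, 120, 121}.
 */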

IndexMask IndexMask::from_every_nth(const int64_t n,
                                    const int64_t indices_num,
                                    const int64_t initial_offset,
                                    IndexMaskMemory &memory)
{
  BLI_assert(n >= 1);
  return IndexMask::from_repeating(IndexRange(1), indices_num, n, initial_offset, memory);
}
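
/* Usage sketch: an every-nth mask is just a repeated single-element mask, e.g. n = 3 with four
 * indices:
 *
 *   IndexMaskMemory memory;
 *   const IndexMask mask = IndexMask::from_every_nth(3, 4, 0, memory);
 *   // Contains {0, 3, 6, 9}.
 */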

void IndexMask::foreach_segment_zipped(const Span<IndexMask> masks,
                                       const FunctionRef<bool(Span<IndexMaskSegment> segments)> fn)
{
  BLI_assert(!masks.is_empty());
  BLI_assert(std::all_of(masks.begin() + 1, masks.end(), [&](const IndexMask &mask) {
    return masks[0].size() == mask.size();
  }));

  Array<int64_t, 8> segment_iter(masks.size(), 0);
  Array<int16_t, 8> start_iter(masks.size(), 0);

  Array<IndexMaskSegment, 8> segments(masks.size());
  Array<IndexMaskSegment, 8> sequences(masks.size());

  /* This function only takes the positions of the indices into account, not the index values
   * themselves. Masks of the same size are fragmented identically in position space, so the last
   * segments of all masks end at the same position and all segment iterators run out of range at
   * the same time. */
  while (segment_iter[0] != masks[0].segments_num()) {
    for (const int64_t mask_i : masks.index_range()) {
      if (start_iter[mask_i] == 0) {
        segments[mask_i] = masks[mask_i].segment(segment_iter[mask_i]);
      }
    }

    int16_t next_common_sequence_size = std::numeric_limits<int16_t>::max();
    for (const int64_t mask_i : masks.index_range()) {
      next_common_sequence_size = math::min(next_common_sequence_size,
                                            int16_t(segments[mask_i].size() - start_iter[mask_i]));
    }

    for (const int64_t mask_i : masks.index_range()) {
      sequences[mask_i] = segments[mask_i].slice(start_iter[mask_i], next_common_sequence_size);
    }

    if (!fn(sequences)) {
      break;
    }

    for (const int64_t mask_i : masks.index_range()) {
      if (segments[mask_i].size() - start_iter[mask_i] == next_common_sequence_size) {
        segment_iter[mask_i]++;
        start_iter[mask_i] = 0;
      }
      else {
        start_iter[mask_i] += next_common_sequence_size;
      }
    }
  }
}

static bool segments_is_equal(const IndexMaskSegment &a, const IndexMaskSegment &b)
{
  if (a.size() != b.size()) {
    return false;
  }
  if (a.is_empty()) {
    /* Both segments are empty. */
    return true;
  }
  if (a[0] != b[0]) {
    return false;
  }

  const bool a_is_range = unique_sorted_indices::non_empty_is_range(a.base_span());
  const bool b_is_range = unique_sorted_indices::non_empty_is_range(b.base_span());
  if (a_is_range || b_is_range) {
    return a_is_range && b_is_range;
  }

  const Span<int16_t> a_indices = a.base_span();
  [[maybe_unused]] const Span<int16_t> b_indices = b.base_span();

  const int64_t offset_difference = int16_t(b.offset() - a.offset());

  BLI_assert(a_indices[0] >= 0 && b_indices[0] >= 0);
  BLI_assert(b_indices[0] == a_indices[0] - offset_difference);

  return std::equal(a_indices.begin(),
                    a_indices.end(),
                    b.base_span().begin(),
                    [offset_difference](const int16_t a_index, const int16_t b_index) -> bool {
                      return a_index - offset_difference == b_index;
                    });
}

bool operator==(const IndexMask &a, const IndexMask &b)
{
  if (a.size() != b.size()) {
    return false;
  }

  const std::optional<IndexRange> a_as_range = a.to_range();
  const std::optional<IndexRange> b_as_range = b.to_range();
  if (a_as_range.has_value() || b_as_range.has_value()) {
    return a_as_range == b_as_range;
  }

  bool equals = true;
  IndexMask::foreach_segment_zipped({a, b}, [&](const Span<IndexMaskSegment> segments) {
    equals &= segments_is_equal(segments[0], segments[1]);
    return equals;
  });

  return equals;
}

Vector<IndexMask, 4> IndexMask::from_group_ids(const IndexMask &universe,
                                               const VArray<int> &group_ids,
                                               IndexMaskMemory &memory,
                                               VectorSet<int> &r_index_by_group_id)
{
  BLI_assert(group_ids.size() >= universe.min_array_size());
  Vector<IndexMask, 4> result_masks;
  if (const std::optional<int> single_group_id = group_ids.get_if_single()) {
    /* Optimize for the case when all group ids are the same. */
    const int64_t group_index = r_index_by_group_id.index_of_or_add(*single_group_id);
    const int64_t groups_num = r_index_by_group_id.size();
    result_masks.resize(groups_num);
    result_masks[group_index] = universe;
    return result_masks;
  }

  const VArraySpan<int> group_ids_span{group_ids};
  universe.foreach_index([&](const int64_t i) { r_index_by_group_id.add(group_ids_span[i]); });
  const int64_t groups_num = r_index_by_group_id.size();
  result_masks.resize(groups_num);
  IndexMask::from_groups(
      universe,
      memory,
      [&](const int64_t i) {
        const int group_id = group_ids_span[i];
        return r_index_by_group_id.index_of(group_id);
      },
      result_masks);
  return result_masks;
}

Vector<IndexMask, 4> IndexMask::from_group_ids(const VArray<int> &group_ids,
                                               IndexMaskMemory &memory,
                                               VectorSet<int> &r_index_by_group_id)
{
  return IndexMask::from_group_ids(
      IndexMask(group_ids.size()), group_ids, memory, r_index_by_group_id);
}
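
/* Usage sketch (hypothetical data): partitioning indices by their group id:
 *
 *   IndexMaskMemory memory;
 *   VectorSet<int> index_by_group_id;
 *   const Array<int> ids = {7, 7, 3, 7, 3};
 *   const Vector<IndexMask, 4> masks = IndexMask::from_group_ids(
 *       VArray<int>::ForSpan(ids), memory, index_by_group_id);
 *   // masks[index_by_group_id.index_of(7)] contains {0, 1, 3}.
 *   // masks[index_by_group_id.index_of(3)] contains {2, 4}.
 */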

template IndexMask IndexMask::from_indices(Span<int32_t> indices, IndexMaskMemory &memory);
template IndexMask IndexMask::from_indices(Span<int64_t> indices, IndexMaskMemory &memory);
template void IndexMask::to_indices(MutableSpan<int32_t>) const;
template void IndexMask::to_indices(MutableSpan<int64_t>) const;

}  // namespace blender::index_mask