index_mask_expression.cc
/* SPDX-FileCopyrightText: 2024 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup bli
 *
 * Evaluation of expressions (union, intersection and difference) on index masks.
 */

#include <algorithm>

#include "BLI_array.hh"
#include "BLI_bit_group_vector.hh"
#include "BLI_bit_span_ops.hh"
#include "BLI_enumerable_thread_specific.hh"
#include "BLI_index_mask_expression.hh"
#include "BLI_stack.hh"
#include "BLI_task.hh"

#include "BLI_strict_flags.h" /* IWYU pragma: keep */

namespace blender::index_mask {

/* During coarse evaluation, the result is described as a sequence of segments whose exact
 * indices have not been computed yet. */
struct CoarseSegment {
  enum class Type {
    /* The exact indices in this segment still have to be computed. */
    Unknown,
    /* All indices in this segment are copied from the referenced mask. */
    Copy,
    /* The segment contains every index within its bounds. */
    Full,
  };

  Type type;
  IndexRange bounds;
  /* Only used when the type is #Copy. */
  const IndexMask *mask = nullptr;
};

/* The result of coarsely evaluating an expression: non-overlapping segments ordered by their
 * bounds. */
struct CoarseResult {
  Vector<CoarseSegment> segments;
};

/* The begin or end of a coarse segment. The boundaries of all input segments are sorted and then
 * processed in a single sweep from left to right. */
struct CourseBoundary {
  int64_t index;
  bool is_begin;
  const CoarseSegment *segment;
};

/* A boundary used when evaluating difference expressions, which additionally has to know whether
 * the segment belongs to the main term or to one of the subtracted terms. */
struct DifferenceCourseBoundary : public CourseBoundary {
  bool is_main;
};

/* A fully evaluated piece of the final mask. */
struct EvaluatedSegment {
  enum class Type {
    /* The segment contains every index within its bounds. */
    Full,
    /* The indices are copied from an existing mask. */
    Copy,
    /* The indices have been computed explicitly. */
    Indices,
  };

  Type type;
  IndexRange bounds;
  /* Only used when the type is #Copy. */
  const IndexMask *copy_mask = nullptr;
  /* Only used when the type is #Indices. */
  IndexMaskSegment indices;
};

/* The strategy used to compute the exact indices for short unknown segments. */
enum class ExactEvalMode {
  /* Evaluate with bit-wise operations on bit spans. */
  Bits,
  /* Evaluate with set operations on sorted index arrays. */
  Indices,
};

static void sort_course_boundaries(MutableSpan<CourseBoundary> boundaries)
{
  std::sort(boundaries.begin(),
            boundaries.end(),
            [](const CourseBoundary &a, const CourseBoundary &b) { return a.index < b.index; });
}

static void sort_course_boundaries(MutableSpan<DifferenceCourseBoundary> boundaries)
{
  std::sort(boundaries.begin(),
            boundaries.end(),
            [](const DifferenceCourseBoundary &a, const DifferenceCourseBoundary &b) {
              return a.index < b.index;
            });
}

/* When a new coarse segment would be shorter than this, it is merged into the previous segment
 * instead, to avoid creating many tiny segments. */
static constexpr int64_t segment_size_threshold = 32;

static CoarseSegment &add_coarse_segment__full(CoarseSegment *prev_segment,
                                               const int64_t prev_boundary_index,
                                               const int64_t current_boundary_index,
                                               CoarseResult &result)
{
  const int64_t size = current_boundary_index - prev_boundary_index;
  if (prev_segment) {
    if (prev_segment->type == CoarseSegment::Type::Full &&
        prev_segment->bounds.one_after_last() == prev_boundary_index)
    {
      /* Extend the previous full segment. */
      prev_segment->bounds = prev_segment->bounds.with_new_end(current_boundary_index);
      return *prev_segment;
    }
    if (current_boundary_index - prev_segment->bounds.start() < max_segment_size) {
      if (prev_segment->bounds.size() + size < segment_size_threshold) {
        /* Extend the previous segment because it's so small and change it into an unknown one. */
        prev_segment->bounds = prev_segment->bounds.with_new_end(current_boundary_index);
        prev_segment->type = CoarseSegment::Type::Unknown;
        return *prev_segment;
      }
    }
  }
  result.segments.append(
      {CoarseSegment::Type::Full,
       IndexRange::from_begin_end(prev_boundary_index, current_boundary_index)});
  return result.segments.last();
}

static CoarseSegment &add_coarse_segment__unknown(CoarseSegment *prev_segment,
                                                  const int64_t prev_boundary_index,
                                                  const int64_t current_boundary_index,
                                                  CoarseResult &result)
{
  if (prev_segment) {
    if (prev_segment->bounds.start() + segment_size_threshold >= prev_boundary_index) {
      /* The previous segment is very short, so extend it. */
      prev_segment->type = CoarseSegment::Type::Unknown;
      prev_segment->bounds = prev_segment->bounds.with_new_end(current_boundary_index);
      return *prev_segment;
    }
  }
  result.segments.append(
      {CoarseSegment::Type::Unknown,
       IndexRange::from_begin_end(prev_boundary_index, current_boundary_index)});
  return result.segments.last();
}

static CoarseSegment &add_coarse_segment__copy(CoarseSegment *prev_segment,
                                               const int64_t prev_boundary_index,
                                               const int64_t current_boundary_index,
                                               const IndexMask &copy_from_mask,
                                               CoarseResult &result)
{
  if (prev_segment) {
    if (prev_segment->type == CoarseSegment::Type::Copy &&
        prev_segment->bounds.one_after_last() == prev_boundary_index &&
        prev_segment->mask == &copy_from_mask)
    {
      /* Can extend the previous copy segment. */
      prev_segment->bounds = prev_segment->bounds.with_new_end(current_boundary_index);
      return *prev_segment;
    }
    if (prev_segment->bounds.start() + segment_size_threshold >= current_boundary_index) {
      /* The previous and this segment together are very short, so better merge them together. */
      prev_segment->bounds = prev_segment->bounds.with_new_end(current_boundary_index);
      prev_segment->type = CoarseSegment::Type::Unknown;
      return *prev_segment;
    }
  }
  result.segments.append({CoarseSegment::Type::Copy,
                          IndexRange::from_begin_end(prev_boundary_index, current_boundary_index),
                          &copy_from_mask});
  return result.segments.last();
}

static void evaluate_coarse_union(const Span<CourseBoundary> boundaries, CoarseResult &r_result)
{
  if (boundaries.is_empty()) {
    return;
  }

  CoarseResult &result = r_result;
  CoarseSegment *prev_segment = nullptr;
  Vector<const CoarseSegment *, 16> active_segments;
  int64_t prev_boundary_index = boundaries[0].index;

  for (const CourseBoundary &boundary : boundaries) {
    if (prev_boundary_index < boundary.index) {
      /* Compute some properties of the input segments that were active between the current and
       * the previous boundary. */
      bool has_full = false;
      bool has_unknown = false;
      bool copy_from_single_mask = true;
      const IndexMask *copy_from_mask = nullptr;
      for (const CoarseSegment *active_segment : active_segments) {
        switch (active_segment->type) {
          case CoarseSegment::Type::Unknown: {
            has_unknown = true;
            break;
          }
          case CoarseSegment::Type::Full: {
            has_full = true;
            break;
          }
          case CoarseSegment::Type::Copy: {
            if (!ELEM(copy_from_mask, nullptr, active_segment->mask)) {
              copy_from_single_mask = false;
            }
            copy_from_mask = active_segment->mask;
            break;
          }
        }
      }
      /* Determine the resulting coarse segment type based on the properties computed above. */
      if (has_full) {
        prev_segment = &add_coarse_segment__full(
            prev_segment, prev_boundary_index, boundary.index, result);
      }
      else if (has_unknown || !copy_from_single_mask) {
        prev_segment = &add_coarse_segment__unknown(
            prev_segment, prev_boundary_index, boundary.index, result);
      }
      else if (copy_from_mask != nullptr && copy_from_single_mask) {
        prev_segment = &add_coarse_segment__copy(
            prev_segment, prev_boundary_index, boundary.index, *copy_from_mask, result);
      }

      prev_boundary_index = boundary.index;
    }

    /* Update active segments. */
    if (boundary.is_begin) {
      active_segments.append(boundary.segment);
    }
    else {
      active_segments.remove_first_occurrence_and_reorder(boundary.segment);
    }
  }
}

static void evaluate_coarse_intersection(const Span<CourseBoundary> boundaries,
                                         const int64_t terms_num,
                                         CoarseResult &r_result)
{
  if (boundaries.is_empty()) {
    return;
  }

  CoarseResult &result = r_result;
  CoarseSegment *prev_segment = nullptr;
  Vector<const CoarseSegment *, 16> active_segments;
  int64_t prev_boundary_index = boundaries[0].index;

  for (const CourseBoundary &boundary : boundaries) {
    if (prev_boundary_index < boundary.index) {
      /* The output can only contain anything if one segment of each term is active. */
      if (active_segments.size() == terms_num) {
        /* Compute some properties of the input segments that were active between the current and
         * the previous boundary. */
        int full_count = 0;
        int unknown_count = 0;
        int copy_count = 0;
        bool copy_from_single_mask = true;
        const IndexMask *copy_from_mask = nullptr;
        for (const CoarseSegment *active_segment : active_segments) {
          switch (active_segment->type) {
            case CoarseSegment::Type::Unknown: {
              unknown_count++;
              break;
            }
            case CoarseSegment::Type::Full: {
              full_count++;
              break;
            }
            case CoarseSegment::Type::Copy: {
              copy_count++;
              if (!ELEM(copy_from_mask, nullptr, active_segment->mask)) {
                copy_from_single_mask = false;
              }
              copy_from_mask = active_segment->mask;
              break;
            }
          }
        }
        /* Determine the resulting coarse segment type based on the properties computed above. */
        BLI_assert(full_count + unknown_count + copy_count == terms_num);
        if (full_count == terms_num) {
          prev_segment = &add_coarse_segment__full(
              prev_segment, prev_boundary_index, boundary.index, result);
        }
        else if (unknown_count > 0 || copy_count < terms_num || !copy_from_single_mask) {
          prev_segment = &add_coarse_segment__unknown(
              prev_segment, prev_boundary_index, boundary.index, result);
        }
        else if (copy_count == terms_num && copy_from_single_mask) {
          prev_segment = &add_coarse_segment__copy(
              prev_segment, prev_boundary_index, boundary.index, *copy_from_mask, result);
        }
      }

      prev_boundary_index = boundary.index;
    }

    /* Update active segments. */
    if (boundary.is_begin) {
      active_segments.append(boundary.segment);
    }
    else {
      active_segments.remove_first_occurrence_and_reorder(boundary.segment);
    }
  }
}

static void evaluate_coarse_difference(const Span<DifferenceCourseBoundary> boundaries,
                                       CoarseResult &r_result)
{
  if (boundaries.is_empty()) {
    return;
  }

  CoarseResult &result = r_result;
  CoarseSegment *prev_segment = nullptr;
  Vector<const CoarseSegment *> active_main_segments;
  Vector<const CoarseSegment *, 16> active_subtract_segments;
  int64_t prev_boundary_index = boundaries[0].index;

  for (const DifferenceCourseBoundary &boundary : boundaries) {
    if (prev_boundary_index < boundary.index) {
      /* There is only one main term, so at most one main segment can be active at once. */
      BLI_assert(active_main_segments.size() <= 1);
      if (active_main_segments.size() == 1) {
        const CoarseSegment &active_main_segment = *active_main_segments[0];
        /* Compute some properties of the input segments that were active between the current and
         * the previous boundary. */
        bool has_subtract_full = false;
        bool has_subtract_same_mask = false;
        for (const CoarseSegment *active_subtract_segment : active_subtract_segments) {
          switch (active_subtract_segment->type) {
            case CoarseSegment::Type::Unknown: {
              break;
            }
            case CoarseSegment::Type::Full: {
              has_subtract_full = true;
              break;
            }
            case CoarseSegment::Type::Copy: {
              if (active_main_segment.type == CoarseSegment::Type::Copy) {
                if (active_main_segment.mask == active_subtract_segment->mask) {
                  has_subtract_same_mask = true;
                }
              }
              break;
            }
          }
        }
        /* Determine the resulting coarse segment type based on the properties computed above. */
        if (has_subtract_full) {
          /* Do nothing, the resulting segment is empty for the current range. */
        }
        else {
          switch (active_main_segment.type) {
            case CoarseSegment::Type::Unknown: {
              prev_segment = &add_coarse_segment__unknown(
                  prev_segment, prev_boundary_index, boundary.index, result);
              break;
            }
            case CoarseSegment::Type::Full: {
              if (active_subtract_segments.is_empty()) {
                prev_segment = &add_coarse_segment__full(
                    prev_segment, prev_boundary_index, boundary.index, result);
              }
              else {
                prev_segment = &add_coarse_segment__unknown(
                    prev_segment, prev_boundary_index, boundary.index, result);
              }
              break;
            }
            case CoarseSegment::Type::Copy: {
              if (active_subtract_segments.is_empty()) {
                prev_segment = &add_coarse_segment__copy(prev_segment,
                                                         prev_boundary_index,
                                                         boundary.index,
                                                         *active_main_segment.mask,
                                                         result);
              }
              else if (has_subtract_same_mask) {
                /* Do nothing, subtracting a mask from itself results in an empty mask. */
              }
              else {
                prev_segment = &add_coarse_segment__unknown(
                    prev_segment, prev_boundary_index, boundary.index, result);
              }
              break;
            }
          }
        }
      }

      prev_boundary_index = boundary.index;
    }

    /* Update active segments. */
    if (boundary.is_main) {
      if (boundary.is_begin) {
        active_main_segments.append(boundary.segment);
      }
      else {
        active_main_segments.remove_first_occurrence_and_reorder(boundary.segment);
      }
    }
    else {
      if (boundary.is_begin) {
        active_subtract_segments.append(boundary.segment);
      }
      else {
        active_subtract_segments.remove_first_occurrence_and_reorder(boundary.segment);
      }
    }
  }
}

/**
 * Computes a coarse approximation of the expression result: a sequence of non-overlapping
 * segments that are known to be full, copied from an input mask, or still unknown. Unknown
 * segments have to be evaluated exactly in a second pass.
 */
static CoarseResult evaluate_coarse(const Expr &root_expression,
                                    const Span<const Expr *> eval_order,
                                    const std::optional<IndexRange> eval_bounds = std::nullopt)
{
  /* An expression result for each intermediate expression. */
  Array<std::optional<CoarseResult>, inline_expr_array_size> expression_results(
      root_expression.expression_array_size());

  /* Process expressions in a pre-determined order. */
  for (const Expr *expression : eval_order) {
    CoarseResult &expr_result = expression_results[expression->index].emplace();
    switch (expression->type) {
      case Expr::Type::Atomic: {
        const AtomicExpr &expr = expression->as_atomic();

        IndexMask mask;
        if (eval_bounds.has_value()) {
          mask = expr.mask->slice_content(*eval_bounds);
        }
        else {
          mask = *expr.mask;
        }

        if (!mask.is_empty()) {
          const IndexRange bounds = mask.bounds();
          if (const std::optional<IndexRange> range = mask.to_range()) {
            expr_result.segments.append({CoarseSegment::Type::Full, bounds});
          }
          else {
            expr_result.segments.append({CoarseSegment::Type::Copy, bounds, expr.mask});
          }
        }
        break;
      }
      case Expr::Type::Union: {
        const UnionExpr &expr = expression->as_union();
        Vector<CourseBoundary> boundaries;
        for (const Expr *term : expr.terms) {
          const CoarseResult &term_result = *expression_results[term->index];
          for (const CoarseSegment &segment : term_result.segments) {
            boundaries.append({segment.bounds.first(), true, &segment});
            boundaries.append({segment.bounds.one_after_last(), false, &segment});
          }
        }
        sort_course_boundaries(boundaries);
        evaluate_coarse_union(boundaries, expr_result);
        break;
      }
      case Expr::Type::Intersection: {
        const IntersectionExpr &expr = expression->as_intersection();
        Vector<CourseBoundary> boundaries;
        for (const Expr *term : expr.terms) {
          const CoarseResult &term_result = *expression_results[term->index];
          for (const CoarseSegment &segment : term_result.segments) {
            boundaries.append({segment.bounds.first(), true, &segment});
            boundaries.append({segment.bounds.one_after_last(), false, &segment});
          }
        }
        sort_course_boundaries(boundaries);
        evaluate_coarse_intersection(boundaries, expr.terms.size(), expr_result);
        break;
      }
      case Expr::Type::Difference: {
        const DifferenceExpr &expr = expression->as_difference();
        Vector<DifferenceCourseBoundary> boundaries;
        const CoarseResult &main_term_result = *expression_results[expr.terms[0]->index];
        for (const CoarseSegment &segment : main_term_result.segments) {
          boundaries.append({{segment.bounds.first(), true, &segment}, true});
          boundaries.append({{segment.bounds.one_after_last(), false, &segment}, true});
        }
        for (const Expr *term : expr.terms.as_span().drop_front(1)) {
          const CoarseResult &term_result = *expression_results[term->index];
          for (const CoarseSegment &segment : term_result.segments) {
            boundaries.append({{segment.bounds.first(), true, &segment}, false});
            boundaries.append({{segment.bounds.one_after_last(), false, &segment}, false});
          }
        }
        sort_course_boundaries(boundaries);
        evaluate_coarse_difference(boundaries, expr_result);
        break;
      }
    }
  }

  CoarseResult &final_result = *expression_results[root_expression.index];
  return std::move(final_result);
}
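
/* To illustrate the coarse pass (hypothetical inputs): for the union of a mask covering the full
 * range [0, 100) and a mask containing {5, 20, 90}, the boundary sweep produces a single Full
 * segment with bounds [0, 100) without ever looking at individual indices. */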

static Span<int16_t> bits_to_indices(const BoundedBitSpan bits, LinearAllocator<> &allocator)
{
  /* TODO: Could first count the number of set bits. */
  Vector<int16_t, max_segment_size> indices_vec;
  bits::foreach_1_index(bits, [&](const int64_t i) {
    /* The bit span is at most #max_segment_size long, so the index fits into 16 bits. */
    indices_vec.append_unchecked(int16_t(i));
  });
  return allocator.construct_array_copy<int16_t>(indices_vec);
}

/**
 * Evaluates the expression exactly within the given bounds by converting the input masks to bit
 * spans and combining them with bit-wise operations. This processes many indices at once without
 * branching on individual indices.
 */
static IndexMaskSegment evaluate_exact_with_bits(const Expr &root_expression,
                                                 LinearAllocator<> &allocator,
                                                 const IndexRange bounds,
                                                 const Span<const Expr *> eval_order)
{
  BLI_assert(bounds.size() <= max_segment_size);
  const int64_t bounds_min = bounds.start();
  const int expr_array_size = root_expression.expression_array_size();

  /* Make bit span sizes a multiple of `BitsPerInt`. This allows the bit-wise operations to run a
   * bit more efficiently, because only full integers are processed. */
  const int64_t ints_in_bounds = ceil_division(bounds.size(), bits::BitsPerInt);
  BitGroupVector<16 * 1024> expression_results(
      expr_array_size, ints_in_bounds * bits::BitsPerInt, false);

  for (const Expr *expression : eval_order) {
    MutableBoundedBitSpan expr_result = expression_results[expression->index];
    switch (expression->type) {
      case Expr::Type::Atomic: {
        const AtomicExpr &expr = expression->as_atomic();
        const IndexMask mask = expr.mask->slice_content(bounds);
        mask.to_bits(expr_result, -bounds_min);
        break;
      }
      case Expr::Type::Union: {
        for (const Expr *term : expression->terms) {
          expr_result |= expression_results[term->index];
        }
        break;
      }
      case Expr::Type::Intersection: {
        bits::copy_from_or(expr_result, expression_results[expression->terms[0]->index]);
        for (const Expr *term : expression->terms.as_span().drop_front(1)) {
          expr_result &= expression_results[term->index];
        }
        break;
      }
      case Expr::Type::Difference: {
        bits::copy_from_or(expr_result, expression_results[expression->terms[0]->index]);
        for (const Expr *term : expression->terms.as_span().drop_front(1)) {
          bits::mix_into_first_expr(
              [](const bits::BitInt a, const bits::BitInt b) { return a & ~b; },
              expr_result,
              expression_results[term->index]);
        }
        break;
      }
    }
  }
  const BoundedBitSpan final_bits = expression_results[root_expression.index];
  const Span<int16_t> indices = bits_to_indices(final_bits, allocator);
  return IndexMaskSegment(bounds_min, indices);
}

static IndexMaskSegment union_index_mask_segments(const Span<IndexMaskSegment> segments,
                                                  const int64_t bounds_min,
                                                  int16_t *r_values)
{
  if (segments.is_empty()) {
    return {};
  }
  if (segments.size() == 1) {
    return segments[0];
  }
  if (segments.size() == 2) {
    const IndexMaskSegment a = segments[0].shift(-bounds_min);
    const IndexMaskSegment b = segments[1].shift(-bounds_min);
    const int64_t size = std::set_union(a.begin(), a.end(), b.begin(), b.end(), r_values) -
                         r_values;
    return {bounds_min, {r_values, size}};
  }

  /* Sort input segments by their size, so that smaller segments are unioned first. This results
   * in smaller intermediate arrays and thus less work overall. */
  Vector<IndexMaskSegment> sorted_segments(segments);
  std::sort(
      sorted_segments.begin(),
      sorted_segments.end(),
      [](const IndexMaskSegment &a, const IndexMaskSegment &b) { return a.size() < b.size(); });

  std::array<int16_t, max_segment_size> tmp_indices;
  /* Can use #r_values for temporary values because if it's large enough for the final result,
   * it's also large enough for intermediate results. */
  int16_t *buffer_a = r_values;
  int16_t *buffer_b = tmp_indices.data();

  if (sorted_segments.size() % 2 == 1) {
    /* Swap buffers so that the result is in #r_values in the end. */
    std::swap(buffer_a, buffer_b);
  }

  int64_t count = 0;
  {
    /* Initial union. */
    const IndexMaskSegment a = sorted_segments[0].shift(-bounds_min);
    const IndexMaskSegment b = sorted_segments[1].shift(-bounds_min);
    int16_t *dst = buffer_a;
    count = std::set_union(a.begin(), a.end(), b.begin(), b.end(), dst) - dst;
  }

  /* Union one input into the result at a time. In theory, one could write an algorithm that
   * unions multiple sorted arrays at once, but that's more complex and it's not obvious that it
   * would be faster in the end. */
  for (const int64_t segment_i : sorted_segments.index_range().drop_front(2)) {
    const int16_t *a = buffer_a;
    const IndexMaskSegment b = sorted_segments[segment_i].shift(-bounds_min);
    int16_t *dst = buffer_b;
    count = std::set_union(a, a + count, b.begin(), b.end(), dst) - dst;
    std::swap(buffer_a, buffer_b);
  }
  return {bounds_min, {r_values, count}};
}
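
/* To illustrate why the inputs are sorted by size (hypothetical sizes): for segments with sizes
 * {100, 8, 4}, the two smallest are unioned first into at most 12 indices, so the 100-index
 * segment only participates in the final merge and the intermediate buffers stay small. */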

static IndexMaskSegment intersect_index_mask_segments(const Span<IndexMaskSegment> segments,
                                                      const int64_t bounds_min,
                                                      int16_t *r_values)
{
  if (segments.is_empty()) {
    return {};
  }
  if (segments.size() == 1) {
    return segments[0];
  }
  if (segments.size() == 2) {
    const IndexMaskSegment a = segments[0].shift(-bounds_min);
    const IndexMaskSegment b = segments[1].shift(-bounds_min);
    const int64_t size = std::set_intersection(a.begin(), a.end(), b.begin(), b.end(), r_values) -
                         r_values;
    return {bounds_min, {r_values, size}};
  }

  /* Intersect smaller segments first, because then the intermediate results will generally be
   * smaller. */
  Vector<IndexMaskSegment> sorted_segments(segments);
  std::sort(
      sorted_segments.begin(),
      sorted_segments.end(),
      [](const IndexMaskSegment &a, const IndexMaskSegment &b) { return a.size() < b.size(); });

  std::array<int16_t, max_segment_size> tmp_indices_1;
  std::array<int16_t, max_segment_size> tmp_indices_2;
  int16_t *buffer_a = tmp_indices_1.data();
  int16_t *buffer_b = tmp_indices_2.data();

  int64_t count = 0;
  {
    /* Initial intersection. */
    const IndexMaskSegment a = sorted_segments[0].shift(-bounds_min);
    const IndexMaskSegment b = sorted_segments[1].shift(-bounds_min);
    int16_t *dst = buffer_a;
    count = std::set_intersection(a.begin(), a.end(), b.begin(), b.end(), dst) - dst;
  }

  for (const int64_t segment_i : sorted_segments.index_range().drop_front(2)) {
    const int16_t *a = buffer_a;
    const IndexMaskSegment b = sorted_segments[segment_i].shift(-bounds_min);
    /* The result of the final intersection should be written directly to #r_values to avoid an
     * additional copy in the end. */
    int16_t *dst = (segment_i == sorted_segments.size() - 1) ? r_values : buffer_b;
    count = std::set_intersection(a, a + count, b.begin(), b.end(), dst) - dst;
    std::swap(buffer_a, buffer_b);
  }
  return {bounds_min, {r_values, count}};
}

/**
 * Computes all indices in #main_segment that are not contained in any of the
 * #subtract_segments.
 */
static IndexMaskSegment difference_index_mask_segments(
    const IndexMaskSegment main_segment,
    const Span<IndexMaskSegment> subtract_segments,
    const int64_t bounds_min,
    int16_t *r_values)
{
  if (main_segment.is_empty()) {
    return {};
  }
  if (subtract_segments.is_empty()) {
    return main_segment;
  }
  if (subtract_segments.size() == 1) {
    const IndexMaskSegment shifted_main_segment = main_segment.shift(-bounds_min);
    const IndexMaskSegment subtract_segment = subtract_segments[0].shift(-bounds_min);
    const int64_t size = std::set_difference(shifted_main_segment.begin(),
                                             shifted_main_segment.end(),
                                             subtract_segment.begin(),
                                             subtract_segment.end(),
                                             r_values) -
                         r_values;
    return {bounds_min, {r_values, size}};
  }

  int64_t subtract_count = 0;
  for (const IndexMaskSegment &segment : subtract_segments) {
    subtract_count += segment.size();
  }
  if (subtract_count < main_segment.size() / 2) {
    /* It can be more efficient to union all the subtract indices first before computing the
     * difference. This avoids multiple potentially large intermediate arrays. */
    std::array<int16_t, max_segment_size> union_indices;
    const IndexMaskSegment shifted_main_segment = main_segment.shift(-bounds_min);
    const IndexMaskSegment unioned_subtract_segment =
        union_index_mask_segments(subtract_segments, bounds_min, union_indices.data())
            .shift(-bounds_min);
    const int64_t size = std::set_difference(shifted_main_segment.begin(),
                                             shifted_main_segment.end(),
                                             unioned_subtract_segment.begin(),
                                             unioned_subtract_segment.end(),
                                             r_values) -
                         r_values;
    return {bounds_min, {r_values, size}};
  }

  /* Sort larger segments to the front. This way the intermediate arrays are likely smaller. */
  Vector<IndexMaskSegment> sorted_subtract_segments(subtract_segments);
  std::sort(
      sorted_subtract_segments.begin(),
      sorted_subtract_segments.end(),
      [](const IndexMaskSegment &a, const IndexMaskSegment &b) { return a.size() > b.size(); });

  std::array<int16_t, max_segment_size> tmp_indices_1;
  std::array<int16_t, max_segment_size> tmp_indices_2;
  int16_t *buffer_a = tmp_indices_1.data();
  int16_t *buffer_b = tmp_indices_2.data();

  int64_t count = 0;
  {
    /* Initial difference. */
    const IndexMaskSegment shifted_main_segment = main_segment.shift(-bounds_min);
    const IndexMaskSegment subtract_segment = sorted_subtract_segments[0].shift(-bounds_min);
    int16_t *dst = buffer_a;
    count = std::set_difference(shifted_main_segment.begin(),
                                shifted_main_segment.end(),
                                subtract_segment.begin(),
                                subtract_segment.end(),
                                dst) -
            dst;
  }

  for (const int64_t segment_i : sorted_subtract_segments.index_range().drop_front(1)) {
    const IndexMaskSegment &subtract_segment = sorted_subtract_segments[segment_i].shift(
        -bounds_min);
    /* The final result should be written directly to #r_values to avoid an additional copy. */
    int16_t *dst = (segment_i == sorted_subtract_segments.size() - 1) ? r_values : buffer_b;
    count = std::set_difference(buffer_a,
                                buffer_a + count,
                                subtract_segment.begin(),
                                subtract_segment.end(),
                                dst) -
            dst;
    std::swap(buffer_a, buffer_b);
  }
  return {bounds_min, {r_values, count}};
}
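
/* To illustrate the union-first heuristic above (hypothetical sizes): when subtracting segments
 * of sizes {30, 20} from a main segment with 1000 indices, their total of 50 is below half of
 * 1000, so the subtract segments are first unioned into at most 50 indices and the large main
 * segment is scanned only once instead of once per subtract segment. */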

/**
 * Evaluates the expression exactly within the given bounds using set operations on sorted index
 * arrays (e.g. `std::set_union`). This requires that every input mask has at most one segment
 * within the bounds.
 */
static IndexMaskSegment evaluate_exact_with_indices(const Expr &root_expression,
                                                    LinearAllocator<> &allocator,
                                                    const IndexRange bounds,
                                                    const Span<const Expr *> eval_order)
{
  BLI_assert(bounds.size() <= max_segment_size);
  const int64_t bounds_min = bounds.start();
  const int expr_array_size = root_expression.expression_array_size();
  Array<IndexMaskSegment, inline_expr_array_size> results(expr_array_size);
  for (const Expr *expression : eval_order) {
    switch (expression->type) {
      case Expr::Type::Atomic: {
        const AtomicExpr &expr = expression->as_atomic();
        const IndexMask mask = expr.mask->slice_content(bounds);
        /* The caller should make sure that the bounds are aligned to segment bounds. */
        BLI_assert(mask.segments_num() <= 1);
        if (mask.segments_num() == 1) {
          results[expression->index] = mask.segment(0);
        }
        break;
      }
      case Expr::Type::Union: {
        const UnionExpr &expr = expression->as_union();
        Array<IndexMaskSegment> term_segments(expr.terms.size());
        int64_t result_size_upper_bound = 0;
        bool used_short_circuit = false;
        for (const int64_t term_i : expr.terms.index_range()) {
          const Expr &term = *expr.terms[term_i];
          const IndexMaskSegment term_segment = results[term.index];
          if (term_segment.size() == bounds.size()) {
            /* Can skip computing the union if we know that one of the inputs contains all
             * possible indices already. */
            results[expression->index] = term_segment;
            used_short_circuit = true;
            break;
          }
          term_segments[term_i] = term_segment;
          result_size_upper_bound += term_segment.size();
        }
        if (used_short_circuit) {
          break;
        }
        result_size_upper_bound = std::min(result_size_upper_bound, bounds.size());
        MutableSpan<int16_t> dst = allocator.allocate_array<int16_t>(result_size_upper_bound);
        const IndexMaskSegment result_segment = union_index_mask_segments(
            term_segments, bounds_min, dst.data());
        allocator.free_end_of_previous_allocation(dst.size_in_bytes(),
                                                  result_segment.base_span().end());
        results[expression->index] = result_segment;
        break;
      }
      case Expr::Type::Intersection: {
        const IntersectionExpr &expr = expression->as_intersection();
        Array<IndexMaskSegment> term_segments(expr.terms.size());
        int64_t result_size_upper_bound = bounds.size();
        bool used_short_circuit = false;
        for (const int64_t term_i : expr.terms.index_range()) {
          const Expr &term = *expr.terms[term_i];
          const IndexMaskSegment term_segment = results[term.index];
          if (term_segment.is_empty()) {
            /* Can skip computing the intersection if we know that one of the inputs is empty. */
            results[expression->index] = {};
            used_short_circuit = true;
            break;
          }
          result_size_upper_bound = std::min(result_size_upper_bound, term_segment.size());
          term_segments[term_i] = term_segment;
        }
        if (used_short_circuit) {
          break;
        }
        MutableSpan<int16_t> dst = allocator.allocate_array<int16_t>(result_size_upper_bound);
        const IndexMaskSegment result_segment = intersect_index_mask_segments(
            term_segments, bounds_min, dst.data());
        allocator.free_end_of_previous_allocation(dst.size_in_bytes(),
                                                  result_segment.base_span().end());
        results[expression->index] = result_segment;
        break;
      }
      case Expr::Type::Difference: {
        const DifferenceExpr &expr = expression->as_difference();
        const Expr &main_term = *expr.terms[0];
        const IndexMaskSegment main_segment = results[main_term.index];
        if (main_segment.is_empty()) {
          /* Can skip the computation if the main segment is empty. */
          results[expression->index] = {};
          break;
        }
        int64_t result_size_upper_bound = main_segment.size();
        bool used_short_circuit = false;
        Array<IndexMaskSegment> subtract_segments(expr.terms.size() - 1);
        for (const int64_t term_i : expr.terms.index_range().drop_front(1)) {
          const Expr &subtract_term = *expr.terms[term_i];
          const IndexMaskSegment term_segment = results[subtract_term.index];
          if (term_segment.size() == bounds.size()) {
            /* Can skip computing the difference if we know that one of the subtract-terms is
             * full. */
            results[expression->index] = {};
            used_short_circuit = true;
            break;
          }
          result_size_upper_bound = std::min(result_size_upper_bound,
                                             bounds.size() - term_segment.size());
          subtract_segments[term_i - 1] = term_segment;
        }
        if (used_short_circuit) {
          break;
        }
        MutableSpan<int16_t> dst = allocator.allocate_array<int16_t>(result_size_upper_bound);
        const IndexMaskSegment result_segment = difference_index_mask_segments(
            main_segment, subtract_segments, bounds_min, dst.data());
        allocator.free_end_of_previous_allocation(dst.size_in_bytes(),
                                                  result_segment.base_span().end());
        results[expression->index] = result_segment;
        break;
      }
    }
  }
  return results[root_expression.index];
}

/**
 * Converts the evaluated segments into index mask segments that can be used to build the final
 * mask. Assumes the evaluated segments are sorted already.
 */
static Vector<IndexMaskSegment> build_result_mask_segments(
    const Span<EvaluatedSegment> evaluated_segments)
{
  Vector<IndexMaskSegment> result_mask_segments;
  for (const EvaluatedSegment &evaluated_segment : evaluated_segments) {
    switch (evaluated_segment.type) {
      case EvaluatedSegment::Type::Full: {
        index_range_to_mask_segments(evaluated_segment.bounds, result_mask_segments);
        break;
      }
      case EvaluatedSegment::Type::Copy: {
        const IndexMask sliced_mask = evaluated_segment.copy_mask->slice_content(
            evaluated_segment.bounds);
        sliced_mask.foreach_segment(
            [&](const IndexMaskSegment &segment) { result_mask_segments.append(segment); });
        break;
      }
      case EvaluatedSegment::Type::Indices: {
        result_mask_segments.append(evaluated_segment.indices);
        break;
      }
    }
  }
  return result_mask_segments;
}

/**
 * Computes an order in which the expressions are evaluated, such that all terms of an expression
 * are evaluated before the expression itself.
 */
static Vector<const Expr *, inline_expr_array_size> compute_eval_order(const Expr &root_expression)
{
  Vector<const Expr *, inline_expr_array_size> eval_order;
  if (root_expression.type == Expr::Type::Atomic) {
    eval_order.append(&root_expression);
    return eval_order;
  }

  Array<bool, inline_expr_array_size> is_evaluated_states(root_expression.expression_array_size(),
                                                          false);
  Stack<const Expr *> expr_stack;
  expr_stack.push(&root_expression);

  while (!expr_stack.is_empty()) {
    const Expr &expression = *expr_stack.peek();
    bool &is_evaluated = is_evaluated_states[expression.index];
    if (is_evaluated) {
      expr_stack.pop();
      continue;
    }
    bool all_terms_evaluated = true;
    for (const Expr *term : expression.terms) {
      bool &term_evaluated = is_evaluated_states[term->index];
      if (!term_evaluated) {
        if (term->type == Expr::Type::Atomic) {
          eval_order.append(term);
          term_evaluated = true;
        }
        else {
          expr_stack.push(term);
          all_terms_evaluated = false;
        }
      }
    }
    if (all_terms_evaluated) {
      eval_order.append(&expression);
      is_evaluated = true;
      expr_stack.pop();
    }
  }

  return eval_order;
}
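
/* To illustrate the traversal (hypothetical expression): for `(A | B) - C` with atomic masks A,
 * B and C, the computed order is C, A, B, A | B, (A | B) - C. Atomic terms are appended as soon
 * as they are encountered, and every expression comes after all of its terms. */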

/* Decide which exact evaluation mode is likely the fastest for the given expression. */
static ExactEvalMode determine_exact_eval_mode(const Expr &root_expression)
{
  for (const Expr *term : root_expression.terms) {
    if (!term->terms.is_empty()) {
      /* Use bits when there are nested expressions as this is often faster. */
      return ExactEvalMode::Bits;
    }
  }
  return ExactEvalMode::Indices;
}

static void evaluate_coarse_and_split_until_segments_are_short(
    const Expr &root_expression,
    const Span<const Expr *> eval_order,
    Vector<EvaluatedSegment, 16> &r_evaluated_segments,
    Vector<IndexRange, 16> &r_short_unknown_segments)
{
  /* Coarse evaluation splits the full range into segments. Long segments are split up and get
   * another coarse evaluation. Short segments will be evaluated exactly. */
  Stack<IndexRange, 16> long_unknown_segments;

  /* The point at which a range starts being "short". */
  const int64_t coarse_segment_size_threshold = max_segment_size;

  /* Checks the coarse result and inserts its segments into either `long_unknown_segments` for
   * further coarse evaluation, `r_short_unknown_segments` for exact evaluation, or
   * `r_evaluated_segments` if no further evaluation is necessary. */
  auto handle_coarse_result = [&](const CoarseResult &coarse_result) {
    for (const CoarseSegment &segment : coarse_result.segments) {
      switch (segment.type) {
        case CoarseSegment::Type::Unknown: {
          if (segment.bounds.size() > coarse_segment_size_threshold) {
            long_unknown_segments.push(segment.bounds);
          }
          else {
            r_short_unknown_segments.append(segment.bounds);
          }
          break;
        }
        case CoarseSegment::Type::Copy: {
          BLI_assert(segment.mask);
          r_evaluated_segments.append(
              {EvaluatedSegment::Type::Copy, segment.bounds, segment.mask});
          break;
        }
        case CoarseSegment::Type::Full: {
          r_evaluated_segments.append({EvaluatedSegment::Type::Full, segment.bounds});
          break;
        }
      }
    }
  };

  /* Initial coarse evaluation without any explicit bounds. The bounds are implied by the index
   * masks used in the expression. */
  const CoarseResult initial_coarse_result = evaluate_coarse(root_expression, eval_order);
  handle_coarse_result(initial_coarse_result);

  /* Do coarse evaluation until all unknown segments are short enough for exact evaluation. */
  while (!long_unknown_segments.is_empty()) {
    const IndexRange unknown_bounds = long_unknown_segments.pop();
    const int64_t split_pos = unknown_bounds.size() / 2;
    const IndexRange left_half = unknown_bounds.take_front(split_pos);
    const IndexRange right_half = unknown_bounds.drop_front(split_pos);
    const CoarseResult left_result = evaluate_coarse(root_expression, eval_order, left_half);
    const CoarseResult right_result = evaluate_coarse(root_expression, eval_order, right_half);
    handle_coarse_result(left_result);
    handle_coarse_result(right_result);
  }
}
1123
1125 const Expr &root_expression,
1126 const ExactEvalMode exact_eval_mode,
1127 const Span<const Expr *> eval_order,
1128 const Span<IndexRange> short_unknown_segments,
1129 IndexMaskMemory &memory,
1130 Vector<EvaluatedSegment, 16> &r_evaluated_segments)
1131{
1132 /* Evaluate a segment exactly. */
1133 auto evaluate_unknown_segment = [&](const IndexRange bounds,
1134 LinearAllocator<> &allocator,
1135 Vector<EvaluatedSegment, 16> &r_local_evaluated_segments) {
1136 /* Use the predetermined evaluation mode. */
1137 switch (exact_eval_mode) {
1138 case ExactEvalMode::Bits: {
1140 root_expression, allocator, bounds, eval_order);
1141 if (!indices.is_empty()) {
1142 r_local_evaluated_segments.append(
1144 }
1145 break;
1146 }
1148 /* #evaluate_exact_with_indices requires that all index masks have a single segment in the
1149 * provided bounds. So split up the range into sub-ranges first if necessary. */
1150 Vector<int64_t, 16> split_indices;
1151 /* Always adding the beginning and end of the bounds simplifies the code below. */
1152 split_indices.extend({bounds.first(), bounds.one_after_last()});
1153 for (const int64_t eval_order_i : eval_order.index_range()) {
1154 const Expr &expr = *eval_order[eval_order_i];
1155 if (expr.type != Expr::Type::Atomic) {
1156 continue;
1157 }
1158 const AtomicExpr &atomic_expr = expr.as_atomic();
1159 const IndexMask mask = atomic_expr.mask->slice_content(bounds);
1160 const int64_t segments_num = mask.segments_num();
1161 if (segments_num <= 1) {
1162 /* This mask only has a single segment in the bounds anyway, so no extra split-position
1163 * is necessary. */
1164 continue;
1165 }
1166 /* Split at the beginning of each segment. Skipping the first, because that does not need
1167 * an extra split position. Alternatively, one could also split at the end of each
1168 * segment except the last one. It doesn't matter much. */
1169 for (const int64_t segment_i : IndexRange(segments_num).drop_front(1)) {
1170 const IndexMaskSegment segment = mask.segment(segment_i);
1171 split_indices.append(segment[0]);
1172 }
1173 }
1174 std::sort(split_indices.begin(), split_indices.end());
1175 for (const int64_t boundary_i : split_indices.index_range().drop_back(1)) {
1176 const IndexRange sub_bounds = IndexRange::from_begin_end(split_indices[boundary_i],
1177 split_indices[boundary_i + 1]);
1178 if (sub_bounds.is_empty()) {
1179 continue;
1180 }
1182 root_expression, allocator, sub_bounds, eval_order);
1183 if (!indices.is_empty()) {
1184 r_local_evaluated_segments.append(
1185 {EvaluatedSegment::Type::Indices, sub_bounds, nullptr, indices});
1186 }
1187 }
1188 break;
1189 }
1190 }
1191 };
1192
1193 /* Decide whether multi-threading should be used or not. There is some extra overhead even when
1194 * just attempting to use multi-threading. */
1195 const int64_t unknown_segment_eval_grain_size = 8;
1196 if (short_unknown_segments.size() < unknown_segment_eval_grain_size) {
1197 for (const IndexRange &bounds : short_unknown_segments) {
1198 evaluate_unknown_segment(bounds, memory, r_evaluated_segments);
1199 }
1200 }
1201 else {
1202 /* Do exact evaluation in multiple threads. The allocators and evaluated segments created by
1203 * each thread are merged in the end. */
1204 struct LocalData {
1205 LinearAllocator<> allocator;
1206 Vector<EvaluatedSegment, 16> evaluated_segments;
1207 };
1209 threading::parallel_for(short_unknown_segments.index_range(),
1210 unknown_segment_eval_grain_size,
1211 [&](const IndexRange range) {
1212 LocalData &data = data_by_thread.local();
1213 for (const IndexRange &bounds : short_unknown_segments.slice(range))
1214 {
1215 evaluate_unknown_segment(
1216 bounds, data.allocator, data.evaluated_segments);
1217 }
1218 });
1219 for (LocalData &data : data_by_thread) {
1220 if (!data.evaluated_segments.is_empty()) {
1221 r_evaluated_segments.extend(data.evaluated_segments);
1222 memory.transfer_ownership_from(data.allocator);
1223 }
1224 }
1225 }
1226}
1227
1229 IndexMaskMemory &memory)
1230{
1231 if (evaluated_segments.is_empty()) {
1232 return {};
1233 }
1234 if (evaluated_segments.size() == 1) {
1235 const EvaluatedSegment &evaluated_segment = evaluated_segments[0];
1236 switch (evaluated_segment.type) {
1238 return IndexMask(IndexRange(evaluated_segment.bounds));
1239 }
1241 return evaluated_segment.copy_mask->slice_content(evaluated_segment.bounds);
1242 }
1244 return IndexMask::from_segments({evaluated_segment.indices}, memory);
1245 }
1246 }
1247 }
1248
1249 std::sort(evaluated_segments.begin(),
1250 evaluated_segments.end(),
1251 [](const EvaluatedSegment &a, const EvaluatedSegment &b) {
1252 return a.bounds.start() < b.bounds.start();
1253 });
1254
1255 Vector<IndexMaskSegment> result_segments = build_result_mask_segments(evaluated_segments);
1256 return IndexMask::from_segments(result_segments, memory);
1257}

static IndexMask evaluate_expression_impl(const Expr &root_expression,
                                          IndexMaskMemory &memory,
                                          const ExactEvalMode exact_eval_mode)
{
  /* Precompute the evaluation order here, because it's potentially used many times throughout
   * the algorithm. */
  const Vector<const Expr *, inline_expr_array_size> eval_order = compute_eval_order(
      root_expression);

  /* Non-overlapping evaluated segments which become the resulting index mask in the end. Note
   * that these segments are only sorted at the end. */
  Vector<EvaluatedSegment, 16> evaluated_segments;
  Vector<IndexRange, 16> short_unknown_segments;

  evaluate_coarse_and_split_until_segments_are_short(
      root_expression, eval_order, evaluated_segments, short_unknown_segments);
  evaluate_short_unknown_segments_exactly(root_expression,
                                          exact_eval_mode,
                                          eval_order,
                                          short_unknown_segments,
                                          memory,
                                          evaluated_segments);
  return evaluated_segments_to_index_mask(evaluated_segments, memory);
}

IndexMask evaluate_expression(const Expr &expression, IndexMaskMemory &memory)
{
  const ExactEvalMode exact_eval_mode = determine_exact_eval_mode(expression);
  IndexMask mask = evaluate_expression_impl(expression, memory, exact_eval_mode);
#ifndef NDEBUG
  {
    /* Check that both exact eval modes have the same result. */
    const ExactEvalMode other_exact_eval_mode = (exact_eval_mode == ExactEvalMode::Bits) ?
                                                    ExactEvalMode::Indices :
                                                    ExactEvalMode::Bits;
    IndexMask other_mask = evaluate_expression_impl(expression, memory, other_exact_eval_mode);
    BLI_assert(mask == other_mask);
  }
#endif
  return mask;
}
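
/* Example usage (a minimal sketch; `mask_a`, `mask_b` and `mask_c` are hypothetical inputs):
 *
 *   ExprBuilder builder;
 *   const UnionExpr &union_expr = builder.merge({&mask_a, &mask_b});
 *   const DifferenceExpr &expr = builder.subtract(&union_expr, {&mask_c});
 *   IndexMaskMemory memory;
 *   const IndexMask result = evaluate_expression(expr, memory);
 */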

const UnionExpr &ExprBuilder::merge(const Span<Term> terms)
{
  Vector<const Expr *> term_expressions;
  for (const Term &term : terms) {
    term_expressions.append(&this->term_to_expr(term));
  }
  UnionExpr &expr = scope_.construct<UnionExpr>();
  expr.type = Expr::Type::Union;
  expr.index = expr_count_++;
  expr.terms = std::move(term_expressions);
  return expr;
}

const DifferenceExpr &ExprBuilder::subtract(const Term &main_term, const Span<Term> subtract_terms)
{
  Vector<const Expr *> term_expressions;
  term_expressions.append(&this->term_to_expr(main_term));
  for (const Term &subtract_term : subtract_terms) {
    term_expressions.append(&this->term_to_expr(subtract_term));
  }
  DifferenceExpr &expr = scope_.construct<DifferenceExpr>();
  expr.type = Expr::Type::Difference;
  expr.index = expr_count_++;
  expr.terms = std::move(term_expressions);
  return expr;
}

const IntersectionExpr &ExprBuilder::intersect(const Span<Term> terms)
{
  Vector<const Expr *> term_expressions;
  for (const Term &term : terms) {
    term_expressions.append(&this->term_to_expr(term));
  }
  IntersectionExpr &expr = scope_.construct<IntersectionExpr>();
  expr.type = Expr::Type::Intersection;
  expr.index = expr_count_++;
  expr.terms = std::move(term_expressions);
  return expr;
}

const Expr &ExprBuilder::term_to_expr(const Term &term)
{
  if (const Expr *const *expr = std::get_if<const Expr *>(&term)) {
    return **expr;
  }
  AtomicExpr &expr = scope_.construct<AtomicExpr>();
  expr.type = Expr::Type::Atomic;
  expr.index = expr_count_++;
  if (const IndexRange *range = std::get_if<IndexRange>(&term)) {
    expr.mask = &scope_.construct<IndexMask>(*range);
  }
  else {
    expr.mask = std::get<const IndexMask *>(term);
  }
  return expr;
}

}  // namespace blender::index_mask