Blender V4.3
blender::index_mask Namespace Reference

Namespaces

namespace  detail
 
namespace  tests
 

Classes

struct  AtomicExpr
 
struct  CoarseResult
 
struct  CoarseSegment
 
struct  CourseBoundary
 
struct  DifferenceCourseBoundary
 
struct  DifferenceExpr
 
struct  EvaluatedSegment
 
struct  Expr
 
class  ExprBuilder
 
class  IndexMask
 
struct  IndexMaskData
 
class  IndexMaskFromSegment
 
class  IndexMaskMemory
 
class  IndexMaskSegment
 
struct  IntersectionExpr
 
struct  ParallelSegmentsCollector
 
struct  RawMaskIterator
 
struct  UnionExpr
 

Enumerations

enum class  ExactEvalMode { Indices , Bits }
 

Functions

std::array< int16_t, max_segment_size > build_static_indices_array ()
 
const IndexMask & get_static_index_mask_for_min_size (const int64_t min_size)
 
std::ostream & operator<< (std::ostream &stream, const IndexMask &mask)
 
IndexMask evaluate_expression (const Expr &expression, IndexMaskMemory &memory)
 
template void build_reverse_map< int > (const IndexMask &mask, MutableSpan< int > r_map)
 
template<typename T , int64_t InlineBufferSize>
static void segments_from_indices (const Span< T > indices, LinearAllocator<> &allocator, Vector< IndexMaskSegment, InlineBufferSize > &r_segments)
 
static int64_t from_bits_batch_predicate (const IndexMaskSegment universe_segment, IndexRangesBuilder< int16_t > &builder, const BitSpan bits_slice)
 
static void segments_from_batch_predicate (const IndexMaskSegment universe_segment, LinearAllocator<> &allocator, const FunctionRef< int64_t(const IndexMaskSegment &universe_segment, IndexRangesBuilder< int16_t > &builder)> batch_predicate, Vector< IndexMaskSegment, 16 > &r_segments)
 
static Array< int16_t > build_every_nth_index_array (const int64_t n)
 
static Span< int16_t > get_every_nth_index (const int64_t n, const int64_t repetitions, IndexMaskMemory &memory)
 
static bool segments_is_equal (const IndexMaskSegment &a, const IndexMaskSegment &b)
 
bool operator== (const IndexMask &a, const IndexMask &b)
 
static void sort_course_boundaries (MutableSpan< CourseBoundary > boundaries)
 
static void sort_course_boundaries (MutableSpan< DifferenceCourseBoundary > boundaries)
 
static CoarseSegment & add_coarse_segment__full (CoarseSegment *prev_segment, const int64_t prev_boundary_index, const int64_t current_boundary_index, CoarseResult &result)
 
static CoarseSegment & add_coarse_segment__unknown (CoarseSegment *prev_segment, const int64_t prev_boundary_index, const int64_t current_boundary_index, CoarseResult &result)
 
static CoarseSegment & add_coarse_segment__copy (CoarseSegment *prev_segment, const int64_t prev_boundary_index, const int64_t current_boundary_index, const IndexMask &copy_from_mask, CoarseResult &result)
 
static void evaluate_coarse_union (const Span< CourseBoundary > boundaries, CoarseResult &r_result)
 
static void evaluate_coarse_intersection (const Span< CourseBoundary > boundaries, const int64_t terms_num, CoarseResult &r_result)
 
static void evaluate_coarse_difference (const Span< DifferenceCourseBoundary > boundaries, CoarseResult &r_result)
 
static CoarseResult evaluate_coarse (const Expr &root_expression, const Span< const Expr * > eval_order, const std::optional< IndexRange > eval_bounds=std::nullopt)
 
static Span< int16_t > bits_to_indices (const BoundedBitSpan bits, LinearAllocator<> &allocator)
 
static IndexMaskSegment evaluate_exact_with_bits (const Expr &root_expression, LinearAllocator<> &allocator, const IndexRange bounds, const Span< const Expr * > eval_order)
 
static IndexMaskSegment union_index_mask_segments (const Span< IndexMaskSegment > segments, const int64_t bounds_min, int16_t *r_values)
 
static IndexMaskSegment intersect_index_mask_segments (const Span< IndexMaskSegment > segments, const int64_t bounds_min, int16_t *r_values)
 
static IndexMaskSegment difference_index_mask_segments (const IndexMaskSegment main_segment, const Span< IndexMaskSegment > subtract_segments, const int64_t bounds_min, int16_t *r_values)
 
static IndexMaskSegment evaluate_exact_with_indices (const Expr &root_expression, LinearAllocator<> &allocator, const IndexRange bounds, const Span< const Expr * > eval_order)
 
static Vector< IndexMaskSegment > build_result_mask_segments (const Span< EvaluatedSegment > evaluated_segments)
 
static Vector< const Expr *, inline_expr_array_size > compute_eval_order (const Expr &root_expression)
 
static ExactEvalMode determine_exact_eval_mode (const Expr &root_expression)
 
static void evaluate_coarse_and_split_until_segments_are_short (const Expr &root_expression, const Span< const Expr * > eval_order, Vector< EvaluatedSegment, 16 > &r_evaluated_segments, Vector< IndexRange, 16 > &r_short_unknown_segments)
 
static void evaluate_short_unknown_segments_exactly (const Expr &root_expression, const ExactEvalMode exact_eval_mode, const Span< const Expr * > eval_order, const Span< IndexRange > short_unknown_segments, IndexMaskMemory &memory, Vector< EvaluatedSegment, 16 > &r_evaluated_segments)
 
static IndexMask evaluated_segments_to_index_mask (MutableSpan< EvaluatedSegment > evaluated_segments, IndexMaskMemory &memory)
 
static IndexMask evaluate_expression_impl (const Expr &root_expression, IndexMaskMemory &memory, const ExactEvalMode exact_eval_mode)
 
Utilities
const std::array< int16_t, max_segment_size > & get_static_indices_array ()
 
template<typename T >
void masked_fill (MutableSpan< T > data, const T &value, const IndexMask &mask)
 
template<typename T >
void build_reverse_map (const IndexMask &mask, MutableSpan< T > r_map)
 
int64_t consolidate_index_mask_segments (MutableSpan< IndexMaskSegment > segments, IndexMaskMemory &memory)
 
#RawMaskIterator Inline Methods
bool operator!= (const RawMaskIterator &a, const RawMaskIterator &b)
 
bool operator== (const RawMaskIterator &a, const RawMaskIterator &b)
 

Variables

static constexpr int64_t max_segment_size_shift = 14
 
static constexpr int64_t max_segment_size = (1 << max_segment_size_shift)
 
static constexpr int64_t max_segment_size_mask_low = max_segment_size - 1
 
static constexpr int64_t max_segment_size_mask_high = ~max_segment_size_mask_low
 
constexpr int64_t inline_expr_array_size = 16
 
static constexpr int64_t segment_size_threshold = 32
 

#IndexMask Inline Methods

template<typename Fn >
constexpr bool has_segment_and_start_parameter
 
void init_empty_mask (IndexMaskData &data)
 
template<typename T , typename Fn >
void optimized_foreach_index (const IndexMaskSegment segment, const Fn fn)
 
template<typename T , typename Fn >
void optimized_foreach_index_with_pos (const IndexMaskSegment segment, const int64_t segment_pos, const Fn fn)
 
bool operator!= (const IndexMask &a, const IndexMask &b)
 

Detailed Description

Expression evaluation has multiple phases:

  1. A coarse evaluation that tries to find segments which can be trivially evaluated. For example, taking the union of two overlapping ranges can be done in O(1) time.
  2. For all segments which can't be fully evaluated using coarse evaluation, an exact evaluation is done. This uses either an index-based or bit-based approach depending on a heuristic.
  3. Construct the final index mask based on the resulting intermediate segments.

Enumeration Type Documentation

◆ ExactEvalMode

There are different ways to do the exact evaluation. Depending on the expression or data, one or the other is more efficient.

Enumerator
Indices 

Does the evaluation by working directly with arrays of sorted indices. This is usually best when the expression does not have intermediate results, i.e. it is very simple.

Bits 

The evaluation works with bits. There is extra overhead to convert the input masks to bit arrays and to convert the final result back into indices. In exchange, the actual expression evaluation is significantly cheaper because it's just a bunch of bit operations. For larger expressions, this is typically much more efficient.

Definition at line 102 of file index_mask_expression.cc.

Function Documentation

◆ add_coarse_segment__copy()

◆ add_coarse_segment__full()

◆ add_coarse_segment__unknown()

static CoarseSegment & blender::index_mask::add_coarse_segment__unknown ( CoarseSegment * prev_segment,
const int64_t prev_boundary_index,
const int64_t current_boundary_index,
CoarseResult & result )
static

◆ bits_to_indices()

◆ build_every_nth_index_array()

static Array< int16_t > blender::index_mask::build_every_nth_index_array ( const int64_t n)
static

Definition at line 900 of file index_mask.cc.

References BLI_assert, data, and max_segment_size.

Referenced by get_every_nth_index().

◆ build_result_mask_segments()

◆ build_reverse_map()

template<typename T >
void blender::index_mask::build_reverse_map ( const IndexMask & mask,
MutableSpan< T > r_map )

Fill masked indices of r_map with the index of that item in the mask such that r_map[mask[i]] == i for the whole mask. The size of r_map needs to be at least mask.min_array_size().

Definition at line 31 of file index_mask.cc.

References BLI_assert, blender::MutableSpan< T >::fill(), and blender::MutableSpan< T >::size().

◆ build_reverse_map< int >()

template void blender::index_mask::build_reverse_map< int > ( const IndexMask & mask,
MutableSpan< int > r_map )

◆ build_static_indices_array()

std::array< int16_t, max_segment_size > blender::index_mask::build_static_indices_array ( )

Definition at line 44 of file index_mask.cc.

References data, and max_segment_size.

Referenced by get_static_indices_array().

◆ compute_eval_order()

◆ consolidate_index_mask_segments()

int64_t blender::index_mask::consolidate_index_mask_segments ( MutableSpan< IndexMaskSegment > segments,
IndexMaskMemory & memory )

Joins segments together based on heuristics. Generally, one wants as few segments as possible, but one also wants full-range-segments if possible and we don't want to copy too many indices around to reduce the number of segments.

Returns
Number of consolidated segments. Those are ordered to the beginning of the span.

Definition at line 221 of file index_mask.cc.

References blender::MutableSpan< T >::begin(), blender::IndexRange::drop_front(), blender::MutableSpan< T >::end(), blender::IndexRange::from_begin_end_inclusive(), get_static_indices_array(), blender::MutableSpan< T >::index_range(), blender::MutableSpan< T >::is_empty(), blender::MutableSpan< T >::last(), max_segment_size, blender::unique_sorted_indices::non_empty_as_range_try(), blender::unique_sorted_indices::non_empty_is_range(), blender::MutableSpan< T >::size(), and blender::Span< T >::take_front().

Referenced by blender::index_mask::IndexMask::from_indices(), and blender::index_mask::detail::from_predicate_impl().

◆ determine_exact_eval_mode()

static ExactEvalMode blender::index_mask::determine_exact_eval_mode ( const Expr & root_expression)
static

Uses a heuristic to decide which exact evaluation mode probably works best.

Definition at line 1056 of file index_mask_expression.cc.

References Bits, Indices, and blender::index_mask::Expr::terms.

Referenced by evaluate_expression().

◆ difference_index_mask_segments()

◆ evaluate_coarse()

static CoarseResult blender::index_mask::evaluate_coarse ( const Expr & root_expression,
const Span< const Expr * > eval_order,
const std::optional< IndexRange > eval_bounds = std::nullopt )
static

The coarse evaluation only looks at the index masks as a whole within the given bounds. This limitation allows it to do many operations in constant time independent of the number of indices within each mask. For example, it can detect that two full index masks that overlap result in a new full index mask when the union or intersection is computed.

For more complex index-masks, coarse evaluation outputs segments with type CoarseSegment::Type::Unknown. Those segments can be evaluated in more detail afterwards.

Parameters
root_expressionExpression to be evaluated.
eval_orderPre-computed evaluation order. All children of a term must come before the term itself.
eval_boundsIf given, the evaluation is restricted to those bounds. Otherwise, the full referenced masks are used.

Definition at line 476 of file index_mask_expression.cc.

References blender::index_mask::Expr::as_atomic(), blender::index_mask::Expr::as_difference(), blender::index_mask::Expr::as_intersection(), blender::index_mask::Expr::as_union(), blender::index_mask::Expr::Atomic, blender::index_mask::CoarseSegment::Copy, blender::index_mask::Expr::Difference, evaluate_coarse_difference(), evaluate_coarse_intersection(), evaluate_coarse_union(), blender::index_mask::Expr::expression_array_size(), blender::index_mask::CoarseSegment::Full, blender::index_mask::Expr::index, inline_expr_array_size, blender::index_mask::Expr::Intersection, blender::index_mask::AtomicExpr::mask, mask(), blender::index_mask::CoarseResult::segments, blender::index_mask::IndexMask::slice_content(), sort_course_boundaries(), blender::index_mask::Expr::terms, and blender::index_mask::Expr::Union.

Referenced by evaluate_coarse_and_split_until_segments_are_short().

◆ evaluate_coarse_and_split_until_segments_are_short()

◆ evaluate_coarse_difference()

◆ evaluate_coarse_intersection()

◆ evaluate_coarse_union()

◆ evaluate_exact_with_bits()

static IndexMaskSegment blender::index_mask::evaluate_exact_with_bits ( const Expr & root_expression,
LinearAllocator<> & allocator,
const IndexRange bounds,
const Span< const Expr * > eval_order )
static

Does an exact evaluation of the expression within the given bounds. The evaluation generally works in three steps:

  1. Convert input indices into bit spans.
  2. Use bit operations to evaluate the expression.
  3. Convert resulting bit span back to indices.

The trade-off here is that the actual expression evaluation is much faster but the conversions take some extra time. Therefore, this approach is best when the evaluation would otherwise take longer than the conversions which is usually the case for non-trivial expressions.

Definition at line 586 of file index_mask_expression.cc.

References blender::index_mask::Expr::as_atomic(), blender::index_mask::Expr::Atomic, b, bits_to_indices(), blender::bits::BitsPerInt, BLI_assert, blender::ceil_division(), blender::bits::copy_from_or(), blender::index_mask::Expr::Difference, blender::index_mask::Expr::expression_array_size(), blender::index_mask::Expr::index, blender::index_mask::Expr::Intersection, blender::index_mask::AtomicExpr::mask, max_segment_size, blender::bits::mix_into_first_expr(), blender::IndexRange::size(), blender::index_mask::IndexMask::slice_content(), blender::IndexRange::start(), blender::index_mask::IndexMask::to_bits(), and blender::index_mask::Expr::Union.

Referenced by evaluate_short_unknown_segments_exactly().

◆ evaluate_exact_with_indices()

static IndexMaskSegment blender::index_mask::evaluate_exact_with_indices ( const Expr & root_expression,
LinearAllocator<> & allocator,
const IndexRange bounds,
const Span< const Expr * > eval_order )
static

◆ evaluate_expression()

◆ evaluate_expression_impl()

static IndexMask blender::index_mask::evaluate_expression_impl ( const Expr & root_expression,
IndexMaskMemory & memory,
const ExactEvalMode exact_eval_mode )
static

◆ evaluate_short_unknown_segments_exactly()

◆ evaluated_segments_to_index_mask()

◆ from_bits_batch_predicate()

◆ get_every_nth_index()

static Span< int16_t > blender::index_mask::get_every_nth_index ( const int64_t n,
const int64_t repetitions,
IndexMaskMemory & memory )
static

Returns a span containing every nth index. This is optimized for a few special values of n which are cached. The returned indices have either static life-time, or they are freed when the given memory is freed.

Definition at line 916 of file index_mask.cc.

References blender::LinearAllocator< Allocator >::allocate_array(), BLI_assert, build_every_nth_index_array(), data, and max_segment_size.

Referenced by blender::index_mask::IndexMask::from_repeating().

◆ get_static_index_mask_for_min_size()

◆ get_static_indices_array()

◆ init_empty_mask()

void blender::index_mask::init_empty_mask ( IndexMaskData & data)
inline

◆ intersect_index_mask_segments()

◆ masked_fill()

◆ operator!=() [1/2]

bool blender::index_mask::operator!= ( const IndexMask & a,
const IndexMask & b )
inline

Definition at line 1079 of file BLI_index_mask.hh.

◆ operator!=() [2/2]

bool blender::index_mask::operator!= ( const RawMaskIterator & a,
const RawMaskIterator & b )
inline

Definition at line 587 of file BLI_index_mask.hh.

References b.

◆ operator<<()

std::ostream & blender::index_mask::operator<< ( std::ostream & stream,
const IndexMask & mask )

◆ operator==() [1/2]

bool blender::index_mask::operator== ( const IndexMask & a,
const IndexMask & b )

Definition at line 1118 of file index_mask.cc.

◆ operator==() [2/2]

bool blender::index_mask::operator== ( const RawMaskIterator & a,
const RawMaskIterator & b )
inline

Definition at line 592 of file BLI_index_mask.hh.

References b.

◆ optimized_foreach_index()

template<typename T , typename Fn >
void blender::index_mask::optimized_foreach_index ( const IndexMaskSegment segment,
const Fn fn )
inline

◆ optimized_foreach_index_with_pos()

template<typename T , typename Fn >
void blender::index_mask::optimized_foreach_index_with_pos ( const IndexMaskSegment segment,
const int64_t segment_pos,
const Fn fn )
inline

◆ segments_from_batch_predicate()

◆ segments_from_indices()

template<typename T , int64_t InlineBufferSize>
static void blender::index_mask::segments_from_indices ( const Span< T > indices,
LinearAllocator<> & allocator,
Vector< IndexMaskSegment, InlineBufferSize > & r_segments )
static

◆ segments_is_equal()

static bool blender::index_mask::segments_is_equal ( const IndexMaskSegment & a,
const IndexMaskSegment & b )
static

◆ sort_course_boundaries() [1/2]

static void blender::index_mask::sort_course_boundaries ( MutableSpan< CourseBoundary > boundaries)
static

Definition at line 117 of file index_mask_expression.cc.

References b.

Referenced by evaluate_coarse().

◆ sort_course_boundaries() [2/2]

static void blender::index_mask::sort_course_boundaries ( MutableSpan< DifferenceCourseBoundary > boundaries)
static

Definition at line 124 of file index_mask_expression.cc.

References b.

◆ union_index_mask_segments()

Variable Documentation

◆ has_segment_and_start_parameter

template<typename Fn >
bool blender::index_mask::has_segment_and_start_parameter
constexpr
Initial value:
=
std::is_invocable_r_v<void, Fn, IndexMaskSegment, int64_t> ||
std::is_invocable_r_v<void, Fn, IndexRange, int64_t>

Definition at line 791 of file BLI_index_mask.hh.

Referenced by blender::index_mask::IndexMask::foreach_segment(), and blender::index_mask::IndexMask::foreach_segment_optimized().

◆ inline_expr_array_size

int64_t blender::index_mask::inline_expr_array_size = 16
constexpr

Number of expression terms which don't require extra allocations in some places.

Definition at line 29 of file index_mask_expression.cc.

Referenced by evaluate_coarse().

◆ max_segment_size

◆ max_segment_size_mask_high

int64_t blender::index_mask::max_segment_size_mask_high = ~max_segment_size_mask_low
staticconstexpr

◆ max_segment_size_mask_low

int64_t blender::index_mask::max_segment_size_mask_low = max_segment_size - 1
staticconstexpr

Definition at line 43 of file BLI_index_mask.hh.

Referenced by blender::index_mask::IndexMask::IndexMask().

◆ max_segment_size_shift

int64_t blender::index_mask::max_segment_size_shift = 14
staticconstexpr

Constants that define the maximum segment size. Segment sizes are limited so that the indices within each segment can be stored as int16_t, which allows the mask to be stored much more compactly than if 32 or 64 bit ints would be used.

  • Using 8 bit ints does not work well, because then the maximum segment size would be too small to eliminate per-segment overhead in many cases and would also lead to many more segments.
  • The most-significant-bit is not used so that signed integers can be used which avoids common issues when mixing signed and unsigned ints.
  • The second most-significant bit is not used for indices so that max_segment_size itself can be stored in the int16_t.
  • The maximum number of indices in a segment is 16384, which is generally enough to make the overhead per segment negligible when processing large index masks.
  • A power of two is used for max_segment_size, because that allows for faster construction of index masks for index ranges.

Definition at line 41 of file BLI_index_mask.hh.

Referenced by blender::index_mask::IndexMask::IndexMask(), and blender::index_mask::IndexMask::IndexMask().

◆ segment_size_threshold

int64_t blender::index_mask::segment_size_threshold = 32
staticconstexpr

Smaller segments should generally be merged together.

Definition at line 134 of file index_mask_expression.cc.

Referenced by add_coarse_segment__copy(), add_coarse_segment__full(), and add_coarse_segment__unknown().