Blender V4.3
BLI_array_utils.hh
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
#pragma once

#include <numeric>

#include "BLI_generic_span.hh"
#include "BLI_generic_virtual_array.hh"
#include "BLI_index_mask.hh"
#include "BLI_offset_indices.hh"
#include "BLI_task.hh"
#include "BLI_virtual_array.hh"
15
namespace blender::array_utils {
22void copy(const GVArray &src, GMutableSpan dst, int64_t grain_size = 4096);
23template<typename T>
24inline void copy(const VArray<T> &src, MutableSpan<T> dst, const int64_t grain_size = 4096)
25{
26 BLI_assert(src.size() == dst.size());
27 threading::parallel_for(src.index_range(), grain_size, [&](const IndexRange range) {
28 src.materialize_to_uninitialized(range, dst);
29 });
30}
31
36template<typename T>
37inline void copy(const Span<T> src, MutableSpan<T> dst, const int64_t grain_size = 4096)
38{
39 BLI_assert(src.size() == dst.size());
40 threading::parallel_for(src.index_range(), grain_size, [&](const IndexRange range) {
41 dst.slice(range).copy_from(src.slice(range));
42 });
43}
44
49void copy(const GVArray &src,
50 const IndexMask &selection,
51 GMutableSpan dst,
52 int64_t grain_size = 4096);
53
58template<typename T>
59inline void copy(const Span<T> src,
60 const IndexMask &selection,
62 const int64_t grain_size = 4096)
63{
64 BLI_assert(src.size() == dst.size());
65 selection.foreach_index_optimized<int64_t>(GrainSize(grain_size),
66 [&](const int64_t i) { dst[i] = src[i]; });
67}
68
72template<typename T, typename IndexT>
73inline void scatter(const Span<T> src,
74 const Span<IndexT> indices,
76 const int64_t grain_size = 4096)
77{
78 BLI_assert(indices.size() == src.size());
79 threading::parallel_for(indices.index_range(), grain_size, [&](const IndexRange range) {
80 for (const int64_t i : range) {
81 dst[indices[i]] = src[i];
82 }
83 });
84}
85
86template<typename T>
87inline void scatter(const Span<T> src,
88 const IndexMask &indices,
90 const int64_t grain_size = 4096)
91{
92 BLI_assert(indices.size() == src.size());
93 BLI_assert(indices.min_array_size() <= dst.size());
94 indices.foreach_index_optimized<int64_t>(
95 GrainSize(grain_size),
96 [&](const int64_t index, const int64_t pos) { dst[index] = src[pos]; });
97}
98
102void gather(const GVArray &src,
103 const IndexMask &indices,
104 GMutableSpan dst,
105 int64_t grain_size = 4096);
106
110void gather(GSpan src, const IndexMask &indices, GMutableSpan dst, int64_t grain_size = 4096);
111
115template<typename T>
116inline void gather(const VArray<T> &src,
117 const IndexMask &indices,
118 MutableSpan<T> dst,
119 const int64_t grain_size = 4096)
120{
121 BLI_assert(indices.size() == dst.size());
122 threading::parallel_for(indices.index_range(), grain_size, [&](const IndexRange range) {
123 src.materialize_compressed_to_uninitialized(indices.slice(range), dst.slice(range));
124 });
125}
126
130template<typename T, typename IndexT>
131inline void gather(const Span<T> src,
132 const IndexMask &indices,
133 MutableSpan<T> dst,
134 const int64_t grain_size = 4096)
135{
136 BLI_assert(indices.size() == dst.size());
137 indices.foreach_segment(GrainSize(grain_size),
138 [&](const IndexMaskSegment segment, const int64_t segment_pos) {
139 for (const int64_t i : segment.index_range()) {
140 dst[segment_pos + i] = src[segment[i]];
141 }
142 });
143}
144
148template<typename T, typename IndexT>
149inline void gather(const Span<T> src,
150 const Span<IndexT> indices,
151 MutableSpan<T> dst,
152 const int64_t grain_size = 4096)
153{
154 BLI_assert(indices.size() == dst.size());
155 threading::parallel_for(indices.index_range(), grain_size, [&](const IndexRange range) {
156 for (const int64_t i : range) {
157 dst[i] = src[indices[i]];
158 }
159 });
160}
161
165template<typename T, typename IndexT>
166inline void gather(const VArray<T> &src,
167 const Span<IndexT> indices,
168 MutableSpan<T> dst,
169 const int64_t grain_size = 4096)
170{
171 BLI_assert(indices.size() == dst.size());
172 devirtualize_varray(src, [&](const auto &src) {
173 threading::parallel_for(indices.index_range(), grain_size, [&](const IndexRange range) {
174 for (const int64_t i : range) {
175 dst[i] = src[indices[i]];
176 }
177 });
178 });
179}
180
181template<typename T>
182inline void gather_group_to_group(const OffsetIndices<int> src_offsets,
183 const OffsetIndices<int> dst_offsets,
184 const IndexMask &selection,
185 const Span<T> src,
186 MutableSpan<T> dst)
187{
188 selection.foreach_index(GrainSize(512), [&](const int64_t src_i, const int64_t dst_i) {
189 dst.slice(dst_offsets[dst_i]).copy_from(src.slice(src_offsets[src_i]));
190 });
191}
192
193template<typename T>
194inline void gather_group_to_group(const OffsetIndices<int> src_offsets,
195 const OffsetIndices<int> dst_offsets,
196 const IndexMask &selection,
197 const VArray<T> src,
198 MutableSpan<T> dst)
199{
200 selection.foreach_index(GrainSize(512), [&](const int64_t src_i, const int64_t dst_i) {
201 src.materialize_compressed(src_offsets[src_i], dst.slice(dst_offsets[dst_i]));
202 });
203}
204
205template<typename T>
206inline void gather_to_groups(const OffsetIndices<int> dst_offsets,
207 const IndexMask &src_selection,
208 const Span<T> src,
209 MutableSpan<T> dst)
210{
211 src_selection.foreach_index(GrainSize(1024), [&](const int src_i, const int dst_i) {
212 dst.slice(dst_offsets[dst_i]).fill(src[src_i]);
213 });
214}
215
221void copy_group_to_group(OffsetIndices<int> src_offsets,
222 OffsetIndices<int> dst_offsets,
223 const IndexMask &selection,
224 GSpan src,
225 GMutableSpan dst);
226template<typename T>
228 OffsetIndices<int> dst_offsets,
229 const IndexMask &selection,
230 Span<T> src,
231 MutableSpan<T> dst)
232{
233 copy_group_to_group(src_offsets, dst_offsets, selection, GSpan(src), GMutableSpan(dst));
234}
235
244void count_indices(Span<int> indices, MutableSpan<int> counts);
245
246void invert_booleans(MutableSpan<bool> span);
247void invert_booleans(MutableSpan<bool> span, const IndexMask &mask);
248
249int64_t count_booleans(const VArray<bool> &varray);
250int64_t count_booleans(const VArray<bool> &varray, const IndexMask &mask);
251
252enum class BooleanMix {
253 None,
254 AllFalse,
255 AllTrue,
256 Mixed,
257};
258BooleanMix booleans_mix_calc(const VArray<bool> &varray, IndexRange range_to_check);
260{
261 return booleans_mix_calc(varray, varray.index_range());
262}
263
267template<typename T> inline Vector<IndexRange> find_all_ranges(const Span<T> span, const T &value)
268{
269 if (span.is_empty()) {
270 return Vector<IndexRange>();
271 }
272 Vector<IndexRange> ranges;
273 int64_t length = (span.first() == value) ? 1 : 0;
274 for (const int64_t i : span.index_range().drop_front(1)) {
275 if (span[i - 1] == value && span[i] != value) {
276 ranges.append(IndexRange::from_end_size(i, length));
277 length = 0;
278 }
279 else if (span[i] == value) {
280 length++;
281 }
282 }
283 if (length > 0) {
284 ranges.append(IndexRange::from_end_size(span.size(), length));
285 }
286 return ranges;
287}
288
293template<typename T> inline void fill_index_range(MutableSpan<T> span, const T start = 0)
294{
295 std::iota(span.begin(), span.end(), start);
296}
297
298template<typename T>
299bool indexed_data_equal(const Span<T> all_values, const Span<int> indices, const Span<T> values)
300{
301 for (const int i : indices.index_range()) {
302 if (all_values[indices[i]] != values[i]) {
303 return false;
304 }
305 }
306 return true;
307}
308
309bool indices_are_range(Span<int> indices, IndexRange range);
310
311} // namespace blender::array_utils
#define BLI_assert(a)
Definition BLI_assert.h:50
constexpr IndexRange drop_front(int64_t n) const
constexpr int64_t size() const
Definition BLI_span.hh:494
constexpr MutableSpan slice(const int64_t start, const int64_t size) const
Definition BLI_span.hh:574
constexpr T * end() const
Definition BLI_span.hh:549
constexpr T * begin() const
Definition BLI_span.hh:545
constexpr Span slice(int64_t start, int64_t size) const
Definition BLI_span.hh:138
constexpr const T & first() const
Definition BLI_span.hh:316
constexpr int64_t size() const
Definition BLI_span.hh:253
constexpr IndexRange index_range() const
Definition BLI_span.hh:402
constexpr bool is_empty() const
Definition BLI_span.hh:261
IndexRange index_range() const
void materialize_compressed(const IndexMask &mask, MutableSpan< T > r_span) const
void foreach_index(Fn &&fn) const
void copy(const GVArray &src, GMutableSpan dst, int64_t grain_size=4096)
void scatter(const Span< T > src, const Span< IndexT > indices, MutableSpan< T > dst, const int64_t grain_size=4096)
void copy_group_to_group(OffsetIndices< int > src_offsets, OffsetIndices< int > dst_offsets, const IndexMask &selection, GSpan src, GMutableSpan dst)
Vector< IndexRange > find_all_ranges(const Span< T > span, const T &value)
bool indexed_data_equal(const Span< T > all_values, const Span< int > indices, const Span< T > values)
BooleanMix booleans_mix_calc(const VArray< bool > &varray, IndexRange range_to_check)
void fill_index_range(MutableSpan< T > span, const T start=0)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:95
void devirtualize_varray(const VArray< T > &varray, const Func &func, bool enable=true)
__int64 int64_t
Definition stdint.h:89