mesh_calc_edges.cc
/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup bke
 */

#include "BLI_array_utils.hh"
#include "BLI_math_base.h"
#include "BLI_ordered_edge.hh"
#include "BLI_task.hh"
#include "BLI_threads.h"
#include "BLI_vector_set.hh"

#include "BKE_attribute.hh"
#include "BKE_attribute_math.hh"
#include "BKE_customdata.hh"
#include "BKE_mesh.hh"
namespace blender::bke {

namespace calc_edges {

/**
 * Return a hash value that is likely to be different in the low bits from the normal `hash()`
 * function. This is necessary to avoid collisions in #mesh_calc_edges.
 */
static uint64_t edge_hash_2(const OrderedEdge &edge)
{
  return edge.v_low;
}

using EdgeMap = VectorSet<OrderedEdge,
                          16,
                          DefaultProbingStrategy,
                          DefaultHash<OrderedEdge>,
                          DefaultEquality<OrderedEdge>,
                          SimpleVectorSetSlot<OrderedEdge, int>,
                          GuardedAllocator>;

static void reserve_hash_maps(const Mesh &mesh,
                              const bool keep_existing_edges,
                              MutableSpan<EdgeMap> edge_maps)
{
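  /* A rough guess: in a closed quad mesh every face has four edges and every edge is shared by
   * two faces, so there are about `faces_num * 2` edges in total. */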
  const int totedge_guess = std::max(keep_existing_edges ? mesh.edges_num : 0, mesh.faces_num * 2);
  threading::parallel_for_each(
      edge_maps, [&](EdgeMap &edge_map) { edge_map.reserve(totedge_guess / edge_maps.size()); });
}

static OffsetIndices<int> edge_map_offsets(const Span<EdgeMap> maps, Array<int> &r_sizes)
{
  r_sizes.reinitialize(maps.size() + 1);
  for (const int map_i : maps.index_range()) {
    r_sizes[map_i] = maps[map_i].size();
  }
  return offset_indices::accumulate_counts_to_offsets(r_sizes);
}

static int edge_to_hash_map_i(const OrderedEdge edge, const uint32_t parallel_mask)
{
  return parallel_mask & edge_hash_2(edge);
}
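
/* For example, with four maps `parallel_mask` is 0b11, so an edge whose lower vertex index is 6
 * (0b110) is assigned to map 2. Every task computes the same assignment independently. */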

static void add_existing_edges_to_hash_maps(const Mesh &mesh,
                                            const uint32_t parallel_mask,
                                            MutableSpan<EdgeMap> edge_maps)
{
  /* Assume existing edges are valid. */
  const Span<int2> edges = mesh.edges();
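  /* Every task scans all edges but only inserts the ones that hash to its own map. This does
   * redundant reads per task, but keeps the hash maps lock-free. */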
  threading::parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
    const int task_index = &edge_map - edge_maps.data();
    for (const int2 edge : edges) {
      const OrderedEdge ordered_edge(edge);
      /* Only add the edge when it belongs to this map. */
      if (task_index == edge_to_hash_map_i(ordered_edge, parallel_mask)) {
        edge_map.add(ordered_edge);
      }
    }
  });
}

static void add_face_edges_to_hash_maps(const Mesh &mesh,
                                        const uint32_t parallel_mask,
                                        MutableSpan<EdgeMap> edge_maps)
{
  const OffsetIndices<int> faces = mesh.faces();
  const Span<int> corner_verts = mesh.corner_verts();
  threading::parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
    const int task_index = &edge_map - edge_maps.data();
    for (const int face_i : faces.index_range()) {
      const IndexRange face = faces[face_i];
      for (const int corner : face) {
        const int vert = corner_verts[corner];
        const int vert_prev = corner_verts[bke::mesh::face_corner_prev(face, corner)];
        const OrderedEdge ordered_edge(vert_prev, vert);
        /* Only add the edge when it belongs to this map. */
        if (task_index == edge_to_hash_map_i(ordered_edge, parallel_mask)) {
          edge_map.add(ordered_edge);
        }
      }
    }
  });
}

static void serialize_and_initialize_deduplicated_edges(
    MutableSpan<EdgeMap> edge_maps,
    const OffsetIndices<int> edge_offsets,
    const OffsetIndices<int> prefix_skip_offsets,
    MutableSpan<int2> new_edges)
{
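  /* Example: if this map owns the global (deduplicated) range [10, 25) and its first 5 entries
   * are pre-existing edges from the prefix range [4, 9), then its 10 new edges land at
   * [10 - 4, 10 - 4 + 10) = [6, 16) in `new_edges`, which holds only newly created edges. The
   * pre-existing edges sit at the front of each map because they were inserted first. */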
  threading::parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
    const int task_index = &edge_map - edge_maps.data();
    if (edge_offsets[task_index].is_empty()) {
      return;
    }

    if (prefix_skip_offsets[task_index].size() == edge_offsets[task_index].size()) {
      return;
    }

    const IndexRange all_map_edges = edge_offsets[task_index];
    const IndexRange prefix_to_skip = prefix_skip_offsets[task_index];
    const IndexRange map_edges = IndexRange::from_begin_size(
        all_map_edges.start() - prefix_to_skip.start(),
        all_map_edges.size() - prefix_to_skip.size());

    MutableSpan<int2> result_edges = new_edges.slice(map_edges);
    result_edges.copy_from(edge_map.as_span().drop_front(prefix_to_skip.size()).cast<int2>());
  });
}

static void update_edge_indices_in_face_loops(const OffsetIndices<int> faces,
                                              const Span<int> corner_verts,
                                              const Span<EdgeMap> edge_maps,
                                              const uint32_t parallel_mask,
                                              const OffsetIndices<int> edge_offsets,
                                              MutableSpan<int> corner_edges)
{
  threading::parallel_for(faces.index_range(), 100, [&](IndexRange range) {
    for (const int face_index : range) {
      const IndexRange face = faces[face_index];
      for (const int corner : face) {
        const int vert = corner_verts[corner];
        const int vert_next = corner_verts[bke::mesh::face_corner_next(face, corner)];
        const OrderedEdge ordered_edge(vert, vert_next);
        const int task_index = edge_to_hash_map_i(ordered_edge, parallel_mask);
        const EdgeMap &edge_map = edge_maps[task_index];
        const int edge_i = edge_map.index_of(ordered_edge);
        const int edge_index = edge_offsets[task_index][edge_i];
        corner_edges[corner] = edge_index;
      }
    }
  });
}

static int get_parallel_maps_count(const Mesh &mesh)
{
  /* Don't use parallelization when the mesh is small. */
  if (mesh.faces_num < 1000) {
    return 1;
  }
  /* Use at most 8 separate hash tables. Using more threads has diminishing returns. These
   * threads are better off doing something more useful instead. */
  const int system_thread_count = BLI_system_thread_count();
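  /* The count must be a power of two so that `parallel_mask = count - 1` selects exactly the low
   * bits of the hash (see #mesh_calc_edges below). */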
  return power_of_2_min_i(std::min(8, system_thread_count));
}

static void clear_hash_tables(MutableSpan<EdgeMap> edge_maps)
{
  threading::parallel_for_each(edge_maps, [](EdgeMap &edge_map) { edge_map.clear(); });
}

static IndexMask mask_first_distinct_edges(const Span<int2> edges,
                                           const IndexMask &edges_to_check,
                                           const Span<EdgeMap> edge_maps,
                                           const uint32_t parallel_mask,
                                           const OffsetIndices<int> edge_offsets,
                                           IndexMaskMemory &memory)
{
  if (edges_to_check.is_empty()) {
    return {};
  }

  constexpr int no_original_edge = std::numeric_limits<int>::max();
  Array<int> map_edge_to_first_original(edge_offsets.total_size());
  map_edge_to_first_original.as_mutable_span().fill(no_original_edge);

  /* TODO: A lock-free parallel version, e.g. with something like `atomic::min<T>(T &, T)` in
   * BLI? */
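  /* First pass: for every deduplicated edge, remember the smallest original index that refers to
   * it. */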
  edges_to_check.foreach_index_optimized<int>([&](const int edge_i) {
    const OrderedEdge edge = edges[edge_i];
    const int map_i = calc_edges::edge_to_hash_map_i(edge, parallel_mask);
    const int edge_index = edge_maps[map_i].index_of(edge);

    int &original_edge = map_edge_to_first_original[edge_offsets[map_i][edge_index]];
    original_edge = math::min(original_edge, edge_i);
  });

  /* NOTE: #map_edge_to_first_original may still contain #no_original_edge when the edges are not
   * distinct and #edges_to_check does not cover all of them. */

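  /* Second pass: keep exactly those edges that are the first occurrence of their deduplicated
   * edge. */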
  return IndexMask::from_predicate(
      edges_to_check, GrainSize(2048), memory, [&](const int src_edge_i) {
        const OrderedEdge edge = edges[src_edge_i];
        const int map_i = calc_edges::edge_to_hash_map_i(edge, parallel_mask);
        const int edge_index = edge_maps[map_i].index_of(edge);
        return map_edge_to_first_original[edge_offsets[map_i][edge_index]] == src_edge_i;
      });
}

static void map_edge_to_span_index(const Span<int2> edges,
                                   const Span<EdgeMap> edge_maps,
                                   const uint32_t parallel_mask,
                                   const OffsetIndices<int> edge_offsets,
                                   MutableSpan<int> indices)
{
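  /* Each task walks all edges in their original order and counts the ones belonging to its own
   * map. This relies on the #VectorSet keeping its keys in insertion order (and on `edges` being
   * distinct at the call site), so the k-th key of a map corresponds to the k-th matching edge
   * encountered here. */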
  threading::parallel_for_each(edge_maps.index_range(), [&](const int map_i) {
    int edge_map_iter = 0;
    for (const int edge_i : edges.index_range()) {
      const int edge_map = calc_edges::edge_to_hash_map_i(edges[edge_i], parallel_mask);
      if (map_i != edge_map) {
        continue;
      }
      indices[edge_offsets[edge_map][edge_map_iter]] = edge_i;
      edge_map_iter++;
    }
  });
}

}  // namespace calc_edges

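/**
 * \note High-level flow (an editorial summary): existing edges (optionally) and all face edges
 * are deduplicated into the per-thread hash maps, per-map offset ranges are computed, the
 * `.corner_edge` and `.edge_verts` attributes are rebuilt, and remaining edge attribute layers
 * are remapped so that surviving edges keep their data while new edges receive default values.
 */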
void mesh_calc_edges(Mesh &mesh,
                     bool keep_existing_edges,
                     const bool select_new_edges,
                     const AttributeFilter &attribute_filter)
{
  if (mesh.edges_num == 0 && mesh.corners_num == 0) {
    /* BLI_assert(BKE_mesh_is_valid(&mesh)); */
    return;
  }

  if (mesh.corners_num == 0 && !keep_existing_edges) {
    CustomData_free(&mesh.edge_data);
    mesh.edges_num = 0;
    mesh.tag_loose_edges_none();
    /* BLI_assert(BKE_mesh_is_valid(&mesh)); */
    return;
  }

  BLI_assert(std::all_of(mesh.edges().begin(), mesh.edges().end(), [&](const int2 edge) {
    return edge.x != edge.y;
  }));

  /* Parallelization is achieved by having multiple hash tables for different subsets of edges.
   * Each edge is assigned to one of the hash maps based on the lower bits of a hash value. */
  const int parallel_maps = calc_edges::get_parallel_maps_count(mesh);
  BLI_assert(is_power_of_2_i(parallel_maps));
  const uint32_t parallel_mask = uint32_t(parallel_maps) - 1;
  Array<calc_edges::EdgeMap> edge_maps(parallel_maps);
  calc_edges::reserve_hash_maps(mesh, keep_existing_edges, edge_maps);

  Array<int> original_edge_maps_prefix_size(edge_maps.size() + 1, 0);
  if (keep_existing_edges) {
    calc_edges::add_existing_edges_to_hash_maps(mesh, parallel_mask, edge_maps);
    calc_edges::edge_map_offsets(edge_maps, original_edge_maps_prefix_size);
  }
  const OffsetIndices<int> original_edge_maps_prefix(original_edge_maps_prefix_size.as_span());
  const int original_unique_edge_num = original_edge_maps_prefix.total_size();
  const bool original_edges_are_distinct = original_unique_edge_num == mesh.edges_num;

  if (mesh.corners_num == 0 && keep_existing_edges && original_edges_are_distinct) {
    /* BLI_assert(BKE_mesh_is_valid(&mesh)); */
    return;
  }

  calc_edges::add_face_edges_to_hash_maps(mesh, parallel_mask, edge_maps);
  Array<int> edge_sizes;
  const OffsetIndices<int> edge_offsets = calc_edges::edge_map_offsets(edge_maps, edge_sizes);
  const bool no_new_edges = edge_offsets.total_size() == original_unique_edge_num;

  MutableAttributeAccessor dst_attributes = mesh.attributes_for_write();
  dst_attributes.add<int>(".corner_edge", AttrDomain::Corner, AttributeInitConstruct());
  MutableSpan<int> corner_edges = mesh.corner_edges_for_write();
#ifndef NDEBUG
  corner_edges.fill(-1);
#endif

  const int result_edges_num = edge_offsets.total_size();

  const OffsetIndices<int> faces = mesh.faces();
  const Span<int2> original_edges = mesh.edges();
  const Span<int> corner_verts = mesh.corner_verts();
  if (keep_existing_edges && original_edges_are_distinct && no_new_edges) {
    /* There should be a way for the caller to decide whether the corner edge attribute is
     * generated even in this case. TODO: Make this optional. */
    calc_edges::update_edge_indices_in_face_loops(
        faces, corner_verts, edge_maps, parallel_mask, edge_offsets, corner_edges);

    Array<int> edge_map_to_result_index(result_edges_num);
#ifndef NDEBUG
    edge_map_to_result_index.as_mutable_span().fill(-1);
#endif
    calc_edges::map_edge_to_span_index(
        original_edges, edge_maps, parallel_mask, edge_offsets, edge_map_to_result_index);
    array_utils::gather(edge_map_to_result_index.as_span(), corner_edges.as_span(), corner_edges);

    BLI_assert(!corner_edges.contains(-1));
    /* BLI_assert(BKE_mesh_is_valid(&mesh)); */
    return;
  }

  IndexMaskMemory memory;
  IndexRange back_range_of_new_edges;
  IndexMask src_to_dst_mask;

  MutableSpan<int2> edge_verts(MEM_malloc_arrayN<int2>(result_edges_num, AT), result_edges_num);
#ifndef NDEBUG
  edge_verts.fill(int2(-1));
#endif

  if (keep_existing_edges) {
    back_range_of_new_edges = IndexRange(result_edges_num).drop_front(original_unique_edge_num);

    if (original_edges_are_distinct) {
      src_to_dst_mask = IndexRange(original_unique_edge_num);
    }
    else {
      src_to_dst_mask = calc_edges::mask_first_distinct_edges(original_edges,
                                                              original_edges.index_range(),
                                                              edge_maps,
                                                              parallel_mask,
                                                              edge_offsets,
                                                              memory);
    }
    BLI_assert(src_to_dst_mask.size() == original_unique_edge_num);

    array_utils::gather(
        original_edges, src_to_dst_mask, edge_verts.take_front(original_unique_edge_num));

    /* To reduce the permutation of edge attributes, the result edge indices should stay close to
     * the original ones. */
    Array<int> edge_map_to_result_index(result_edges_num);
#ifndef NDEBUG
    edge_map_to_result_index.as_mutable_span().fill(-1);
#endif

    if (original_edges_are_distinct) {
      /* TODO: Could we group edges by their low vertex? Or by hash, but with a `Span<int>` of
       * edges per group? */
      calc_edges::map_edge_to_span_index(original_edges.take_front(mesh.edges_num),
                                         edge_maps,
                                         parallel_mask,
                                         edge_offsets,
                                         edge_map_to_result_index);
    }
    else {
      src_to_dst_mask.foreach_index(
          GrainSize(1024), [&](const int src_index, const int dst_index) {
            const OrderedEdge edge = original_edges[src_index];
            const int map_i = calc_edges::edge_to_hash_map_i(edge, parallel_mask);
            const int edge_index = edge_maps[map_i].index_of(edge);
            edge_map_to_result_index[edge_offsets[map_i][edge_index]] = dst_index;
          });
    }

    if (!no_new_edges) {
      BLI_assert(edge_offsets.data().size() == original_edge_maps_prefix.data().size());

      /* TODO: Check whether all new edges already form a contiguous range. */
      const int new_edges_start = original_unique_edge_num;
      for (const int map_i : edge_maps.index_range()) {
        const IndexRange map_edges = edge_offsets[map_i];
        const IndexRange prefix_edges = original_edge_maps_prefix[map_i];
        const IndexRange new_edges_in_map = map_edges.drop_front(prefix_edges.size());

        const int new_edges_start_pos = map_edges.start() - prefix_edges.start();
        const int map_new_edges_start = new_edges_start + new_edges_start_pos;
        array_utils::fill_index_range<int>(
            edge_map_to_result_index.as_mutable_span().slice(new_edges_in_map),
            map_new_edges_start);
      }
    }

    BLI_assert(!edge_map_to_result_index.as_span().contains(-1));
    calc_edges::update_edge_indices_in_face_loops(
        faces, corner_verts, edge_maps, parallel_mask, edge_offsets, corner_edges);
    array_utils::gather(edge_map_to_result_index.as_span(), corner_edges.as_span(), corner_edges);

    calc_edges::serialize_and_initialize_deduplicated_edges(
        edge_maps,
        edge_offsets,
        original_edge_maps_prefix,
        edge_verts.drop_front(original_unique_edge_num));
  }
  else {
    if (mesh.edges_num != 0) {
      const IndexMask original_corner_edges = IndexMask::from_predicate(
          IndexRange(mesh.edges_num), GrainSize(2048), memory, [&](const int edge_i) {
            const OrderedEdge edge = original_edges[edge_i];
            const int map_i = calc_edges::edge_to_hash_map_i(edge, parallel_mask);
            return edge_maps[map_i].contains(edge);
          });
      src_to_dst_mask = calc_edges::mask_first_distinct_edges(
          original_edges, original_corner_edges, edge_maps, parallel_mask, edge_offsets, memory);

      const int old_corner_edges_num = src_to_dst_mask.size();
      back_range_of_new_edges = IndexRange(result_edges_num).drop_front(old_corner_edges_num);

      Array<int> edge_map_to_result_index;
      if (!src_to_dst_mask.is_empty()) {
        /* TODO: Check whether the mask is a contiguous range. */
        edge_map_to_result_index.reinitialize(result_edges_num);
        edge_map_to_result_index.as_mutable_span().fill(1);
        src_to_dst_mask.foreach_index([&](const int original_edge_i) {
          const OrderedEdge edge = original_edges[original_edge_i];
          const int edge_map = calc_edges::edge_to_hash_map_i(edge, parallel_mask);
          const int edge_index = edge_maps[edge_map].index_of(edge);
          edge_map_to_result_index[edge_offsets[edge_map][edge_index]] = 0;
        });

        offset_indices::accumulate_counts_to_offsets(edge_map_to_result_index,
                                                     old_corner_edges_num);

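        /* `edge_map_to_result_index` now holds offsets: the slots for kept original edges were
         * zeroed above, so every new edge received a unique index starting at
         * `old_corner_edges_num`. The kept slots are overwritten with their final destination
         * indices below. */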
        src_to_dst_mask.foreach_index([&](const int original_edge_i, const int dst_edge_i) {
          const OrderedEdge edge = original_edges[original_edge_i];
          const int edge_map = calc_edges::edge_to_hash_map_i(edge, parallel_mask);
          const int edge_index = edge_maps[edge_map].index_of(edge);
          edge_map_to_result_index[edge_offsets[edge_map][edge_index]] = dst_edge_i;
        });

        array_utils::gather(
            original_edges, src_to_dst_mask, edge_verts.take_front(old_corner_edges_num));

        threading::parallel_for_each(edge_maps, [&](calc_edges::EdgeMap &edge_map) {
          const int task_index = &edge_map - edge_maps.data();
          if (edge_offsets[task_index].is_empty()) {
            return;
          }

          array_utils::scatter(
              edge_map.as_span().cast<int2>(),
              edge_map_to_result_index.as_span().slice(edge_offsets[task_index]),
              edge_verts);
        });

        calc_edges::update_edge_indices_in_face_loops(
            faces, corner_verts, edge_maps, parallel_mask, edge_offsets, corner_edges);

        array_utils::gather(
            edge_map_to_result_index.as_span(), corner_edges.as_span(), corner_edges);
      }
      else {
        calc_edges::update_edge_indices_in_face_loops(
            faces, corner_verts, edge_maps, parallel_mask, edge_offsets, corner_edges);
        calc_edges::serialize_and_initialize_deduplicated_edges(
            edge_maps, edge_offsets, original_edge_maps_prefix, edge_verts);
      }
    }
    else {
      back_range_of_new_edges = IndexRange(result_edges_num);
      BLI_assert(original_edge_maps_prefix.total_size() == 0);
      calc_edges::update_edge_indices_in_face_loops(
          faces, corner_verts, edge_maps, parallel_mask, edge_offsets, corner_edges);
      calc_edges::serialize_and_initialize_deduplicated_edges(
          edge_maps, edge_offsets, original_edge_maps_prefix, edge_verts);
    }
  }

  BLI_assert(std::all_of(
      edge_verts.begin(), edge_verts.end(), [&](const int2 edge) { return edge.x != edge.y; }));

  BLI_assert(!corner_edges.contains(-1));
  BLI_assert(!edge_verts.contains(int2(-1)));

  BLI_assert(src_to_dst_mask.size() + back_range_of_new_edges.size() == result_edges_num);
  BLI_assert(back_range_of_new_edges.one_after_last() == result_edges_num);

  Vector<std::string> attributes_to_drop;
  /* TODO: An `all_pass()` method on #attribute_filter is needed to know whether this loop can be
   * skipped entirely. */
  mesh.attributes().foreach_attribute([&](const AttributeIter &attribute) {
    if (attribute.data_type == AttrType::String) {
      return;
    }
    if (attribute.domain != AttrDomain::Edge) {
      return;
    }
    if (!attribute_filter.allow_skip(attribute.name)) {
      return;
    }
    attributes_to_drop.append(attribute.name);
  });

  for (const StringRef attribute : attributes_to_drop) {
    dst_attributes.remove(attribute);
  }

  CustomData_free_layer_named(&mesh.edge_data, ".edge_verts");
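  /* Reallocate every remaining edge attribute layer to the new size: surviving original edges
   * are gathered to the front, and the newly created edges at the back are filled with the
   * type's default value (or -1 for CD_ORIGINDEX). */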
  for (CustomDataLayer &layer : MutableSpan(mesh.edge_data.layers, mesh.edge_data.totlayer)) {
    const void *src_data = layer.data;
    const size_t elem_size = CustomData_sizeof(eCustomDataType(layer.type));

    void *dst_data = MEM_malloc_arrayN(result_edges_num, elem_size, AT);
    if (src_data != nullptr) {
      if (layer.type == CD_ORIGINDEX) {
        const Span src(static_cast<const int *>(src_data), mesh.edges_num);
        MutableSpan dst(static_cast<int *>(dst_data), result_edges_num);
        array_utils::gather(src, src_to_dst_mask, dst.take_front(src_to_dst_mask.size()));
        dst.slice(back_range_of_new_edges).fill(-1);
      }
      else {
        const CPPType *type = custom_data_type_to_cpp_type(eCustomDataType(layer.type));
        BLI_assert(type != nullptr);
        const GSpan src(type, src_data, mesh.edges_num);
        GMutableSpan dst(type, dst_data, result_edges_num);
        array_utils::gather(src, src_to_dst_mask, dst.take_front(src_to_dst_mask.size()));
        type->fill_assign_n(type->default_value(),
                            dst.slice(back_range_of_new_edges).data(),
                            dst.slice(back_range_of_new_edges).size());
      }
      layer.sharing_info->remove_user_and_delete_if_last();
    }

    layer.data = dst_data;
    layer.sharing_info = implicit_sharing::info_for_mem_free(dst_data);
  }

  mesh.edges_num = result_edges_num;

  dst_attributes.add<int2>(
      ".edge_verts", AttrDomain::Edge, AttributeInitMoveArray(edge_verts.data()));

  if (select_new_edges) {
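    /* Two cases: when either all or none of the edges are new, a single-value virtual array
     * avoids allocating a span; otherwise the selection is written out explicitly. */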
    dst_attributes.remove(".select_edge");
    if (ELEM(back_range_of_new_edges.size(), 0, mesh.edges_num)) {
      const bool fill_value = back_range_of_new_edges.size() == mesh.edges_num;
      dst_attributes.add<bool>(
          ".select_edge",
          AttrDomain::Edge,
          AttributeInitVArray(VArray<bool>::from_single(fill_value, mesh.edges_num)));
    }
    else {
      SpanAttributeWriter<bool> select_edge = dst_attributes.lookup_or_add_for_write_span<bool>(
          ".select_edge", AttrDomain::Edge);
      select_edge.span.drop_back(back_range_of_new_edges.size()).fill(false);
      select_edge.span.take_back(back_range_of_new_edges.size()).fill(true);
      select_edge.finish();
    }
  }

  if (!keep_existing_edges) {
    /* All edges are rebuilt from the faces, so there are no loose edges. */
    mesh.tag_loose_edges_none();
  }

  /* Explicitly clear the edge maps, because that way the clearing can be parallelized. */
  calc_edges::clear_hash_tables(edge_maps);

  /* BLI_assert(BKE_mesh_is_valid(&mesh)); */
}

void mesh_calc_edges(Mesh &mesh, bool keep_existing_edges, const bool select_new_edges)
{
  mesh_calc_edges(mesh, keep_existing_edges, select_new_edges, AttributeFilter::default_filter());
}

}  // namespace blender::bke