/* mesh_triangulate.cc — Blender 5.0 source (recovered from a Doxygen export). */
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
#include "BLI_array_utils.hh"
#include "BLI_enumerable_thread_specific.hh"
#include "BLI_heap.h"
#include "BLI_index_mask.hh"
#include "BLI_math_geom.h"
#include "BLI_math_matrix.h"
#include "BLI_math_vector.hh"
#include "BLI_memarena.h"
#include "BLI_offset_indices.hh"
#include "BLI_polyfill_2d.h"
#include "BLI_polyfill_2d_beautify.h"
#include "BLI_task.hh"
#include "BLI_vector_set.hh"

#include "BKE_attribute.hh"
#include "BKE_attribute_math.hh"
#include "BKE_customdata.hh"
#include "BKE_mesh.hh"

#include "GEO_mesh_triangulate.hh"
25
26namespace blender::geometry {
27
28static void gather(const Span<int> src, const Span<int16_t> indices, MutableSpan<int> dst)
29{
30 for (const int i : indices.index_range()) {
31 dst[i] = src[indices[i]];
32 }
33}
34
37 Vector<int> &dst)
38{
40 return src.slice(indices[0], indices.size());
41 }
42 dst.reinitialize(indices.size());
43 gather(src, indices, dst);
44 return dst.as_span();
45}
46
49 Vector<int> &dst)
50{
51 return gather_or_reference(src.drop_front(mask.offset()), mask.base_span(), dst);
52}
53
58static Span<float3> face_normals_if_worthwhile(const Mesh &src_mesh, const int selection_size)
59{
60 if (src_mesh.runtime->face_normals_cache.is_cached()) {
61 return src_mesh.face_normals();
62 }
63 if (selection_size > src_mesh.faces_num / 4) {
64 return src_mesh.face_normals();
65 }
66 return {};
67}
68
69static void copy_loose_vert_hint(const Mesh &src, Mesh &dst)
70{
71 const auto &src_cache = src.runtime->loose_verts_cache;
72 if (src_cache.is_cached() && src_cache.data().count == 0) {
73 dst.tag_loose_verts_none();
74 }
75}
76
77namespace quad {
78
/**
 * The two possible ways to split a quad into two triangles: along the diagonal connecting
 * corners 0 and 2, or along the diagonal connecting corners 1 and 3.
 */
enum class QuadDirection : int8_t {
  Edge_0_2,
  Edge_1_3,
};
92
98 const float3 &v1,
99 const float3 &v2,
100 const float3 &v3)
101{
102 const int flip_flag = is_quad_flip_v3(v1, v2, v3, v0);
103 if (UNLIKELY(flip_flag & (1 << 0))) {
105 }
106 if (UNLIKELY(flip_flag & (1 << 1))) {
108 }
109 return BLI_polyfill_edge_calc_rotate_beauty__area(v1, v2, v3, v0, false) > 0.0f ?
112}
113
114static void calc_quad_directions(const Span<float3> positions,
115 const Span<int> face_offsets,
116 const Span<int> corner_verts,
117 const TriangulateQuadMode quad_mode,
119{
120 switch (quad_mode) {
122 directions.fill(QuadDirection::Edge_0_2);
123 break;
124 }
126 directions.fill(QuadDirection::Edge_1_3);
127 break;
128 }
130 for (const int i : face_offsets.index_range()) {
131 const Span<int> verts = corner_verts.slice(face_offsets[i], 4);
132 const float dist_0_2 = math::distance_squared(positions[verts[0]], positions[verts[2]]);
133 const float dist_1_3 = math::distance_squared(positions[verts[1]], positions[verts[3]]);
134 directions[i] = dist_0_2 < dist_1_3 ? QuadDirection::Edge_0_2 : QuadDirection::Edge_1_3;
135 }
136 break;
137 }
139 for (const int i : face_offsets.index_range()) {
140 const Span<int> verts = corner_verts.slice(face_offsets[i], 4);
141 const float dist_0_2 = math::distance_squared(positions[verts[0]], positions[verts[2]]);
142 const float dist_1_3 = math::distance_squared(positions[verts[1]], positions[verts[3]]);
143 directions[i] = dist_0_2 > dist_1_3 ? QuadDirection::Edge_0_2 : QuadDirection::Edge_1_3;
144 }
145 break;
146 }
148 for (const int i : face_offsets.index_range()) {
149 const Span<int> verts = corner_verts.slice(face_offsets[i], 4);
150 directions[i] = calc_quad_direction_beauty(
151 positions[verts[0]], positions[verts[1]], positions[verts[2]], positions[verts[3]]);
152 }
153
154 break;
155 }
156 }
157}
158
159static void calc_corner_tris(const Span<int> face_offsets,
160 const Span<QuadDirection> directions,
161 MutableSpan<int3> corner_tris)
162{
163 for (const int i : face_offsets.index_range()) {
164 MutableSpan<int> quad_map = corner_tris.slice(2 * i, 2).cast<int>();
165 /* These corner orders give new edges based on the first vertex of each triangle. */
166 switch (directions[i]) {
168 quad_map.copy_from({2, 0, 1, 0, 2, 3});
169 break;
171 quad_map.copy_from({1, 3, 0, 3, 1, 2});
172 break;
173 }
174 const int src_face_start = face_offsets[i];
175 for (int &i : quad_map) {
176 i += src_face_start;
177 }
178 }
179}
180
181static void calc_corner_tris(const Span<float3> positions,
182 const OffsetIndices<int> src_faces,
183 const Span<int> src_corner_verts,
184 const IndexMask &quads,
185 const TriangulateQuadMode quad_mode,
186 MutableSpan<int3> corner_tris)
187{
188 struct TLS {
189 Vector<int> offsets;
190 Vector<QuadDirection> directions;
191 };
193
194 quads.foreach_segment(GrainSize(1024), [&](const IndexMaskSegment quads, const int64_t pos) {
195 TLS &data = tls.local();
196 data.directions.reinitialize(quads.size());
197
198 /* Find the offsets of each face in the local selection. We can gather them together even if
199 * they aren't contiguous because we only need to know the start of each face; the size is
200 * just 4. */
201 const Span<int> offsets = gather_or_reference(src_faces.data(), quads, data.offsets);
202 calc_quad_directions(positions, offsets, src_corner_verts, quad_mode, data.directions);
203 const IndexRange tris_range(pos * 2, offsets.size() * 2);
204 quad::calc_corner_tris(offsets, data.directions, corner_tris.slice(tris_range));
205 });
206}
207
208} // namespace quad
209
211 const IndexMaskSegment selection,
212 MutableSpan<int> dst_offsets)
213{
214 int offset = 0;
215 for (const int64_t i : selection.index_range()) {
216 dst_offsets[i] = offset;
217 offset += src_offsets[selection[i]].size();
218 }
219 dst_offsets.last() = offset;
220 return OffsetIndices<int>(dst_offsets);
221}
222
223namespace ngon {
224
226 const IndexMask &ngons,
227 MutableSpan<int> face_offset_data)
228{
229 ngons.foreach_index(GrainSize(2048), [&](const int face, const int mask) {
230 face_offset_data[mask] = bke::mesh::face_triangles_num(src_faces[face].size());
231 });
232 return offset_indices::accumulate_counts_to_offsets(face_offset_data);
233}
234
235static void calc_corner_tris(const Span<float3> positions,
236 const OffsetIndices<int> src_faces,
237 const Span<int> src_corner_verts,
238 const Span<float3> face_normals,
239 const IndexMask &ngons,
240 const OffsetIndices<int> tris_by_ngon,
241 const TriangulateNGonMode ngon_mode,
242 MutableSpan<int3> corner_tris)
243{
244 struct LocalData {
245 Vector<float3x3> projections;
246 Array<int> offset_data;
247 Vector<float2> projected_positions;
248
249 /* Only used for the "Beauty" method. */
250 MemArena *arena = nullptr;
251 Heap *heap = nullptr;
252
253 ~LocalData()
254 {
255 if (arena) {
256 BLI_memarena_free(arena);
257 }
258 if (heap) {
259 BLI_heap_free(heap, nullptr);
260 }
261 }
262 };
264
265 ngons.foreach_segment(GrainSize(128), [&](const IndexMaskSegment ngons, const int pos) {
266 LocalData &data = tls.local();
267
268 /* In order to simplify and "parallelize" the next loops, gather offsets used to group an array
269 * large enough for all the local face corners. */
270 data.offset_data.reinitialize(ngons.size() + 1);
271 const OffsetIndices local_corner_offsets = gather_selected_offsets(
272 src_faces, ngons, data.offset_data);
273
274 /* Use face normals to build projection matrices to make the face positions 2D. */
275 data.projections.reinitialize(ngons.size());
276 MutableSpan<float3x3> projections = data.projections;
277 if (face_normals.is_empty()) {
278 for (const int i : ngons.index_range()) {
279 const IndexRange src_face = src_faces[ngons[i]];
280 const Span<int> face_verts = src_corner_verts.slice(src_face);
281 const float3 normal = bke::mesh::face_normal_calc(positions, face_verts);
282 axis_dominant_v3_to_m3_negate(projections[i].ptr(), normal);
283 }
284 }
285 else {
286 for (const int i : ngons.index_range()) {
287 axis_dominant_v3_to_m3_negate(projections[i].ptr(), face_normals[ngons[i]]);
288 }
289 }
290
291 /* Project the face positions into 2D using the matrices calculated above. */
292 data.projected_positions.reinitialize(local_corner_offsets.total_size());
293 MutableSpan<float2> projected_positions = data.projected_positions;
294 for (const int i : ngons.index_range()) {
295 const IndexRange src_face = src_faces[ngons[i]];
296 const Span<int> face_verts = src_corner_verts.slice(src_face);
297 const float3x3 &matrix = projections[i];
298
299 MutableSpan<float2> positions_2d = projected_positions.slice(local_corner_offsets[i]);
300 for (const int i : face_verts.index_range()) {
301 mul_v2_m3v3(positions_2d[i], matrix.ptr(), positions[face_verts[i]]);
302 }
303 }
304
305 if (ngon_mode == TriangulateNGonMode::Beauty) {
306 if (!data.arena) {
308 }
309 if (!data.heap) {
311 }
312 }
313
314 /* Calculate the triangulation of corners indices local to each face. */
315 for (const int i : ngons.index_range()) {
316 const Span<float2> positions_2d = projected_positions.slice(local_corner_offsets[i]);
317 const IndexRange tris_range = tris_by_ngon[pos + i];
318 MutableSpan<int> map = corner_tris.slice(tris_range).cast<int>();
319 BLI_polyfill_calc(reinterpret_cast<const float (*)[2]>(positions_2d.data()),
320 positions_2d.size(),
321 1,
322 reinterpret_cast<uint(*)[3]>(map.data()));
323 if (ngon_mode == TriangulateNGonMode::Beauty) {
324 BLI_polyfill_beautify(reinterpret_cast<const float (*)[2]>(positions_2d.data()),
325 positions_2d.size(),
326 reinterpret_cast<uint(*)[3]>(map.data()),
327 data.arena,
328 data.heap);
330 }
331 }
332
333 /* "Globalize" the triangulation created above so the map source indices reference _all_ of the
334 * source vertices, not just within the source face. */
335 for (const int i : ngons.index_range()) {
336 const IndexRange tris_range = tris_by_ngon[pos + i];
337 const int src_face_start = src_faces[ngons[i]].start();
338 MutableSpan<int> map = corner_tris.slice(tris_range).cast<int>();
339 for (int &vert : map) {
340 vert += src_face_start;
341 }
342 }
343 });
344}
345
346} // namespace ngon
347
348struct TriKey {
350 /* The lowest vertex index in the face is used as a hash value and a way to compare face keys to
351 * avoid memory lookup in all false cases. */
353
354 TriKey(const int tri_index, Span<int3> tris)
356 {
357 [[maybe_unused]] const int3 &tri_verts = tris[tri_index];
358 BLI_assert(std::is_sorted(&tri_verts[0], &tri_verts[0] + 3));
359 }
360};
361
362struct FaceHash {
363 uint64_t operator()(const TriKey value) const
364 {
365 return uint64_t(value.tri_lower_vert);
366 }
367
368 uint64_t operator()(const int3 value) const
369 {
370 BLI_assert(std::is_sorted(&value[0], &value[0] + 3));
371 return uint64_t(value[0]);
372 }
373};
374
377 bool operator()(const TriKey a, const TriKey b) const
378 {
379 return a.tri_lower_vert == b.tri_lower_vert && tris[a.tri_index] == tris[b.tri_index];
380 }
381
382 bool operator()(const int3 a, const TriKey b) const
383 {
384 BLI_assert(std::is_sorted(&a[0], &a[0] + 3));
385 return b.tri_lower_vert == a[0] && tris[b.tri_index] == a;
386 }
387};
388
389static int3 tri_to_ordered(const int3 tri)
390{
391 int3 res;
392 res[0] = std::min({tri[0], tri[1], tri[2]});
393 res[2] = std::max({tri[0], tri[1], tri[2]});
394 res[1] = (tri[0] - res[0]) + (tri[2] - res[2]) + tri[1];
395 return res;
396}
397
399{
400 threading::parallel_for(tris.index_range(), 4096, [&](const IndexRange range) {
401 for (int3 &tri : tris.slice(range)) {
402 tri = tri_to_ordered(tri);
403 }
404 });
405 return tris;
406}
407
409 const IndexMask &mask,
410 IndexMaskMemory &memory)
411{
413 mask,
414 GrainSize(4096),
415 memory,
416 [&](const IndexMaskSegment universe_segment, IndexRangesBuilder<int16_t> &builder) {
419 universe_segment.base_span());
420 const IndexRange segment_range = universe_as_range.shift(universe_segment.offset());
421 const OffsetIndices segment_faces = src_faces.slice(segment_range);
422 if (segment_faces.total_size() == segment_faces.size() * 3) {
423 /* All faces in segment are triangles. */
424 builder.add_range(universe_as_range.start(), universe_as_range.one_after_last());
425 return universe_segment.offset();
426 }
427 }
428
429 for (const int16_t i : universe_segment.base_span()) {
430 const int face = int(universe_segment.offset() + i);
431 if (src_faces[face].size() == 3) {
432 builder.add(i);
433 }
434 }
435 return universe_segment.offset();
436 });
437}
438
439static IndexMask tris_in_set(const IndexMask &tri_mask,
441 const Span<int> corner_verts,
442 const VectorSet<TriKey,
443 4,
445 FaceHash,
448 IndexMaskMemory &memory)
449{
450 return IndexMask::from_predicate(tri_mask, GrainSize(4096), memory, [&](const int face_i) {
451 BLI_assert(faces[face_i].size() == 3);
452 const int3 corner_tri(&corner_verts[faces[face_i].start()]);
453 return unique_tris.contains_as(tri_to_ordered(corner_tri));
454 });
455}
456
458{
459 BLI_assert(faces.size() == indices.size());
460 threading::parallel_for(faces.index_range(), 4096, [&](const IndexRange range) {
461 for (const int face_i : range) {
462 indices[face_i] = faces[face_i].tri_index;
463 }
464 });
465}
466
468{
469 BLI_assert(quads.size() * 2 == indices.size());
470 quads.foreach_index_optimized<int>(GrainSize(4096), [&](const int index, const int pos) {
471 indices[2 * pos + 0] = index;
472 indices[2 * pos + 1] = index;
473 });
474}
475
476static void ngon_indices_of_tris(const IndexMask &ngons,
477 const OffsetIndices<int> tris_by_ngon,
479{
480 BLI_assert(tris_by_ngon.size() == ngons.size());
481 BLI_assert(tris_by_ngon.total_size() == indices.size());
482 ngons.foreach_index_optimized<int>(GrainSize(4096), [&](const int index, const int pos) {
483 indices.slice(tris_by_ngon[pos]).fill(index);
484 });
485}
486
487std::optional<Mesh *> mesh_triangulate(const Mesh &src_mesh,
488 const IndexMask &selection_with_tris,
489 const TriangulateNGonMode ngon_mode,
490 const TriangulateQuadMode quad_mode,
491 const bke::AttributeFilter &attribute_filter)
492{
493 const Span<float3> positions = src_mesh.vert_positions();
494 const OffsetIndices src_faces = src_mesh.faces();
495 const Span<int> src_corner_verts = src_mesh.corner_verts();
496 const bke::AttributeAccessor src_attributes = src_mesh.attributes();
497
498 IndexMaskMemory memory;
499
500 /* If there are a lot of triangles, they can be skipped quickly for filtering. */
501 const IndexMask src_tris = face_tris_mask(src_faces, src_faces.index_range(), memory);
502 const IndexMask selection = IndexMask::from_difference(selection_with_tris, src_tris, memory);
503
504 /* Divide the input selection into separate selections for each face type. This isn't necessary
505 * for correctness, but considering groups of each face type separately simplifies optimizing
506 * for each type. For example, quad triangulation is much simpler than Ngon triangulation. */
508 selection, GrainSize(4096), memory, [&](const int i) { return src_faces[i].size() == 4; });
510 selection, GrainSize(4096), memory, [&](const int i) { return src_faces[i].size() > 4; });
511 if (quads.is_empty() && ngons.is_empty()) {
512 /* All selected faces are already triangles. */
513 return std::nullopt;
514 }
515
516 /* Calculate group of triangle indices for each selected Ngon to facilitate calculating them in
517 * parallel later. */
518 Array<int> tris_by_ngon_data(ngons.size() + 1);
519 const OffsetIndices tris_by_ngon = ngon::calc_tris_by_ngon(src_faces, ngons, tris_by_ngon_data);
520 const int ngon_tris_num = tris_by_ngon.total_size();
521 const int quad_tris_num = quads.size() * 2;
522 const IndexRange tris_range(ngon_tris_num + quad_tris_num);
523 const IndexRange ngon_tris_range = tris_range.take_front(ngon_tris_num);
524 const IndexRange quad_tris_range = tris_range.take_back(quad_tris_num);
525
526 Array<int3> corner_tris(ngon_tris_num + quad_tris_num);
527
528 if (!ngons.is_empty()) {
529 ngon::calc_corner_tris(positions,
530 src_faces,
531 src_corner_verts,
532 face_normals_if_worthwhile(src_mesh, ngons.size()),
533 ngons,
534 tris_by_ngon,
535 ngon_mode,
536 corner_tris.as_mutable_span().slice(ngon_tris_range));
537 }
538 if (!quads.is_empty()) {
539 quad::calc_corner_tris(positions,
540 src_faces,
541 src_corner_verts,
542 quads,
543 quad_mode,
544 corner_tris.as_mutable_span().slice(quad_tris_range));
545 }
546
547 /* There are 3 separate sets of triangles: original mesh triangles, new triangles from quads,
548 * and triangles from n-gons. Deduplication can result in a mix of parts of multiple quads,
549 * multiple quads, original triangle, and even concatenation of parts of multiple n-gons.
550 * So we have to deduplicate all triangles together. */
551 Array<int3> vert_tris(ngon_tris_num + quad_tris_num);
552 array_utils::gather(src_corner_verts,
553 corner_tris.as_span().cast<int>(),
554 vert_tris.as_mutable_span().cast<int>());
555 const Span<int3> ordered_vert_tris = tri_to_ordered_tri(vert_tris.as_mutable_span());
556
557 /* Use ordered vertex triplets (a < b < c) to represent all new triangles.
558 * #TriKey knows indices of the face and points into #ordered_vert_tris, but probe can be done
559 * without #TriKey but dirrectly with a triplet so probe not necessary to be a part of
560 * #ordered_vert_tris. */
562 4,
564 FaceHash,
567 unique_tris(FaceHash{}, FacesEquality{ordered_vert_tris});
568
569 /* Could be done parallel using grouping of faces by their lowest vertex and the next linear
570 * deduplication, but right now this is just a sequential hash-set. */
571 for (const int face_i : ordered_vert_tris.index_range()) {
572 const TriKey face_key(face_i, ordered_vert_tris);
573 unique_tris.add(face_key);
574 }
575 const int unique_tri_num = unique_tris.size();
576
577 /* Since currently deduplication is greedy, there is no mix of data of deduplicated triangles,
578 * instead some of them are removed. Priority: Original triangles removed if any of new triangles
579 * are the same. For all new triangles here is direct order dependency. */
580 const IndexMask src_tris_duplicated = tris_in_set(
581 src_tris, src_faces, src_corner_verts, unique_tris, memory);
582
583 index_mask::ExprBuilder mask_builder;
584 const IndexMask unique_src_faces = index_mask::evaluate_expression(
585 mask_builder.subtract(src_faces.index_range(), {&quads, &ngons, &src_tris_duplicated}),
586 memory);
587
588 const IndexRange unique_faces_range(unique_tri_num + unique_src_faces.size());
589 const IndexRange unique_tri_range = unique_faces_range.take_front(unique_tri_num);
590 const IndexRange unique_src_faces_range = unique_faces_range.take_back(unique_src_faces.size());
591
592 /* Create a mesh with no face corners.
593 * - We haven't yet counted the number of corners from unselected faces. Creating the final face
594 * offsets will give us that number anyway, so wait to create the edges.
595 * - Don't create attributes to facilitate implicit sharing of the positions array. */
597 src_mesh.verts_num, src_mesh.edges_num, unique_faces_range.size(), 0);
598 BKE_mesh_copy_parameters_for_eval(mesh, &src_mesh);
599
600 MutableSpan<int> dst_offsets = mesh->face_offsets_for_write();
602 3, 0, dst_offsets.take_front(unique_tri_range.size() + 1));
603 const int total_new_tri_corners = unique_tri_range.size() * 3;
605 src_faces,
606 unique_src_faces,
607 total_new_tri_corners,
608 dst_offsets.take_back(unique_src_faces_range.size() + 1));
609
610 const OffsetIndices<int> faces(dst_offsets);
611 mesh->corners_num = faces.total_size();
612
613 /* Vertex attributes are totally unaffected and can be shared with implicit sharing.
614 * Use the #CustomData API for simpler support for vertex groups. */
615 CustomData_merge(&src_mesh.vert_data, &mesh->vert_data, CD_MASK_MESH.vmask, mesh->verts_num);
616 /* Edge attributes are the same for original edges. New edges will be generated by
617 * #bke::mesh_calc_edges later. */
618 CustomData_merge(&src_mesh.edge_data, &mesh->edge_data, CD_MASK_MESH.emask, mesh->edges_num);
619
620 bke::MutableAttributeAccessor attributes = mesh->attributes_for_write();
621
622 const bool has_duplicate_faces = unique_tri_num != (ngon_tris_num + quad_tris_num);
623
624 Array<int> dst_tri_to_src_face(unique_tri_num);
625 face_keys_to_face_indices(unique_tris.as_span(), dst_tri_to_src_face.as_mutable_span());
626
627 Array<int3> unique_corner_tris_data;
628 if (has_duplicate_faces) {
629 unique_corner_tris_data.reinitialize(unique_tri_num);
630 array_utils::gather(corner_tris.as_span(),
631 dst_tri_to_src_face.as_span(),
632 unique_corner_tris_data.as_mutable_span());
633 }
634
635 {
636 Array<int> src_to_unique_map(ngon_tris_num + quad_tris_num);
637 quad_indices_of_tris(quads, src_to_unique_map.as_mutable_span().slice(quad_tris_range));
639 ngons, tris_by_ngon, src_to_unique_map.as_mutable_span().slice(ngon_tris_range));
640
641 array_utils::gather(src_to_unique_map.as_span(),
642 dst_tri_to_src_face.as_span(),
643 dst_tri_to_src_face.as_mutable_span());
644 }
645
646 const Span<int3> unique_corner_tris = has_duplicate_faces ? unique_corner_tris_data.as_span() :
647 corner_tris.as_span();
648
649 for (auto &attribute : bke::retrieve_attributes_for_transfer(
650 src_attributes, attributes, ATTR_DOMAIN_MASK_FACE, attribute_filter))
651 {
653 attribute.src, dst_tri_to_src_face.as_span(), attribute.dst.span.slice(unique_tri_range));
655 attribute.src, unique_src_faces, attribute.dst.span.slice(unique_src_faces_range));
656 attribute.dst.finish();
657 }
659 const Span src(
660 static_cast<const int *>(CustomData_get_layer(&src_mesh.face_data, CD_ORIGINDEX)),
661 src_mesh.faces_num);
662 MutableSpan dst(static_cast<int *>(CustomData_add_layer(
664 mesh->faces_num);
665
666 array_utils::gather(src, dst_tri_to_src_face.as_span(), dst.slice(unique_tri_range));
667 array_utils::gather(src, unique_src_faces, dst.slice(unique_src_faces_range));
668 }
669
670 attributes.add<int>(".corner_vert", bke::AttrDomain::Corner, bke::AttributeInitConstruct());
671
672 MutableSpan<int> corner_verts = mesh->corner_verts_for_write();
674 faces.slice(unique_src_faces_range),
675 unique_src_faces,
676 src_corner_verts,
677 corner_verts);
678 array_utils::gather(src_corner_verts,
679 unique_corner_tris.cast<int>(),
680 corner_verts.take_front(total_new_tri_corners));
681
682 for (auto &attribute : bke::retrieve_attributes_for_transfer(
683 src_attributes,
684 attributes,
687 {".corner_vert", ".corner_edge"})))
688 {
690 src_faces,
691 faces.slice(IndexRange(unique_tri_num, unique_src_faces.size())),
692 unique_src_faces,
693 attribute.src,
694 attribute.dst.span);
695 bke::attribute_math::gather(attribute.src,
696 unique_corner_tris.cast<int>(),
697 attribute.dst.span.slice(0, unique_tri_num * 3));
698 attribute.dst.finish();
699 }
700
701 /* Automatically generate new edges between new triangles, with necessary deduplication. */
702 bke::mesh_calc_edges(*mesh, true, false, attribute_filter);
703
704 mesh->runtime->bounds_cache = src_mesh.runtime->bounds_cache;
705 copy_loose_vert_hint(src_mesh, *mesh);
706 if (src_mesh.no_overlapping_topology()) {
707 mesh->tag_overlapping_none();
708 }
710 return mesh;
711}
712
713} // namespace blender::geometry
@ ATTR_DOMAIN_MASK_CORNER
@ ATTR_DOMAIN_MASK_FACE
CustomData interface, see also DNA_customdata_types.h.
const void * CustomData_get_layer(const CustomData *data, eCustomDataType type)
@ CD_CONSTRUCT
bool CustomData_merge(const CustomData *source, CustomData *dest, eCustomDataMask mask, int totelem)
bool CustomData_has_layer(const CustomData *data, eCustomDataType type)
void * CustomData_add_layer(CustomData *data, eCustomDataType type, eCDAllocType alloctype, int totelem)
const CustomData_MeshMasks CD_MASK_MESH
void BKE_mesh_copy_parameters_for_eval(Mesh *me_dst, const Mesh *me_src)
bool BKE_mesh_is_valid(Mesh *mesh)
#define BLI_assert(a)
Definition BLI_assert.h:46
A min-heap / priority queue ADT.
void BLI_heap_free(Heap *heap, HeapFreeFP ptrfreefp) ATTR_NONNULL(1)
Definition BLI_heap.cc:191
Heap * BLI_heap_new_ex(unsigned int reserve_num) ATTR_WARN_UNUSED_RESULT
Definition BLI_heap.cc:171
int is_quad_flip_v3(const float v1[3], const float v2[3], const float v3[3], const float v4[3])
void axis_dominant_v3_to_m3_negate(float r_mat[3][3], const float normal[3])
void mul_v2_m3v3(float r[2], const float M[3][3], const float a[3])
MemArena * BLI_memarena_new(size_t bufsize, const char *name) ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL ATTR_NONNULL(2) ATTR_MALLOC
void BLI_memarena_free(MemArena *ma) ATTR_NONNULL(1)
void BLI_memarena_clear(MemArena *ma) ATTR_NONNULL(1)
#define BLI_POLYFILL_ARENA_SIZE
void BLI_polyfill_calc(const float(*coords)[2], unsigned int coords_num, int coords_sign, unsigned int(*r_tris)[3])
#define BLI_POLYFILL_ALLOC_NGON_RESERVE
void BLI_polyfill_beautify(const float(*coords)[2], unsigned int coords_num, unsigned int(*tris)[3], struct MemArena *arena, struct Heap *eheap)
float BLI_polyfill_edge_calc_rotate_beauty__area(const float v1[3], const float v2[3], const float v3[3], const float v4[3], bool lock_degenerate)
unsigned int uint
#define UNLIKELY(x)
BMesh const char void * data
ATTR_WARN_UNUSED_RESULT const BMVert * v2
long long int int64_t
unsigned long long int uint64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
AttributeSet attributes
static IndexMask from_predicate(const IndexMask &universe, GrainSize grain_size, IndexMaskMemory &memory, Fn &&predicate)
static IndexMask from_batch_predicate(const IndexMask &universe, GrainSize grain_size, IndexMaskMemory &memory, FunctionRef< int64_t(const IndexMaskSegment &universe_segment, IndexRangesBuilder< int16_t > &builder)> batch_predicate)
static IndexMask from_difference(const IndexMask &mask_a, const IndexMask &mask_b, IndexMaskMemory &memory)
Span< T > as_span() const
Definition BLI_array.hh:243
MutableSpan< T > as_mutable_span()
Definition BLI_array.hh:248
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:419
constexpr int64_t one_after_last() const
constexpr IndexRange shift(int64_t n) const
constexpr int64_t size() const
constexpr int64_t start() const
constexpr IndexRange take_back(int64_t n) const
constexpr IndexRange take_front(int64_t n) const
bool add_range(const T start, const T end)
constexpr MutableSpan slice(const int64_t start, const int64_t size) const
Definition BLI_span.hh:573
constexpr MutableSpan< NewT > cast() const
Definition BLI_span.hh:749
constexpr MutableSpan take_back(const int64_t n) const
Definition BLI_span.hh:640
constexpr T * data() const
Definition BLI_span.hh:539
constexpr void fill(const T &value) const
Definition BLI_span.hh:517
constexpr IndexRange index_range() const
Definition BLI_span.hh:670
constexpr void copy_from(Span< T > values) const
Definition BLI_span.hh:739
constexpr MutableSpan take_front(const int64_t n) const
Definition BLI_span.hh:629
constexpr T & last(const int64_t n=0) const
Definition BLI_span.hh:689
IndexRange index_range() const
Span< BaseT > base_span() const
constexpr Span drop_front(int64_t n) const
Definition BLI_span.hh:171
Span< NewT > constexpr cast() const
Definition BLI_span.hh:418
constexpr Span slice(int64_t start, int64_t size) const
Definition BLI_span.hh:137
constexpr const T * data() const
Definition BLI_span.hh:215
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr IndexRange index_range() const
Definition BLI_span.hh:401
constexpr bool is_empty() const
Definition BLI_span.hh:260
void reinitialize(const int64_t new_size)
Span< T > as_span() const
bool add(const StringRef attribute_id, const AttrDomain domain, const AttrType data_type, const AttributeInit &initializer)
const DifferenceExpr & subtract(const Term &main_term, const Span< Term > subtract_terms)
void foreach_index_optimized(Fn &&fn) const
IndexMask slice(IndexRange range) const
void foreach_index(Fn &&fn) const
void foreach_segment(Fn &&fn) const
OffsetIndices slice(const IndexRange range) const
static ushort indices[]
static float verts[][3]
uint pos
ccl_device_inline float2 mask(const MaskType mask, const float2 a)
static char faces[256]
static void copy_loose_vert_hint(const Mesh &src, Mesh &dst)
void gather_group_to_group(const OffsetIndices< int > src_offsets, const OffsetIndices< int > dst_offsets, const IndexMask &selection, const Span< T > src, MutableSpan< T > dst)
void gather(const GVArray &src, const IndexMask &indices, GMutableSpan dst, int64_t grain_size=4096)
void gather(GSpan src, Span< int > map, GMutableSpan dst)
void gather_group_to_group(OffsetIndices< int > src_offsets, OffsetIndices< int > dst_offsets, const IndexMask &selection, GSpan src, GMutableSpan dst)
float3 face_normal_calc(Span< float3 > vert_positions, Span< int > face_verts)
int face_triangles_num(const int face_size)
Definition BKE_mesh.hh:350
Vector< AttributeTransferData > retrieve_attributes_for_transfer(const AttributeAccessor src_attributes, MutableAttributeAccessor dst_attributes, AttrDomainMask domain_mask, const AttributeFilter &attribute_filter={})
auto attribute_filter_with_skip_ref(AttributeFilter filter, const Span< StringRef > skip)
Mesh * mesh_new_no_attributes(int verts_num, int edges_num, int faces_num, int corners_num)
void mesh_calc_edges(Mesh &mesh, bool keep_existing_edges, bool select_new_edges)
static void calc_corner_tris(const Span< float3 > positions, const OffsetIndices< int > src_faces, const Span< int > src_corner_verts, const Span< float3 > face_normals, const IndexMask &ngons, const OffsetIndices< int > tris_by_ngon, const TriangulateNGonMode ngon_mode, MutableSpan< int3 > corner_tris)
static OffsetIndices< int > calc_tris_by_ngon(const OffsetIndices< int > src_faces, const IndexMask &ngons, MutableSpan< int > face_offset_data)
static void calc_corner_tris(const Span< int > face_offsets, const Span< QuadDirection > directions, MutableSpan< int3 > corner_tris)
static void calc_quad_directions(const Span< float3 > positions, const Span< int > face_offsets, const Span< int > corner_verts, const TriangulateQuadMode quad_mode, MutableSpan< QuadDirection > directions)
static QuadDirection calc_quad_direction_beauty(const float3 &v0, const float3 &v1, const float3 &v2, const float3 &v3)
std::optional< Mesh * > mesh_triangulate(const Mesh &src_mesh, const IndexMask &selection, TriangulateNGonMode ngon_mode, TriangulateQuadMode quad_mode, const bke::AttributeFilter &attribute_filter)
static void ngon_indices_of_tris(const IndexMask &ngons, const OffsetIndices< int > tris_by_ngon, MutableSpan< int > indices)
static void gather(const Span< int > src, const Span< int16_t > indices, MutableSpan< int > dst)
static Span< int3 > tri_to_ordered_tri(MutableSpan< int3 > tris)
static void quad_indices_of_tris(const IndexMask &quads, MutableSpan< int > indices)
static int3 tri_to_ordered(const int3 tri)
static void face_keys_to_face_indices(const Span< TriKey > faces, MutableSpan< int > indices)
static OffsetIndices< int > gather_selected_offsets(const OffsetIndices< int > src_offsets, const IndexMaskSegment selection, MutableSpan< int > dst_offsets)
static Span< float3 > face_normals_if_worthwhile(const Mesh &src_mesh, const int selection_size)
static Span< int > gather_or_reference(const Span< int > src, const Span< int16_t > indices, Vector< int > &dst)
static IndexMask face_tris_mask(const OffsetIndices< int > src_faces, const IndexMask &mask, IndexMaskMemory &memory)
static void copy_loose_vert_hint(const Mesh &src, Mesh &dst)
static IndexMask tris_in_set(const IndexMask &tri_mask, const OffsetIndices< int > faces, const Span< int > corner_verts, const VectorSet< TriKey, 4, DefaultProbingStrategy, FaceHash, FacesEquality, SimpleVectorSetSlot< TriKey, int > > &unique_tris, IndexMaskMemory &memory)
IndexMask evaluate_expression(const Expr &expression, IndexMaskMemory &memory)
T distance_squared(const VecBase< T, Size > &a, const VecBase< T, Size > &b)
OffsetIndices< int > accumulate_counts_to_offsets(MutableSpan< int > counts_to_offsets, int start_offset=0)
void fill_constant_group_size(int size, int start_offset, MutableSpan< int > offsets)
OffsetIndices< int > gather_selected_offsets(OffsetIndices< int > src_offsets, const IndexMask &selection, int start_offset, MutableSpan< int > dst_offsets)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
bool non_empty_is_range(const Span< T > indices)
IndexRange non_empty_as_range(const Span< T > indices)
VecBase< int32_t, 3 > int3
MatBase< float, 3, 3 > float3x3
PythonProbingStrategy<> DefaultProbingStrategy
VecBase< float, 3 > float3
int corners_num
CustomData edge_data
int edges_num
MeshRuntimeHandle * runtime
CustomData face_data
CustomData vert_data
int faces_num
int verts_num
const c_style_mat & ptr() const
uint64_t operator()(const TriKey value) const
uint64_t operator()(const int3 value) const
bool operator()(const TriKey a, const TriKey b) const
bool operator()(const int3 a, const TriKey b) const
TriKey(const int tri_index, Span< int3 > tris)
i
Definition text_draw.cc:230
PointerRNA * ptr
Definition wm_files.cc:4238