Blender V4.5
mesh_triangulate.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
5#include "atomic_ops.h"
6
7#include "BLI_array_utils.hh"
9#include "BLI_index_mask.hh"
10#include "BLI_math_geom.h"
11#include "BLI_math_matrix.h"
12#include "BLI_ordered_edge.hh"
13#include "BLI_polyfill_2d.h"
15#include "BLI_vector_set.hh"
16
17#include "BLI_heap.h"
19#include "BLI_memarena.h"
20
21#include "BKE_attribute.hh"
22#include "BKE_attribute_math.hh"
23#include "BKE_customdata.hh"
24#include "BKE_mesh.hh"
25#include "BKE_mesh_mapping.hh"
26
28
29namespace blender::geometry {
30
31static void gather(const Span<int> src, const Span<int16_t> indices, MutableSpan<int> dst)
32{
33 for (const int i : indices.index_range()) {
34 dst[i] = src[indices[i]];
35 }
36}
37
40 Vector<int> &dst)
41{
43 return src.slice(indices[0], indices.size());
44 }
45 dst.reinitialize(indices.size());
46 gather(src, indices, dst);
47 return dst.as_span();
48}
49
52 Vector<int> &dst)
53{
54 return gather_or_reference(src.drop_front(mask.offset()), mask.base_span(), dst);
55}
56
61static Span<float3> face_normals_if_worthwhile(const Mesh &src_mesh, const int selection_size)
62{
63 if (src_mesh.runtime->face_normals_cache.is_cached()) {
64 return src_mesh.face_normals();
65 }
66 if (selection_size > src_mesh.faces_num / 4) {
67 return src_mesh.face_normals();
68 }
69 return {};
70}
71
72static void copy_loose_vert_hint(const Mesh &src, Mesh &dst)
73{
74 const auto &src_cache = src.runtime->loose_verts_cache;
75 if (src_cache.is_cached() && src_cache.data().count == 0) {
76 dst.tag_loose_verts_none();
77 }
78}
79
80static void copy_loose_edge_hint(const Mesh &src, Mesh &dst)
81{
82 const auto &src_cache = src.runtime->loose_edges_cache;
83 if (src_cache.is_cached() && src_cache.data().count == 0) {
84 dst.tag_loose_edges_none();
85 }
86}
87
89 const IndexMask &unselected,
90 MutableSpan<int> offsets)
91{
92 MutableSpan<int> new_tri_offsets = offsets.drop_back(unselected.size());
93 offset_indices::fill_constant_group_size(3, new_tri_offsets.first(), new_tri_offsets);
95 src_faces, unselected, new_tri_offsets.last(), offsets.take_back(unselected.size() + 1));
96 return OffsetIndices<int>(offsets);
97}
98
99namespace quad {
100
/**
 * The two ways a quad can be split into two triangles: along the 0-2 diagonal or along the
 * 1-3 diagonal (corner indices within the quad).
 *
 * NOTE(review): the enumerator lines were dropped by extraction; the two names are reconstructed
 * from their uses in this file.
 */
enum class QuadDirection : int8_t {
  Edge_0_2,
  Edge_1_3,
};
114
120 const float3 &v1,
121 const float3 &v2,
122 const float3 &v3)
123{
124 const int flip_flag = is_quad_flip_v3(v1, v2, v3, v0);
125 if (UNLIKELY(flip_flag & (1 << 0))) {
127 }
128 if (UNLIKELY(flip_flag & (1 << 1))) {
130 }
131 return BLI_polyfill_edge_calc_rotate_beauty__area(v1, v2, v3, v0, false) > 0.0f ?
134}
135
136static void calc_quad_directions(const Span<float3> positions,
137 const Span<int> face_offsets,
138 const Span<int> corner_verts,
139 const TriangulateQuadMode quad_mode,
141{
142 switch (quad_mode) {
144 directions.fill(QuadDirection::Edge_0_2);
145 break;
146 }
148 directions.fill(QuadDirection::Edge_1_3);
149 break;
150 }
152 for (const int i : face_offsets.index_range()) {
153 const Span<int> verts = corner_verts.slice(face_offsets[i], 4);
154 const float dist_0_2 = math::distance_squared(positions[verts[0]], positions[verts[2]]);
155 const float dist_1_3 = math::distance_squared(positions[verts[1]], positions[verts[3]]);
156 directions[i] = dist_0_2 < dist_1_3 ? QuadDirection::Edge_0_2 : QuadDirection::Edge_1_3;
157 }
158 break;
159 }
161 for (const int i : face_offsets.index_range()) {
162 const Span<int> verts = corner_verts.slice(face_offsets[i], 4);
163 const float dist_0_2 = math::distance_squared(positions[verts[0]], positions[verts[2]]);
164 const float dist_1_3 = math::distance_squared(positions[verts[1]], positions[verts[3]]);
165 directions[i] = dist_0_2 > dist_1_3 ? QuadDirection::Edge_0_2 : QuadDirection::Edge_1_3;
166 }
167 break;
168 }
170 for (const int i : face_offsets.index_range()) {
171 const Span<int> verts = corner_verts.slice(face_offsets[i], 4);
172 directions[i] = calc_quad_direction_beauty(
173 positions[verts[0]], positions[verts[1]], positions[verts[2]], positions[verts[3]]);
174 }
175
176 break;
177 }
178 }
179}
180
181static void calc_corner_tris(const Span<int> face_offsets,
182 const Span<QuadDirection> directions,
183 MutableSpan<int3> corner_tris)
184{
185 for (const int i : face_offsets.index_range()) {
186 MutableSpan<int> quad_map = corner_tris.slice(2 * i, 2).cast<int>();
187 /* These corner orders give new edges based on the first vertex of each triangle. */
188 switch (directions[i]) {
190 quad_map.copy_from({2, 0, 1, 0, 2, 3});
191 break;
193 quad_map.copy_from({1, 3, 0, 3, 1, 2});
194 break;
195 }
196 const int src_face_start = face_offsets[i];
197 for (int &i : quad_map) {
198 i += src_face_start;
199 }
200 }
201}
202
203static void calc_corner_tris(const Span<float3> positions,
204 const OffsetIndices<int> src_faces,
205 const Span<int> src_corner_verts,
206 const IndexMask &quads,
207 const TriangulateQuadMode quad_mode,
208 MutableSpan<int3> corner_tris)
209{
210 struct TLS {
211 Vector<int> offsets;
212 Vector<QuadDirection> directions;
213 };
215
216 quads.foreach_segment(GrainSize(1024), [&](const IndexMaskSegment quads, const int64_t pos) {
217 TLS &data = tls.local();
218 data.directions.reinitialize(quads.size());
219
220 /* Find the offsets of each face in the local selection. We can gather them together even if
221 * they aren't contiguous because we only need to know the start of each face; the size is
222 * just 4. */
223 const Span<int> offsets = gather_or_reference(src_faces.data(), quads, data.offsets);
224 calc_quad_directions(positions, offsets, src_corner_verts, quad_mode, data.directions);
225 const IndexRange tris_range(pos * 2, offsets.size() * 2);
226 quad::calc_corner_tris(offsets, data.directions, corner_tris.slice(tris_range));
227 });
228}
229
235static void calc_edges(const Span<int> quad_corner_verts, MutableSpan<int2> new_quad_edges)
236{
237 const int quads_num = quad_corner_verts.size() / 6;
238 for (const int i : IndexRange(quads_num)) {
239 const Span<int> verts = quad_corner_verts.slice(6 * i, 6);
240 /* Use the first vertex of each triangle. */
241 new_quad_edges[i] = int2(verts[0], verts[1]);
242 }
243}
244
245static void calc_quad_corner_edges(const Span<int> src_corner_edges,
246 const Span<int3> corner_tris,
247 const int edges_start,
248 MutableSpan<int> corner_edges)
249{
250 /* Each triangle starts at the new edge and winds in the same order as corner vertices
251 * described by the corner map. */
252 for (const int tri : corner_tris.index_range()) {
253 corner_edges[3 * tri + 0] = edges_start + tri / 2;
254 corner_edges[3 * tri + 1] = src_corner_edges[corner_tris[tri][1]];
255 corner_edges[3 * tri + 2] = src_corner_edges[corner_tris[tri][2]];
256 }
257}
258
259static void calc_edges(const Span<int> src_corner_edges,
260 const Span<int3> corner_tris,
261 const Span<int> corner_verts,
262 const int edges_start,
263 MutableSpan<int2> edges,
264 MutableSpan<int> quad_corner_edges)
265{
266 const int quads_num = corner_tris.size() / 2;
267 threading::parallel_for(IndexRange(quads_num), 1024, [&](const IndexRange quads) {
268 const IndexRange tris_range(quads.start() * 2, quads.size() * 2);
269 const IndexRange corners(quads.start() * 6, quads.size() * 6);
270 calc_edges(corner_verts.slice(corners), edges.slice(quads));
271 calc_quad_corner_edges(src_corner_edges,
272 corner_tris.slice(tris_range),
273 edges_start + quads.start(),
274 quad_corner_edges.slice(corners));
275 });
276}
277
278template<typename T>
279static void copy_quad_data_to_tris(const Span<T> src, const IndexMask &quads, MutableSpan<T> dst)
280{
281 quads.foreach_index_optimized<int>([&](const int src_i, const int dst_i) {
282 dst[2 * dst_i + 0] = src[src_i];
283 dst[2 * dst_i + 1] = src[src_i];
284 });
285}
286
287static void copy_quad_data_to_tris(const GSpan src, const IndexMask &quads, GMutableSpan dst)
288{
290 using T = decltype(dummy);
291 copy_quad_data_to_tris(src.typed<T>(), quads, dst.typed<T>());
292 });
293}
294
295} // namespace quad
296
298 const IndexMaskSegment selection,
299 MutableSpan<int> dst_offsets)
300{
301 int offset = 0;
302 for (const int64_t i : selection.index_range()) {
303 dst_offsets[i] = offset;
304 offset += src_offsets[selection[i]].size();
305 }
306 dst_offsets.last() = offset;
307 return OffsetIndices<int>(dst_offsets);
308}
309
310namespace ngon {
311
313 const IndexMask &ngons,
314 MutableSpan<int> face_offset_data)
315{
316 ngons.foreach_index(GrainSize(2048), [&](const int face, const int mask) {
317 face_offset_data[mask] = bke::mesh::face_triangles_num(src_faces[face].size());
318 });
319 return offset_indices::accumulate_counts_to_offsets(face_offset_data);
320}
321
323 const IndexMask &selection,
324 MutableSpan<int> edge_offset_data)
325{
326 selection.foreach_index(GrainSize(2048), [&](const int face, const int mask) {
327 /* The number of new inner edges for each face is the number of corners - 3. */
328 edge_offset_data[mask] = src_faces[face].size() - 3;
329 });
330 return offset_indices::accumulate_counts_to_offsets(edge_offset_data);
331}
332
333static void calc_corner_tris(const Span<float3> positions,
334 const OffsetIndices<int> src_faces,
335 const Span<int> src_corner_verts,
336 const Span<float3> face_normals,
337 const IndexMask &ngons,
338 const OffsetIndices<int> tris_by_ngon,
339 const TriangulateNGonMode ngon_mode,
340 MutableSpan<int3> corner_tris)
341{
342 struct LocalData {
343 Vector<float3x3> projections;
344 Array<int> offset_data;
345 Vector<float2> projected_positions;
346
347 /* Only used for the "Beauty" method. */
348 MemArena *arena = nullptr;
349 Heap *heap = nullptr;
350
351 ~LocalData()
352 {
353 if (arena) {
354 BLI_memarena_free(arena);
355 }
356 if (heap) {
357 BLI_heap_free(heap, nullptr);
358 }
359 }
360 };
362
363 ngons.foreach_segment(GrainSize(128), [&](const IndexMaskSegment ngons, const int pos) {
364 LocalData &data = tls.local();
365
366 /* In order to simplify and "parallelize" the next loops, gather offsets used to group an array
367 * large enough for all the local face corners. */
368 data.offset_data.reinitialize(ngons.size() + 1);
369 const OffsetIndices local_corner_offsets = gather_selected_offsets(
370 src_faces, ngons, data.offset_data);
371
372 /* Use face normals to build projection matrices to make the face positions 2D. */
373 data.projections.reinitialize(ngons.size());
374 MutableSpan<float3x3> projections = data.projections;
375 if (face_normals.is_empty()) {
376 for (const int i : ngons.index_range()) {
377 const IndexRange src_face = src_faces[ngons[i]];
378 const Span<int> face_verts = src_corner_verts.slice(src_face);
379 const float3 normal = bke::mesh::face_normal_calc(positions, face_verts);
380 axis_dominant_v3_to_m3_negate(projections[i].ptr(), normal);
381 }
382 }
383 else {
384 for (const int i : ngons.index_range()) {
385 axis_dominant_v3_to_m3_negate(projections[i].ptr(), face_normals[ngons[i]]);
386 }
387 }
388
389 /* Project the face positions into 2D using the matrices calculated above. */
390 data.projected_positions.reinitialize(local_corner_offsets.total_size());
391 MutableSpan<float2> projected_positions = data.projected_positions;
392 for (const int i : ngons.index_range()) {
393 const IndexRange src_face = src_faces[ngons[i]];
394 const Span<int> face_verts = src_corner_verts.slice(src_face);
395 const float3x3 &matrix = projections[i];
396
397 MutableSpan<float2> positions_2d = projected_positions.slice(local_corner_offsets[i]);
398 for (const int i : face_verts.index_range()) {
399 mul_v2_m3v3(positions_2d[i], matrix.ptr(), positions[face_verts[i]]);
400 }
401 }
402
403 if (ngon_mode == TriangulateNGonMode::Beauty) {
404 if (!data.arena) {
406 }
407 if (!data.heap) {
409 }
410 }
411
412 /* Calculate the triangulation of corners indices local to each face. */
413 for (const int i : ngons.index_range()) {
414 const Span<float2> positions_2d = projected_positions.slice(local_corner_offsets[i]);
415 const IndexRange tris_range = tris_by_ngon[pos + i];
416 MutableSpan<int> map = corner_tris.slice(tris_range).cast<int>();
417 BLI_polyfill_calc(reinterpret_cast<const float(*)[2]>(positions_2d.data()),
418 positions_2d.size(),
419 1,
420 reinterpret_cast<uint(*)[3]>(map.data()));
421 if (ngon_mode == TriangulateNGonMode::Beauty) {
422 BLI_polyfill_beautify(reinterpret_cast<const float(*)[2]>(positions_2d.data()),
423 positions_2d.size(),
424 reinterpret_cast<uint(*)[3]>(map.data()),
425 data.arena,
426 data.heap);
428 }
429 }
430
431 /* "Globalize" the triangulation created above so the map source indices reference _all_ of the
432 * source vertices, not just within the source face. */
433 for (const int i : ngons.index_range()) {
434 const IndexRange tris_range = tris_by_ngon[pos + i];
435 const int src_face_start = src_faces[ngons[i]].start();
436 MutableSpan<int> map = corner_tris.slice(tris_range).cast<int>();
437 for (int &vert : map) {
438 vert += src_face_start;
439 }
440 }
441 });
442}
443
444static void calc_inner_tri_edges(const IndexRange src_face,
445 const Span<int> src_corner_verts,
446 const Span<int> src_corner_edges,
447 const Span<int3> corner_tris,
448 const int edges_start,
449 MutableSpan<int> corner_edges,
451{
452 const OrderedEdge last_edge(int(src_face.first()), int(src_face.last()));
453 auto add_edge = [&](const OrderedEdge corner_edge) -> int {
454 if (corner_edge == last_edge) {
455 return src_corner_edges[src_face.last()];
456 }
457 if (corner_edge.v_high == corner_edge.v_low + 1) {
458 return src_corner_edges[corner_edge.v_low];
459 }
460 const OrderedEdge vert_edge(src_corner_verts[corner_edge.v_low],
461 src_corner_verts[corner_edge.v_high]);
462 return edges_start + deduplication.index_of_or_add(vert_edge);
463 };
464
465 for (const int i : corner_tris.index_range()) {
466 const int3 tri = corner_tris[i];
467 corner_edges[3 * i + 0] = add_edge({tri[0], tri[1]});
468 corner_edges[3 * i + 1] = add_edge({tri[1], tri[2]});
469 corner_edges[3 * i + 2] = add_edge({tri[2], tri[0]});
470 }
471}
472
473static void calc_edges(const OffsetIndices<int> src_faces,
474 const Span<int> src_corner_verts,
475 const Span<int> src_corner_edges,
476 const IndexMask &ngons,
477 const OffsetIndices<int> tris_by_ngon,
478 const OffsetIndices<int> edges_by_ngon,
479 const IndexRange ngon_edges_range,
480 const Span<int3> corner_tris,
481 MutableSpan<int2> edges,
482 MutableSpan<int> corner_edges)
483{
484 MutableSpan<int2> inner_edges = edges.slice(ngon_edges_range);
486 ngons.foreach_segment(GrainSize(128), [&](const IndexMaskSegment ngons, const int pos) {
488 for (const int16_t i : ngons.index_range()) {
489 const IndexRange edges = edges_by_ngon[pos + i];
490 const IndexRange tris_range = tris_by_ngon[pos + i];
491 const IndexRange corners(tris_range.start() * 3, tris_range.size() * 3);
492 deduplication.clear();
493 calc_inner_tri_edges(src_faces[ngons[i]],
494 src_corner_verts,
495 src_corner_edges,
496 corner_tris.slice(tris_range),
497 ngon_edges_range[edges.start()],
498 corner_edges.slice(corners),
500 inner_edges.slice(edges).copy_from(deduplication.as_span().cast<int2>());
501 }
502 });
503}
504
505} // namespace ngon
506
507namespace deduplication {
508
509static GroupedSpan<int> build_vert_to_tri_map(const int verts_num,
510 const Span<int3> vert_tris,
511 Array<int> &r_offsets,
512 Array<int> &r_indices)
513{
514 r_offsets = Array<int>(verts_num + 1, 0);
515 offset_indices::build_reverse_offsets(vert_tris.cast<int>(), r_offsets);
516 const OffsetIndices offsets(r_offsets.as_span());
517
518 r_indices.reinitialize(offsets.total_size());
519 int *counts = MEM_calloc_arrayN<int>(offsets.size(), __func__);
520 BLI_SCOPED_DEFER([&]() { MEM_freeN(counts); })
521 threading::parallel_for(vert_tris.index_range(), 1024, [&](const IndexRange range) {
522 for (const int tri : range) {
523 for (const int vert : {vert_tris[tri][0], vert_tris[tri][1], vert_tris[tri][2]}) {
524 const int index_in_group = atomic_fetch_and_add_int32(&counts[vert], 1);
525 r_indices[offsets[vert][index_in_group]] = tri;
526 }
527 }
528 });
529
530 return {r_offsets.as_span(), r_indices.as_span()};
531}
532
539 const OffsetIndices<int> src_faces,
540 const Span<int> src_corner_verts,
541 const IndexMask &selection,
542 const Span<int3> corner_tris,
543 IndexMaskMemory &memory)
544{
545 const IndexMask unselected = selection.complement(src_faces.index_range(), memory);
546 if (mesh.no_overlapping_topology()) {
547 return unselected;
548 }
549 const IndexMask unselected_tris = IndexMask::from_batch_predicate(
550 unselected,
551 GrainSize(4096),
552 memory,
553 [&](const IndexMaskSegment universe_segment, IndexRangesBuilder<int16_t> &builder) {
556 universe_segment.base_span());
557 const IndexRange segment_range = universe_as_range.shift(universe_segment.offset());
558 const OffsetIndices segment_faces = src_faces.slice(segment_range);
559 if (segment_faces.total_size() == segment_faces.size() * 3) {
560 /* All faces in segment are triangles. */
561 builder.add_range(universe_as_range.start(), universe_as_range.one_after_last());
562 return universe_segment.offset();
563 }
564 }
565
566 for (const int16_t i : universe_segment.base_span()) {
567 const int face = int(universe_segment.offset() + i);
568 if (src_faces[face].size() == 3) {
569 builder.add(i);
570 }
571 }
572 return universe_segment.offset();
573 });
574
575 if (unselected_tris.is_empty()) {
576 return unselected;
577 }
578
579 Array<int3> vert_tris(corner_tris.size());
581 src_corner_verts, corner_tris.cast<int>(), vert_tris.as_mutable_span().cast<int>());
582
583 Array<int> vert_to_tri_offsets;
584 Array<int> vert_to_tri_indices;
585 const GroupedSpan<int> vert_to_tri = build_vert_to_tri_map(
586 mesh.verts_num, vert_tris, vert_to_tri_offsets, vert_to_tri_indices);
587
588 auto tri_exists = [&](const std::array<int, 3> &tri_verts) {
589 /* TODO: Sorting the three values with a few comparisons would be faster than a #Set. */
590 const Set<int, 3> vert_set(tri_verts);
591 return std::any_of(tri_verts.begin(), tri_verts.end(), [&](const int vert) {
592 return std::any_of(vert_to_tri[vert].begin(), vert_to_tri[vert].end(), [&](const int tri) {
593 const Set<int, 3> other_tri_verts(Span(&vert_tris[tri].x, 3));
594 return other_tri_verts == vert_set;
595 });
596 });
597 };
598
599 const IndexMask duplicate_triangles = IndexMask::from_predicate(
600 unselected_tris, GrainSize(1024), memory, [&](const int i) {
601 const Span<int> face_verts = src_corner_verts.slice(src_faces[i]);
602 return tri_exists({face_verts[0], face_verts[1], face_verts[2]});
603 });
604
605 return IndexMask::from_difference(unselected, duplicate_triangles, memory);
606}
607
608static std::optional<int> find_edge_duplicate(const GroupedSpan<int> vert_to_edge_map,
609 const Span<int2> edges,
610 const OrderedEdge edge)
611{
612 for (const int vert : {edge.v_low, edge.v_high}) {
613 for (const int src_edge : vert_to_edge_map[vert]) {
614 if (OrderedEdge(edges[src_edge]) == edge) {
615 return src_edge;
616 }
617 }
618 }
619 return std::nullopt;
620}
621
628static int calc_new_edges(const Mesh &src_mesh,
629 const Span<int2> src_edges,
630 const IndexRange new_edges_range,
631 MutableSpan<int2> edges,
632 MutableSpan<int> corner_edges)
633{
634 if (src_mesh.no_overlapping_topology()) {
635 return edges.size();
636 }
637
638 Array<int> vert_to_edge_offsets;
639 Array<int> vert_to_edge_indices;
641 src_edges, src_mesh.verts_num, vert_to_edge_offsets, vert_to_edge_indices);
642
643 const Span<int2> new_edges = edges.slice(new_edges_range);
644 Array<int> duplicate_remap(new_edges.size());
645 threading::parallel_for(new_edges.index_range(), 1024, [&](const IndexRange range) {
646 for (const int i : range) {
647 duplicate_remap[i] = find_edge_duplicate(vert_to_edge, src_edges, new_edges[i]).value_or(-1);
648 }
649 });
650 IndexMaskMemory memory;
651 const IndexMask non_duplicate_new_edges = IndexMask::from_predicate(
652 new_edges.index_range(), GrainSize(4096), memory, [&](const int i) {
653 return duplicate_remap[i] == -1;
654 });
655 if (non_duplicate_new_edges.size() == new_edges.size()) {
656 return edges.size();
657 }
658
659 non_duplicate_new_edges.foreach_index_optimized<int>(
660 GrainSize(4096), [&](const int index, const int pos) {
661 duplicate_remap[index] = pos + new_edges_range.start();
662 });
663 threading::parallel_for(corner_edges.index_range(), 4096, [&](const IndexRange range) {
664 for (const int corner : range) {
665 const int edge = corner_edges[corner];
666 if (edge < new_edges_range.start()) {
667 continue;
668 }
669 const int remap_index = edge - new_edges_range.start();
670 corner_edges[corner] = duplicate_remap[remap_index];
671 }
672 });
673
674 Array<int2> edges_with_duplicates = new_edges;
675 array_utils::gather(edges_with_duplicates.as_span(),
676 non_duplicate_new_edges,
677 edges.slice(new_edges_range.start(), non_duplicate_new_edges.size()));
678 return src_edges.size() + non_duplicate_new_edges.size();
679}
680
681} // namespace deduplication
682
683std::optional<Mesh *> mesh_triangulate(const Mesh &src_mesh,
684 const IndexMask &selection_with_tris,
685 const TriangulateNGonMode ngon_mode,
686 const TriangulateQuadMode quad_mode,
687 const bke::AttributeFilter &attribute_filter)
688{
689 const Span<float3> positions = src_mesh.vert_positions();
690 const Span<int2> src_edges = src_mesh.edges();
691 const OffsetIndices src_faces = src_mesh.faces();
692 const Span<int> src_corner_verts = src_mesh.corner_verts();
693 const Span<int> src_corner_edges = src_mesh.corner_edges();
694 const bke::AttributeAccessor src_attributes = src_mesh.attributes();
695
696 /* Divide the input selection into separate selections for each face type. This isn't necessary
697 * for correctness, but considering groups of each face type separately simplifies optimizing
698 * for each type. For example, quad triangulation is much simpler than Ngon triangulation. */
699 IndexMaskMemory memory;
701 selection_with_tris, GrainSize(4096), memory, [&](const int i) {
702 return src_faces[i].size() == 4;
703 });
705 selection_with_tris, GrainSize(4096), memory, [&](const int i) {
706 return src_faces[i].size() > 4;
707 });
708 if (quads.is_empty() && ngons.is_empty()) {
709 /* All selected faces are already triangles. */
710 return std::nullopt;
711 }
712
713 const IndexMask selection = IndexMask::from_union(quads, ngons, memory);
714
715 /* Calculate group of triangle indices for each selected Ngon to facilitate calculating them in
716 * parallel later. */
717 Array<int> tris_by_ngon_data(ngons.size() + 1);
718 const OffsetIndices tris_by_ngon = ngon::calc_tris_by_ngon(src_faces, ngons, tris_by_ngon_data);
719 const int ngon_tris_num = tris_by_ngon.total_size();
720 const int quad_tris_num = quads.size() * 2;
721 const IndexRange tris_range(ngon_tris_num + quad_tris_num);
722 const IndexRange ngon_tris_range = tris_range.take_front(ngon_tris_num);
723 const IndexRange quad_tris_range = tris_range.take_back(quad_tris_num);
724
725 const int ngon_corners_num = tris_by_ngon.total_size() * 3;
726 const int quad_corners_num = quads.size() * 6;
727 const IndexRange tri_corners_range(quad_corners_num + ngon_corners_num);
728 const IndexRange ngon_corners_range = tri_corners_range.take_front(ngon_corners_num);
729 const IndexRange quad_corners_range = tri_corners_range.take_back(quad_corners_num);
730
731 /* Calculate groups of new inner edges for each selected Ngon so they can be filled in parallel
732 * later. */
733 Array<int> edge_offset_data(ngons.size() + 1);
734 const OffsetIndices edges_by_ngon = ngon::calc_edges_by_ngon(src_faces, ngons, edge_offset_data);
735 const int ngon_edges_num = edges_by_ngon.total_size();
736 const int quad_edges_num = quads.size();
737 const IndexRange src_edges_range(0, src_edges.size());
738 const IndexRange tri_edges_range(src_edges_range.one_after_last(),
739 ngon_edges_num + quad_edges_num);
740 const IndexRange ngon_edges_range = tri_edges_range.take_front(ngon_edges_num);
741 const IndexRange quad_edges_range = tri_edges_range.take_back(quad_edges_num);
742
743 /* An index map that maps from newly created corners in `tri_corners_range` to original corner
744 * indices. This is used to interpolate `corner_vert` indices and face corner attributes. If
745 * there are no face corner attributes, theoretically the map could be skipped and corner
746 * vertex indices could be interpolated immediately, but that isn't done for simplicity. */
747 Array<int3> corner_tris(tris_range.size());
748
749 if (!ngons.is_empty()) {
750 ngon::calc_corner_tris(positions,
751 src_faces,
752 src_corner_verts,
753 face_normals_if_worthwhile(src_mesh, ngons.size()),
754 ngons,
755 tris_by_ngon,
756 ngon_mode,
757 corner_tris.as_mutable_span().slice(ngon_tris_range));
758 }
759 if (!quads.is_empty()) {
760 quad::calc_corner_tris(positions,
761 src_faces,
762 src_corner_verts,
763 quads,
764 quad_mode,
765 corner_tris.as_mutable_span().slice(quad_tris_range));
766 }
767
769 src_mesh, src_faces, src_corner_verts, selection, corner_tris, memory);
770 const IndexRange unselected_range(tris_range.one_after_last(), unselected.size());
771
772 /* Create a mesh with no face corners.
773 * - We haven't yet counted the number of corners from unselected faces. Creating the final face
774 * offsets will give us that number anyway, so wait to create the edges.
775 * - The number of edges is a guess that doesn't include deduplication of new edges with
776 * existing edges. If those are found, the mesh will be resized later.
777 * - Don't create attributes to facilitate implicit sharing of the positions array. */
779 src_edges.size() + tri_edges_range.size(),
780 tris_range.size() + unselected.size(),
781 0);
782 BKE_mesh_copy_parameters_for_eval(mesh, &src_mesh);
783
784 /* Find the face corner ranges using the offsets array from the new mesh. That gives us the
785 * final number of face corners. */
787 src_faces, unselected, mesh->face_offsets_for_write());
788 mesh->corners_num = faces.total_size();
789 const OffsetIndices faces_unselected = faces.slice(unselected_range);
790
791 bke::MutableAttributeAccessor attributes = mesh->attributes_for_write();
792 attributes.add<int2>(".edge_verts", bke::AttrDomain::Edge, bke::AttributeInitConstruct());
793 attributes.add<int>(".corner_vert", bke::AttrDomain::Corner, bke::AttributeInitConstruct());
794 attributes.add<int>(".corner_edge", bke::AttrDomain::Corner, bke::AttributeInitConstruct());
795
796 MutableSpan<int2> edges_with_duplicates = mesh->edges_for_write();
797 MutableSpan<int> corner_verts = mesh->corner_verts_for_write();
798 MutableSpan<int> corner_edges = mesh->corner_edges_for_write();
799
801 src_corner_verts, corner_tris.as_span().cast<int>(), corner_verts.slice(tri_corners_range));
802
803 if (!ngons.is_empty()) {
804 ngon::calc_edges(src_faces,
805 src_corner_verts,
806 src_corner_edges,
807 ngons,
808 tris_by_ngon,
809 edges_by_ngon,
810 ngon_edges_range,
811 corner_tris.as_mutable_span().slice(ngon_tris_range),
812 edges_with_duplicates,
813 corner_edges.slice(ngon_corners_range));
814 }
815
816 if (!quads.is_empty()) {
817 quad::calc_edges(src_corner_edges,
818 corner_tris.as_mutable_span().slice(quad_tris_range),
819 corner_verts.slice(quad_corners_range),
820 quad_edges_range.start(),
821 edges_with_duplicates.slice(quad_edges_range),
822 corner_edges.slice(quad_corners_range));
823 }
824
826 src_mesh, src_edges, tri_edges_range, edges_with_duplicates, corner_edges);
827
828 edges_with_duplicates.take_front(src_edges.size()).copy_from(src_edges);
829
830 /* Vertex attributes are totally unaffected and can be shared with implicit sharing.
831 * Use the #CustomData API for simpler support for vertex groups. */
832 CustomData_merge(&src_mesh.vert_data, &mesh->vert_data, CD_MASK_MESH.vmask, mesh->verts_num);
833
834 for (auto &attribute : bke::retrieve_attributes_for_transfer(
835 src_attributes,
836 attributes,
838 bke::attribute_filter_with_skip_ref(attribute_filter, {".edge_verts"})))
839 {
840 attribute.dst.span.slice(src_edges_range).copy_from(attribute.src);
841 GMutableSpan new_data = attribute.dst.span.drop_front(src_edges.size());
842 /* It would be reasonable interpolate data from connected edges within each face.
843 * Currently the data from new edges is just set to the type's default value. */
844 const void *default_value = new_data.type().default_value();
845 new_data.type().fill_construct_n(default_value, new_data.data(), new_data.size());
846 attribute.dst.finish();
847 }
849 const Span src(
850 static_cast<const int *>(CustomData_get_layer(&src_mesh.edge_data, CD_ORIGINDEX)),
851 src_mesh.edges_num);
852 MutableSpan dst(static_cast<int *>(CustomData_add_layer(
854 mesh->edges_num);
855 dst.drop_front(src_edges.size()).fill(ORIGINDEX_NONE);
856 array_utils::copy(src, dst.slice(src_edges_range));
857 }
858
859 for (auto &attribute : bke::retrieve_attributes_for_transfer(
860 src_attributes, attributes, ATTR_DOMAIN_MASK_FACE, attribute_filter))
861 {
863 tris_by_ngon, ngons, attribute.src, attribute.dst.span.slice(ngon_tris_range));
864 quad::copy_quad_data_to_tris(attribute.src, quads, attribute.dst.span.slice(quad_tris_range));
865 array_utils::gather(attribute.src, unselected, attribute.dst.span.slice(unselected_range));
866 attribute.dst.finish();
867 }
869 const Span src(
870 static_cast<const int *>(CustomData_get_layer(&src_mesh.face_data, CD_ORIGINDEX)),
871 src_mesh.faces_num);
872 MutableSpan dst(static_cast<int *>(CustomData_add_layer(
874 mesh->faces_num);
875 bke::attribute_math::gather_to_groups(tris_by_ngon, ngons, src, dst.slice(ngon_tris_range));
876 quad::copy_quad_data_to_tris(src, quads, dst.slice(quad_tris_range));
877 array_utils::gather(src, unselected, dst.slice(unselected_range));
878 }
879
881 src_faces, faces_unselected, unselected, src_corner_verts, corner_verts);
883 src_faces, faces_unselected, unselected, src_corner_edges, corner_edges);
884 for (auto &attribute : bke::retrieve_attributes_for_transfer(
885 src_attributes,
886 attributes,
889 {".corner_vert", ".corner_edge"})))
890 {
892 src_faces, faces_unselected, unselected, attribute.src, attribute.dst.span);
893 bke::attribute_math::gather(attribute.src,
894 corner_tris.as_span().cast<int>(),
895 attribute.dst.span.slice(tri_corners_range));
896 attribute.dst.finish();
897 }
898
899 mesh->runtime->bounds_cache = src_mesh.runtime->bounds_cache;
900 copy_loose_vert_hint(src_mesh, *mesh);
901 copy_loose_edge_hint(src_mesh, *mesh);
902 if (src_mesh.no_overlapping_topology()) {
903 mesh->tag_overlapping_none();
904 }
906 return mesh;
907}
908
909} // namespace blender::geometry
@ ATTR_DOMAIN_MASK_EDGE
@ ATTR_DOMAIN_MASK_CORNER
@ ATTR_DOMAIN_MASK_FACE
CustomData interface, see also DNA_customdata_types.h.
const void * CustomData_get_layer(const CustomData *data, eCustomDataType type)
@ CD_CONSTRUCT
bool CustomData_merge(const CustomData *source, CustomData *dest, eCustomDataMask mask, int totelem)
#define ORIGINDEX_NONE
bool CustomData_has_layer(const CustomData *data, eCustomDataType type)
void * CustomData_add_layer(CustomData *data, eCustomDataType type, eCDAllocType alloctype, int totelem)
const CustomData_MeshMasks CD_MASK_MESH
void BKE_mesh_copy_parameters_for_eval(Mesh *me_dst, const Mesh *me_src)
bool BKE_mesh_is_valid(Mesh *mesh)
#define BLI_assert(a)
Definition BLI_assert.h:46
A min-heap / priority queue ADT.
void BLI_heap_free(Heap *heap, HeapFreeFP ptrfreefp) ATTR_NONNULL(1)
Definition BLI_heap.cc:191
Heap * BLI_heap_new_ex(unsigned int reserve_num) ATTR_WARN_UNUSED_RESULT
Definition BLI_heap.cc:171
int is_quad_flip_v3(const float v1[3], const float v2[3], const float v3[3], const float v4[3])
void axis_dominant_v3_to_m3_negate(float r_mat[3][3], const float normal[3])
void mul_v2_m3v3(float r[2], const float M[3][3], const float a[3])
MemArena * BLI_memarena_new(size_t bufsize, const char *name) ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL ATTR_NONNULL(2) ATTR_MALLOC
void BLI_memarena_free(MemArena *ma) ATTR_NONNULL(1)
void BLI_memarena_clear(MemArena *ma) ATTR_NONNULL(1)
#define BLI_SCOPED_DEFER(function_to_defer)
#define BLI_POLYFILL_ARENA_SIZE
void BLI_polyfill_calc(const float(*coords)[2], unsigned int coords_num, int coords_sign, unsigned int(*r_tris)[3])
#define BLI_POLYFILL_ALLOC_NGON_RESERVE
void BLI_polyfill_beautify(const float(*coords)[2], unsigned int coords_num, unsigned int(*tris)[3], struct MemArena *arena, struct Heap *eheap)
float BLI_polyfill_edge_calc_rotate_beauty__area(const float v1[3], const float v2[3], const float v3[3], const float v4[3], bool lock_degenerate)
unsigned int uint
#define UNLIKELY(x)
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
BMesh const char void * data
ATTR_WARN_UNUSED_RESULT const BMVert * v2
long long int int64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
Span< T > as_span() const
Definition BLI_array.hh:232
AttributeSet attributes
static IndexMask from_predicate(const IndexMask &universe, GrainSize grain_size, IndexMaskMemory &memory, Fn &&predicate)
static IndexMask from_union(const IndexMask &mask_a, const IndexMask &mask_b, IndexMaskMemory &memory)
Span< T > as_span() const
Definition BLI_array.hh:232
MutableSpan< T > as_mutable_span()
Definition BLI_array.hh:237
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:398
void fill_construct_n(const void *value, void *dst, int64_t n) const
const void * default_value() const
const CPPType & type() const
GMutableSpan drop_front(const int64_t n) const
const CPPType & type() const
static IndexMask from_predicate(const IndexMask &universe, GrainSize grain_size, IndexMaskMemory &memory, Fn &&predicate)
static IndexMask from_batch_predicate(const IndexMask &universe, GrainSize grain_size, IndexMaskMemory &memory, FunctionRef< int64_t(const IndexMaskSegment &universe_segment, IndexRangesBuilder< int16_t > &builder)> batch_predicate)
static IndexMask from_difference(const IndexMask &mask_a, const IndexMask &mask_b, IndexMaskMemory &memory)
constexpr int64_t first() const
constexpr int64_t one_after_last() const
constexpr IndexRange shift(int64_t n) const
constexpr int64_t last(const int64_t n=0) const
constexpr int64_t size() const
constexpr int64_t start() const
constexpr IndexRange take_back(int64_t n) const
constexpr IndexRange take_front(int64_t n) const
bool add_range(const T start, const T end)
constexpr int64_t size() const
Definition BLI_span.hh:493
constexpr MutableSpan slice(const int64_t start, const int64_t size) const
Definition BLI_span.hh:573
constexpr MutableSpan< NewT > cast() const
Definition BLI_span.hh:749
constexpr MutableSpan take_back(const int64_t n) const
Definition BLI_span.hh:640
constexpr T * data() const
Definition BLI_span.hh:539
constexpr MutableSpan drop_back(const int64_t n) const
Definition BLI_span.hh:618
constexpr void fill(const T &value) const
Definition BLI_span.hh:517
constexpr MutableSpan drop_front(const int64_t n) const
Definition BLI_span.hh:607
constexpr T & first() const
Definition BLI_span.hh:679
constexpr void copy_from(Span< T > values) const
Definition BLI_span.hh:739
constexpr T & last(const int64_t n=0) const
Definition BLI_span.hh:689
IndexRange index_range() const
Span< BaseT > base_span() const
constexpr Span drop_front(int64_t n) const
Definition BLI_span.hh:171
Span< NewT > constexpr cast() const
Definition BLI_span.hh:418
constexpr Span slice(int64_t start, int64_t size) const
Definition BLI_span.hh:137
constexpr const T * data() const
Definition BLI_span.hh:215
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr IndexRange index_range() const
Definition BLI_span.hh:401
constexpr bool is_empty() const
Definition BLI_span.hh:260
void reinitialize(const int64_t new_size)
Span< T > as_span() const
bool add(const StringRef attribute_id, const AttrDomain domain, const eCustomDataType data_type, const AttributeInit &initializer)
void foreach_index_optimized(Fn &&fn) const
IndexMask slice(IndexRange range) const
IndexMask complement(const IndexMask &universe, IndexMaskMemory &memory) const
void foreach_index(Fn &&fn) const
void foreach_segment(Fn &&fn) const
OffsetIndices slice(const IndexRange range) const
static ushort indices[]
static float verts[][3]
uint pos
void * MEM_calloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:123
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
ccl_device_inline float2 mask(const MaskType mask, const float2 a)
static char faces[256]
static void copy_loose_edge_hint(const Mesh &src, Mesh &dst)
static void copy_loose_vert_hint(const Mesh &src, Mesh &dst)
void copy(const GVArray &src, GMutableSpan dst, int64_t grain_size=4096)
void gather_group_to_group(const OffsetIndices< int > src_offsets, const OffsetIndices< int > dst_offsets, const IndexMask &selection, const Span< T > src, MutableSpan< T > dst)
void gather(const GVArray &src, const IndexMask &indices, GMutableSpan dst, int64_t grain_size=4096)
void gather(GSpan src, Span< int > map, GMutableSpan dst)
void gather_to_groups(OffsetIndices< int > dst_offsets, const IndexMask &src_selection, GSpan src, GMutableSpan dst)
void convert_to_static_type(const CPPType &cpp_type, const Func &func)
void gather_group_to_group(OffsetIndices< int > src_offsets, OffsetIndices< int > dst_offsets, const IndexMask &selection, GSpan src, GMutableSpan dst)
float3 face_normal_calc(Span< float3 > vert_positions, Span< int > face_verts)
int face_triangles_num(const int face_size)
Definition BKE_mesh.hh:334
GroupedSpan< int > build_vert_to_edge_map(Span< int2 > edges, int verts_num, Array< int > &r_offsets, Array< int > &r_indices)
Vector< AttributeTransferData > retrieve_attributes_for_transfer(const AttributeAccessor src_attributes, MutableAttributeAccessor dst_attributes, AttrDomainMask domain_mask, const AttributeFilter &attribute_filter={})
auto attribute_filter_with_skip_ref(AttributeFilter filter, const Span< StringRef > skip)
Mesh * mesh_new_no_attributes(int verts_num, int edges_num, int faces_num, int corners_num)
static IndexMask calc_unselected_faces(const Mesh &mesh, const OffsetIndices< int > src_faces, const Span< int > src_corner_verts, const IndexMask &selection, const Span< int3 > corner_tris, IndexMaskMemory &memory)
static std::optional< int > find_edge_duplicate(const GroupedSpan< int > vert_to_edge_map, const Span< int2 > edges, const OrderedEdge edge)
static int calc_new_edges(const Mesh &src_mesh, const Span< int2 > src_edges, const IndexRange new_edges_range, MutableSpan< int2 > edges, MutableSpan< int > corner_edges)
static GroupedSpan< int > build_vert_to_tri_map(const int verts_num, const Span< int3 > vert_tris, Array< int > &r_offsets, Array< int > &r_indices)
static void calc_corner_tris(const Span< float3 > positions, const OffsetIndices< int > src_faces, const Span< int > src_corner_verts, const Span< float3 > face_normals, const IndexMask &ngons, const OffsetIndices< int > tris_by_ngon, const TriangulateNGonMode ngon_mode, MutableSpan< int3 > corner_tris)
static void calc_inner_tri_edges(const IndexRange src_face, const Span< int > src_corner_verts, const Span< int > src_corner_edges, const Span< int3 > corner_tris, const int edges_start, MutableSpan< int > corner_edges, VectorSet< OrderedEdge > &deduplication)
static void calc_edges(const OffsetIndices< int > src_faces, const Span< int > src_corner_verts, const Span< int > src_corner_edges, const IndexMask &ngons, const OffsetIndices< int > tris_by_ngon, const OffsetIndices< int > edges_by_ngon, const IndexRange ngon_edges_range, const Span< int3 > corner_tris, MutableSpan< int2 > edges, MutableSpan< int > corner_edges)
static OffsetIndices< int > calc_edges_by_ngon(const OffsetIndices< int > src_faces, const IndexMask &selection, MutableSpan< int > edge_offset_data)
static OffsetIndices< int > calc_tris_by_ngon(const OffsetIndices< int > src_faces, const IndexMask &ngons, MutableSpan< int > face_offset_data)
static void copy_quad_data_to_tris(const Span< T > src, const IndexMask &quads, MutableSpan< T > dst)
static void calc_corner_tris(const Span< int > face_offsets, const Span< QuadDirection > directions, MutableSpan< int3 > corner_tris)
static void calc_edges(const Span< int > quad_corner_verts, MutableSpan< int2 > new_quad_edges)
static void calc_quad_directions(const Span< float3 > positions, const Span< int > face_offsets, const Span< int > corner_verts, const TriangulateQuadMode quad_mode, MutableSpan< QuadDirection > directions)
static void calc_quad_corner_edges(const Span< int > src_corner_edges, const Span< int3 > corner_tris, const int edges_start, MutableSpan< int > corner_edges)
static QuadDirection calc_quad_direction_beauty(const float3 &v0, const float3 &v1, const float3 &v2, const float3 &v3)
std::optional< Mesh * > mesh_triangulate(const Mesh &src_mesh, const IndexMask &selection, TriangulateNGonMode ngon_mode, TriangulateQuadMode quad_mode, const bke::AttributeFilter &attribute_filter)
static void copy_loose_edge_hint(const Mesh &src, Mesh &dst)
static void gather(const Span< int > src, const Span< int16_t > indices, MutableSpan< int > dst)
static OffsetIndices< int > gather_selected_offsets(const OffsetIndices< int > src_offsets, const IndexMaskSegment selection, MutableSpan< int > dst_offsets)
static Span< float3 > face_normals_if_worthwhile(const Mesh &src_mesh, const int selection_size)
static Span< int > gather_or_reference(const Span< int > src, const Span< int16_t > indices, Vector< int > &dst)
static OffsetIndices< int > calc_face_offsets(const OffsetIndices< int > src_faces, const IndexMask &unselected, MutableSpan< int > offsets)
static void copy_loose_vert_hint(const Mesh &src, Mesh &dst)
T distance_squared(const VecBase< T, Size > &a, const VecBase< T, Size > &b)
OffsetIndices< int > accumulate_counts_to_offsets(MutableSpan< int > counts_to_offsets, int start_offset=0)
void fill_constant_group_size(int size, int start_offset, MutableSpan< int > offsets)
void build_reverse_offsets(Span< int > indices, MutableSpan< int > offsets)
OffsetIndices< int > gather_selected_offsets(OffsetIndices< int > src_offsets, const IndexMask &selection, int start_offset, MutableSpan< int > dst_offsets)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
bool non_empty_is_range(const Span< T > indices)
IndexRange non_empty_as_range(const Span< T > indices)
VecBase< int32_t, 2 > int2
VecBase< int32_t, 3 > int3
MatBase< float, 3, 3 > float3x3
VecBase< float, 3 > float3
int corners_num
CustomData edge_data
int edges_num
MeshRuntimeHandle * runtime
CustomData face_data
CustomData vert_data
int faces_num
int verts_num
const c_style_mat & ptr() const
i
Definition text_draw.cc:230
PointerRNA * ptr
Definition wm_files.cc:4227