Blender V5.0
mesh_mapping.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
11
12#include <algorithm>
13
14#include "MEM_guardedalloc.h"
15
16#include "atomic_ops.h"
17
18#include "BLI_array.hh"
19#include "BLI_bitmap.h"
20#include "BLI_function_ref.hh"
21#include "BLI_math_geom.h"
22#include "BLI_math_vector.h"
23#include "BLI_task.hh"
24#include "BLI_utildefines.h"
25
26#include "BKE_customdata.hh"
27#include "BKE_mesh.hh"
28#include "BKE_mesh_mapping.hh"
29#include "BLI_memarena.h"
30
31#include "BLI_strict_flags.h" /* IWYU pragma: keep. Keep last. */
32
33/* -------------------------------------------------------------------- */
36
/* Build a UV vertex map: for every mesh vertex, a linked list of all face corners using it,
 * where corners whose UVs coincide (within `limit`, and optionally with matching winding) are
 * grouped into runs delimited by the `separate` flag.
 * NOTE(review): Doxygen extraction dropped this function's first signature line(s); per the
 * cross-reference this is `BKE_mesh_uv_vert_map_create()`, and `faces`/`uv_map` parameters are
 * declared on the elided lines — confirm against the real source. */
38 blender::Span<int> corner_verts,
40 int verts_num,
41 const blender::float2 &limit,
42 bool use_winding)
43{
44 using namespace blender;
45 /* NOTE: N-gon version WIP, based on #BM_uv_vert_map_create. */
46 if (faces.is_empty()) {
47 return nullptr;
48 }
49 const int corners_num = faces.total_size();
50
51 UvVertMap *vmap = MEM_callocN<UvVertMap>("UvVertMap");
52 UvMapVert *buf = vmap->buf = MEM_calloc_arrayN<UvMapVert>(size_t(corners_num), "UvMapVert");
53 vmap->vert = MEM_calloc_arrayN<UvMapVert *>(size_t(verts_num), "UvMapVert*");
54
55 if (!vmap->vert || !vmap->buf) {
/* NOTE(review): the elided line 56 presumably frees `vmap` before returning; as shown here the
 * error path would leak — verify against the real source. */
57 return nullptr;
58 }
59
/* Pre-compute per-face UV winding (true when the face's UVs wind clockwise), so corners of
 * oppositely-wound faces are never merged below. */
60 Array<bool> winding;
61 if (use_winding) {
62 winding = Array<bool>(faces.size(), false);
63 threading::parallel_for(faces.index_range(), 1024, [&](const IndexRange range) {
64 for (const int64_t face : range) {
65 const Span<float2> face_uvs = uv_map.slice(faces[face]);
66 winding[face] = cross_poly_v2(reinterpret_cast<const float (*)[2]>(face_uvs.data()),
67 uint(faces[face].size())) < 0.0f;
68 }
69 });
70 }
71
/* Fill one UvMapVert per corner, prepending each onto its vertex's linked list. */
72 for (const int64_t a : faces.index_range()) {
73 const IndexRange face = faces[a];
74 for (const int64_t i : face.index_range()) {
75 buf->loop_of_face_index = ushort(i);
76 buf->face_index = uint(a);
77 buf->separate = false;
78 buf->next = vmap->vert[corner_verts[face[i]]];
79 vmap->vert[corner_verts[face[i]]] = buf;
80 buf++;
81 }
82 }
83
84 /* sort individual uvs for each vert */
85 for (const int64_t vert : IndexRange(verts_num)) {
86 UvMapVert *newvlist = nullptr, *vlist = vmap->vert[vert];
87 UvMapVert *iterv, *v, *lastv, *next;
88 const float *uv, *uv2;
89 float uvdiff[2];
90
/* Repeatedly pull one corner off `vlist`, then splice every remaining corner whose UV lies
 * within `limit` of it onto the front of `newvlist`, so matching corners end up adjacent. */
91 while (vlist) {
92 v = vlist;
93 vlist = vlist->next;
94 v->next = newvlist;
95 newvlist = v;
96
97 uv = uv_map[faces[v->face_index].start() + v->loop_of_face_index];
98 lastv = nullptr;
99 iterv = vlist;
100
101 while (iterv) {
102 next = iterv->next;
103
104 uv2 = uv_map[faces[iterv->face_index].start() + iterv->loop_of_face_index];
/* NOTE(review): `uvdiff` is computed but never read; the comparison below uses the
 * per-component `fabsf` deltas directly. */
105 sub_v2_v2v2(uvdiff, uv2, uv);
106
107 if (fabsf(uv[0] - uv2[0]) < limit[0] && fabsf(uv[1] - uv2[1]) < limit[1] &&
108 (!use_winding || winding[iterv->face_index] == winding[v->face_index]))
109 {
110 if (lastv) {
111 lastv->next = next;
112 }
113 else {
114 vlist = next;
115 }
116 iterv->next = newvlist;
117 newvlist = iterv;
118 }
119 else {
120 lastv = iterv;
121 }
122
123 iterv = next;
124 }
125
/* Head of each merged run is tagged so callers can detect run boundaries. */
126 newvlist->separate = true;
127 }
128
129 vmap->vert[vert] = newvlist;
130 }
131
132 return vmap;
133}
134
/* Return the linked list of UV-map corners for mesh vertex `v` (signature line elided by the
 * extraction; per the cross-reference: BKE_mesh_uv_vert_map_get_vert(UvVertMap *vmap, uint v)). */
136{
137 return vmap->vert[v];
138}
139
/* Free a UvVertMap and both of its owned arrays; safe to call with nullptr.
 * (Signature line elided by the extraction — this is BKE_mesh_uv_vert_map_free().) */
141{
142 if (vmap) {
143 if (vmap->vert) {
144 MEM_freeN(vmap->vert);
145 }
146 if (vmap->buf) {
147 MEM_freeN(vmap->buf);
148 }
149 MEM_freeN(vmap);
150 }
151}
152
/* Build a vertex -> triangle map: for each vertex, the indices of all `corner_tris` that use it.
 * Classic two-pass counting-sort layout: count users, carve `indices` into per-vertex slices,
 * then fill. Caller owns both `*r_map` and the backing `*r_mem` allocation.
 * NOTE(review): the function-name line was elided by the extraction; first visible parameter is
 * `r_mem` — confirm the exact name against the real source. */
154 int **r_mem,
155 const int totvert,
156 const blender::int3 *corner_tris,
157 const int tris_num,
158 const int *corner_verts,
159 const int /*corners_num*/)
160{
161 MeshElemMap *map = MEM_calloc_arrayN<MeshElemMap>(size_t(totvert), __func__);
162 int *indices = MEM_malloc_arrayN<int>(size_t(tris_num) * 3, __func__);
163 int *index_step;
164 int i;
165
166 /* count face users */
167 for (i = 0; i < tris_num; i++) {
168 for (int j = 3; j--;) {
169 map[corner_verts[corner_tris[i][j]]].count++;
170 }
171 }
172
173 /* create offsets */
174 index_step = indices;
175 for (i = 0; i < totvert; i++) {
176 map[i].indices = index_step;
177 index_step += map[i].count;
178
179 /* re-count, using this as an index below */
180 map[i].count = 0;
181 }
182
183 /* assign corner_tri-edge users */
184 for (i = 0; i < tris_num; i++) {
185 for (int j = 3; j--;) {
186 MeshElemMap *map_ele = &map[corner_verts[corner_tris[i][j]]];
187 map_ele->indices[map_ele->count++] = i;
188 }
189 }
190
191 *r_map = map;
192 *r_mem = indices;
193}
194
/* Build a source -> final mapping from an `origindex` layer: for each of `totsource` original
 * elements, the indices of all final elements that derive from it (entries whose origindex is
 * ORIGINDEX_NONE are skipped). Same count/offset/fill pattern as the vertex map above.
 * (Function-name line elided; per the cross-reference this is BKE_mesh_origindex_map_create().) */
196 int **r_mem,
197 const int totsource,
198 const int *final_origindex,
199 const int totfinal)
200{
201 MeshElemMap *map = MEM_calloc_arrayN<MeshElemMap>(size_t(totsource), __func__);
202 int *indices = MEM_malloc_arrayN<int>(size_t(totfinal), __func__);
203 int *index_step;
204 int i;
205
206 /* count face users */
207 for (i = 0; i < totfinal; i++) {
208 if (final_origindex[i] != ORIGINDEX_NONE) {
209 BLI_assert(final_origindex[i] < totsource);
210 map[final_origindex[i]].count++;
211 }
212 }
213
214 /* create offsets */
215 index_step = indices;
216 for (i = 0; i < totsource; i++) {
217 map[i].indices = index_step;
218 index_step += map[i].count;
219
220 /* re-count, using this as an index below */
221 map[i].count = 0;
222 }
223
224 /* Assign face-tessellation users. */
225 for (i = 0; i < totfinal; i++) {
226 if (final_origindex[i] != ORIGINDEX_NONE) {
227 MeshElemMap *map_ele = &map[final_origindex[i]];
228 map_ele->indices[map_ele->count++] = i;
229 }
230 }
231
232 *r_map = map;
233 *r_mem = indices;
234}
235
/* Build a face -> corner-triangle map. Offsets are derived analytically (a face with N corners
 * tessellates into N-2 triangles), so no counting pass is needed; the fill pass relies on
 * `corner_tri_faces` listing triangles in face order.
 * (Name/`faces` parameter lines elided; this is BKE_mesh_origindex_map_create_corner_tri().) */
237 int **r_mem,
239 const int *corner_tri_faces,
240 const int corner_tris_num)
241{
242 MeshElemMap *map = MEM_calloc_arrayN<MeshElemMap>(size_t(faces.size()), __func__);
243 int *indices = MEM_malloc_arrayN<int>(size_t(corner_tris_num), __func__);
244 int *index_step;
245
246 /* create offsets */
247 index_step = indices;
248 for (const int64_t i : faces.index_range()) {
249 map[i].indices = index_step;
250 index_step += blender::bke::mesh::face_triangles_num(int(faces[i].size()));
251 }
252
253 /* Assign face-tessellation users. */
254 for (int i = 0; i < corner_tris_num; i++) {
255 MeshElemMap *map_ele = &map[corner_tri_faces[i]];
/* `count` doubles as the per-face write cursor (starts at zero from calloc). */
256 map_ele->indices[map_ele->count++] = i;
257 }
258
259 *r_map = map;
260 *r_mem = indices;
261}
262
263namespace blender::bke::mesh {
264
/* Build the offsets array (size `items_num + 1`) for a reverse mapping of `indices`.
 * NOTE(review): the extraction elided line 268 between the allocation and the return — that line
 * performs the counting/accumulation that turns per-item counts into offsets (likely via
 * offset_indices utilities). As shown, the function would return all zeros; confirm against the
 * real source. */
265static Array<int> create_reverse_offsets(const Span<int> indices, const int items_num)
266{
267 Array<int> offsets(items_num + 1, 0);
269 return offsets;
270}
271
/* Sort the indices inside every group ascending, parallelized over groups.
 * NOTE(review): the elided line 274 declares the final parameter (a `MutableSpan<int> indices`,
 * judging by the body) — confirm against the real source. */
272static void sort_small_groups(const OffsetIndices<int> groups,
273 const int grain_size,
275{
276 threading::parallel_for(groups.index_range(), grain_size, [&](const IndexRange range) {
277 for (const int64_t index : range) {
278 MutableSpan<int> group = indices.slice(groups[index]);
279 std::sort(group.begin(), group.end());
280 }
281 });
282}
283
/* Invert a many-to-one mapping: given `group_indices[i] = g`, produce an array where each group's
 * slice (per `offsets`) lists, sorted ascending, all `i` that map to it.
 * (The first signature line — function name and `group_indices` parameter — was elided by the
 * extraction; this is reverse_indices_in_groups().) */
285 const OffsetIndices<int> offsets)
286{
287 if (group_indices.is_empty()) {
288 return {};
289 }
290 BLI_assert(*std::max_element(group_indices.begin(), group_indices.end()) < offsets.size());
291 BLI_assert(*std::min_element(group_indices.begin(), group_indices.end()) >= 0);
292
293 /* `counts` keeps track of how many elements have been added to each group, and is incremented
294 * atomically by many threads in parallel. `calloc` can be measurably faster than a parallel fill
295 * of zero. Alternatively the offsets could be copied and incremented directly, but the cost of
296 * the copy is slightly higher than the cost of `calloc`. */
297 int *counts = MEM_calloc_arrayN<int>(size_t(offsets.size()), __func__);
298 BLI_SCOPED_DEFER([&]() { MEM_freeN(counts); })
299 Array<int> results(group_indices.size());
300 threading::parallel_for(group_indices.index_range(), 1024, [&](const IndexRange range) {
301 for (const int64_t i : range) {
302 const int group_index = group_indices[i];
/* Atomic claim of the next free slot in this group's slice. */
303 const int index_in_group = atomic_fetch_and_add_int32(&counts[group_index], 1);
304 results[offsets[group_index][index_in_group]] = int(i);
305 }
306 });
/* Parallel scatter produces nondeterministic intra-group order; sort for stable output. */
307 sort_small_groups(offsets, 1024, results);
308 return results;
309}
310
311/* A version of #reverse_indices_in_groups that stores face indices instead of corner indices. */
/* NOTE(review): the elided line 312 carries the function name and first parameter (`groups`,
 * used in the loop below) — confirm against the real source. */
313 const Span<int> group_to_elem,
314 const OffsetIndices<int> offsets,
315 MutableSpan<int> results)
316{
317 int *counts = MEM_calloc_arrayN<int>(size_t(offsets.size()), __func__);
318 BLI_SCOPED_DEFER([&]() { MEM_freeN(counts); })
319 threading::parallel_for(groups.index_range(), 1024, [&](const IndexRange range) {
320 for (const int64_t face : range) {
321 for (const int elem : group_to_elem.slice(groups[face])) {
/* Atomic claim of the next free slot for this element's group. */
322 const int index_in_group = atomic_fetch_and_add_int32(&counts[elem], 1);
323 results[offsets[elem][index_in_group]] = int(face);
324 }
325 }
326 });
/* Restore deterministic (ascending) order after the parallel scatter. */
327 sort_small_groups(offsets, 1024, results);
328}
329
330static GroupedSpan<int> gather_groups(const Span<int> group_indices,
331 const int groups_num,
332 Array<int> &r_offsets,
333 Array<int> &r_indices)
334{
335 r_offsets = create_reverse_offsets(group_indices, groups_num);
336 r_indices = reverse_indices_in_groups(group_indices, r_offsets.as_span());
337 return {OffsetIndices<int>(r_offsets), r_indices};
338}
339
346
/* Build a vertex -> edge map (for each vertex, all edges touching it).
 * NOTE(review): the elided line 347 carries the function name and `edges` parameter; per the
 * forward structure this is build_vert_to_edge_map() — confirm against the real source. */
348 const int verts_num,
349 Array<int> &r_offsets,
350 Array<int> &r_indices)
351{
/* Each int2 edge contributes its two vertices; casting to a flat int span counts both ends. */
352 r_offsets = create_reverse_offsets(edges.cast<int>(), verts_num);
353 const OffsetIndices<int> offsets(r_offsets);
354 r_indices.reinitialize(offsets.total_size());
355
356 /* Version of #reverse_indices_in_groups that accounts for storing two indices for each edge. */
357 int *counts = MEM_calloc_arrayN<int>(size_t(offsets.size()), __func__);
358 BLI_SCOPED_DEFER([&]() { MEM_freeN(counts); })
359 threading::parallel_for(edges.index_range(), 1024, [&](const IndexRange range) {
360 for (const int64_t edge : range) {
361 for (const int vert : {edges[edge][0], edges[edge][1]}) {
362 const int index_in_group = atomic_fetch_and_add_int32(&counts[vert], 1);
363 r_indices[offsets[vert][index_in_group]] = int(edge);
364 }
365 }
366 });
367 sort_small_groups(offsets, 1024, r_indices);
368 return {offsets, r_indices};
369}
370
/* Fill `face_indices` so that each vertex's slice (per `offsets`) lists the faces using it.
 * Thin wrapper over the face-index variant of the reverse-mapping helper.
 * (Name/`faces` parameter line elided; per the call below this is build_vert_to_face_indices().) */
372 const Span<int> corner_verts,
373 const OffsetIndices<int> offsets,
374 MutableSpan<int> face_indices)
375{
376 reverse_group_indices_in_groups(faces, corner_verts, offsets, face_indices);
377}
378
/* Build a vertex -> face map (for each vertex, all faces containing it), returning views over
 * the caller-owned `r_offsets`/`r_indices` storage.
 * (Name/`faces` parameter line elided; this is build_vert_to_face_map().) */
380 const Span<int> corner_verts,
381 const int verts_num,
382 Array<int> &r_offsets,
383 Array<int> &r_indices)
384{
385 r_offsets = create_reverse_offsets(corner_verts, verts_num);
/* Total corner count is the last accumulated offset. */
386 r_indices.reinitialize(r_offsets.last());
387 build_vert_to_face_indices(faces, corner_verts, OffsetIndices<int>(r_offsets), r_indices);
388 return {OffsetIndices<int>(r_offsets), r_indices};
389}
390
/* For each vertex, list the corners referencing it (given precomputed `offsets`).
 * (Name/`corner_verts` parameter line elided; this is build_vert_to_corner_indices().) */
392 const OffsetIndices<int> offsets)
393{
394 return reverse_indices_in_groups(corner_verts, offsets);
395}
396
/* Build a vertex -> corner map via the generic gather helper.
 * (Name/`corner_verts` parameter line elided; this is build_vert_to_corner_map().) */
398 const int verts_num,
399 Array<int> &r_offsets,
400 Array<int> &r_indices)
401{
402 return gather_groups(corner_verts, verts_num, r_offsets, r_indices);
403}
404
/* Build an edge -> corner map via the generic gather helper.
 * (Name/`corner_edges` parameter line elided; presumably build_edge_to_corner_map() — confirm
 * against the real source.) */
406 const int edges_num,
407 Array<int> &r_offsets,
408 Array<int> &r_indices)
409{
410 return gather_groups(corner_edges, edges_num, r_offsets, r_indices);
411}
412
/* Build an edge -> face map (for each edge, all faces using it), returning views over the
 * caller-owned storage.
 * (Name/`faces` parameter line elided; presumably build_edge_to_face_map() — confirm against
 * the real source.) */
414 const Span<int> corner_edges,
415 const int edges_num,
416 Array<int> &r_offsets,
417 Array<int> &r_indices)
418{
419 r_offsets = create_reverse_offsets(corner_edges, edges_num);
420 r_indices.reinitialize(r_offsets.last());
421 reverse_group_indices_in_groups(faces, corner_edges, OffsetIndices<int>(r_offsets), r_indices);
422 return {OffsetIndices<int>(r_offsets), r_indices};
423}
424
425} // namespace blender::bke::mesh
426
428
429/* -------------------------------------------------------------------- */
433
438 blender::FunctionRef<bool(int face_index,
439 int corner,
440 int edge_index,
441 int edge_user_count,
442 const blender::Span<int> edge_face_map_elem)>;
443
/* Accumulate into `r_bit_face_group_mask` the bit-flag group ids already assigned to faces in
 * `faces_from_item`, so the caller can pick an unused bit for the current group. Faces that are
 * unassigned (0), in the current group, or marked overflowed are skipped.
 * (The elided line 444 carries this helper's name — confirm against the real source.) */
445 const int *face_groups,
446 const blender::Span<int> faces_from_item,
447 const int face_group_id,
448 const int face_group_id_overflowed,
449 int &r_bit_face_group_mask)
450{
451 /* Find neighbor faces (either from a boundary edge, or a boundary vertex) that already have a
452 * group assigned, and exclude these groups' bits from the available set of groups bits that can
453 * be assigned to the currently processed group. */
454 for (const int face_idx : faces_from_item) {
455 int bit = face_groups[face_idx];
456 if (!ELEM(bit, 0, face_group_id, face_group_id_overflowed) && !(r_bit_face_group_mask & bit)) {
457 r_bit_face_group_mask |= bit;
458 }
459 }
460}
461
/* Core island/smooth-group flood fill. Starting from each unassigned face, grows a group across
 * edges that `edge_boundary_check` does not mark as boundaries. In `use_bitflags` mode each group
 * gets a single power-of-two id chosen to differ from neighboring groups across boundaries (for
 * legacy per-face smooth-group bitflags); otherwise groups get sequential ids starting at 1.
 * Optionally records which edges were boundaries in `r_edge_boundaries`.
 * NOTE(review): Doxygen elided several lines here: the `faces` parameter (line 480) and the call
 * names on lines 527/533, 593 and 613 (presumably the vert/edge-to-face map builders and the
 * bit-mask exclusion helper above) — confirm against the real source. */
478static void face_edge_loop_islands_calc(const int totedge,
479 const int totvert,
481 const blender::Span<int> corner_edges,
482 const blender::Span<int> corner_verts,
483 blender::GroupedSpan<int> edge_face_map,
484 blender::GroupedSpan<int> vert_face_map,
485 const bool use_bitflags,
486 const bool use_boundary_vertices_for_bitflags,
487 MeshRemap_CheckIslandBoundary edge_boundary_check,
488 int **r_face_groups,
489 int *r_totgroup,
490 BLI_bitmap **r_edge_boundaries,
491 int *r_totedgeboundaries)
492{
493 int *face_groups;
494 int *face_stack;
495
496 BLI_bitmap *edge_boundaries = nullptr;
497 int num_edgeboundaries = 0;
498
499 int face_prev = 0;
500 constexpr int temp_face_group_id = 3; /* Placeholder value. */
501
502 /* For bitflags groups, group we could not find any available bit for, will be reset to 0 at the
503 * end. */
504 constexpr int face_group_id_overflowed = 5;
505
506 int tot_group = 0;
507 bool group_id_overflow = false;
508
/* Trivial early exit: no faces means no groups (and no boundary edges). */
509 if (faces.is_empty()) {
510 *r_totgroup = 0;
511 *r_face_groups = nullptr;
512 if (r_edge_boundaries) {
513 *r_edge_boundaries = nullptr;
514 *r_totedgeboundaries = 0;
515 }
516 return;
517 }
518
519 if (r_edge_boundaries) {
520 edge_boundaries = BLI_BITMAP_NEW(totedge, __func__);
521 *r_totedgeboundaries = 0;
522 }
523
/* Build topology maps locally when the caller did not supply them. */
524 blender::Array<int> edge_to_face_src_offsets;
525 blender::Array<int> edge_to_face_src_indices;
526 if (edge_face_map.is_empty()) {
528 faces, corner_edges, totedge, edge_to_face_src_offsets, edge_to_face_src_indices);
529 }
530 blender::Array<int> vert_to_face_src_offsets;
531 blender::Array<int> vert_to_face_src_indices;
532 if (use_bitflags && vert_face_map.is_empty()) {
534 faces, corner_verts, totvert, vert_to_face_src_offsets, vert_to_face_src_indices);
535 }
536
/* 0 means "unassigned"; calloc gives that initial state for every face. */
537 face_groups = MEM_calloc_arrayN<int>(size_t(faces.size()), __func__);
538 face_stack = MEM_malloc_arrayN<int>(size_t(faces.size()), __func__);
539
/* Outer loop: one iteration per discovered group. */
540 while (true) {
541 int face;
542 int bit_face_group_mask = 0;
543 int face_group_id;
544 int ps_curr_idx = 0, ps_end_idx = 0; /* stack indices */
545
/* Find the next face not yet assigned to any group. */
546 for (face = face_prev; face < int(faces.size()); face++) {
547 if (face_groups[face] == 0) {
548 break;
549 }
550 }
551
552 if (face == int(faces.size())) {
553 /* all done */
554 break;
555 }
556
/* In bitflags mode the real id is only decided after the whole group is known, so a
 * placeholder id is used during the fill. */
557 face_group_id = use_bitflags ? temp_face_group_id : ++tot_group;
558
559 /* start searching from here next time */
560 face_prev = face + 1;
561
562 face_groups[face] = face_group_id;
563 face_stack[ps_end_idx++] = face;
564
/* Inner flood fill over the explicit stack. */
565 while (ps_curr_idx != ps_end_idx) {
566 face = face_stack[ps_curr_idx++];
567 BLI_assert(face_groups[face] == face_group_id);
568
569 for (const int64_t loop : faces[face]) {
570 const int edge = corner_edges[loop];
571 /* loop over face users */
572 const blender::Span<int> map_ele = edge_face_map[edge];
573 const int *p = map_ele.data();
574 int i = int(map_ele.size());
575 if (!edge_boundary_check(face, int(loop), edge, i, map_ele)) {
576 for (; i--; p++) {
577 /* if we meet other non initialized its a bug */
578 BLI_assert(ELEM(face_groups[*p], 0, face_group_id));
579
580 if (face_groups[*p] == 0) {
581 face_groups[*p] = face_group_id;
582 face_stack[ps_end_idx++] = *p;
583 }
584 }
585 }
586 else {
587 if (edge_boundaries && !BLI_BITMAP_TEST(edge_boundaries, edge)) {
588 BLI_BITMAP_ENABLE(edge_boundaries, edge);
589 num_edgeboundaries++;
590 }
591 if (use_bitflags) {
592 /* Exclude bits used in other groups sharing the same boundary edge. */
594 map_ele,
595 face_group_id,
596 face_group_id_overflowed,
597 bit_face_group_mask);
598 if (use_boundary_vertices_for_bitflags) {
599 /* Exclude bits used in other groups sharing the same boundary vertex. */
600 /* NOTE: Checking one vertex for each edge (the corner vertex) should be enough:
601 * - Thanks to winding, a fully boundary vertex (i.e. a vertex for which at least
602 * two of the adjacent edges in the same group are boundary ones) will be
603 * processed by at least one of the edges/corners. If not when processing the
604 * first face's corner, then when processing the other face's corner in the same
605 * group.
606 * - Isolated boundary edges (i.e. boundary edges only connected to faces of the
607 * same group) cannot be represented by bit-flags groups, at least not with
608 * current algorithm (they cannot define more than one group).
609 * - Inversions of winding (aka flipped faces) always generate boundary edges in
610 * current use-case (smooth groups), i.e. two faces with opposed winding cannot
611 * belong to the same group. */
612 const int vert = corner_verts[loop];
614 vert_face_map[vert],
615 face_group_id,
616 face_group_id_overflowed,
617 bit_face_group_mask);
618 }
619 }
620 }
621 }
622 }
623 /* And now, we have all our face from current group in face_stack
624 * (from 0 to (ps_end_idx - 1)),
625 * as well as all smoothgroups bits we can't use in bit_face_group_mask.
626 */
627 if (use_bitflags) {
628 int i, *p, gid_bit = 0;
629 face_group_id = 1;
630
631 /* Find first bit available! */
632 for (; (face_group_id & bit_face_group_mask) && (gid_bit < 32); gid_bit++) {
633 face_group_id <<= 1; /* will 'overflow' on last possible iteration. */
634 }
635 if (UNLIKELY(gid_bit > 31)) {
636 /* All bits used in contiguous smooth groups, not much to do.
637 *
638 * NOTE: If only considering boundary edges, this is *very* unlikely to happen.
639 * Theoretically, four groups are enough, this is probably not achievable with such a
640 * simple algorithm, but 32 groups should always be more than enough.
641 *
642 * When also considering boundary vertices (which is the case currently, see comment
643 * above), a fairly simple fan case with over 30 faces all belonging to different groups
644 * will be enough to cause an overflow.
645 */
/* NOTE(review): typo in the message below — "will me marked" should read "will be marked".
 * Left untouched here since this is a runtime string. */
646 printf(
647 "Warning, could not find an available id for current smooth group, faces will me "
648 "marked "
649 "as out of any smooth group...\n");
650
651 /* Can't use 0, will have to set them to this value later. */
652 face_group_id = face_group_id_overflowed;
653
654 group_id_overflow = true;
655 }
656 tot_group = std::max(gid_bit, tot_group);
657 /* And assign the final smooth group id to that face group! */
658 for (i = ps_end_idx, p = face_stack; i--; p++) {
659 face_groups[*p] = face_group_id;
660 }
661 }
662 }
663
664 if (use_bitflags) {
665 /* used bits are zero-based. */
666 tot_group++;
667 }
668
/* Overflowed groups fall back to id 0, which counts as one extra group. */
669 if (UNLIKELY(group_id_overflow)) {
670 int i = int(faces.size()), *gid = face_groups;
671 for (; i--; gid++) {
672 if (*gid == face_group_id_overflowed) {
673 *gid = 0;
674 }
675 }
676 /* Using 0 as group id adds one more group! */
677 tot_group++;
678 }
679
680 MEM_freeN(face_stack);
681
/* Ownership of `face_groups` (and `edge_boundaries` if requested) passes to the caller. */
682 *r_totgroup = tot_group;
683 *r_face_groups = face_groups;
684 if (r_edge_boundaries) {
685 *r_edge_boundaries = edge_boundaries;
686 *r_totedgeboundaries = num_edgeboundaries;
687 }
688}
689
/* Shared implementation for the two public smooth-group entry points: defines the "sharp edge"
 * boundary predicate and delegates the flood fill to the island calculation above. Returns a
 * caller-owned per-face group-id array.
 * NOTE(review): the `faces` parameter line (692) and the callee name on line 723 (the
 * face_edge_loop_islands_calc call) were elided by the extraction. */
690static int *mesh_calc_smoothgroups(const int edges_num,
691 const int verts_num,
693 const blender::Span<int> corner_edges,
694 const blender::Span<int> corner_verts,
695 const blender::Span<bool> sharp_edges,
696 const blender::Span<bool> sharp_faces,
697 int *r_totgroup,
698 const bool use_bitflags,
699 const bool use_boundary_vertices_for_bitflags)
700{
701 int *face_groups = nullptr;
702
/* Empty `sharp_faces` span means "all faces smooth". */
703 auto face_is_smooth = [&](const int i) { return sharp_faces.is_empty() || !sharp_faces[i]; };
704
705 auto face_is_island_boundary_smooth = [&](const int face_index,
706 const int /*corner*/,
707 const int edge_index,
708 const int edge_user_count,
709 const blender::Span<int> edge_face_map_elem) {
710 /* Edge is sharp if one of its faces is flat, or edge itself is sharp,
711 * or edge is not used by exactly two faces. */
712 if (face_is_smooth(face_index) && !(!sharp_edges.is_empty() && sharp_edges[edge_index]) &&
713 (edge_user_count == 2))
714 {
715 /* In that case, edge appears to be smooth, but we need to check its other face too. */
716 const int other_face_index = (face_index == edge_face_map_elem[0]) ? edge_face_map_elem[1] :
717 edge_face_map_elem[0];
718 return !face_is_smooth(other_face_index);
719 }
720 return true;
721 };
722
/* Empty spans for the edge/vert-to-face maps make the callee build them itself. */
724 verts_num,
725 faces,
726 corner_edges,
727 corner_verts,
728 {},
729 {},
730 use_bitflags,
731 use_boundary_vertices_for_bitflags,
732 face_is_island_boundary_smooth,
733 &face_groups,
734 r_totgroup,
735 nullptr,
736 nullptr);
737
738 return face_groups;
739}
740
/* Public entry point: sequential (non-bitflag) smooth-group ids per face; vertex data is not
 * needed in this mode, hence the 0 / empty-span arguments on the elided call line.
 * (Lines 742 — `faces` parameter — and 748 — the `return mesh_calc_smoothgroups(` call head —
 * were elided by the extraction.) */
741int *BKE_mesh_calc_smoothgroups(int edges_num,
743 const blender::Span<int> corner_edges,
744 const blender::Span<bool> sharp_edges,
745 const blender::Span<bool> sharp_faces,
746 int *r_totgroup)
747{
749 edges_num, 0, faces, corner_edges, {}, sharp_edges, sharp_faces, r_totgroup, false, false);
750}
751
/* Public entry point: bit-flag smooth-group ids (each group a power of two, as used by legacy
 * exporters). Signature per the cross-reference: BKE_mesh_calc_smoothgroups_bitflags(); the
 * name line (752) and `faces` parameter line (754) were elided by the extraction. */
753 int verts_num,
755 const blender::Span<int> corner_edges,
756 const blender::Span<int> corner_verts,
757 const blender::Span<bool> sharp_edges,
758 const blender::Span<bool> sharp_faces,
759 const bool use_boundary_vertices_for_bitflags,
760 int *r_totgroup)
761{
762 return mesh_calc_smoothgroups(edges_num,
763 verts_num,
764 faces,
765 corner_edges,
766 corner_verts,
767 sharp_edges,
768 sharp_faces,
769 r_totgroup,
770 true,
771 use_boundary_vertices_for_bitflags);
772}
773
774#define MISLAND_DEFAULT_BUFSIZE 64
775
/* (Re)initialize a MeshIslandStore: lazily create its memarena, then allocate the
 * items-to-islands table and the initial islands/innercuts pointer arrays from it.
 * NOTE(review): the extraction elided the name line (776, BKE_mesh_loop_islands_init per the
 * cross-reference), the arena creation line (785, presumably BLI_memarena_new), validity asserts
 * (790-793) and the islands_num/islands_num_alloc setup around line 801 — confirm against the
 * real source. */
777 const short item_type,
778 const int items_num,
779 const short island_type,
780 const short innercut_type)
781{
782 MemArena *mem = island_store->mem;
783
784 if (mem == nullptr) {
786 island_store->mem = mem;
787 }
788 /* else memarena should be cleared */
789
795 island_store->item_type = item_type;
796 island_store->items_to_islands_num = items_num;
797 island_store->items_to_islands = static_cast<int *>(
798 BLI_memarena_alloc(mem, sizeof(*island_store->items_to_islands) * size_t(items_num)));
799
800 island_store->island_type = island_type;
802 island_store->islands = static_cast<MeshElemMap **>(
803 BLI_memarena_alloc(mem, sizeof(*island_store->islands) * island_store->islands_num_alloc));
804
805 island_store->innercut_type = innercut_type;
806 island_store->innercuts = static_cast<MeshElemMap **>(
807 BLI_memarena_alloc(mem, sizeof(*island_store->innercuts) * island_store->islands_num_alloc));
808}
809
/* Reset a MeshIslandStore to its empty state and clear (but keep) its memarena, so it can be
 * re-initialized without reallocating. (Signature line elided; this is
 * BKE_mesh_loop_islands_clear().) */
811{
812 island_store->item_type = MISLAND_TYPE_NONE;
813 island_store->items_to_islands_num = 0;
814 island_store->items_to_islands = nullptr;
815
816 island_store->island_type = MISLAND_TYPE_NONE;
817 island_store->islands_num = 0;
818 island_store->islands = nullptr;
819
820 island_store->innercut_type = MISLAND_TYPE_NONE;
821 island_store->innercuts = nullptr;
822
823 if (island_store->mem) {
824 BLI_memarena_clear(island_store->mem);
825 }
826
827 island_store->islands_num_alloc = 0;
828}
829
/* Free the store's memarena (which owns all island data at once) and null the pointer.
 * (Signature line elided; this is BKE_mesh_loop_islands_free().) */
831{
832 if (island_store->mem) {
833 BLI_memarena_free(island_store->mem);
834 island_store->mem = nullptr;
835 }
836}
837
/* Append one island to the store: tag its items in the items-to-islands table, grow the
 * islands/innercuts pointer arrays if needed (arena allocations are never freed, old arrays are
 * simply abandoned), then copy the island and inner-cut index lists into arena memory.
 * (Name line elided; per the cross-reference this is BKE_mesh_loop_islands_add().) */
839 const int item_num,
840 const int *items_indices,
841 const int num_island_items,
842 int *island_item_indices,
843 const int num_innercut_items,
844 int *innercut_item_indices)
845{
846 MemArena *mem = island_store->mem;
847
848 MeshElemMap *isld, *innrcut;
849 const int curr_island_idx = island_store->islands_num++;
850 const size_t curr_num_islands = size_t(island_store->islands_num);
851 int i = item_num;
852
/* Record which island each of this island's items belongs to. */
853 while (i--) {
854 island_store->items_to_islands[items_indices[i]] = curr_island_idx;
855 }
856
/* Doubling growth; memarena has no realloc/free, so old arrays are left in the arena. */
857 if (UNLIKELY(curr_num_islands > island_store->islands_num_alloc)) {
858 MeshElemMap **islds, **innrcuts;
859
860 island_store->islands_num_alloc *= 2;
861 islds = static_cast<MeshElemMap **>(
862 BLI_memarena_alloc(mem, sizeof(*islds) * island_store->islands_num_alloc));
863 memcpy(islds, island_store->islands, sizeof(*islds) * (curr_num_islands - 1));
864 island_store->islands = islds;
865
866 innrcuts = static_cast<MeshElemMap **>(
867 BLI_memarena_alloc(mem, sizeof(*innrcuts) * island_store->islands_num_alloc));
868 memcpy(innrcuts, island_store->innercuts, sizeof(*innrcuts) * (curr_num_islands - 1));
869 island_store->innercuts = innrcuts;
870 }
871
/* Deep-copy the island's item list into the arena. */
872 island_store->islands[curr_island_idx] = isld = static_cast<MeshElemMap *>(
873 BLI_memarena_alloc(mem, sizeof(*isld)));
874 isld->count = num_island_items;
875 isld->indices = static_cast<int *>(
876 BLI_memarena_alloc(mem, sizeof(*isld->indices) * size_t(num_island_items)));
877 memcpy(isld->indices, island_item_indices, sizeof(*isld->indices) * size_t(num_island_items));
878
/* Same for the inner-cut edge list. */
879 island_store->innercuts[curr_island_idx] = innrcut = static_cast<MeshElemMap *>(
880 BLI_memarena_alloc(mem, sizeof(*innrcut)));
881 innrcut->count = num_innercut_items;
882 innrcut->indices = static_cast<int *>(
883 BLI_memarena_alloc(mem, sizeof(*innrcut->indices) * size_t(num_innercut_items)));
884 memcpy(innrcut->indices,
885 innercut_item_indices,
886 sizeof(*innrcut->indices) * size_t(num_innercut_items));
887}
888
/* Compute UV islands (groups of faces separated by seam edges) and store them — as loop and face
 * index lists plus "inner cut" edges — into `r_island_store`. Returns false when no groups were
 * found.
 * NOTE(review): the extraction elided the `faces` parameter line (891), some arguments of the
 * BKE_mesh_loop_islands_init call (916/918/919), the build_edge_to_face_map call head (923) and
 * the face_edge_loop_islands_calc call head (941) — confirm against the real source. */
889static bool mesh_calc_islands_loop_face_uv(const int totedge,
890 const blender::Span<bool> uv_seams,
892 const blender::Span<int> corner_edges,
893 MeshIslandStore *r_island_store)
894{
895 using namespace blender;
896 int *face_groups = nullptr;
897 int num_face_groups;
898
899 int *face_indices;
900 int *loop_indices;
901 int num_pidx, num_lidx;
902
903 /* Those are used to detect 'inner cuts', i.e. edges that are boundaries,
904 * and yet have two or more faces of a same group using them
905 * (typical case: seam used to unwrap properly a cylinder). */
906 BLI_bitmap *edge_boundaries = nullptr;
907 int num_edge_boundaries = 0;
908 char *edge_boundary_count = nullptr;
909 int *edge_innercut_indices = nullptr;
910 int num_einnercuts = 0;
911
912 int grp_idx;
913
914 BKE_mesh_loop_islands_clear(r_island_store);
915 BKE_mesh_loop_islands_init(r_island_store,
917 int(corner_edges.size()),
920
921 Array<int> edge_to_face_offsets;
922 Array<int> edge_to_face_indices;
924 faces, corner_edges, totedge, edge_to_face_offsets, edge_to_face_indices);
925
926 /* TODO: I'm not sure edge seam flag is enough to define UV islands?
927 * Maybe we should also consider UV-maps values
928 * themselves (i.e. different UV-edges for a same mesh-edge => boundary edge too?).
929 * Would make things much more complex though,
930 * and each UVMap would then need its own mesh mapping, not sure we want that at all!
931 */
932 auto mesh_check_island_boundary_uv = [&](const int /*face_index*/,
933 const int /*corner*/,
934 const int edge_index,
935 const int /*edge_user_count*/,
936 const Span<int> /*edge_face_map_elem*/) -> bool {
937 /* Edge is UV boundary if tagged as seam. */
938 return !uv_seams.is_empty() && uv_seams[edge_index];
939 };
940
/* Flood-fill faces into groups, collecting the boundary-edge bitmap as a side product. */
942 0,
943 faces,
944 corner_edges,
945 {},
946 edge_to_face_map,
947 {},
948 false,
949 false,
950 mesh_check_island_boundary_uv,
951 &face_groups,
952 &num_face_groups,
953 &edge_boundaries,
954 &num_edge_boundaries);
955
956 if (!num_face_groups) {
957 if (num_edge_boundaries) {
958 MEM_freeN(edge_boundaries);
959 }
960 return false;
961 }
962
963 if (num_edge_boundaries) {
964 edge_boundary_count = MEM_malloc_arrayN<char>(size_t(totedge), __func__);
965 edge_innercut_indices = MEM_malloc_arrayN<int>(size_t(num_edge_boundaries), __func__);
966 }
967
/* Scratch buffers reused for every group (sized for the worst case: all faces/corners). */
968 face_indices = MEM_malloc_arrayN<int>(size_t(faces.size()), __func__);
969 loop_indices = MEM_malloc_arrayN<int>(size_t(corner_edges.size()), __func__);
970
971 /* NOTE: here we ignore '0' invalid group - this should *never* happen in this case anyway? */
972 for (grp_idx = 1; grp_idx <= num_face_groups; grp_idx++) {
973 num_pidx = num_lidx = 0;
974 if (num_edge_boundaries) {
975 num_einnercuts = 0;
976 memset(edge_boundary_count, 0, sizeof(*edge_boundary_count) * size_t(totedge));
977 }
978
979 for (const int64_t p_idx : faces.index_range()) {
980 if (face_groups[p_idx] != grp_idx) {
981 continue;
982 }
983 face_indices[num_pidx++] = int(p_idx);
984 for (const int64_t corner : faces[p_idx]) {
985 const int edge_i = corner_edges[corner];
986 loop_indices[num_lidx++] = int(corner);
/* A boundary edge seen twice from the same group is an "inner cut" (e.g. a cylinder seam). */
987 if (num_edge_boundaries && BLI_BITMAP_TEST(edge_boundaries, edge_i) &&
988 (edge_boundary_count[edge_i] < 2))
989 {
990 edge_boundary_count[edge_i]++;
991 if (edge_boundary_count[edge_i] == 2) {
992 edge_innercut_indices[num_einnercuts++] = edge_i;
993 }
994 }
995 }
996 }
997
998 BKE_mesh_loop_islands_add(r_island_store,
999 num_lidx,
1000 loop_indices,
1001 num_pidx,
1002 face_indices,
1003 num_einnercuts,
1004 edge_innercut_indices);
1005 }
1006
1007 MEM_freeN(face_indices);
1008 MEM_freeN(loop_indices);
1009 MEM_freeN(face_groups);
1010
/* NOTE(review): these two identical `num_edge_boundaries` guards could be one block. */
1011 if (num_edge_boundaries) {
1012 MEM_freeN(edge_boundaries);
1013 }
1014
1015 if (num_edge_boundaries) {
1016 MEM_freeN(edge_boundary_count);
1017 MEM_freeN(edge_innercut_indices);
1018 }
1019 return true;
1020}
1021
/* Public wrapper: compute loop/face islands from edge seams. Vertex positions and corner-verts
 * are accepted for API symmetry but unused here.
 * NOTE(review): the name line (1022), the `vert_positions`/`faces` parameter lines and the
 * `return mesh_calc_islands_loop_face_uv(` call head (1031) were elided by the extraction; the
 * full signature is listed in the cross-reference index. */
1023 const blender::Span<blender::int2> edges,
1024 const blender::Span<bool> uv_seams,
1026 const blender::Span<int> /*corner_verts*/,
1027 const blender::Span<int> corner_edges,
1028 MeshIslandStore *r_island_store)
1029{
1030 UNUSED_VARS(vert_positions);
1032 int(edges.size()), uv_seams, faces, corner_edges, r_island_store);
1033}
1034
CustomData interface, see also DNA_customdata_types.h.
#define ORIGINDEX_NONE
@ MISLAND_TYPE_POLY
@ MISLAND_TYPE_VERT
@ MISLAND_TYPE_LOOP
@ MISLAND_TYPE_EDGE
@ MISLAND_TYPE_NONE
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_BITMAP_NEW(_num, _alloc_string)
Definition BLI_bitmap.h:37
#define BLI_BITMAP_TEST(_bitmap, _index)
Definition BLI_bitmap.h:61
#define BLI_BITMAP_ENABLE(_bitmap, _index)
Definition BLI_bitmap.h:78
unsigned int BLI_bitmap
Definition BLI_bitmap.h:13
MINLINE void sub_v2_v2v2(float r[2], const float a[2], const float b[2])
#define BLI_MEMARENA_STD_BUFSIZE
MemArena * BLI_memarena_new(size_t bufsize, const char *name) ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL ATTR_NONNULL(2) ATTR_MALLOC
void BLI_memarena_free(MemArena *ma) ATTR_NONNULL(1)
void * BLI_memarena_alloc(MemArena *ma, size_t size) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1) ATTR_MALLOC ATTR_ALLOC_SIZE(2)
void BLI_memarena_clear(MemArena *ma) ATTR_NONNULL(1)
#define BLI_SCOPED_DEFER(function_to_defer)
unsigned int uint
unsigned short ushort
#define UNUSED_VARS(...)
#define UNLIKELY(x)
#define ELEM(...)
Read Guarded memory(de)allocation.
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATTR_WARN_UNUSED_RESULT const BMVert * v
long long int int64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
constexpr IndexRange index_range() const
Span< T > as_span() const
Definition BLI_array.hh:243
const T & last(const int64_t n=0) const
Definition BLI_array.hh:296
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:419
Span< NewT > constexpr cast() const
Definition BLI_span.hh:418
constexpr const T * data() const
Definition BLI_span.hh:215
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr const T * end() const
Definition BLI_span.hh:224
constexpr IndexRange index_range() const
Definition BLI_span.hh:401
constexpr const T * begin() const
Definition BLI_span.hh:220
constexpr bool is_empty() const
Definition BLI_span.hh:260
static ushort indices[]
#define printf(...)
void * MEM_calloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:123
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void * MEM_malloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:133
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
static ulong * next
static char faces[256]
UvMapVert * BKE_mesh_uv_vert_map_get_vert(UvVertMap *vmap, uint v)
int * BKE_mesh_calc_smoothgroups_bitflags(int edges_num, int verts_num, const blender::OffsetIndices< int > faces, const blender::Span< int > corner_edges, const blender::Span< int > corner_verts, const blender::Span< bool > sharp_edges, const blender::Span< bool > sharp_faces, const bool use_boundary_vertices_for_bitflags, int *r_totgroup)
void BKE_mesh_origindex_map_create_corner_tri(MeshElemMap **r_map, int **r_mem, const blender::OffsetIndices< int > faces, const int *corner_tri_faces, const int corner_tris_num)
void BKE_mesh_origindex_map_create(MeshElemMap **r_map, int **r_mem, const int totsource, const int *final_origindex, const int totfinal)
void BKE_mesh_loop_islands_add(MeshIslandStore *island_store, const int item_num, const int *items_indices, const int num_island_items, int *island_item_indices, const int num_innercut_items, int *innercut_item_indices)
int * BKE_mesh_calc_smoothgroups(int edges_num, const blender::OffsetIndices< int > faces, const blender::Span< int > corner_edges, const blender::Span< bool > sharp_edges, const blender::Span< bool > sharp_faces, int *r_totgroup)
bool BKE_mesh_calc_islands_loop_face_edgeseam(const blender::Span< blender::float3 > vert_positions, const blender::Span< blender::int2 > edges, const blender::Span< bool > uv_seams, const blender::OffsetIndices< int > faces, const blender::Span< int >, const blender::Span< int > corner_edges, MeshIslandStore *r_island_store)
static bool mesh_calc_islands_loop_face_uv(const int totedge, const blender::Span< bool > uv_seams, const blender::OffsetIndices< int > faces, const blender::Span< int > corner_edges, MeshIslandStore *r_island_store)
static void face_edge_loop_islands_calc(const int totedge, const int totvert, const blender::OffsetIndices< int > faces, const blender::Span< int > corner_edges, const blender::Span< int > corner_verts, blender::GroupedSpan< int > edge_face_map, blender::GroupedSpan< int > vert_face_map, const bool use_bitflags, const bool use_boundary_vertices_for_bitflags, MeshRemap_CheckIslandBoundary edge_boundary_check, int **r_face_groups, int *r_totgroup, BLI_bitmap **r_edge_boundaries, int *r_totedgeboundaries)
void BKE_mesh_loop_islands_init(MeshIslandStore *island_store, const short item_type, const int items_num, const short island_type, const short innercut_type)
#define MISLAND_DEFAULT_BUFSIZE
void BKE_mesh_loop_islands_clear(MeshIslandStore *island_store)
blender::FunctionRef< bool(int face_index, int corner, int edge_index, int edge_user_count, const blender::Span< int > edge_face_map_elem)> MeshRemap_CheckIslandBoundary
void BKE_mesh_uv_vert_map_free(UvVertMap *vmap)
void BKE_mesh_vert_corner_tri_map_create(MeshElemMap **r_map, int **r_mem, const int totvert, const blender::int3 *corner_tris, const int tris_num, const int *corner_verts, const int)
static int * mesh_calc_smoothgroups(const int edges_num, const int verts_num, const blender::OffsetIndices< int > faces, const blender::Span< int > corner_edges, const blender::Span< int > corner_verts, const blender::Span< bool > sharp_edges, const blender::Span< bool > sharp_faces, int *r_totgroup, const bool use_bitflags, const bool use_boundary_vertices_for_bitflags)
void BKE_mesh_loop_islands_free(MeshIslandStore *island_store)
static void face_edge_loop_islands_calc_bitflags_exclude_at_boundary(const int *face_groups, const blender::Span< int > faces_from_item, const int face_group_id, const int face_group_id_overflowed, int &r_bit_face_group_mask)
UvVertMap * BKE_mesh_uv_vert_map_create(blender::OffsetIndices< int > faces, blender::Span< int > corner_verts, blender::Span< blender::float2 > uv_map, int verts_num, const blender::float2 &limit, bool use_winding)
void build_vert_to_face_indices(OffsetIndices< int > faces, Span< int > corner_verts, OffsetIndices< int > offsets, MutableSpan< int > face_indices)
GroupedSpan< int > build_vert_to_corner_map(Span< int > corner_verts, int verts_num, Array< int > &r_offsets, Array< int > &r_indices)
static Array< int > reverse_indices_in_groups(const Span< int > group_indices, const OffsetIndices< int > offsets)
GroupedSpan< int > build_edge_to_corner_map(Span< int > corner_edges, int edges_num, Array< int > &r_offsets, Array< int > &r_indices)
static Array< int > create_reverse_offsets(const Span< int > indices, const int items_num)
static GroupedSpan< int > gather_groups(const Span< int > group_indices, const int groups_num, Array< int > &r_offsets, Array< int > &r_indices)
static void sort_small_groups(const OffsetIndices< int > groups, const int grain_size, MutableSpan< int > indices)
int face_triangles_num(const int face_size)
Definition BKE_mesh.hh:350
GroupedSpan< int > build_edge_to_face_map(OffsetIndices< int > faces, Span< int > corner_edges, int edges_num, Array< int > &r_offsets, Array< int > &r_indices)
Array< int > build_corner_to_face_map(OffsetIndices< int > faces)
Array< int > build_vert_to_corner_indices(Span< int > corner_verts, OffsetIndices< int > offsets)
GroupedSpan< int > build_vert_to_face_map(OffsetIndices< int > faces, Span< int > corner_verts, int verts_num, Array< int > &r_offsets, Array< int > &r_indices)
static void reverse_group_indices_in_groups(const OffsetIndices< int > groups, const Span< int > group_to_elem, const OffsetIndices< int > offsets, MutableSpan< int > results)
GroupedSpan< int > build_vert_to_edge_map(Span< int2 > edges, int verts_num, Array< int > &r_offsets, Array< int > &r_indices)
void build_reverse_map(OffsetIndices< int > offsets, MutableSpan< int > r_map)
void build_reverse_offsets(Span< int > indices, MutableSpan< int > offsets)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
VecBase< float, 2 > float2
VecBase< int32_t, 3 > int3
#define fabsf
MeshElemMap ** islands
MeshElemMap ** innercuts
unsigned int face_index
UvMapVert * next
unsigned short loop_of_face_index
UvMapVert * buf
UvMapVert ** vert
i
Definition text_draw.cc:230