Blender V4.3
mesh_mapping.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
12#include "MEM_guardedalloc.h"
13
14#include "atomic_ops.h"
15
16#include "BLI_array.hh"
17#include "BLI_bitmap.h"
18#include "BLI_buffer.h"
19#include "BLI_function_ref.hh"
20#include "BLI_math_geom.h"
21#include "BLI_math_vector.h"
22#include "BLI_task.hh"
23#include "BLI_utildefines.h"
24
25#include "BKE_customdata.hh"
26#include "BKE_mesh.hh"
27#include "BKE_mesh_mapping.hh"
28#include "BLI_memarena.h"
29
30#include "BLI_strict_flags.h" /* Keep last. */
31
32/* -------------------------------------------------------------------- */
/* Build a map from mesh vertices to the UV "map vertices" (face corners) using them,
 * merging corners whose UVs differ by less than `limit` per axis (and, when `use_winding`
 * is set, only corners from faces with matching 2D winding). Returns null when no face
 * passes the selection filter or when allocation fails.
 *
 * NOTE(review): doxygen extract — the signature head (source lines 33-36,
 * #BKE_mesh_uv_vert_map_create) plus the `tf_uv_buf` declaration (line 52) and the
 * failure-path cleanup (line 72) are missing here; confirm against the real source. */
37 const bool *hide_poly,
38 const bool *select_poly,
39 const int *corner_verts,
40 const float (*mloopuv)[2],
41 uint totvert,
42 const float limit[2],
43 const bool selected,
44 const bool use_winding)
45{
46 /* NOTE: N-gon version WIP, based on #BM_uv_vert_map_create. */
47
48 UvVertMap *vmap;
49 UvMapVert *buf;
50 int i, totuv, nverts;
51
53
54 totuv = 0;
55
56 /* generate UvMapVert array */
/* Count corners of all faces that pass the hide/select filter (or all faces when
 * `selected` is false) — this sizes the single `buf` allocation below. */
57 for (const int64_t a : faces.index_range()) {
58 if (!selected || (!(hide_poly && hide_poly[a]) && (select_poly && select_poly[a]))) {
59 totuv += int(faces[a].size());
60 }
61 }
62
63 if (totuv == 0) {
64 return nullptr;
65 }
66
67 vmap = (UvVertMap *)MEM_callocN(sizeof(*vmap), "UvVertMap");
68 buf = vmap->buf = (UvMapVert *)MEM_callocN(sizeof(*vmap->buf) * size_t(totuv), "UvMapVert");
69 vmap->vert = (UvMapVert **)MEM_callocN(sizeof(*vmap->vert) * totvert, "UvMapVert*");
70
71 if (!vmap->vert || !vmap->buf) {
73 return nullptr;
74 }
75
76 bool *winding = nullptr;
77 if (use_winding) {
78 winding = static_cast<bool *>(
79 MEM_calloc_arrayN(sizeof(*winding), size_t(faces.size()), "winding"));
80 }
81
/* Fill `buf` and thread each corner into a per-vertex linked list (head in
 * `vmap->vert[v]`). When winding matters, gather the face's UVs so its sign can be
 * computed with #cross_poly_v2 afterwards. */
82 for (const int64_t a : faces.index_range()) {
83 const blender::IndexRange face = faces[a];
84 if (!selected || (!(hide_poly && hide_poly[a]) && (select_poly && select_poly[a]))) {
85 float(*tf_uv)[2] = nullptr;
86
87 if (use_winding) {
88 tf_uv = (float(*)[2])BLI_buffer_reinit_data(
89 &tf_uv_buf, blender::float2, size_t(face.size()));
90 }
91
92 nverts = int(face.size());
93
94 for (i = 0; i < nverts; i++) {
95 buf->loop_of_face_index = ushort(i);
96 buf->face_index = uint(a);
97 buf->separate = false;
98 buf->next = vmap->vert[corner_verts[face[i]]];
99 vmap->vert[corner_verts[face[i]]] = buf;
100
101 if (use_winding) {
102 copy_v2_v2(tf_uv[i], mloopuv[face[i]]);
103 }
104
105 buf++;
106 }
107
108 if (use_winding) {
109 winding[a] = cross_poly_v2(tf_uv, uint(nverts)) < 0;
110 }
111 }
112 }
113
114 /* sort individual uvs for each vert */
/* Re-link each vertex's corner list so that corners sharing (almost) the same UV are
 * contiguous; the first corner of each distinct-UV run is flagged `separate`. */
115 for (uint a = 0; a < totvert; a++) {
116 UvMapVert *newvlist = nullptr, *vlist = vmap->vert[a];
117 UvMapVert *iterv, *v, *lastv, *next;
118 const float *uv, *uv2;
119 float uvdiff[2];
120
121 while (vlist) {
122 v = vlist;
123 vlist = vlist->next;
124 v->next = newvlist;
125 newvlist = v;
126
127 uv = mloopuv[faces[v->face_index].start() + v->loop_of_face_index];
128 lastv = nullptr;
129 iterv = vlist;
130
131 while (iterv) {
132 next = iterv->next;
133
134 uv2 = mloopuv[faces[iterv->face_index].start() + iterv->loop_of_face_index];
135 sub_v2_v2v2(uvdiff, uv2, uv);
136
137 if (fabsf(uv[0] - uv2[0]) < limit[0] && fabsf(uv[1] - uv2[1]) < limit[1] &&
138 (!use_winding || winding[iterv->face_index] == winding[v->face_index]))
139 {
140 if (lastv) {
141 lastv->next = next;
142 }
143 else {
144 vlist = next;
145 }
146 iterv->next = newvlist;
147 newvlist = iterv;
148 }
149 else {
150 lastv = iterv;
151 }
152
153 iterv = next;
154 }
155
156 newvlist->separate = true;
157 }
158
159 vmap->vert[a] = newvlist;
160 }
161
162 if (use_winding) {
163 MEM_freeN(winding);
164 }
165
166 BLI_buffer_free(&tf_uv_buf);
167
168 return vmap;
169}
170
/* Return the head of the linked list of UV map-vertices stored for vertex `v`.
 * NOTE(review): signature line missing from this extract; per the index this is
 * #BKE_mesh_uv_vert_map_get_vert(UvVertMap *vmap, uint v). */
172{
173 return vmap->vert[v];
174}
175
/* Free a #UvVertMap together with its owned `vert` and `buf` arrays.
 * Safe to call with a null map (no-op).
 * NOTE(review): signature line missing from this extract (#BKE_mesh_uv_vert_map_free). */
177{
178 if (vmap) {
179 if (vmap->vert) {
180 MEM_freeN(vmap->vert);
181 }
182 if (vmap->buf) {
183 MEM_freeN(vmap->buf);
184 }
185 MEM_freeN(vmap);
186 }
187}
188
/* Build a vertex -> triangle map (which triangles use each vertex) with the classic
 * two-pass counting-sort layout: count users per vertex, carve `indices` into
 * per-vertex sub-arrays, then fill them. Caller owns `*r_map` and `*r_mem`.
 * NOTE(review): the function's name/return line is missing from this extract
 * (source line 189); per the surrounding code the first parameter is `r_map`. */
190 int **r_mem,
191 const int totvert,
192 const blender::int3 *corner_tris,
193 const int tris_num,
194 const int *corner_verts,
195 const int /*corners_num*/)
196{
197 MeshElemMap *map = MEM_cnew_array<MeshElemMap>(size_t(totvert), __func__);
/* Every triangle contributes exactly 3 entries. */
198 int *indices = static_cast<int *>(MEM_mallocN(sizeof(int) * size_t(tris_num) * 3, __func__));
199 int *index_step;
200 int i;
201
202 /* count face users */
203 for (i = 0; i < tris_num; i++) {
204 for (int j = 3; j--;) {
205 map[corner_verts[corner_tris[i][j]]].count++;
206 }
207 }
208
209 /* create offsets */
210 index_step = indices;
211 for (i = 0; i < totvert; i++) {
212 map[i].indices = index_step;
213 index_step += map[i].count;
214
215 /* re-count, using this as an index below */
216 map[i].count = 0;
217 }
218
219 /* assign corner_tri-edge users */
220 for (i = 0; i < tris_num; i++) {
221 for (int j = 3; j--;) {
222 MeshElemMap *map_ele = &map[corner_verts[corner_tris[i][j]]];
223 map_ele->indices[map_ele->count++] = i;
224 }
225 }
226
227 *r_map = map;
228 *r_mem = indices;
229}
230
/* Build a source -> final map from an original-index array: for each source element,
 * list the final elements whose `final_origindex` points back at it. Entries tagged
 * #ORIGINDEX_NONE are skipped. Caller owns `*r_map` and `*r_mem`.
 * NOTE(review): the function's name/return line (source line 231,
 * #BKE_mesh_origindex_map_create) is missing from this extract. */
232 int **r_mem,
233 const int totsource,
234 const int *final_origindex,
235 const int totfinal)
236{
237 MeshElemMap *map = MEM_cnew_array<MeshElemMap>(size_t(totsource), __func__);
/* Upper bound: each final element maps to at most one source. */
238 int *indices = static_cast<int *>(MEM_mallocN(sizeof(int) * size_t(totfinal), __func__));
239 int *index_step;
240 int i;
241
242 /* count face users */
243 for (i = 0; i < totfinal; i++) {
244 if (final_origindex[i] != ORIGINDEX_NONE) {
245 BLI_assert(final_origindex[i] < totsource);
246 map[final_origindex[i]].count++;
247 }
248 }
249
250 /* create offsets */
251 index_step = indices;
252 for (i = 0; i < totsource; i++) {
253 map[i].indices = index_step;
254 index_step += map[i].count;
255
256 /* re-count, using this as an index below */
257 map[i].count = 0;
258 }
259
260 /* Assign face-tessellation users. */
261 for (i = 0; i < totfinal; i++) {
262 if (final_origindex[i] != ORIGINDEX_NONE) {
263 MeshElemMap *map_ele = &map[final_origindex[i]];
264 map_ele->indices[map_ele->count++] = i;
265 }
266 }
267
268 *r_map = map;
269 *r_mem = indices;
270}
271
/* Build a face -> triangle map: for each face, the list of triangle indices that
 * tessellate it. Offsets come from #face_triangles_num, so `corner_tri_faces` is
 * assumed to list each face's triangles contiguously. Caller owns `*r_map`/`*r_mem`.
 * NOTE(review): the function's name/return line (source line 272,
 * #BKE_mesh_origindex_map_create_corner_tri) is missing from this extract. */
273 int **r_mem,
274 const blender::OffsetIndices<int> faces,
275 const int *corner_tri_faces,
276 const int corner_tris_num)
277{
278 MeshElemMap *map = MEM_cnew_array<MeshElemMap>(size_t(faces.size()), __func__);
279 int *indices = static_cast<int *>(MEM_mallocN(sizeof(int) * size_t(corner_tris_num), __func__));
280 int *index_step;
281
282 /* create offsets */
/* Unlike the other map builders no counting pass is needed: the triangle count per
 * face is known analytically from its corner count. */
283 index_step = indices;
284 for (const int64_t i : faces.index_range()) {
285 map[i].indices = index_step;
286 index_step += blender::bke::mesh::face_triangles_num(int(faces[i].size()));
287 }
288
289 /* Assign face-tessellation users. */
290 for (int i = 0; i < corner_tris_num; i++) {
291 MeshElemMap *map_ele = &map[corner_tri_faces[i]];
292 map_ele->indices[map_ele->count++] = i;
293 }
294
295 *r_map = map;
296 *r_mem = indices;
297}
298
299namespace blender::bke::mesh {
300
/* Build the offsets array (size `items_num + 1`) describing, for every item, how many
 * entries of `indices` reference it — the backbone of every reverse map below.
 * NOTE(review): doxygen dropped source line 304 here, which performs the counting /
 * accumulation between the zero-initialized allocation and the return; confirm the
 * full body against the real source before relying on this extract. */
301static Array<int> create_reverse_offsets(const Span<int> indices, const int items_num)
302{
303 Array<int> offsets(items_num + 1, 0);
305 return offsets;
306}
307
308static void sort_small_groups(const OffsetIndices<int> groups,
309 const int grain_size,
310 MutableSpan<int> indices)
311{
312 threading::parallel_for(groups.index_range(), grain_size, [&](const IndexRange range) {
313 for (const int64_t index : range) {
314 MutableSpan<int> group = indices.slice(groups[index]);
315 std::sort(group.begin(), group.end());
316 }
317 });
318}
319
/* Scatter every element index into the group bucket given by `group_indices`, using
 * the precomputed `offsets`, then sort each bucket. Result maps group -> sorted
 * element indices. NOTE(review): the function's name/return line (source line 320,
 * #reverse_indices_in_groups) is missing from this extract. */
 const OffsetIndices<int> offsets)
322{
323 if (group_indices.is_empty()) {
324 return {};
325 }
326 BLI_assert(*std::max_element(group_indices.begin(), group_indices.end()) < offsets.size());
327 BLI_assert(*std::min_element(group_indices.begin(), group_indices.end()) >= 0);
328
329 /* `counts` keeps track of how many elements have been added to each group, and is incremented
330 * atomically by many threads in parallel. `calloc` can be measurably faster than a parallel fill
331 * of zero. Alternatively the offsets could be copied and incremented directly, but the cost of
332 * the copy is slightly higher than the cost of `calloc`. */
333 int *counts = MEM_cnew_array<int>(size_t(offsets.size()), __func__);
334 BLI_SCOPED_DEFER([&]() { MEM_freeN(counts); })
335 Array<int> results(group_indices.size());
336 threading::parallel_for(group_indices.index_range(), 1024, [&](const IndexRange range) {
337 for (const int64_t i : range) {
338 const int group_index = group_indices[i];
/* Atomically claim the next free slot inside this group's bucket. */
339 const int index_in_group = atomic_fetch_and_add_int32(&counts[group_index], 1);
340 results[offsets[group_index][index_in_group]] = int(i);
341 }
342 });
/* The atomic fill leaves each bucket in nondeterministic order; sort to stabilize. */
343 sort_small_groups(offsets, 1024, results);
344 return results;
345}
346
347/* A version of #reverse_indices_in_groups that stores face indices instead of corner indices. */
/* NOTE(review): the function's name/return line (source line 348,
 * #reverse_group_indices_in_groups) is missing from this extract; the first
 * parameter is the `groups` (faces) offsets per the call sites below. */
349 const Span<int> group_to_elem,
350 const OffsetIndices<int> offsets,
351 MutableSpan<int> results)
352{
/* Per-element claim counters, incremented atomically across threads; see the
 * rationale comment in #reverse_indices_in_groups. */
353 int *counts = MEM_cnew_array<int>(size_t(offsets.size()), __func__);
354 BLI_SCOPED_DEFER([&]() { MEM_freeN(counts); })
355 threading::parallel_for(groups.index_range(), 1024, [&](const IndexRange range) {
356 for (const int64_t face : range) {
357 for (const int elem : group_to_elem.slice(groups[face])) {
358 const int index_in_group = atomic_fetch_and_add_int32(&counts[elem], 1);
359 results[offsets[elem][index_in_group]] = int(face);
360 }
361 }
362 });
/* Stabilize the nondeterministic parallel fill order. */
363 sort_small_groups(offsets, 1024, results);
364}
365
366static GroupedSpan<int> gather_groups(const Span<int> group_indices,
367 const int groups_num,
368 Array<int> &r_offsets,
369 Array<int> &r_indices)
370{
371 r_offsets = create_reverse_offsets(group_indices, groups_num);
372 r_indices = reverse_indices_in_groups(group_indices, r_offsets.as_span());
373 return {OffsetIndices<int>(r_offsets), r_indices};
374}
375
/* Build the inverse of the face offsets: for every corner, the index of the face that
 * owns it. NOTE(review): the signature line (source line 376) is missing from this
 * extract; presumably this is #build_corner_to_face_map(OffsetIndices<int> faces) —
 * confirm against the real source. */
377{
378 Array<int> map(faces.total_size());
379 offset_indices::build_reverse_map(faces, map);
380 return map;
381}
382
/* Build a vertex -> edge map (for every vertex, the sorted edges using it).
 * NOTE(review): the function's name/return line (source line 383, presumably
 * #build_vert_to_edge_map taking the `edges` span first) is missing from this
 * extract. */
384 const int verts_num,
385 Array<int> &r_offsets,
386 Array<int> &r_indices)
387{
/* Each int2 edge is viewed as two plain vertex indices for counting. */
388 r_offsets = create_reverse_offsets(edges.cast<int>(), verts_num);
389 const OffsetIndices<int> offsets(r_offsets);
390 r_indices.reinitialize(offsets.total_size());
391
392 /* Version of #reverse_indices_in_groups that accounts for storing two indices for each edge. */
393 int *counts = MEM_cnew_array<int>(size_t(offsets.size()), __func__);
394 BLI_SCOPED_DEFER([&]() { MEM_freeN(counts); })
395 threading::parallel_for(edges.index_range(), 1024, [&](const IndexRange range) {
396 for (const int64_t edge : range) {
397 for (const int vert : {edges[edge][0], edges[edge][1]}) {
398 const int index_in_group = atomic_fetch_and_add_int32(&counts[vert], 1);
399 r_indices[offsets[vert][index_in_group]] = int(edge);
400 }
401 }
402 });
/* Stabilize the nondeterministic parallel fill order. */
403 sort_small_groups(offsets, 1024, r_indices);
404 return {offsets, r_indices};
405}
406
/* Fill `face_indices` so each vertex bucket lists the faces using it; thin wrapper
 * over #reverse_group_indices_in_groups. NOTE(review): the function's name/return
 * line (source line 407, #build_vert_to_face_indices per the caller below) is
 * missing from this extract. */
408 const Span<int> corner_verts,
409 const OffsetIndices<int> offsets,
410 MutableSpan<int> face_indices)
411{
412 reverse_group_indices_in_groups(faces, corner_verts, offsets, face_indices);
413}
414
/* Build a vertex -> face map (for every vertex, the sorted faces using it).
 * NOTE(review): the function's name/return line (source line 415, presumably
 * #build_vert_to_face_map with `faces` as first parameter) is missing from this
 * extract. */
416 const Span<int> corner_verts,
417 const int verts_num,
418 Array<int> &r_offsets,
419 Array<int> &r_indices)
420{
421 r_offsets = create_reverse_offsets(corner_verts, verts_num);
/* Total index count is the last accumulated offset. */
422 r_indices.reinitialize(r_offsets.last());
423 build_vert_to_face_indices(faces, corner_verts, OffsetIndices<int>(r_offsets), r_indices);
424 return {OffsetIndices<int>(r_offsets), r_indices};
425}
426
/* Gather, for every vertex, the corners referencing it, using precomputed offsets.
 * NOTE(review): the function's name/return line (source line 427) is missing from
 * this extract; the first parameter is `corner_verts` per the body. */
428 const OffsetIndices<int> offsets)
429{
430 return reverse_indices_in_groups(corner_verts, offsets);
431}
432
/* Build a vertex -> corner map (offsets + indices) via #gather_groups.
 * NOTE(review): the function's name/return line (source line 433, presumably
 * #build_vert_to_corner_map with `corner_verts` first) is missing from this
 * extract. */
434 const int verts_num,
435 Array<int> &r_offsets,
436 Array<int> &r_indices)
437{
438 return gather_groups(corner_verts, verts_num, r_offsets, r_indices);
439}
440
/* Build an edge -> corner map (offsets + indices) via #gather_groups.
 * NOTE(review): the function's name/return line (source line 441, presumably
 * #build_edge_to_corner_map with `corner_edges` first) is missing from this
 * extract. */
442 const int edges_num,
443 Array<int> &r_offsets,
444 Array<int> &r_indices)
445{
446 return gather_groups(corner_edges, edges_num, r_offsets, r_indices);
447}
448
/* Build an edge -> face map (for every edge, the sorted faces using it).
 * NOTE(review): the function's name/return line (source line 449, presumably
 * #build_edge_to_face_map with `faces` first) is missing from this extract. */
450 const Span<int> corner_edges,
451 const int edges_num,
452 Array<int> &r_offsets,
453 Array<int> &r_indices)
454{
455 r_offsets = create_reverse_offsets(corner_edges, edges_num);
/* Total index count is the last accumulated offset. */
456 r_indices.reinitialize(r_offsets.last());
457 reverse_group_indices_in_groups(faces, corner_edges, OffsetIndices<int>(r_offsets), r_indices);
458 return {OffsetIndices<int>(r_offsets), r_indices};
459}
460
461} // namespace blender::bke::mesh
462
465/* -------------------------------------------------------------------- */
474 blender::FunctionRef<bool(int face_index,
475 int loop_index,
476 int edge_index,
477 int edge_user_count,
478 const blender::Span<int> edge_face_map_elem)>;
479
/* Flood-fill faces into connected groups, where `edge_boundary_check` decides whether
 * an edge stops the fill (island boundary). Two numbering modes:
 * - default: groups get sequential ids 1..N;
 * - `use_bitflags`: each group gets a single-bit id not shared with any adjacent
 *   group (FBX-style smoothing groups); on bit exhaustion faces fall back to 0.
 * Outputs: `r_face_groups` (malloc'd, one id per face), `r_totgroup`, and optionally
 * a bitmap + count of boundary edges.
 * NOTE(review): doxygen dropped source line 524 here — the statement assigning
 * `edge_face_map` (presumably a #build_edge_to_face_map call, given the argument
 * line that follows); confirm against the real source. */
480static void face_edge_loop_islands_calc(const int totedge,
481 const blender::OffsetIndices<int> faces,
482 const blender::Span<int> corner_edges,
483 blender::GroupedSpan<int> edge_face_map,
484 const bool use_bitflags,
485 MeshRemap_CheckIslandBoundary edge_boundary_check,
486 int **r_face_groups,
487 int *r_totgroup,
488 BLI_bitmap **r_edge_borders,
489 int *r_totedgeborder)
490{
491 int *face_groups;
492 int *face_stack;
493
494 BLI_bitmap *edge_borders = nullptr;
495 int num_edgeborders = 0;
496
497 int face_prev = 0;
498 const int temp_face_group_id = 3; /* Placeholder value. */
499
500 /* Group we could not find any available bit, will be reset to 0 at end. */
501 const int face_group_id_overflowed = 5;
502
503 int tot_group = 0;
504 bool group_id_overflow = false;
505
/* Early-out for meshes without faces. */
506 if (faces.is_empty()) {
507 *r_totgroup = 0;
508 *r_face_groups = nullptr;
509 if (r_edge_borders) {
510 *r_edge_borders = nullptr;
511 *r_totedgeborder = 0;
512 }
513 return;
514 }
515
516 if (r_edge_borders) {
517 edge_borders = BLI_BITMAP_NEW(totedge, __func__);
518 *r_totedgeborder = 0;
519 }
520
/* Build the edge->face map locally when the caller did not supply one. */
521 blender::Array<int> edge_to_face_src_offsets;
522 blender::Array<int> edge_to_face_src_indices;
523 if (edge_face_map.is_empty()) {
525 faces, corner_edges, totedge, edge_to_face_src_offsets, edge_to_face_src_indices);
526 }
527
528 face_groups = static_cast<int *>(MEM_callocN(sizeof(int) * size_t(faces.size()), __func__));
529 face_stack = static_cast<int *>(MEM_mallocN(sizeof(int) * size_t(faces.size()), __func__));
530
/* One outer iteration per island: find the next unassigned face, then flood-fill
 * its whole group through non-boundary edges using an explicit stack. */
531 while (true) {
532 int face;
533 int bit_face_group_mask = 0;
534 int face_group_id;
535 int ps_curr_idx = 0, ps_end_idx = 0; /* stack indices */
536
537 for (face = face_prev; face < int(faces.size()); face++) {
538 if (face_groups[face] == 0) {
539 break;
540 }
541 }
542
543 if (face == int(faces.size())) {
544 /* all done */
545 break;
546 }
547
548 face_group_id = use_bitflags ? temp_face_group_id : ++tot_group;
549
550 /* start searching from here next time */
551 face_prev = face + 1;
552
553 face_groups[face] = face_group_id;
554 face_stack[ps_end_idx++] = face;
555
556 while (ps_curr_idx != ps_end_idx) {
557 face = face_stack[ps_curr_idx++];
558 BLI_assert(face_groups[face] == face_group_id);
559
560 for (const int64_t loop : faces[face]) {
561 const int edge = corner_edges[loop];
562 /* loop over face users */
563 const blender::Span<int> map_ele = edge_face_map[edge];
564 const int *p = map_ele.data();
565 int i = int(map_ele.size());
566 if (!edge_boundary_check(face, int(loop), edge, i, map_ele)) {
/* Non-boundary edge: pull all of its faces into the current group. */
567 for (; i--; p++) {
568 /* if we meet other non initialized its a bug */
569 BLI_assert(ELEM(face_groups[*p], 0, face_group_id));
570
571 if (face_groups[*p] == 0) {
572 face_groups[*p] = face_group_id;
573 face_stack[ps_end_idx++] = *p;
574 }
575 }
576 }
577 else {
578 if (edge_borders && !BLI_BITMAP_TEST(edge_borders, edge)) {
579 BLI_BITMAP_ENABLE(edge_borders, edge);
580 num_edgeborders++;
581 }
582 if (use_bitflags) {
583 /* Find contiguous smooth groups already assigned,
584 * these are the values we can't reuse! */
585 for (; i--; p++) {
586 int bit = face_groups[*p];
587 if (!ELEM(bit, 0, face_group_id, face_group_id_overflowed) &&
588 !(bit_face_group_mask & bit))
589 {
590 bit_face_group_mask |= bit;
591 }
592 }
593 }
594 }
595 }
596 }
597 /* And now, we have all our face from current group in face_stack
598 * (from 0 to (ps_end_idx - 1)),
599 * as well as all smoothgroups bits we can't use in bit_face_group_mask.
600 */
601 if (use_bitflags) {
602 int i, *p, gid_bit = 0;
603 face_group_id = 1;
604
605 /* Find first bit available! */
606 for (; (face_group_id & bit_face_group_mask) && (gid_bit < 32); gid_bit++) {
607 face_group_id <<= 1; /* will 'overflow' on last possible iteration. */
608 }
609 if (UNLIKELY(gid_bit > 31)) {
610 /* All bits used in contiguous smooth groups, we can't do much!
611 * NOTE: this is *very* unlikely - theoretically, four groups are enough,
612 * I don't think we can reach this goal with such a simple algorithm,
613 * but I don't think either we'll never need all 32 groups!
614 */
615 printf(
616 "Warning, could not find an available id for current smooth group, faces will me "
617 "marked "
618 "as out of any smooth group...\n");
619
620 /* Can't use 0, will have to set them to this value later. */
621 face_group_id = face_group_id_overflowed;
622
623 group_id_overflow = true;
624 }
625 if (gid_bit > tot_group) {
626 tot_group = gid_bit;
627 }
628 /* And assign the final smooth group id to that face group! */
629 for (i = ps_end_idx, p = face_stack; i--; p++) {
630 face_groups[*p] = face_group_id;
631 }
632 }
633 }
634
635 if (use_bitflags) {
636 /* used bits are zero-based. */
637 tot_group++;
638 }
639
640 if (UNLIKELY(group_id_overflow)) {
641 int i = int(faces.size()), *gid = face_groups;
642 for (; i--; gid++) {
643 if (*gid == face_group_id_overflowed) {
644 *gid = 0;
645 }
646 }
647 /* Using 0 as group id adds one more group! */
648 tot_group++;
649 }
650
651 MEM_freeN(face_stack);
652
653 *r_totgroup = tot_group;
654 *r_face_groups = face_groups;
655 if (r_edge_borders) {
656 *r_edge_borders = edge_borders;
657 *r_totedgeborder = num_edgeborders;
658 }
659}
660
/* Compute per-face smooth-group ids: islands of smooth faces separated by sharp
 * edges, sharp faces, or non-manifold edges. Returns a malloc'd array of one id per
 * face and writes the group count to `r_totgroup`.
 * NOTE(review): doxygen dropped source line 691 here — the head of the
 * #face_edge_loop_islands_calc call whose argument list follows; confirm against the
 * real source. */
661int *BKE_mesh_calc_smoothgroups(int edges_num,
662 const blender::OffsetIndices<int> faces,
663 const blender::Span<int> corner_edges,
664 const blender::Span<bool> sharp_edges,
665 const blender::Span<bool> sharp_faces,
666 int *r_totgroup,
667 bool use_bitflags)
668{
669 int *face_groups = nullptr;
670
/* An empty `sharp_faces` span means "no face is sharp". */
671 auto face_is_smooth = [&](const int i) { return sharp_faces.is_empty() || !sharp_faces[i]; };
672
673 auto face_is_island_boundary_smooth = [&](const int face_index,
674 const int /*loop_index*/,
675 const int edge_index,
676 const int edge_user_count,
677 const blender::Span<int> edge_face_map_elem) {
678 /* Edge is sharp if one of its faces is flat, or edge itself is sharp,
679 * or edge is not used by exactly two faces. */
680 if (face_is_smooth(face_index) && !(!sharp_edges.is_empty() && sharp_edges[edge_index]) &&
681 (edge_user_count == 2))
682 {
683 /* In that case, edge appears to be smooth, but we need to check its other face too. */
684 const int other_face_index = (face_index == edge_face_map_elem[0]) ? edge_face_map_elem[1] :
685 edge_face_map_elem[0];
686 return !face_is_smooth(other_face_index);
687 }
688 return true;
689 };
690
692 faces,
693 corner_edges,
694 {},
695 use_bitflags,
696 face_is_island_boundary_smooth,
697 &face_groups,
698 r_totgroup,
699 nullptr,
700 nullptr);
701
702 return face_groups;
703}
704
705#define MISLAND_DEFAULT_BUFSIZE 64
706
/* (Re)initialize a #MeshIslandStore for `items_num` items of `item_type`, allocating
 * the item->island index array and initial island/innercut pointer arrays from the
 * store's memarena.
 * NOTE(review): doxygen dropped the function head (#BKE_mesh_loop_islands_init) and
 * several statements: the arena creation (source line 716), lines 721-724, and the
 * `islands_num_alloc` initialization (line 732, presumably #MISLAND_DEFAULT_BUFSIZE);
 * confirm against the real source. */
708 const short item_type,
709 const int items_num,
710 const short island_type,
711 const short innercut_type)
712{
713 MemArena *mem = island_store->mem;
714
715 if (mem == nullptr) {
717 island_store->mem = mem;
718 }
719 /* else memarena should be cleared */
720
725
726 island_store->item_type = item_type;
727 island_store->items_to_islands_num = items_num;
728 island_store->items_to_islands = static_cast<int *>(
729 BLI_memarena_alloc(mem, sizeof(*island_store->items_to_islands) * size_t(items_num)));
730
731 island_store->island_type = island_type;
733 island_store->islands = static_cast<MeshElemMap **>(
734 BLI_memarena_alloc(mem, sizeof(*island_store->islands) * island_store->islands_num_alloc));
735
736 island_store->innercut_type = innercut_type;
737 island_store->innercuts = static_cast<MeshElemMap **>(
738 BLI_memarena_alloc(mem, sizeof(*island_store->innercuts) * island_store->islands_num_alloc));
739}
740
/* Reset a #MeshIslandStore to its empty state; the memarena is cleared (memory kept
 * for reuse) rather than freed.
 * NOTE(review): signature line missing from this extract
 * (#BKE_mesh_loop_islands_clear per the index below). */
742{
743 island_store->item_type = MISLAND_TYPE_NONE;
744 island_store->items_to_islands_num = 0;
745 island_store->items_to_islands = nullptr;
746
747 island_store->island_type = MISLAND_TYPE_NONE;
748 island_store->islands_num = 0;
749 island_store->islands = nullptr;
750
751 island_store->innercut_type = MISLAND_TYPE_NONE;
752 island_store->innercuts = nullptr;
753
754 if (island_store->mem) {
755 BLI_memarena_clear(island_store->mem);
756 }
757
758 island_store->islands_num_alloc = 0;
759}
760
/* Free the store's memarena (which owns all island allocations) and null the pointer.
 * NOTE(review): signature line missing from this extract
 * (#BKE_mesh_loop_islands_free per the index below). */
762{
763 if (island_store->mem) {
764 BLI_memarena_free(island_store->mem);
765 island_store->mem = nullptr;
766 }
767}
768
/* Append one island to the store: tag the given items with the new island index, then
 * copy the island's element indices and inner-cut indices into arena-owned
 * #MeshElemMap entries. Grows the pointer arrays (doubling) when full — old arrays
 * stay in the arena (arena memory is only reclaimed on clear/free).
 * NOTE(review): the function head (#BKE_mesh_loop_islands_add, first parameter the
 * store) is missing from this extract. */
770 const int item_num,
771 const int *items_indices,
772 const int num_island_items,
773 int *island_item_indices,
774 const int num_innercut_items,
775 int *innercut_item_indices)
776{
777 MemArena *mem = island_store->mem;
778
779 MeshElemMap *isld, *innrcut;
780 const int curr_island_idx = island_store->islands_num++;
781 const size_t curr_num_islands = size_t(island_store->islands_num);
782 int i = item_num;
783
/* Point every contributing item at the island being added. */
784 while (i--) {
785 island_store->items_to_islands[items_indices[i]] = curr_island_idx;
786 }
787
788 if (UNLIKELY(curr_num_islands > island_store->islands_num_alloc)) {
789 MeshElemMap **islds, **innrcuts;
790
791 island_store->islands_num_alloc *= 2;
792 islds = static_cast<MeshElemMap **>(
793 BLI_memarena_alloc(mem, sizeof(*islds) * island_store->islands_num_alloc));
794 memcpy(islds, island_store->islands, sizeof(*islds) * (curr_num_islands - 1));
795 island_store->islands = islds;
796
797 innrcuts = static_cast<MeshElemMap **>(
798 BLI_memarena_alloc(mem, sizeof(*innrcuts) * island_store->islands_num_alloc));
799 memcpy(innrcuts, island_store->innercuts, sizeof(*innrcuts) * (curr_num_islands - 1));
800 island_store->innercuts = innrcuts;
801 }
802
803 island_store->islands[curr_island_idx] = isld = static_cast<MeshElemMap *>(
804 BLI_memarena_alloc(mem, sizeof(*isld)));
805 isld->count = num_island_items;
806 isld->indices = static_cast<int *>(
807 BLI_memarena_alloc(mem, sizeof(*isld->indices) * size_t(num_island_items)));
808 memcpy(isld->indices, island_item_indices, sizeof(*isld->indices) * size_t(num_island_items));
809
810 island_store->innercuts[curr_island_idx] = innrcut = static_cast<MeshElemMap *>(
811 BLI_memarena_alloc(mem, sizeof(*innrcut)));
812 innrcut->count = num_innercut_items;
813 innrcut->indices = static_cast<int *>(
814 BLI_memarena_alloc(mem, sizeof(*innrcut->indices) * size_t(num_innercut_items)));
815 memcpy(innrcut->indices,
816 innercut_item_indices,
817 sizeof(*innrcut->indices) * size_t(num_innercut_items));
818}
819
/* Compute UV islands — connected face groups split where edges are UV seams or (when
 * `luvs` is given) where the UV coordinates across the edge are discontinuous — and
 * store each island's loop, face, and inner-cut-edge indices in `r_island_store`.
 * Returns false when no island could be computed.
 * NOTE(review): doxygen dropped a few call-head lines here: source line 849 (the
 * #BKE_mesh_loop_islands_init call head), line 895 (between the assert and the
 * UV comparison, likely an UNUSED_VARS_NDEBUG), and line 910 (the
 * #face_edge_loop_islands_calc call head); confirm against the real source. */
820static bool mesh_calc_islands_loop_face_uv(const int totedge,
821 const bool *uv_seams,
822 const blender::OffsetIndices<int> faces,
823 const int *corner_verts,
824 const int *corner_edges,
825 const int corners_num,
826 const float (*luvs)[2],
827 MeshIslandStore *r_island_store)
828{
829 using namespace blender;
830 int *face_groups = nullptr;
831 int num_face_groups;
832
833 int *face_indices;
834 int *loop_indices;
835 int num_pidx, num_lidx;
836
837 /* Those are used to detect 'inner cuts', i.e. edges that are borders,
838 * and yet have two or more faces of a same group using them
839 * (typical case: seam used to unwrap properly a cylinder). */
840 BLI_bitmap *edge_borders = nullptr;
841 int num_edge_borders = 0;
842 char *edge_border_count = nullptr;
843 int *edge_innercut_indices = nullptr;
844 int num_einnercuts = 0;
845
846 int grp_idx;
847
848 BKE_mesh_loop_islands_clear(r_island_store);
850 r_island_store, MISLAND_TYPE_LOOP, corners_num, MISLAND_TYPE_POLY, MISLAND_TYPE_EDGE);
851
852 Array<int> edge_to_face_offsets;
853 Array<int> edge_to_face_indices;
854 const GroupedSpan<int> edge_to_face_map = bke::mesh::build_edge_to_face_map(
855 faces, {corner_edges, corners_num}, totedge, edge_to_face_offsets, edge_to_face_indices);
856
/* Only needed for the UV-comparison path below. */
857 Array<int> edge_to_loop_offsets;
858 Array<int> edge_to_loop_indices;
859 GroupedSpan<int> edge_to_loop_map;
860 if (luvs) {
861 edge_to_loop_map = bke::mesh::build_edge_to_corner_map(
862 {corner_edges, corners_num}, totedge, edge_to_loop_offsets, edge_to_loop_indices);
863 }
864
865 /* TODO: I'm not sure edge seam flag is enough to define UV islands?
866 * Maybe we should also consider UV-maps values
867 * themselves (i.e. different UV-edges for a same mesh-edge => boundary edge too?).
868 * Would make things much more complex though,
869 * and each UVMap would then need its own mesh mapping, not sure we want that at all!
870 */
/* Boundary predicate: with `luvs`, an edge is a boundary when any face pair using it
 * disagrees on the UVs of its two endpoints; otherwise only seam-tagged edges are
 * boundaries. */
871 auto mesh_check_island_boundary_uv = [&](const int /*face_index*/,
872 const int loop_index,
873 const int edge_index,
874 const int /*edge_user_count*/,
875 const Span<int> /*edge_face_map_elem*/) -> bool {
876 if (luvs) {
877 const Span<int> edge_to_loops = edge_to_loop_map[corner_edges[loop_index]];
878
879 BLI_assert(edge_to_loops.size() >= 2 && (edge_to_loops.size() % 2) == 0);
880
881 const int v1 = corner_verts[edge_to_loops[0]];
882 const int v2 = corner_verts[edge_to_loops[1]];
883 const float *uvco_v1 = luvs[edge_to_loops[0]];
884 const float *uvco_v2 = luvs[edge_to_loops[1]];
885 for (int i = 2; i < edge_to_loops.size(); i += 2) {
886 if (corner_verts[edge_to_loops[i]] == v1) {
887 if (!equals_v2v2(uvco_v1, luvs[edge_to_loops[i]]) ||
888 !equals_v2v2(uvco_v2, luvs[edge_to_loops[i + 1]]))
889 {
890 return true;
891 }
892 }
893 else {
894 BLI_assert(corner_verts[edge_to_loops[i]] == v2);
896 if (!equals_v2v2(uvco_v2, luvs[edge_to_loops[i]]) ||
897 !equals_v2v2(uvco_v1, luvs[edge_to_loops[i + 1]]))
898 {
899 return true;
900 }
901 }
902 }
903 return false;
904 }
905
906 /* Edge is UV boundary if tagged as seam. */
907 return uv_seams && uv_seams[edge_index];
908 };
909
911 faces,
912 {corner_edges, corners_num},
913 edge_to_face_map,
914 false,
915 mesh_check_island_boundary_uv,
916 &face_groups,
917 &num_face_groups,
918 &edge_borders,
919 &num_edge_borders);
920
921 if (!num_face_groups) {
922 if (edge_borders) {
923 MEM_freeN(edge_borders);
924 }
925 return false;
926 }
927
928 if (num_edge_borders) {
929 edge_border_count = static_cast<char *>(
930 MEM_mallocN(sizeof(*edge_border_count) * size_t(totedge), __func__));
931 edge_innercut_indices = static_cast<int *>(
932 MEM_mallocN(sizeof(*edge_innercut_indices) * size_t(num_edge_borders), __func__));
933 }
934
935 face_indices = static_cast<int *>(
936 MEM_mallocN(sizeof(*face_indices) * size_t(faces.size()), __func__));
937 loop_indices = static_cast<int *>(
938 MEM_mallocN(sizeof(*loop_indices) * size_t(corners_num), __func__));
939
940 /* NOTE: here we ignore '0' invalid group - this should *never* happen in this case anyway? */
/* One pass per group: collect its faces/loops, and record border edges used by two
 * faces of the same group as "inner cuts". */
941 for (grp_idx = 1; grp_idx <= num_face_groups; grp_idx++) {
942 num_pidx = num_lidx = 0;
943 if (num_edge_borders) {
944 num_einnercuts = 0;
945 memset(edge_border_count, 0, sizeof(*edge_border_count) * size_t(totedge));
946 }
947
948 for (const int64_t p_idx : faces.index_range()) {
949 if (face_groups[p_idx] != grp_idx) {
950 continue;
951 }
952 face_indices[num_pidx++] = int(p_idx);
953 for (const int64_t corner : faces[p_idx]) {
954 const int edge_i = corner_edges[corner];
955 loop_indices[num_lidx++] = int(corner);
956 if (num_edge_borders && BLI_BITMAP_TEST(edge_borders, edge_i) &&
957 (edge_border_count[edge_i] < 2))
958 {
959 edge_border_count[edge_i]++;
960 if (edge_border_count[edge_i] == 2) {
961 edge_innercut_indices[num_einnercuts++] = edge_i;
962 }
963 }
964 }
965 }
966
967 BKE_mesh_loop_islands_add(r_island_store,
968 num_lidx,
969 loop_indices,
970 num_pidx,
971 face_indices,
972 num_einnercuts,
973 edge_innercut_indices);
974 }
975
976 MEM_freeN(face_indices);
977 MEM_freeN(loop_indices);
978 MEM_freeN(face_groups);
979
980 if (edge_borders) {
981 MEM_freeN(edge_borders);
982 }
983
984 if (num_edge_borders) {
985 MEM_freeN(edge_border_count);
986 MEM_freeN(edge_innercut_indices);
987 }
988 return true;
989}
990
/* Public wrapper: compute UV islands using only the edge seam flags (no UV
 * coordinate comparison — `luvs` is passed as null).
 * NOTE(review): doxygen dropped source line 1003 — the `return
 * mesh_calc_islands_loop_face_uv(` call head whose argument line follows. */
991bool BKE_mesh_calc_islands_loop_face_edgeseam(const float (*vert_positions)[3],
992 const int totvert,
993 const blender::int2 *edges,
994 const int totedge,
995 const bool *uv_seams,
996 const blender::OffsetIndices<int> faces,
997 const int *corner_verts,
998 const int *corner_edges,
999 const int corners_num,
1000 MeshIslandStore *r_island_store)
1001{
1002 UNUSED_VARS(vert_positions, totvert, edges);
1004 totedge, uv_seams, faces, corner_verts, corner_edges, corners_num, nullptr, r_island_store);
1005}
1006
/* Public wrapper: compute UV islands using both seam flags and the actual UV
 * coordinates (`luvs` must be non-null).
 * NOTE(review): doxygen dropped source line 1021 — the `return
 * mesh_calc_islands_loop_face_uv(` call head whose argument line follows. */
1007bool BKE_mesh_calc_islands_loop_face_uvmap(float (*vert_positions)[3],
1008 const int totvert,
1009 blender::int2 *edges,
1010 const int totedge,
1011 const bool *uv_seams,
1012 const blender::OffsetIndices<int> faces,
1013 const int *corner_verts,
1014 const int *corner_edges,
1015 const int corners_num,
1016 const float (*luvs)[2],
1017 MeshIslandStore *r_island_store)
1018{
1019 UNUSED_VARS(vert_positions, totvert, edges);
1020 BLI_assert(luvs != nullptr);
1022 totedge, uv_seams, faces, corner_verts, corner_edges, corners_num, luvs, r_island_store);
1023}
1024
CustomData interface, see also DNA_customdata_types.h.
#define ORIGINDEX_NONE
@ MISLAND_TYPE_POLY
@ MISLAND_TYPE_VERT
@ MISLAND_TYPE_LOOP
@ MISLAND_TYPE_EDGE
@ MISLAND_TYPE_NONE
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_BITMAP_NEW(_num, _alloc_string)
Definition BLI_bitmap.h:41
#define BLI_BITMAP_TEST(_bitmap, _index)
Definition BLI_bitmap.h:65
#define BLI_BITMAP_ENABLE(_bitmap, _index)
Definition BLI_bitmap.h:82
unsigned int BLI_bitmap
Definition BLI_bitmap.h:17
#define BLI_buffer_reinit_data(buffer_, type_, new_count_)
Definition BLI_buffer.h:49
@ BLI_BUFFER_NOP
Definition BLI_buffer.h:23
#define BLI_buffer_declare_static(type_, name_, flag_, static_count_)
Definition BLI_buffer.h:27
#define BLI_buffer_free(name_)
Definition BLI_buffer.h:96
float cross_poly_v2(const float verts[][2], unsigned int nr)
Definition math_geom.cc:147
MINLINE void copy_v2_v2(float r[2], const float a[2])
MINLINE bool equals_v2v2(const float v1[2], const float v2[2]) ATTR_WARN_UNUSED_RESULT
MINLINE void sub_v2_v2v2(float r[2], const float a[2], const float b[2])
void * BLI_memarena_alloc(struct MemArena *ma, size_t size) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1) ATTR_MALLOC ATTR_ALLOC_SIZE(2)
void BLI_memarena_free(struct MemArena *ma) ATTR_NONNULL(1)
struct MemArena * BLI_memarena_new(size_t bufsize, const char *name) ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL ATTR_NONNULL(2) ATTR_MALLOC
#define BLI_MEMARENA_STD_BUFSIZE
void void BLI_memarena_clear(MemArena *ma) ATTR_NONNULL(1)
#define BLI_SCOPED_DEFER(function_to_defer)
unsigned short ushort
unsigned int uint
#define UNUSED_VARS(...)
#define UNUSED_VARS_NDEBUG(...)
#define UNLIKELY(x)
#define ELEM(...)
Read Guarded memory(de)allocation.
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE int32_t atomic_fetch_and_add_int32(int32_t *p, int32_t x)
ATTR_WARN_UNUSED_RESULT const BMVert * v2
ATTR_WARN_UNUSED_RESULT const BMVert * v
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
Span< T > as_span() const
Definition BLI_array.hh:232
const T & last(const int64_t n=0) const
Definition BLI_array.hh:285
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:388
constexpr Span slice(int64_t start, int64_t size) const
Definition BLI_span.hh:138
constexpr const T * data() const
Definition BLI_span.hh:216
constexpr int64_t size() const
Definition BLI_span.hh:253
constexpr const T * end() const
Definition BLI_span.hh:225
constexpr IndexRange index_range() const
Definition BLI_span.hh:402
constexpr const T * begin() const
Definition BLI_span.hh:221
constexpr bool is_empty() const
Definition BLI_span.hh:261
#define printf
#define fabsf(x)
draw_view in_light_buf[] float
draw_view push_constant(Type::INT, "radiance_src") .push_constant(Type capture_info_buf storage_buf(1, Qualifier::READ, "ObjectBounds", "bounds_buf[]") .push_constant(Type draw_view int
static ushort indices[]
IndexRange range
void *(* MEM_mallocN)(size_t len, const char *str)
Definition mallocn.cc:44
void *(* MEM_calloc_arrayN)(size_t len, size_t size, const char *str)
Definition mallocn.cc:43
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
void *(* MEM_callocN)(size_t len, const char *str)
Definition mallocn.cc:42
static ulong * next
static void face_edge_loop_islands_calc(const int totedge, const blender::OffsetIndices< int > faces, const blender::Span< int > corner_edges, blender::GroupedSpan< int > edge_face_map, const bool use_bitflags, MeshRemap_CheckIslandBoundary edge_boundary_check, int **r_face_groups, int *r_totgroup, BLI_bitmap **r_edge_borders, int *r_totedgeborder)
UvMapVert * BKE_mesh_uv_vert_map_get_vert(UvVertMap *vmap, uint v)
void BKE_mesh_origindex_map_create_corner_tri(MeshElemMap **r_map, int **r_mem, const blender::OffsetIndices< int > faces, const int *corner_tri_faces, const int corner_tris_num)
bool BKE_mesh_calc_islands_loop_face_edgeseam(const float(*vert_positions)[3], const int totvert, const blender::int2 *edges, const int totedge, const bool *uv_seams, const blender::OffsetIndices< int > faces, const int *corner_verts, const int *corner_edges, const int corners_num, MeshIslandStore *r_island_store)
void BKE_mesh_origindex_map_create(MeshElemMap **r_map, int **r_mem, const int totsource, const int *final_origindex, const int totfinal)
void BKE_mesh_loop_islands_add(MeshIslandStore *island_store, const int item_num, const int *items_indices, const int num_island_items, int *island_item_indices, const int num_innercut_items, int *innercut_item_indices)
void BKE_mesh_loop_islands_init(MeshIslandStore *island_store, const short item_type, const int items_num, const short island_type, const short innercut_type)
#define MISLAND_DEFAULT_BUFSIZE
void BKE_mesh_loop_islands_clear(MeshIslandStore *island_store)
bool BKE_mesh_calc_islands_loop_face_uvmap(float(*vert_positions)[3], const int totvert, blender::int2 *edges, const int totedge, const bool *uv_seams, const blender::OffsetIndices< int > faces, const int *corner_verts, const int *corner_edges, const int corners_num, const float(*luvs)[2], MeshIslandStore *r_island_store)
void BKE_mesh_uv_vert_map_free(UvVertMap *vmap)
void BKE_mesh_vert_corner_tri_map_create(MeshElemMap **r_map, int **r_mem, const int totvert, const blender::int3 *corner_tris, const int tris_num, const int *corner_verts, const int)
UvVertMap * BKE_mesh_uv_vert_map_create(const blender::OffsetIndices< int > faces, const bool *hide_poly, const bool *select_poly, const int *corner_verts, const float(*mloopuv)[2], uint totvert, const float limit[2], const bool selected, const bool use_winding)
int * BKE_mesh_calc_smoothgroups(int edges_num, const blender::OffsetIndices< int > faces, const blender::Span< int > corner_edges, const blender::Span< bool > sharp_edges, const blender::Span< bool > sharp_faces, int *r_totgroup, bool use_bitflags)
void BKE_mesh_loop_islands_free(MeshIslandStore *island_store)
static bool mesh_calc_islands_loop_face_uv(const int totedge, const bool *uv_seams, const blender::OffsetIndices< int > faces, const int *corner_verts, const int *corner_edges, const int corners_num, const float(*luvs)[2], MeshIslandStore *r_island_store)
void build_vert_to_face_indices(OffsetIndices< int > faces, Span< int > corner_verts, OffsetIndices< int > offsets, MutableSpan< int > face_indices)
GroupedSpan< int > build_vert_to_corner_map(Span< int > corner_verts, int verts_num, Array< int > &r_offsets, Array< int > &r_indices)
static Array< int > reverse_indices_in_groups(const Span< int > group_indices, const OffsetIndices< int > offsets)
GroupedSpan< int > build_edge_to_corner_map(Span< int > corner_edges, int edges_num, Array< int > &r_offsets, Array< int > &r_indices)
static Array< int > create_reverse_offsets(const Span< int > indices, const int items_num)
static GroupedSpan< int > gather_groups(const Span< int > group_indices, const int groups_num, Array< int > &r_offsets, Array< int > &r_indices)
static void sort_small_groups(const OffsetIndices< int > groups, const int grain_size, MutableSpan< int > indices)
int face_triangles_num(const int face_size)
Definition BKE_mesh.hh:287
GroupedSpan< int > build_edge_to_face_map(OffsetIndices< int > faces, Span< int > corner_edges, int edges_num, Array< int > &r_offsets, Array< int > &r_indices)
Array< int > build_corner_to_face_map(OffsetIndices< int > faces)
Array< int > build_vert_to_corner_indices(Span< int > corner_verts, OffsetIndices< int > offsets)
GroupedSpan< int > build_vert_to_face_map(OffsetIndices< int > faces, Span< int > corner_verts, int verts_num, Array< int > &r_offsets, Array< int > &r_indices)
static void reverse_group_indices_in_groups(const OffsetIndices< int > groups, const Span< int > group_to_elem, const OffsetIndices< int > offsets, MutableSpan< int > results)
GroupedSpan< int > build_vert_to_edge_map(Span< int2 > edges, int verts_num, Array< int > &r_offsets, Array< int > &r_indices)
void build_reverse_offsets(Span< int > indices, MutableSpan< int > offsets)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:95
__int64 int64_t
Definition stdint.h:89
MeshElemMap ** islands
MeshElemMap ** innercuts
unsigned int face_index
UvMapVert * next
unsigned short loop_of_face_index
UvMapVert * buf
UvMapVert ** vert