Blender V4.3
bmesh_mesh_normals.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
13#include "MEM_guardedalloc.h"
14
15#include "DNA_scene_types.h"
16
17#include "BLI_array.hh"
18#include "BLI_bitmap.h"
19#include "BLI_linklist_stack.h"
20#include "BLI_math_base.hh"
21#include "BLI_math_vector.h"
22#include "BLI_task.h"
23#include "BLI_utildefines.h"
24#include "BLI_vector.hh"
25
26#include "BKE_customdata.hh"
27#include "BKE_editmesh.hh"
28#include "BKE_global.hh"
29#include "BKE_mesh.hh"
30
32
33using blender::Array;
34using blender::float3;
36using blender::Span;
37
38/* Smooth angle to use when tagging edges is disabled entirely. */
39#define EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS -FLT_MAX
40
42 BMEdge *e,
43 const float split_angle_cos);
44static void bm_edge_tag_from_smooth(Span<float3> fnos, BMEdge *e, const float split_angle_cos);
45
46/* -------------------------------------------------------------------- */
54/* We use that existing internal API flag,
55 * assuming no other tool using it would run concurrently to clnors editing. */
56#define BM_LNORSPACE_UPDATE _FLAG_MF
57
59 /* Read-only data. */
62
63 /* Write data. */
65};
66
68 const float e1diff[3],
69 const float e2diff[3],
70 const float f_no[3],
71 float v_no[3])
72{
73 /* Calculate the dot product of the two edges that meet at the loop's vertex. */
74 /* Edge vectors are calculated from `e->v1` to `e->v2`, so adjust the dot product if one but not
75 * both loops actually runs from `e->v2` to `e->v1`. */
76 float dotprod = dot_v3v3(e1diff, e2diff);
77 if ((l_iter->prev->e->v1 == l_iter->prev->v) ^ (l_iter->e->v1 == l_iter->v)) {
78 dotprod = -dotprod;
79 }
80 const float fac = blender::math::safe_acos_approx(-dotprod);
81 /* Shouldn't happen as normalizing edge-vectors cause degenerate values to be zeroed out. */
82 BLI_assert(!isnan(fac));
83 madd_v3_v3fl(v_no, f_no, fac);
84}
85
87{
88 /* NOTE(@ideasman42): Regarding redundant unit-length edge-vector calculation:
89 *
90 * This functions calculates unit-length edge-vector for every loop edge
91 * in practice this means 2x `sqrt` calls per face-corner connected to each vertex.
92 *
93 * Previously (2.9x and older), the edge vectors were calculated and stored for reuse.
94 * However the overhead of did not perform well (~16% slower - single & multi-threaded)
95 * when compared with calculating the values as they are needed.
96 *
97 * For simple grid topologies this function calculates the edge-vectors 4x times.
98 * There is some room for improved performance by storing the edge-vectors for reuse locally
99 * in this function, reducing the number of redundant `sqrtf` in half (2x instead of 4x).
100 * so face loops that share an edge would not calculate it multiple times.
101 * From my tests the performance improvements are so small they're difficult to measure,
102 * the time saved removing `sqrtf` calls is lost on storing and looking up the information,
103 * even in the case of small inline lookup tables.
104 *
105 * Further, local data structures would need to support cases where
106 * stack memory isn't sufficient - adding additional complexity for corner-cases
107 * (a vertex that has thousands of connected edges for example).
108 * Unless there are important use-cases that benefit from edge-vector caching,
109 * keep this simple and calculate ~4x as many edge-vectors.
110 *
111 * In conclusion, the cost of caching & looking up edge-vectors both globally or per-vertex
112 * doesn't save enough time to make it worthwhile.
113 */
114
115 float *v_no = v->no;
116 zero_v3(v_no);
117
118 BMEdge *e_first = v->e;
119 if (e_first != nullptr) {
120 float e1diff[3], e2diff[3];
121 BMEdge *e_iter = e_first;
122 do {
123 BMLoop *l_first = e_iter->l;
124 if (l_first != nullptr) {
125 sub_v3_v3v3(e2diff, e_iter->v1->co, e_iter->v2->co);
126 normalize_v3(e2diff);
127
128 BMLoop *l_iter = l_first;
129 do {
130 if (l_iter->v == v) {
131 BMEdge *e_prev = l_iter->prev->e;
132 sub_v3_v3v3(e1diff, e_prev->v1->co, e_prev->v2->co);
133 normalize_v3(e1diff);
134
135 bm_vert_calc_normals_accum_loop(l_iter, e1diff, e2diff, l_iter->f->no, v_no);
136 }
137 } while ((l_iter = l_iter->radial_next) != l_first);
138 }
139 } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v)) != e_first);
140
141 if (LIKELY(normalize_v3(v_no) != 0.0f)) {
142 return;
143 }
144 }
145 /* Fallback normal. */
146 normalize_v3_v3(v_no, v->co);
147}
148
149static void bm_vert_calc_normals_cb(void * /*userdata*/,
150 MempoolIterData *mp_v,
151 const TaskParallelTLS *__restrict /*tls*/)
152{
153 BMVert *v = (BMVert *)mp_v;
155}
156
158{
159 /* See #bm_vert_calc_normals_impl note on performance. */
160 float *v_no = data->vnos[BM_elem_index_get(v)];
161 zero_v3(v_no);
162
163 /* Loop over edges. */
164 BMEdge *e_first = v->e;
165 if (e_first != nullptr) {
166 float e1diff[3], e2diff[3];
167 BMEdge *e_iter = e_first;
168 do {
169 BMLoop *l_first = e_iter->l;
170 if (l_first != nullptr) {
171 sub_v3_v3v3(e2diff,
172 data->vcos[BM_elem_index_get(e_iter->v1)],
173 data->vcos[BM_elem_index_get(e_iter->v2)]);
174 normalize_v3(e2diff);
175
176 BMLoop *l_iter = l_first;
177 do {
178 if (l_iter->v == v) {
179 BMEdge *e_prev = l_iter->prev->e;
180 sub_v3_v3v3(e1diff,
181 data->vcos[BM_elem_index_get(e_prev->v1)],
182 data->vcos[BM_elem_index_get(e_prev->v2)]);
183 normalize_v3(e1diff);
184
186 l_iter, e1diff, e2diff, data->fnos[BM_elem_index_get(l_iter->f)], v_no);
187 }
188 } while ((l_iter = l_iter->radial_next) != l_first);
189 }
190 } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v)) != e_first);
191
192 if (LIKELY(normalize_v3(v_no) != 0.0f)) {
193 return;
194 }
195 }
196 /* Fallback normal. */
197 normalize_v3_v3(v_no, data->vcos[BM_elem_index_get(v)]);
198}
199
200static void bm_vert_calc_normals_with_coords_cb(void *userdata,
201 MempoolIterData *mp_v,
202 const TaskParallelTLS *__restrict /*tls*/)
203{
205 userdata);
206 BMVert *v = (BMVert *)mp_v;
208}
209
211 const Span<float3> fnos,
212 const Span<float3> vcos,
214{
215 BM_mesh_elem_index_ensure(bm, BM_FACE | ((!vnos.is_empty() || !vcos.is_empty()) ? BM_VERT : 0));
216
217 TaskParallelSettings settings;
219 settings.use_threading = bm->totvert >= BM_THREAD_LIMIT;
220
221 if (vcos.is_empty()) {
222 BM_iter_parallel(bm, BM_VERTS_OF_MESH, bm_vert_calc_normals_cb, nullptr, &settings);
223 }
224 else {
225 BLI_assert(!fnos.is_empty() || !vnos.is_empty());
227 data.fnos = fnos;
228 data.vcos = vcos;
229 data.vnos = vnos;
230 BM_iter_parallel(bm, BM_VERTS_OF_MESH, bm_vert_calc_normals_with_coords_cb, &data, &settings);
231 }
232}
233
234static void bm_face_calc_normals_cb(void * /*userdata*/,
235 MempoolIterData *mp_f,
236 const TaskParallelTLS *__restrict /*tls*/)
237{
238 BMFace *f = (BMFace *)mp_f;
239
240 BM_face_calc_normal(f, f->no);
241}
242
244{
245 if (params->face_normals) {
246 /* Calculate all face normals. */
247 TaskParallelSettings settings;
249 settings.use_threading = bm->totedge >= BM_THREAD_LIMIT;
250
251 BM_iter_parallel(bm, BM_FACES_OF_MESH, bm_face_calc_normals_cb, nullptr, &settings);
252 }
253
254 /* Add weighted face normals to vertices, and normalize vert normals. */
255 bm_mesh_verts_calc_normals(bm, {}, {}, {});
256}
257
264
267/* -------------------------------------------------------------------- */
272 void *userdata, const int iter, const TaskParallelTLS *__restrict /*tls*/)
273{
274 BMFace *f = ((BMFace **)userdata)[iter];
275 BM_face_calc_normal(f, f->no);
276}
277
279 void *userdata, const int iter, const TaskParallelTLS *__restrict /*tls*/)
280{
281 BMVert *v = ((BMVert **)userdata)[iter];
283}
284
286 const BMPartialUpdate *bmpinfo,
288{
289 BLI_assert(bmpinfo->params.do_normals);
290 /* While harmless, exit early if there is nothing to do. */
291 if (UNLIKELY((bmpinfo->verts_len == 0) && (bmpinfo->faces_len == 0))) {
292 return;
293 }
294
295 BMVert **verts = bmpinfo->verts;
296 BMFace **faces = bmpinfo->faces;
297 const int verts_len = bmpinfo->verts_len;
298 const int faces_len = bmpinfo->faces_len;
299
300 TaskParallelSettings settings;
302
303 /* Faces. */
304 if (params->face_normals) {
306 0, faces_len, faces, bm_partial_faces_parallel_range_calc_normals_cb, &settings);
307 }
308
309 /* Verts. */
311 0, verts_len, verts, bm_partial_verts_parallel_range_calc_normal_cb, &settings);
312}
313
320
323/* -------------------------------------------------------------------- */
328 const Span<float3> fnos,
329 const Span<float3> vcos,
331{
332 /* Add weighted face normals to vertices, and normalize vert normals. */
333 bm_mesh_verts_calc_normals(bm, fnos, vcos, vnos);
334}
335
338/* -------------------------------------------------------------------- */
342void BM_normals_loops_edges_tag(BMesh *bm, const bool do_edges)
343{
344 BMFace *f;
345 BMEdge *e;
346 BMIter fiter, eiter;
347 BMLoop *l_curr, *l_first;
348
349 if (do_edges) {
350 int index_edge;
351 BM_ITER_MESH_INDEX (e, &eiter, bm, BM_EDGES_OF_MESH, index_edge) {
352 BMLoop *l_a, *l_b;
353
354 BM_elem_index_set(e, index_edge); /* set_inline */
356 if (BM_edge_loop_pair(e, &l_a, &l_b)) {
357 if (BM_elem_flag_test(e, BM_ELEM_SMOOTH) && l_a->v != l_b->v) {
359 }
360 }
361 }
362 bm->elem_index_dirty &= ~BM_EDGE;
363 }
364
365 int index_face, index_loop = 0;
366 BM_ITER_MESH_INDEX (f, &fiter, bm, BM_FACES_OF_MESH, index_face) {
367 BM_elem_index_set(f, index_face); /* set_inline */
368 l_curr = l_first = BM_FACE_FIRST_LOOP(f);
369 do {
370 BM_elem_index_set(l_curr, index_loop++); /* set_inline */
372 } while ((l_curr = l_curr->next) != l_first);
373 }
375}
376
381 const Span<float3> fnos,
382 float split_angle_cos,
383 const bool do_sharp_edges_tag)
384{
385 BMIter eiter;
386 BMEdge *e;
387 int i;
388
389 if (!fnos.is_empty()) {
391 }
392
393 if (do_sharp_edges_tag) {
394 BM_ITER_MESH_INDEX (e, &eiter, bm, BM_EDGES_OF_MESH, i) {
395 BM_elem_index_set(e, i); /* set_inline */
396 if (e->l != nullptr) {
397 bm_edge_tag_from_smooth_and_set_sharp(fnos, e, split_angle_cos);
398 }
399 }
400 }
401 else {
402 BM_ITER_MESH_INDEX (e, &eiter, bm, BM_EDGES_OF_MESH, i) {
403 BM_elem_index_set(e, i); /* set_inline */
404 if (e->l != nullptr) {
405 bm_edge_tag_from_smooth(fnos, e, split_angle_cos);
406 }
407 }
408 }
409
410 bm->elem_index_dirty &= ~BM_EDGE;
411}
412
413void BM_edges_sharp_from_angle_set(BMesh *bm, const float split_angle)
414{
415 if (split_angle >= float(M_PI)) {
416 /* Nothing to do! */
417 return;
418 }
419
420 bm_mesh_edges_sharp_tag(bm, {}, cosf(split_angle), true);
421}
422
425/* -------------------------------------------------------------------- */
430{
431 BMLoop *lfan_pivot_next = l_curr;
432 BMEdge *e_next = l_curr->e;
433
434 BLI_assert(!BM_elem_flag_test(lfan_pivot_next, BM_ELEM_TAG));
435 BM_elem_flag_enable(lfan_pivot_next, BM_ELEM_TAG);
436
437 while (true) {
438 /* Much simpler than in sibling code with basic Mesh data! */
439 lfan_pivot_next = BM_vert_step_fan_loop(lfan_pivot_next, &e_next);
440
441 if (!lfan_pivot_next || !BM_elem_flag_test(e_next, BM_ELEM_TAG)) {
442 /* Sharp loop/edge, so not a cyclic smooth fan... */
443 return false;
444 }
445 /* Smooth loop/edge... */
446 if (BM_elem_flag_test(lfan_pivot_next, BM_ELEM_TAG)) {
447 if (lfan_pivot_next == l_curr) {
448 /* We walked around a whole cyclic smooth fan
449 * without finding any already-processed loop,
450 * means we can use initial l_curr/l_prev edge as start for this smooth fan. */
451 return true;
452 }
453 /* ... already checked in some previous looping, we can abort. */
454 return false;
455 }
456 /* ... we can skip it in future, and keep checking the smooth fan. */
457 BM_elem_flag_enable(lfan_pivot_next, BM_ELEM_TAG);
458 }
459}
460
474 const Span<float3> vcos,
475 const Span<float3> fnos,
476 const short (*clnors_data)[2],
477 const int cd_loop_clnors_offset,
478 const bool has_clnors,
479 /* Cache. */
481 /* Iterate. */
482 BMLoop *l_curr,
483 /* Result. */
484 MutableSpan<float3> r_lnos,
485 MLoopNorSpaceArray *r_lnors_spacearr)
486{
488 BLI_assert(fnos.is_empty() || ((bm->elem_index_dirty & BM_FACE) == 0));
489 BLI_assert(vcos.is_empty() || ((bm->elem_index_dirty & BM_VERT) == 0));
491
492 int handled = 0;
493
494 /* Temp normal stack. */
495 BLI_SMALLSTACK_DECLARE(normal, float *);
496 /* Temp clnors stack. */
497 BLI_SMALLSTACK_DECLARE(clnors, short *);
498 /* Temp edge vectors stack, only used when computing lnor spacearr. */
499
500 /* A smooth edge, we have to check for cyclic smooth fan case.
501 * If we find a new, never-processed cyclic smooth fan, we can do it now using that loop/edge
502 * as 'entry point', otherwise we can skip it. */
503
504 /* NOTE: In theory, we could make bm_mesh_loop_check_cyclic_smooth_fan() store
505 * mlfan_pivot's in a stack, to avoid having to fan again around
506 * the vert during actual computation of clnor & clnorspace. However, this would complicate
507 * the code, add more memory usage, and
508 * BM_vert_step_fan_loop() is quite cheap in term of CPU cycles,
509 * so really think it's not worth it. */
510 if (BM_elem_flag_test(l_curr->e, BM_ELEM_TAG) &&
512 {
513 }
514 else if (!BM_elem_flag_test(l_curr->e, BM_ELEM_TAG) &&
516 {
517 /* Simple case (both edges around that vertex are sharp in related face),
518 * this vertex just takes its face normal.
519 */
520 const int l_curr_index = BM_elem_index_get(l_curr);
521 const float3 &no = !fnos.is_empty() ? fnos[BM_elem_index_get(l_curr->f)] :
522 float3(l_curr->f->no);
523 copy_v3_v3(r_lnos[l_curr_index], no);
524
525 /* If needed, generate this (simple!) lnor space. */
526 if (r_lnors_spacearr) {
527 float vec_curr[3], vec_prev[3];
528 MLoopNorSpace *lnor_space = BKE_lnor_space_create(r_lnors_spacearr);
529
530 {
531 const BMVert *v_pivot = l_curr->v;
532 const float3 &co_pivot = !vcos.is_empty() ? vcos[BM_elem_index_get(v_pivot)] :
533 float3(v_pivot->co);
534 const BMVert *v_1 = l_curr->next->v;
535 const float3 co_1 = !vcos.is_empty() ? vcos[BM_elem_index_get(v_1)] : float3(v_1->co);
536 const BMVert *v_2 = l_curr->prev->v;
537 const float3 co_2 = !vcos.is_empty() ? vcos[BM_elem_index_get(v_2)] : float3(v_2->co);
538
539 BLI_assert(v_1 == BM_edge_other_vert(l_curr->e, v_pivot));
540 BLI_assert(v_2 == BM_edge_other_vert(l_curr->prev->e, v_pivot));
541
542 sub_v3_v3v3(vec_curr, co_1, co_pivot);
543 normalize_v3(vec_curr);
544 sub_v3_v3v3(vec_prev, co_2, co_pivot);
545 normalize_v3(vec_prev);
546 }
547
548 BKE_lnor_space_define(lnor_space, r_lnos[l_curr_index], vec_curr, vec_prev, {});
549 /* We know there is only one loop in this space,
550 * no need to create a linklist in this case... */
551 BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, l_curr_index, l_curr, true);
552
553 if (has_clnors) {
554 const short(*clnor)[2] = clnors_data ?
555 &clnors_data[l_curr_index] :
556 static_cast<const short(*)[2]>(
557 BM_ELEM_CD_GET_VOID_P(l_curr, cd_loop_clnors_offset));
558 BKE_lnor_space_custom_data_to_normal(lnor_space, *clnor, r_lnos[l_curr_index]);
559 }
560 }
561 handled = 1;
562 }
563 /* We *do not need* to check/tag loops as already computed!
564 * Due to the fact a loop only links to one of its two edges,
565 * a same fan *will never be walked more than once!*
566 * Since we consider edges having neighbor faces with inverted (flipped) normals as sharp,
567 * we are sure that no fan will be skipped, even only considering the case
568 * (sharp curr_edge, smooth prev_edge), and not the alternative
569 * (smooth curr_edge, sharp prev_edge).
570 * All this due/thanks to link between normals and loop ordering.
571 */
572 else {
573 /* We have to fan around current vertex, until we find the other non-smooth edge,
574 * and accumulate face normals into the vertex!
575 * Note in case this vertex has only one sharp edge,
576 * this is a waste because the normal is the same as the vertex normal,
577 * but I do not see any easy way to detect that (would need to count number of sharp edges
578 * per vertex, I doubt the additional memory usage would be worth it, especially as it
579 * should not be a common case in real-life meshes anyway).
580 */
581 BMVert *v_pivot = l_curr->v;
582 BMEdge *e_next;
583 const BMEdge *e_org = l_curr->e;
584 BMLoop *lfan_pivot, *lfan_pivot_next;
585 int lfan_pivot_index;
586 float lnor[3] = {0.0f, 0.0f, 0.0f};
587 float vec_curr[3], vec_next[3], vec_org[3];
588
589 /* We validate clnors data on the fly - cheapest way to do! */
590 int clnors_avg[2] = {0, 0};
591 const short(*clnor_ref)[2] = nullptr;
592 int clnors_count = 0;
593 bool clnors_invalid = false;
594
595 const float3 &co_pivot = !vcos.is_empty() ? vcos[BM_elem_index_get(v_pivot)] :
596 float3(v_pivot->co);
597
598 MLoopNorSpace *lnor_space = r_lnors_spacearr ? BKE_lnor_space_create(r_lnors_spacearr) :
599 nullptr;
600
601 BLI_assert((edge_vectors == nullptr) || edge_vectors->is_empty());
602
603 lfan_pivot = l_curr;
604 lfan_pivot_index = BM_elem_index_get(lfan_pivot);
605 e_next = lfan_pivot->e; /* Current edge here, actually! */
606
607 /* Only need to compute previous edge's vector once,
608 * then we can just reuse old current one! */
609 {
610 const BMVert *v_2 = lfan_pivot->next->v;
611 const float3 co_2 = !vcos.is_empty() ? vcos[BM_elem_index_get(v_2)] : float3(v_2->co);
612
613 BLI_assert(v_2 == BM_edge_other_vert(e_next, v_pivot));
614
615 sub_v3_v3v3(vec_org, co_2, co_pivot);
616 normalize_v3(vec_org);
617 copy_v3_v3(vec_curr, vec_org);
618
619 if (r_lnors_spacearr) {
620 edge_vectors->append(vec_org);
621 }
622 }
623
624 while (true) {
625 /* Much simpler than in sibling code with basic Mesh data! */
626 lfan_pivot_next = BM_vert_step_fan_loop(lfan_pivot, &e_next);
627 if (lfan_pivot_next) {
628 BLI_assert(lfan_pivot_next->v == v_pivot);
629 }
630 else {
631 /* next edge is non-manifold, we have to find it ourselves! */
632 e_next = (lfan_pivot->e == e_next) ? lfan_pivot->prev->e : lfan_pivot->e;
633 }
634
635 /* Compute edge vector.
636 * NOTE: We could pre-compute those into an array, in the first iteration,
637 * instead of computing them twice (or more) here.
638 * However, time gained is not worth memory and time lost,
639 * given the fact that this code should not be called that much in real-life meshes.
640 */
641 {
642 const BMVert *v_2 = BM_edge_other_vert(e_next, v_pivot);
643 const float3 co_2 = !vcos.is_empty() ? vcos[BM_elem_index_get(v_2)] : float3(v_2->co);
644
645 sub_v3_v3v3(vec_next, co_2, co_pivot);
646 normalize_v3(vec_next);
647 }
648
649 {
650 /* Code similar to accumulate_vertex_normals_poly_v3. */
651 /* Calculate angle between the two face edges incident on this vertex. */
652 const BMFace *f = lfan_pivot->f;
653 const float fac = blender::math::safe_acos_approx(dot_v3v3(vec_next, vec_curr));
654 const float3 &no = !fnos.is_empty() ? fnos[BM_elem_index_get(f)] : float3(f->no);
655 /* Accumulate */
656 madd_v3_v3fl(lnor, no, fac);
657
658 if (has_clnors) {
659 /* Accumulate all clnors, if they are not all equal we have to fix that! */
660 const short(*clnor)[2] = clnors_data ?
661 &clnors_data[lfan_pivot_index] :
662 static_cast<const short(*)[2]>(BM_ELEM_CD_GET_VOID_P(
663 lfan_pivot, cd_loop_clnors_offset));
664 if (clnors_count) {
665 clnors_invalid |= ((*clnor_ref)[0] != (*clnor)[0] || (*clnor_ref)[1] != (*clnor)[1]);
666 }
667 else {
668 clnor_ref = clnor;
669 }
670 clnors_avg[0] += (*clnor)[0];
671 clnors_avg[1] += (*clnor)[1];
672 clnors_count++;
673 /* We store here a pointer to all custom lnors processed. */
674 BLI_SMALLSTACK_PUSH(clnors, (short *)*clnor);
675 }
676 }
677
678 /* We store here a pointer to all loop-normals processed. */
679 BLI_SMALLSTACK_PUSH(normal, (float *)r_lnos[lfan_pivot_index]);
680
681 if (r_lnors_spacearr) {
682 /* Assign current lnor space to current 'vertex' loop. */
683 BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, lfan_pivot_index, lfan_pivot, false);
684 if (e_next != e_org) {
685 /* We store here all edges-normalized vectors processed. */
686 edge_vectors->append(vec_next);
687 }
688 }
689
690 handled += 1;
691
692 if (!BM_elem_flag_test(e_next, BM_ELEM_TAG) || (e_next == e_org)) {
693 /* Next edge is sharp, we have finished with this fan of faces around this vert! */
694 break;
695 }
696
697 /* Copy next edge vector to current one. */
698 copy_v3_v3(vec_curr, vec_next);
699 /* Next pivot loop to current one. */
700 lfan_pivot = lfan_pivot_next;
701 lfan_pivot_index = BM_elem_index_get(lfan_pivot);
702 }
703
704 {
705 float lnor_len = normalize_v3(lnor);
706
707 /* If we are generating lnor spacearr, we can now define the one for this fan. */
708 if (r_lnors_spacearr) {
709 if (UNLIKELY(lnor_len == 0.0f)) {
710 /* Use vertex normal as fallback! */
711 copy_v3_v3(lnor, r_lnos[lfan_pivot_index]);
712 lnor_len = 1.0f;
713 }
714
715 BKE_lnor_space_define(lnor_space, lnor, vec_org, vec_next, *edge_vectors);
716 edge_vectors->clear();
717
718 if (has_clnors) {
719 if (clnors_invalid) {
720 short *clnor;
721
722 clnors_avg[0] /= clnors_count;
723 clnors_avg[1] /= clnors_count;
724 /* Fix/update all clnors of this fan with computed average value. */
725
726 /* Prints continuously when merge custom normals, so commenting. */
727 // printf("Invalid clnors in this fan!\n");
728
729 while ((clnor = static_cast<short *>(BLI_SMALLSTACK_POP(clnors)))) {
730 // print_v2("org clnor", clnor);
731 clnor[0] = short(clnors_avg[0]);
732 clnor[1] = short(clnors_avg[1]);
733 }
734 // print_v2("new clnors", clnors_avg);
735 }
736 else {
737 /* We still have to consume the stack! */
738 while (BLI_SMALLSTACK_POP(clnors)) {
739 /* pass */
740 }
741 }
742 BKE_lnor_space_custom_data_to_normal(lnor_space, *clnor_ref, lnor);
743 }
744 }
745
746 /* In case we get a zero normal here, just use vertex normal already set! */
747 if (LIKELY(lnor_len != 0.0f)) {
748 /* Copy back the final computed normal into all related loop-normals. */
749 float *nor;
750
751 while ((nor = static_cast<float *>(BLI_SMALLSTACK_POP(normal)))) {
752 copy_v3_v3(nor, lnor);
753 }
754 }
755 else {
756 /* We still have to consume the stack! */
757 while (BLI_SMALLSTACK_POP(normal)) {
758 /* pass */
759 }
760 }
761 }
762
763 /* Tag related vertex as sharp, to avoid fanning around it again
764 * (in case it was a smooth one). */
765 if (r_lnors_spacearr) {
767 }
768 }
769 return handled;
770}
771
772static int bm_loop_index_cmp(const void *a, const void *b)
773{
776 return -1;
777 }
778 return 1;
779}
780
790 const BMLoop *l_a,
791 const BMLoop *l_b)
792{
793 BLI_assert(l_a->radial_next == l_b);
794 return (
795 /* The face is manifold. */
796 (l_b->radial_next == l_a) &&
797 /* Faces have winding that faces the same way. */
798 (l_a->v != l_b->v) &&
799 /* The edge is smooth. */
801 /* Both faces are smooth. */
803}
804
806 BMEdge *e,
807 const float split_angle_cos)
808{
809 BLI_assert(e->l != nullptr);
810 BMLoop *l_a = e->l, *l_b = l_a->radial_next;
811 bool is_smooth = false;
813 if (split_angle_cos != -1.0f) {
814 const float dot = fnos.is_empty() ? dot_v3v3(l_a->f->no, l_b->f->no) :
815 dot_v3v3(fnos[BM_elem_index_get(l_a->f)],
816 fnos[BM_elem_index_get(l_b->f)]);
817 if (dot >= split_angle_cos) {
818 is_smooth = true;
819 }
820 }
821 else {
822 is_smooth = true;
823 }
824 }
825
826 /* Perform `BM_elem_flag_set(e, BM_ELEM_TAG, is_smooth)`
827 * NOTE: This will be set by multiple threads however it will be set to the same value. */
828
829 /* No need for atomics here as this is a single byte. */
830 char *hflag_p = &e->head.hflag;
831 if (is_smooth) {
832 *hflag_p = *hflag_p | BM_ELEM_TAG;
833 }
834 else {
835 *hflag_p = *hflag_p & ~BM_ELEM_TAG;
836 }
837}
838
847 BMEdge *e,
848 const float split_angle_cos)
849{
850 BLI_assert(e->l != nullptr);
851 BMLoop *l_a = e->l, *l_b = l_a->radial_next;
852 bool is_smooth = false;
854 if (split_angle_cos != -1.0f) {
855 const float dot = fnos.is_empty() ? dot_v3v3(l_a->f->no, l_b->f->no) :
856 dot_v3v3(fnos[BM_elem_index_get(l_a->f)],
857 fnos[BM_elem_index_get(l_b->f)]);
858 if (dot >= split_angle_cos) {
859 is_smooth = true;
860 }
861 else {
862 /* Note that we do not care about the other sharp-edge cases
863 * (sharp face, non-manifold edge, etc.),
864 * only tag edge as sharp when it is due to angle threshold. */
866 }
867 }
868 else {
869 is_smooth = true;
870 }
871 }
872
873 BM_elem_flag_set(e, BM_ELEM_TAG, is_smooth);
874}
875
882 BMesh *bm,
883 const Span<float3> vcos,
884 const Span<float3> fnos,
885 MutableSpan<float3> r_lnos,
886 const short (*clnors_data)[2],
887 const int cd_loop_clnors_offset,
888 const bool do_rebuild,
889 const float split_angle_cos,
890 /* TLS */
891 MLoopNorSpaceArray *r_lnors_spacearr,
893 /* Iterate over. */
894 BMVert *v)
895{
896 /* Respecting face order is necessary so the initial starting loop is consistent
897 * with looping over loops of all faces.
898 *
899 * Logically we could sort the loops by their index & loop over them
900 * however it's faster to use the lowest index of an un-ordered list
901 * since it's common that smooth vertices only ever need to pick one loop
902 * which then handles all the others.
903 *
904 * Sorting is only performed when multiple fans are found. */
905 const bool has_clnors = true;
906 LinkNode *loops_of_vert = nullptr;
907 int loops_of_vert_count = 0;
908 /* When false the caller must have already tagged the edges. */
909 const bool do_edge_tag = (split_angle_cos != EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
910
911 /* The loop with the lowest index. */
912 {
913 LinkNode *link_best;
914 uint index_best = UINT_MAX;
915 BMEdge *e_curr_iter = v->e;
916 do { /* Edges of vertex. */
917 BMLoop *l_curr = e_curr_iter->l;
918 if (l_curr == nullptr) {
919 continue;
920 }
921
922 if (do_edge_tag) {
923 bm_edge_tag_from_smooth(fnos, e_curr_iter, split_angle_cos);
924 }
925
926 do { /* Radial loops. */
927 if (l_curr->v != v) {
928 continue;
929 }
930 if (do_rebuild && !BM_ELEM_API_FLAG_TEST(l_curr, BM_LNORSPACE_UPDATE) &&
932 {
933 continue;
934 }
936 BLI_linklist_prepend_alloca(&loops_of_vert, l_curr);
937 loops_of_vert_count += 1;
938
939 const uint index_test = uint(BM_elem_index_get(l_curr));
940 if (index_best > index_test) {
941 index_best = index_test;
942 link_best = loops_of_vert;
943 }
944 } while ((l_curr = l_curr->radial_next) != e_curr_iter->l);
945 } while ((e_curr_iter = BM_DISK_EDGE_NEXT(e_curr_iter, v)) != v->e);
946
947 if (UNLIKELY(loops_of_vert == nullptr)) {
948 return;
949 }
950
951 /* Immediately pop the best element.
952 * The order doesn't matter, so swap the links as it's simpler than tracking
953 * reference to `link_best`. */
954 if (link_best != loops_of_vert) {
955 std::swap(link_best->link, loops_of_vert->link);
956 }
957 }
958
959 bool loops_of_vert_is_sorted = false;
960
961 /* Keep track of the number of loops that have been assigned. */
962 int loops_of_vert_handled = 0;
963
964 while (loops_of_vert != nullptr) {
965 BMLoop *l_best = static_cast<BMLoop *>(loops_of_vert->link);
966 loops_of_vert = loops_of_vert->next;
967
968 BLI_assert(l_best->v == v);
969 loops_of_vert_handled += bm_mesh_loops_calc_normals_for_loop(bm,
970 vcos,
971 fnos,
972 clnors_data,
973 cd_loop_clnors_offset,
974 has_clnors,
975 edge_vectors,
976 l_best,
977 r_lnos,
978 r_lnors_spacearr);
979
980 /* Check if an early exit is possible without an exhaustive inspection of every loop
981 * where 1 loop's fan extends out to all remaining loops.
982 * This is a common case for smooth vertices. */
983 BLI_assert(loops_of_vert_handled <= loops_of_vert_count);
984 if (loops_of_vert_handled == loops_of_vert_count) {
985 break;
986 }
987
988 /* Note on sorting, in some cases it will be faster to scan for the lowest index each time.
989 * However in the worst case this is `O(N^2)`, so use a single sort call instead. */
990 if (!loops_of_vert_is_sorted) {
991 if (loops_of_vert && loops_of_vert->next) {
992 loops_of_vert = BLI_linklist_sort(loops_of_vert, bm_loop_index_cmp);
993 loops_of_vert_is_sorted = true;
994 }
995 }
996 }
997}
998
1004 BMesh *bm,
1005 const Span<float3> vcos,
1006 const Span<float3> fnos,
1007 MutableSpan<float3> r_lnos,
1008 const bool do_rebuild,
1009 const float split_angle_cos,
1010 /* TLS */
1011 MLoopNorSpaceArray *r_lnors_spacearr,
1013 /* Iterate over. */
1014 BMVert *v)
1015{
1016 const bool has_clnors = false;
1017 const short(*clnors_data)[2] = nullptr;
1018 /* When false the caller must have already tagged the edges. */
1019 const bool do_edge_tag = (split_angle_cos != EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
1020 const int cd_loop_clnors_offset = -1;
1021
1022 BMEdge *e_curr_iter;
1023
1024 /* Unfortunately a loop is needed just to clear loop-tags. */
1025 e_curr_iter = v->e;
1026 do { /* Edges of vertex. */
1027 BMLoop *l_curr = e_curr_iter->l;
1028 if (l_curr == nullptr) {
1029 continue;
1030 }
1031
1032 if (do_edge_tag) {
1033 bm_edge_tag_from_smooth(fnos, e_curr_iter, split_angle_cos);
1034 }
1035
1036 do { /* Radial loops. */
1037 if (l_curr->v != v) {
1038 continue;
1039 }
1041 } while ((l_curr = l_curr->radial_next) != e_curr_iter->l);
1042 } while ((e_curr_iter = BM_DISK_EDGE_NEXT(e_curr_iter, v)) != v->e);
1043
1044 e_curr_iter = v->e;
1045 do { /* Edges of vertex. */
1046 BMLoop *l_curr = e_curr_iter->l;
1047 if (l_curr == nullptr) {
1048 continue;
1049 }
1050 do { /* Radial loops. */
1051 if (l_curr->v != v) {
1052 continue;
1053 }
1054 if (do_rebuild && !BM_ELEM_API_FLAG_TEST(l_curr, BM_LNORSPACE_UPDATE) &&
1056 {
1057 continue;
1058 }
1060 vcos,
1061 fnos,
1062 clnors_data,
1063 cd_loop_clnors_offset,
1064 has_clnors,
1065 edge_vectors,
1066 l_curr,
1067 r_lnos,
1068 r_lnors_spacearr);
1069 } while ((l_curr = l_curr->radial_next) != e_curr_iter->l);
1070 } while ((e_curr_iter = BM_DISK_EDGE_NEXT(e_curr_iter, v)) != v->e);
1071}
1072
1082 const Span<float3> vcos,
1083 const Span<float3> fnos,
1084 MutableSpan<float3> r_lnos,
1085 MLoopNorSpaceArray *r_lnors_spacearr,
1086 const short (*clnors_data)[2],
1087 const int cd_loop_clnors_offset,
1088 const bool do_rebuild,
1089 const float split_angle_cos)
1090{
1091 BMIter fiter;
1092 BMFace *f_curr;
1093 const bool has_clnors = clnors_data || (cd_loop_clnors_offset != -1);
1094 /* When false the caller must have already tagged the edges. */
1095 const bool do_edge_tag = (split_angle_cos != EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
1096
1097 MLoopNorSpaceArray _lnors_spacearr = {nullptr};
1098
1099 std::unique_ptr<blender::Vector<blender::float3, 16>> edge_vectors = nullptr;
1100
1101 {
1102 char htype = 0;
1103 if (!vcos.is_empty()) {
1104 htype |= BM_VERT;
1105 }
1106 /* Face/Loop indices are set inline below. */
1108 }
1109
1110 if (!r_lnors_spacearr && has_clnors) {
1111 /* We need to compute lnor spacearr if some custom lnor data are given to us! */
1112 r_lnors_spacearr = &_lnors_spacearr;
1113 }
1114 if (r_lnors_spacearr) {
1116 edge_vectors = std::make_unique<blender::Vector<blender::float3, 16>>();
1117 }
1118
1119 /* Clear all loops' tags (means none are to be skipped for now). */
1120 int index_face, index_loop = 0;
1121 BM_ITER_MESH_INDEX (f_curr, &fiter, bm, BM_FACES_OF_MESH, index_face) {
1122 BMLoop *l_curr, *l_first;
1123
1124 BM_elem_index_set(f_curr, index_face); /* set_inline */
1125
1126 l_curr = l_first = BM_FACE_FIRST_LOOP(f_curr);
1127 do {
1128 BM_elem_index_set(l_curr, index_loop++); /* set_inline */
1130 } while ((l_curr = l_curr->next) != l_first);
1131 }
1133
1134 /* Always tag edges based on winding & sharp edge flag
1135 * (even when the auto-smooth angle doesn't need to be calculated). */
1136 if (do_edge_tag) {
1137 bm_mesh_edges_sharp_tag(bm, fnos, has_clnors ? -1.0f : split_angle_cos, false);
1138 }
1139
1140 /* We now know edges that can be smoothed (they are tagged),
1141 * and edges that will be hard (they aren't).
1142 * Now, time to generate the normals.
1143 */
1144 BM_ITER_MESH (f_curr, &fiter, bm, BM_FACES_OF_MESH) {
1145 BMLoop *l_curr, *l_first;
1146
1147 l_curr = l_first = BM_FACE_FIRST_LOOP(f_curr);
1148 do {
1149 if (do_rebuild && !BM_ELEM_API_FLAG_TEST(l_curr, BM_LNORSPACE_UPDATE) &&
1151 {
1152 continue;
1153 }
1155 vcos,
1156 fnos,
1157 clnors_data,
1158 cd_loop_clnors_offset,
1159 has_clnors,
1160 edge_vectors.get(),
1161 l_curr,
1162 r_lnos,
1163 r_lnors_spacearr);
1164 } while ((l_curr = l_curr->next) != l_first);
1165 }
1166
1167 if (r_lnors_spacearr) {
1168 if (r_lnors_spacearr == &_lnors_spacearr) {
1169 BKE_lnor_spacearr_free(r_lnors_spacearr);
1170 }
1171 }
1172}
1173
1188
1196
1197static void bm_mesh_loops_calc_normals_for_vert_init_fn(const void *__restrict userdata,
1198 void *__restrict chunk)
1199{
1200 auto *data = static_cast<const BMLoopsCalcNormalsWithCoordsData *>(userdata);
1201 auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(chunk);
1202 if (data->r_lnors_spacearr) {
1203 tls_data->edge_vectors = MEM_new<blender::Vector<blender::float3, 16>>(__func__);
1204 BKE_lnor_spacearr_tls_init(data->r_lnors_spacearr, &tls_data->lnors_spacearr_buf);
1205 tls_data->lnors_spacearr = &tls_data->lnors_spacearr_buf;
1206 }
1207 else {
1208 tls_data->lnors_spacearr = nullptr;
1209 }
1210}
1211
1212static void bm_mesh_loops_calc_normals_for_vert_reduce_fn(const void *__restrict userdata,
1213 void *__restrict /*chunk_join*/,
1214 void *__restrict chunk)
1215{
1216 auto *data = static_cast<const BMLoopsCalcNormalsWithCoordsData *>(userdata);
1217 auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(chunk);
1218
1219 if (data->r_lnors_spacearr) {
1220 BKE_lnor_spacearr_tls_join(data->r_lnors_spacearr, tls_data->lnors_spacearr);
1221 }
1222}
1223
1224static void bm_mesh_loops_calc_normals_for_vert_free_fn(const void *__restrict userdata,
1225 void *__restrict chunk)
1226{
1227 auto *data = static_cast<const BMLoopsCalcNormalsWithCoordsData *>(userdata);
1228 auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(chunk);
1229
1230 if (data->r_lnors_spacearr) {
1231 MEM_delete(tls_data->edge_vectors);
1232 }
1233}
1234
/* Parallel-mempool callback, run once per vertex (custom-loop-normals variant):
 * forwards the shared task data plus the thread-local buffers to the per-vertex
 * worker. NOTE(review): the opening signature line (original line 1235) and the
 * callee name (original line 1244) were elided by the doc extraction; judging
 * by the forwarded arguments this calls
 * bm_mesh_loops_calc_normals_for_vert_with_clnors() — confirm against the
 * repository source. */
 1236 void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict tls)
 1237{
 1238 BMVert *v = (BMVert *)mp_v;
 /* A vertex without any edge has no loops, so there is nothing to compute. */
 1239 if (v->e == nullptr) {
 1240 return;
 1241 }
 1242 auto *data = static_cast<BMLoopsCalcNormalsWithCoordsData *>(userdata);
 1243 auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(tls->userdata_chunk);
 1245 data->vcos,
 1246 data->fnos,
 1247 data->r_lnos,
 1248
 1249 data->clnors_data,
 1250 data->cd_loop_clnors_offset,
 1251 data->do_rebuild,
 1252 data->split_angle_cos,
 1253 /* Thread local. */
 1254 tls_data->lnors_spacearr,
 1255 tls_data->edge_vectors,
 1256 /* Iterate over. */
 1257 v);
 1258}
1259
/* Parallel-mempool callback, run once per vertex (no custom-loop-normals
 * variant): same shape as the clnors variant above, but the forwarded argument
 * list omits the clnors data/offset. NOTE(review): the opening signature line
 * (original line 1260) and the callee name (original line 1269) were elided by
 * the doc extraction — confirm the exact worker name against the repository
 * source. */
 1261 void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict tls)
 1262{
 1263 BMVert *v = (BMVert *)mp_v;
 /* Isolated vertex: no loops to process. */
 1264 if (v->e == nullptr) {
 1265 return;
 1266 }
 1267 auto *data = static_cast<BMLoopsCalcNormalsWithCoordsData *>(userdata);
 1268 auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(tls->userdata_chunk);
 1270 data->vcos,
 1271 data->fnos,
 1272 data->r_lnos,
 1273
 1274 data->do_rebuild,
 1275 data->split_angle_cos,
 1276 /* Thread local. */
 1277 tls_data->lnors_spacearr,
 1278 tls_data->edge_vectors,
 1279 /* Iterate over. */
 1280 v);
 1281}
1282
1284 const Span<float3> vcos,
1285 const Span<float3> fnos,
1286 MutableSpan<float3> r_lnos,
1287 MLoopNorSpaceArray *r_lnors_spacearr,
1288 const short (*clnors_data)[2],
1289 const int cd_loop_clnors_offset,
1290 const bool do_rebuild,
1291 const float split_angle_cos)
1292{
1293 const bool has_clnors = clnors_data || (cd_loop_clnors_offset != -1);
1294 MLoopNorSpaceArray _lnors_spacearr = {nullptr};
1295
1296 {
1297 char htype = BM_LOOP;
1298 if (!vcos.is_empty()) {
1299 htype |= BM_VERT;
1300 }
1301 if (!fnos.is_empty()) {
1302 htype |= BM_FACE;
1303 }
1304 /* Face/Loop indices are set inline below. */
1306 }
1307
1308 if (!r_lnors_spacearr && has_clnors) {
1309 /* We need to compute lnor spacearr if some custom lnor data are given to us! */
1310 r_lnors_spacearr = &_lnors_spacearr;
1311 }
1312 if (r_lnors_spacearr) {
1314 }
1315
1316 /* We now know edges that can be smoothed (they are tagged),
1317 * and edges that will be hard (they aren't).
1318 * Now, time to generate the normals.
1319 */
1320
1321 TaskParallelSettings settings;
1323
1324 BMLoopsCalcNormalsWithCoords_TLS tls = {nullptr};
1325
1326 settings.userdata_chunk = &tls;
1327 settings.userdata_chunk_size = sizeof(tls);
1328
1330 settings.func_reduce = bm_mesh_loops_calc_normals_for_vert_reduce_fn;
1332
1334 data.bm = bm;
1335 data.vcos = vcos;
1336 data.fnos = fnos;
1337 data.r_lnos = r_lnos;
1338 data.r_lnors_spacearr = r_lnors_spacearr;
1339 data.clnors_data = clnors_data;
1340 data.cd_loop_clnors_offset = cd_loop_clnors_offset;
1341 data.do_rebuild = do_rebuild;
1342 data.split_angle_cos = split_angle_cos;
1343
1344 BM_iter_parallel(bm,
1348 &data,
1349 &settings);
1350
1351 if (r_lnors_spacearr) {
1352 if (r_lnors_spacearr == &_lnors_spacearr) {
1353 BKE_lnor_spacearr_free(r_lnors_spacearr);
1354 }
1355 }
1356}
1357
1359 const Span<float3> vcos,
1360 const Span<float3> fnos,
1361 MutableSpan<float3> r_lnos,
1362 MLoopNorSpaceArray *r_lnors_spacearr,
1363 const short (*clnors_data)[2],
1364 const int cd_loop_clnors_offset,
1365 const bool do_rebuild,
1366 const float split_angle_cos)
1367{
1368 if (bm->totloop < BM_THREAD_LIMIT) {
1370 vcos,
1371 fnos,
1372 r_lnos,
1373 r_lnors_spacearr,
1374 clnors_data,
1375 cd_loop_clnors_offset,
1376 do_rebuild,
1377 split_angle_cos);
1378 }
1379 else {
1381 vcos,
1382 fnos,
1383 r_lnos,
1384 r_lnors_spacearr,
1385 clnors_data,
1386 cd_loop_clnors_offset,
1387 do_rebuild,
1388 split_angle_cos);
1389 }
1390}
1391
1392/* This threshold is a bit touchy (usual float precision issue), this value seems OK. */
1393#define LNOR_SPACE_TRIGO_THRESHOLD (1.0f - 1e-4f)
1394
/* Walks every loop-normal space (smooth fan) and, whenever two consecutive
 * custom normals in the same fan diverge by more than
 * LNOR_SPACE_TRIGO_THRESHOLD, splits the fan by sharpening the edge between
 * the two loops' faces. Returns true if any edge was changed (callers then
 * recompute the space-array). NOTE(review): the signature head (original
 * lines 1395-1399), an index-ensure call (1406) and the edge flag change on
 * split (1456/1480, presumably disabling the edge's smooth tag) were elided
 * by the doc extraction — confirm against the repository source. */
 1400 MLoopNorSpaceArray *lnors_spacearr,
 1401 const float (*new_lnors)[3])
 1402{
 /* One bit per loop: marks loops whose fan has already been processed. */
 1403 BLI_bitmap *done_loops = BLI_BITMAP_NEW(size_t(bm->totloop), __func__);
 1404 bool changed = false;
 1405
 1407
 1408 for (int i = 0; i < bm->totloop; i++) {
 1409 if (!lnors_spacearr->lspacearr[i]) {
 1410 /* This should not happen in theory, but in some rare case (probably ugly geometry)
 1411 * we can get some nullptr loopspacearr at this point. :/
 1412 * Maybe we should set those loops' edges as sharp?
 1413 */
 1414 BLI_BITMAP_ENABLE(done_loops, i);
 1415 if (G.debug & G_DEBUG) {
 1416 printf("WARNING! Getting invalid nullptr loop space for loop %d!\n", i);
 1417 }
 1418 continue;
 1419 }
 1420
 1421 if (!BLI_BITMAP_TEST(done_loops, i)) {
 1422 /* Notes:
 1423 * * In case of mono-loop smooth fan, we have nothing to do.
 1424 * * Loops in this linklist are ordered (in reversed order compared to how they were
 1425 * discovered by bke::mesh::normals_calc_corners(), but this is not a problem).
 1426 * Which means if we find a mismatching clnor,
 1427 * we know all remaining loops will have to be in a new, different smooth fan/lnor space.
 1428 * * In smooth fan case, we compare each clnor against a ref one,
 1429 * to avoid small differences adding up into a real big one in the end!
 1430 */
 1431 if (lnors_spacearr->lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
 1432 BLI_BITMAP_ENABLE(done_loops, i);
 1433 continue;
 1434 }
 1435
 1436 LinkNode *loops = lnors_spacearr->lspacearr[i]->loops;
 1437 BMLoop *prev_ml = nullptr;
 1438 const float *org_nor = nullptr;
 1439
 1440 while (loops) {
 1441 BMLoop *ml = static_cast<BMLoop *>(loops->link);
 1442 const int lidx = BM_elem_index_get(ml);
 1443 const float *nor = new_lnors[lidx];
 1444
 1445 if (!org_nor) {
 1446 org_nor = nor;
 1447 }
 1448 else if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
 1449 /* Current normal differs too much from org one, we have to tag the edge between
 1450 * previous loop's face and current's one as sharp.
 1451 * We know those two loops do not point to the same edge,
 1452 * since we do not allow reversed winding in a same smooth fan.
 1453 */
 1454 BMEdge *e = (prev_ml->e == ml->prev->e) ? prev_ml->e : ml->e;
 1455
 /* NOTE(review): the actual flag change on `e` was elided here (original
  * line 1456). */
 1457 changed = true;
 1458
 /* Restart comparisons against the first normal of the new fan. */
 1459 org_nor = nor;
 1460 }
 1461
 1462 prev_ml = ml;
 1463 loops = loops->next;
 1464 BLI_BITMAP_ENABLE(done_loops, lidx);
 1465 }
 1466
 1467 /* We also have to check between last and first loops,
 1468 * otherwise we may miss some sharp edges here!
 1469 * This is just a simplified version of above while loop.
 1470 * See #45984. */
 1471 loops = lnors_spacearr->lspacearr[i]->loops;
 1472 if (loops && org_nor) {
 1473 BMLoop *ml = static_cast<BMLoop *>(loops->link);
 1474 const int lidx = BM_elem_index_get(ml);
 1475 const float *nor = new_lnors[lidx];
 1476
 1477 if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
 1478 BMEdge *e = (prev_ml->e == ml->prev->e) ? prev_ml->e : ml->e;
 1479
 /* NOTE(review): edge flag change elided here too (original line 1480). */
 1481 changed = true;
 1482 }
 1483 }
 1484 }
 1485 }
 1486
 1487 MEM_freeN(done_loops);
 1488 return changed;
 1489}
1490
/* Converts plain object-space custom normals into the packed 2-short clnor
 * encoding, per smooth fan. Single-loop fans convert directly; multi-loop fans
 * first average all their custom normals (to keep the whole fan consistent —
 * tiny input differences would otherwise produce large 2D-factor differences),
 * then write the same encoded value to every loop in the fan.
 * NOTE(review): the signature head (original line 1495) and two lines
 * (original 1505 index-ensure, 1562 the custom_normal_to_data call head) were
 * elided by the doc extraction. */
 1496 MLoopNorSpaceArray *lnors_spacearr,
 1497 short (*r_clnors_data)[2],
 1498 const int cd_loop_clnors_offset,
 1499 const float (*new_lnors)[3])
 1500{
 1501 BLI_bitmap *done_loops = BLI_BITMAP_NEW(size_t(bm->totloop), __func__);
 1502
 /* Stack of per-loop clnor destinations of the current fan, filled while
  * accumulating the average and flushed afterwards. */
 1503 BLI_SMALLSTACK_DECLARE(clnors_data, short *);
 1504
 1506
 1507 for (int i = 0; i < bm->totloop; i++) {
 1508 if (!lnors_spacearr->lspacearr[i]) {
 1509 BLI_BITMAP_ENABLE(done_loops, i);
 1510 if (G.debug & G_DEBUG) {
 1511 printf("WARNING! Still getting invalid nullptr loop space in second loop for loop %d!\n",
 1512 i);
 1513 }
 1514 continue;
 1515 }
 1516
 1517 if (!BLI_BITMAP_TEST(done_loops, i)) {
 1518 /* Note we accumulate and average all custom normals in current smooth fan,
 1519 * to avoid getting different clnors data (tiny differences in plain custom normals can
 1520 * give rather huge differences in computed 2D factors).
 1521 */
 1522 LinkNode *loops = lnors_spacearr->lspacearr[i]->loops;
 1523
 /* Single-loop fan: `loops` directly holds the BMLoop, not a list node. */
 1524 if (lnors_spacearr->lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
 1525 BMLoop *ml = (BMLoop *)loops;
 1526 const int lidx = BM_elem_index_get(ml);
 1527
 1528 BLI_assert(lidx == i);
 1529
 1530 const float *nor = new_lnors[lidx];
 /* Destination is either the caller's array or the loop custom-data layer. */
 1531 short *clnor = static_cast<short *>(r_clnors_data ?
 1532 &r_clnors_data[lidx] :
 1533 BM_ELEM_CD_GET_VOID_P(ml, cd_loop_clnors_offset));
 1534
 1535 BKE_lnor_space_custom_normal_to_data(lnors_spacearr->lspacearr[i], nor, clnor);
 1536 BLI_BITMAP_ENABLE(done_loops, i);
 1537 }
 1538 else {
 1539 int avg_nor_count = 0;
 1540 float avg_nor[3];
 1541 short clnor_data_tmp[2], *clnor_data;
 1542
 1543 zero_v3(avg_nor);
 1544
 1545 while (loops) {
 1546 BMLoop *ml = static_cast<BMLoop *>(loops->link);
 1547 const int lidx = BM_elem_index_get(ml);
 1548 const float *nor = new_lnors[lidx];
 1549 short *clnor = static_cast<short *>(
 1550 r_clnors_data ? &r_clnors_data[lidx] :
 1551 BM_ELEM_CD_GET_VOID_P(ml, cd_loop_clnors_offset));
 1552
 1553 avg_nor_count++;
 1554 add_v3_v3(avg_nor, nor);
 1555 BLI_SMALLSTACK_PUSH(clnors_data, clnor);
 1556
 1557 loops = loops->next;
 1558 BLI_BITMAP_ENABLE(done_loops, lidx);
 1559 }
 1560
 1561 mul_v3_fl(avg_nor, 1.0f / float(avg_nor_count));
 1563 lnors_spacearr->lspacearr[i], avg_nor, clnor_data_tmp);
 1564
 /* Flush the same encoded (alpha, beta) pair to every loop of the fan. */
 1565 while ((clnor_data = static_cast<short *>(BLI_SMALLSTACK_POP(clnors_data)))) {
 1566 clnor_data[0] = clnor_data_tmp[0];
 1567 clnor_data[1] = clnor_data_tmp[1];
 1568 }
 1569 }
 1570 }
 1571 }
 1572
 1573 MEM_freeN(done_loops);
 1574}
1575
1584 const Span<float3> vcos,
1585 const Span<float3> fnos,
1586 MLoopNorSpaceArray *r_lnors_spacearr,
1587 short (*r_clnors_data)[2],
1588 const int cd_loop_clnors_offset,
1589 float (*new_lnors)[3],
1590 const int cd_new_lnors_offset,
1591 bool do_split_fans)
1592{
1593 BMFace *f;
1594 BMLoop *l;
1595 BMIter liter, fiter;
1596 Array<float3> cur_lnors(bm->totloop);
1597
1598 BKE_lnor_spacearr_clear(r_lnors_spacearr);
1599
1600 /* Tag smooth edges and set lnos from vnos when they might be completely smooth...
1601 * When using custom loop normals, disable the angle feature! */
1602 bm_mesh_edges_sharp_tag(bm, fnos, -1.0f, false);
1603
1604 /* Finish computing lnos by accumulating face normals
1605 * in each fan of faces defined by sharp edges. */
1607 vcos,
1608 fnos,
1609 cur_lnors,
1610 r_lnors_spacearr,
1611 r_clnors_data,
1612 cd_loop_clnors_offset,
1613 false,
1615
1616 /* Extract new normals from the data layer if necessary. */
1617 float(*custom_lnors)[3] = new_lnors;
1618
1619 if (new_lnors == nullptr) {
1620 custom_lnors = static_cast<float(*)[3]>(
1621 MEM_mallocN(sizeof(*new_lnors) * bm->totloop, __func__));
1622
1623 BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
1624 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
1625 const float *normal = static_cast<float *>(BM_ELEM_CD_GET_VOID_P(l, cd_new_lnors_offset));
1626 copy_v3_v3(custom_lnors[BM_elem_index_get(l)], normal);
1627 }
1628 }
1629 }
1630
1631 /* Validate the new normals. */
1632 for (int i = 0; i < bm->totloop; i++) {
1633 if (is_zero_v3(custom_lnors[i])) {
1634 copy_v3_v3(custom_lnors[i], cur_lnors[i]);
1635 }
1636 else {
1637 normalize_v3(custom_lnors[i]);
1638 }
1639 }
1640
1641 /* Now, check each current smooth fan (one lnor space per smooth fan!),
1642 * and if all its matching custom lnors are not equal, add sharp edges as needed. */
1643 if (do_split_fans && bm_mesh_loops_split_lnor_fans(bm, r_lnors_spacearr, custom_lnors)) {
1644 /* If any sharp edges were added, run bm_mesh_loops_calc_normals() again to get lnor
1645 * spacearr/smooth fans matching the given custom lnors. */
1646 BKE_lnor_spacearr_clear(r_lnors_spacearr);
1647
1649 vcos,
1650 fnos,
1651 cur_lnors,
1652 r_lnors_spacearr,
1653 r_clnors_data,
1654 cd_loop_clnors_offset,
1655 false,
1657 }
1658
1659 /* And we just have to convert plain object-space custom normals to our
1660 * lnor space-encoded ones. */
1662 bm, r_lnors_spacearr, r_clnors_data, cd_loop_clnors_offset, custom_lnors);
1663
1664 if (custom_lnors != new_lnors) {
1665 MEM_freeN(custom_lnors);
1666 }
1667}
1668
1670 const Span<float3> vnos,
1671 const Span<float3> fnos,
1672 MutableSpan<float3> r_lnos)
1673{
1674 BMIter fiter;
1675 BMFace *f_curr;
1676
1677 {
1678 char htype = BM_LOOP;
1679 if (!vnos.is_empty()) {
1680 htype |= BM_VERT;
1681 }
1682 if (!fnos.is_empty()) {
1683 htype |= BM_FACE;
1684 }
1686 }
1687
1688 BM_ITER_MESH (f_curr, &fiter, bm, BM_FACES_OF_MESH) {
1689 BMLoop *l_curr, *l_first;
1690 const bool is_face_flat = !BM_elem_flag_test(f_curr, BM_ELEM_SMOOTH);
1691
1692 l_curr = l_first = BM_FACE_FIRST_LOOP(f_curr);
1693 do {
1694 const float3 &no = is_face_flat ? (!fnos.is_empty() ? fnos[BM_elem_index_get(f_curr)] :
1695 float3(f_curr->no)) :
1696 (!vnos.is_empty() ? vnos[BM_elem_index_get(l_curr->v)] :
1697 float3(l_curr->v->no));
1698 copy_v3_v3(r_lnos[BM_elem_index_get(l_curr)], no);
1699
1700 } while ((l_curr = l_curr->next) != l_first);
1701 }
1702}
1703
1705 const Span<float3> vcos,
1706 const Span<float3> vnos,
1707 const Span<float3> fnos,
1708 const bool use_split_normals,
1709 MutableSpan<float3> r_lnos,
1710 MLoopNorSpaceArray *r_lnors_spacearr,
1711 short (*clnors_data)[2],
1712 const int cd_loop_clnors_offset,
1713 const bool do_rebuild)
1714{
1715
1716 if (use_split_normals) {
1718 vcos,
1719 fnos,
1720 r_lnos,
1721 r_lnors_spacearr,
1722 clnors_data,
1723 cd_loop_clnors_offset,
1724 do_rebuild,
1725 -1.0f);
1726 }
1727 else {
1728 BLI_assert(!r_lnors_spacearr);
1729 bm_mesh_loops_calc_normals_no_autosmooth(bm, vnos, fnos, r_lnos);
1730 }
1731}
1732
1735/* -------------------------------------------------------------------- */
/* Stores/initializes the mesh's loop-normal space array by computing split
 * normals with the current custom-normal layer. NOTE(review): the signature
 * head (original line 1739, presumably BM_lnorspacearr_store) and several
 * lines (1743-1744 likely a clnors-layer ensure, 1749 the
 * BM_loops_calc_normal_vcos-style call head, 1751 a dirty-flag clear) were
 * elided by the doc extraction — confirm against the repository source. */
 1740{
 1741 BLI_assert(bm->lnor_spacearr != nullptr);
 1742
 1745 }
 1746
 1747 int cd_loop_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
 1748
 1750 bm, {}, {}, {}, true, r_lnors, bm->lnor_spacearr, nullptr, cd_loop_clnors_offset, false);
 1752}
1753
1754#define CLEAR_SPACEARRAY_THRESHOLD(x) ((x) / 2)
1755
/* Marks loop-normal spaces as needing recomputation. Cheap full invalidation
 * is used when requested explicitly or when more than half the vertices are
 * selected (see CLEAR_SPACEARRAY_THRESHOLD); otherwise only loops of selected
 * vertices and their immediate neighbor vertices are tagged for update.
 * NOTE(review): several lines were elided by the doc extraction (original
 * 1758/1762/1766 early-exit bodies and flag sets, 1778 index-ensure,
 * 1787/1789/1793/1800/1805/1812 selection tests and per-loop tag sets, 1818
 * a done_verts enable, 1823 final dirty-flag set) — confirm exact tagging
 * against the repository source before editing. */
 1756void BM_lnorspace_invalidate(BMesh *bm, const bool do_invalidate_all)
 1757{
 1759 return;
 1760 }
 1761 if (do_invalidate_all || bm->totvertsel > CLEAR_SPACEARRAY_THRESHOLD(bm->totvert)) {
 1763 return;
 1764 }
 1765 if (bm->lnor_spacearr == nullptr) {
 1767 return;
 1768 }
 1769
 1770 BMVert *v;
 1771 BMLoop *l;
 1772 BMIter viter, liter;
 1773 /* NOTE: we could use temp tag of BMItem for that,
 1774 * but probably better not use it in such a low-level func?
 1775 * --mont29 */
 1776 BLI_bitmap *done_verts = BLI_BITMAP_NEW(bm->totvert, __func__);
 1777
 1779
 1780 /* When we affect a given vertex, we may affect following smooth fans:
 1781 * - all smooth fans of said vertex;
 1782 * - all smooth fans of all immediate loop-neighbors vertices;
 1783 * This can be simplified as 'all loops of selected vertices and their immediate neighbors'
 1784 * need to be tagged for update.
 1785 */
 1786 BM_ITER_MESH (v, &viter, bm, BM_VERTS_OF_MESH) {
 1788 BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
 1790
 1791 /* Note that we only handle unselected neighbor vertices here, main loop will take care of
 1792 * selected ones. */
 1794 !BLI_BITMAP_TEST(done_verts, BM_elem_index_get(l->prev->v)))
 1795 {
 1796
 1797 BMLoop *l_prev;
 1798 BMIter liter_prev;
 1799 BM_ITER_ELEM (l_prev, &liter_prev, l->prev->v, BM_LOOPS_OF_VERT) {
 1801 }
 /* `l_prev` ends on the last loop of the iteration above. */
 1802 BLI_BITMAP_ENABLE(done_verts, BM_elem_index_get(l_prev->v));
 1803 }
 1804
 1806 !BLI_BITMAP_TEST(done_verts, BM_elem_index_get(l->next->v)))
 1807 {
 1808
 1809 BMLoop *l_next;
 1810 BMIter liter_next;
 1811 BM_ITER_ELEM (l_next, &liter_next, l->next->v, BM_LOOPS_OF_VERT) {
 1813 }
 1814 BLI_BITMAP_ENABLE(done_verts, BM_elem_index_get(l_next->v));
 1815 }
 1816 }
 1817
 1819 }
 1820 }
 1821
 1822 MEM_freeN(done_verts);
 1824}
1825
/* Rebuilds the mesh's loop-normal space array. With `preserve_clnor`, first
 * decodes each tagged loop's clnor data against the OLD space into a plain
 * normal, rebuilds the spaces, then re-encodes that normal into the NEW space,
 * so visible custom normals survive the rebuild. NOTE(review): several lines
 * were elided by the doc extraction (original 1830 dirty-flag early-exit
 * body, 1842 index-ensure, 1849-1850/1871-1872 the per-loop tag tests,
 * 1856/1878 the decode/encode call heads, 1863-1866 the spacearr-clear +
 * rebuild call, 1881 per-loop tag clear, 1886 dirty-flag clear, 1889 a debug
 * validation call) — confirm against the repository source. */
 1826void BM_lnorspace_rebuild(BMesh *bm, bool preserve_clnor)
 1827{
 1828 BLI_assert(bm->lnor_spacearr != nullptr);
 1829
 1831 return;
 1832 }
 1833 BMFace *f;
 1834 BMLoop *l;
 1835 BMIter fiter, liter;
 1836
 1837 Array<float3> r_lnors(bm->totloop, float3(0));
 /* Decoded old normals, only needed when preserving custom normal data. */
 1838 Array<float3> oldnors(preserve_clnor ? bm->totloop : 0, float3(0));
 1839
 1840 int cd_loop_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
 1841
 1843
 1844 if (preserve_clnor) {
 1845 BLI_assert(bm->lnor_spacearr->lspacearr != nullptr);
 1846
 1847 BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
 1848 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
 1851 {
 1852 short(*clnor)[2] = static_cast<short(*)[2]>(
 1853 BM_ELEM_CD_GET_VOID_P(l, cd_loop_clnors_offset));
 1854 int l_index = BM_elem_index_get(l);
 1855
 /* Decode against the old (pre-rebuild) loop space. */
 1857 bm->lnor_spacearr->lspacearr[l_index], *clnor, oldnors[l_index]);
 1858 }
 1859 }
 1860 }
 1861 }
 1862
 1865 }
 1867 bm, {}, {}, {}, true, r_lnors, bm->lnor_spacearr, nullptr, cd_loop_clnors_offset, true);
 1868
 1869 BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
 1870 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
 1873 {
 1874 if (preserve_clnor) {
 1875 short(*clnor)[2] = static_cast<short(*)[2]>(
 1876 BM_ELEM_CD_GET_VOID_P(l, cd_loop_clnors_offset));
 1877 int l_index = BM_elem_index_get(l);
 /* Re-encode the preserved normal into the rebuilt loop space. */
 1879 bm->lnor_spacearr->lspacearr[l_index], oldnors[l_index], *clnor);
 1880 }
 1882 }
 1883 }
 1884 }
 1885
 1887
 1888#ifndef NDEBUG
 1890#endif
 1891}
1892
/* Lazily ensures loop-normal spaces exist and are up to date: allocates the
 * space-array container if missing, does a full store when the array was never
 * filled, and otherwise rebuilds only when dirty. NOTE(review): the signature
 * line (original 1893, presumably BM_lnorspace_update) and the dirty-flag
 * test guarding the rebuild (original 1902) were elided by the doc
 * extraction. */
 1894{
 1895 if (bm->lnor_spacearr == nullptr) {
 1896 bm->lnor_spacearr = MEM_cnew<MLoopNorSpaceArray>(__func__);
 1897 }
 1898 if (bm->lnor_spacearr->lspacearr == nullptr) {
 /* First-time fill: compute all spaces from scratch. */
 1899 Array<float3> lnors(bm->totloop, float3(0));
 1900 BM_lnorspacearr_store(bm, lnors);
 1901 }
 1903 BM_lnorspace_rebuild(bm, false);
 1904 }
 1905}
1906
1909/* -------------------------------------------------------------------- */
1920#ifndef NDEBUG
/* Debug-only consistency check: recomputes all loop-normal spaces into a
 * temporary array and compares each space (ref_alpha, ref_beta, vec_lnor,
 * vec_ortho, vec_ref) against the stored one with a 1e-4 tolerance; `clear`
 * records whether everything matched. NOTE(review): the signature line
 * (original 1921), an assert/flag line (1923), index-ensure (1929/1933) and
 * the temp space-array free + an assert on `clear` (1954/1956) were elided
 * by the doc extraction. */
 1922{
 1924 bool clear = true;
 1925
 1926 MLoopNorSpaceArray *temp = MEM_cnew<MLoopNorSpaceArray>(__func__);
 1927 temp->lspacearr = nullptr;
 1928
 1930
 1931 int cd_loop_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
 1932 Array<float3> lnors(bm->totloop, float3(0));
 1934 bm, {}, {}, {}, true, lnors, temp, nullptr, cd_loop_clnors_offset, true);
 1935
 1936 for (int i = 0; i < bm->totloop; i++) {
 /* `j` counts how many of the 5 per-space fields match within tolerance. */
 1937 int j = 0;
 1938 j += compare_ff(
 1939 temp->lspacearr[i]->ref_alpha, bm->lnor_spacearr->lspacearr[i]->ref_alpha, 1e-4f);
 1940 j += compare_ff(
 1941 temp->lspacearr[i]->ref_beta, bm->lnor_spacearr->lspacearr[i]->ref_beta, 1e-4f);
 1942 j += compare_v3v3(
 1943 temp->lspacearr[i]->vec_lnor, bm->lnor_spacearr->lspacearr[i]->vec_lnor, 1e-4f);
 1944 j += compare_v3v3(
 1945 temp->lspacearr[i]->vec_ortho, bm->lnor_spacearr->lspacearr[i]->vec_ortho, 1e-4f);
 1946 j += compare_v3v3(
 1947 temp->lspacearr[i]->vec_ref, bm->lnor_spacearr->lspacearr[i]->vec_ref, 1e-4f);
 1948
 1949 if (j != 5) {
 1950 clear = false;
 1951 break;
 1952 }
 1953 }
 1955 MEM_freeN(temp);
 1957
 1958 bm->spacearr_dirty &= ~BM_SPACEARR_DIRTY_ALL;
 1959}
1960#endif
1961
/* Marks one loop (if not already marked) in the `loops` bitmap for custom
 * normal editing, bumping `totloopsel`. Then either marks every loop around
 * the loop's vertex (`do_all_loops_of_vert`, used when smooth fans may change)
 * or every loop in the same loop-normal space (smooth fan). NOTE(review): the
 * signature head with the first parameters (original line 1962, presumably
 * `static void bm_loop_normal_mark_indiv_loop(BMLoop *l, ...)`) was elided by
 * the doc extraction. */
 1963 BLI_bitmap *loops,
 1964 MLoopNorSpaceArray *lnor_spacearr,
 1965 int *totloopsel,
 1966 const bool do_all_loops_of_vert)
 1967{
 /* `l` may be null (e.g. no shared loop found by the caller): nothing to mark. */
 1968 if (l != nullptr) {
 1969 const int l_idx = BM_elem_index_get(l);
 1970
 1971 if (!BLI_BITMAP_TEST(loops, l_idx)) {
 1972 /* If vert and face selected share a loop, mark it for editing. */
 1973 BLI_BITMAP_ENABLE(loops, l_idx);
 1974 (*totloopsel)++;
 1975
 1976 if (do_all_loops_of_vert) {
 1977 /* If required, also mark all loops shared by that vertex.
 1978 * This is needed when loop spaces may change
 1979 * (i.e. when some faces or edges might change of smooth/sharp status). */
 1980 BMIter liter;
 1981 BMLoop *lfan;
 1982 BM_ITER_ELEM (lfan, &liter, l->v, BM_LOOPS_OF_VERT) {
 1983 const int lfan_idx = BM_elem_index_get(lfan);
 1984 if (!BLI_BITMAP_TEST(loops, lfan_idx)) {
 1985 BLI_BITMAP_ENABLE(loops, lfan_idx);
 1986 (*totloopsel)++;
 1987 }
 1988 }
 1989 }
 1990 else {
 1991 /* Mark all loops in same loop normal space (aka smooth fan). */
 1992 if ((lnor_spacearr->lspacearr[l_idx]->flags & MLNOR_SPACE_IS_SINGLE) == 0) {
 1993 for (LinkNode *node = lnor_spacearr->lspacearr[l_idx]->loops; node; node = node->next) {
 1994 const int lfan_idx = BM_elem_index_get((BMLoop *)node->link);
 1995 if (!BLI_BITMAP_TEST(loops, lfan_idx)) {
 1996 BLI_BITMAP_ENABLE(loops, lfan_idx);
 1997 (*totloopsel)++;
 1998 }
 1999 }
 2000 }
 2001 }
 2002 }
 2003 }
 2004}
2005
2006/* Mark the individual clnors to be edited, if multiple selection methods are used. */
/* Fills the `loops` bitmap with loops whose custom normals should be edited,
 * based on select mode. With mixed face+vert/edge modes, edit-selection
 * history picks individual loops (vert/edge selected *before* a face selects
 * the shared loop(s) on that face only); otherwise all loops of selected
 * faces / edges / verts are marked. Returns the number of marked loops.
 * NOTE(review): many lines were elided by the doc extraction (original 2016
 * index-ensure, 2019 spacearr validity check, 2027 the selection-history
 * LISTBASE_FOREACH_BACKWARD header, 2033/2036/2042-2052 the
 * bm_loop_normal_mark_indiv_loop call heads and BM_face_vert/edge_share_loop
 * lookups, 2065/2079/2104 per-element BM_ELEM_SELECT tests, 2067/2081/2086/
 * 2090-2091/2106 more call heads) — consult the repository source. */
 2007static int bm_loop_normal_mark_indiv(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert)
 2008{
 2009 int totloopsel = 0;
 2010
 2011 const bool sel_verts = (bm->selectmode & SCE_SELECT_VERTEX) != 0;
 2012 const bool sel_edges = (bm->selectmode & SCE_SELECT_EDGE) != 0;
 2013 const bool sel_faces = (bm->selectmode & SCE_SELECT_FACE) != 0;
 2014 const bool use_sel_face_history = sel_faces && (sel_edges || sel_verts);
 2015
 2017
 2018 BLI_assert(bm->lnor_spacearr != nullptr);
 2020
 2021 if (use_sel_face_history) {
 2022 /* Using face history allows to select a single loop from a single face...
 2023 * Note that this is O(n^2) piece of code,
 2024 * but it is not designed to be used with huge selection sets,
 2025 * rather with only a few items selected at most. */
 2026 /* Goes from last selected to the first selected element. */
 2028 if (ese->htype == BM_FACE) {
 2029 /* If current face is selected,
 2030 * then any verts to be edited must have been selected before it. */
 2031 for (BMEditSelection *ese_prev = ese->prev; ese_prev; ese_prev = ese_prev->prev) {
 2032 if (ese_prev->htype == BM_VERT) {
 2034 BM_face_vert_share_loop((BMFace *)ese->ele, (BMVert *)ese_prev->ele),
 2035 loops,
 2037 &totloopsel,
 2038 do_all_loops_of_vert);
 2039 }
 2040 else if (ese_prev->htype == BM_EDGE) {
 2041 BMEdge *e = (BMEdge *)ese_prev->ele;
 /* An edge contributes the loops of both of its vertices on the face. */
 2043 loops,
 2045 &totloopsel,
 2046 do_all_loops_of_vert);
 2047
 2049 loops,
 2051 &totloopsel,
 2052 do_all_loops_of_vert);
 2053 }
 2054 }
 2055 }
 2056 }
 2057 }
 2058 else {
 2059 if (sel_faces) {
 2060 /* Only select all loops of selected faces. */
 2061 BMLoop *l;
 2062 BMFace *f;
 2063 BMIter liter, fiter;
 2064 BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
 2066 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
 2068 l, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
 2069 }
 2070 }
 2071 }
 2072 }
 2073 if (sel_edges) {
 2074 /* Only select all loops of selected edges. */
 2075 BMLoop *l;
 2076 BMEdge *e;
 2077 BMIter liter, eiter;
 2078 BM_ITER_MESH (e, &eiter, bm, BM_EDGES_OF_MESH) {
 2080 BM_ITER_ELEM (l, &liter, e, BM_LOOPS_OF_EDGE) {
 2082 l, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
 2083 /* Loops actually 'have' two edges, or said otherwise, a selected edge actually selects
 2084 * *two* loops in each of its faces. We have to find the other one too. */
 2085 if (BM_vert_in_edge(e, l->next->v)) {
 2087 l->next, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
 2088 }
 2089 else {
 2092 l->prev, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
 2093 }
 2094 }
 2095 }
 2096 }
 2097 }
 2098 if (sel_verts) {
 2099 /* Select all loops of selected verts. */
 2100 BMLoop *l;
 2101 BMVert *v;
 2102 BMIter liter, viter;
 2103 BM_ITER_MESH (v, &viter, bm, BM_VERTS_OF_MESH) {
 2105 BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
 2107 l, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
 2108 }
 2109 }
 2110 }
 2111 }
 2112 }
 2113
 2114 return totloopsel;
 2115}
2116
/* Initializes one BMLoopNorEditData entry for a loop selected for custom
 * normal editing: records the loop/index, its clnor data pointer, the decoded
 * current custom normal (stored in both `nloc` and the initial `niloc`), and
 * the vertex location. NOTE(review): the function name line (original 2117)
 * and the decode call head (original 2130, from the arguments
 * BKE_lnor_space_custom_data_to_normal) were elided by the doc extraction. */
 2118 BMesh *bm, BMLoopNorEditData *lnor_ed, BMVert *v, BMLoop *l, const int offset)
 2119{
 2120 BLI_assert(bm->lnor_spacearr != nullptr);
 2121 BLI_assert(bm->lnor_spacearr->lspacearr != nullptr);
 2122
 2123 const int l_index = BM_elem_index_get(l);
 2124 short *clnors_data = static_cast<short *>(BM_ELEM_CD_GET_VOID_P(l, offset));
 2125
 2126 lnor_ed->loop_index = l_index;
 2127 lnor_ed->loop = l;
 2128
 2129 float custom_normal[3];
 2131 bm->lnor_spacearr->lspacearr[l_index], clnors_data, custom_normal);
 2132
 2133 lnor_ed->clnors_data = clnors_data;
 /* `niloc` keeps the initial value so edits can be reset/compared later. */
 2134 copy_v3_v3(lnor_ed->nloc, custom_normal);
 2135 copy_v3_v3(lnor_ed->niloc, custom_normal);
 2136
 2137 lnor_ed->loc = v->co;
 2138}
2139
/* Builds the edit-data array for interactive custom normal editing: ensures a
 * clnors layer and up-to-date lnor spaces, marks the loops to edit via
 * bm_loop_normal_mark_indiv(), then allocates and fills one BMLoopNorEditData
 * per marked loop plus a loop-index -> editdata lookup table. Caller frees
 * with BM_loop_normal_editdata_array_free(). NOTE(review): the signature head
 * (original line 2140) and a few lines (2149 index-ensure, 2154-2155 the
 * clnors-layer add when missing, 2159 BM_lnorspace_update or similar) were
 * elided by the doc extraction. */
 2141 const bool do_all_loops_of_vert)
 2142{
 2143 BMLoop *l;
 2144 BMVert *v;
 2145 BMIter liter, viter;
 2146
 2147 int totloopsel = 0;
 2148
 2150
 2151 BMLoopNorEditDataArray *lnors_ed_arr = MEM_cnew<BMLoopNorEditDataArray>(__func__);
 2152 lnors_ed_arr->lidx_to_lnor_editdata = MEM_cnew_array<BMLoopNorEditData *>(bm->totloop, __func__);
 2153
 2156 }
 2157 const int cd_custom_normal_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
 2158
 2160
 2161 BLI_bitmap *loops = BLI_BITMAP_NEW(bm->totloop, __func__);
 2162
 2163 /* This function define loop normals to edit, based on selection modes and history. */
 2164 totloopsel = bm_loop_normal_mark_indiv(bm, loops, do_all_loops_of_vert);
 2165
 2166 if (totloopsel) {
 2167 BMLoopNorEditData *lnor_ed = lnors_ed_arr->lnor_editdata = static_cast<BMLoopNorEditData *>(
 2168 MEM_mallocN(sizeof(*lnor_ed) * totloopsel, __func__));
 2169
 /* Fill one entry per marked loop, advancing `lnor_ed` through the array. */
 2170 BM_ITER_MESH (v, &viter, bm, BM_VERTS_OF_MESH) {
 2171 BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
 2172 if (BLI_BITMAP_TEST(loops, BM_elem_index_get(l))) {
 2173 loop_normal_editdata_init(bm, lnor_ed, v, l, cd_custom_normal_offset);
 2174 lnors_ed_arr->lidx_to_lnor_editdata[BM_elem_index_get(l)] = lnor_ed;
 2175 lnor_ed++;
 2176 }
 2177 }
 2178 }
 2179 lnors_ed_arr->totloop = totloopsel;
 2180 }
 2181
 2182 MEM_freeN(loops);
 2183 lnors_ed_arr->cd_custom_normal_offset = cd_custom_normal_offset;
 2184 return lnors_ed_arr;
 2185}
2186
/* Frees an edit-data array created by BM_loop_normal_editdata_array_init():
 * both owned buffers (MEM_SAFE_FREE tolerates the totloopsel == 0 case where
 * `lnor_editdata` was never allocated) and the container itself.
 * NOTE(review): the signature line (original 2187) was elided by the doc
 * extraction. */
 2188{
 2189 MEM_SAFE_FREE(lnors_ed_arr->lnor_editdata);
 2190 MEM_SAFE_FREE(lnors_ed_arr->lidx_to_lnor_editdata);
 2191 MEM_freeN(lnors_ed_arr);
 2192}
2193
2196/* -------------------------------------------------------------------- */
/* Decodes the packed clnor custom-data of every loop into a plain float3
 * CD_NORMAL loop layer (creating it as temporary if needed), so external code
 * can read the custom normals as vectors. Returns false when the mesh has no
 * custom-normal layer. NOTE(review): the signature line (original 2200,
 * presumably `bool BM_custom_loop_normals_to_vector_layer(BMesh *bm)`) and
 * several lines (2206 the has-layer test, 2210 lnorspace update, 2213-2216
 * the CD_NORMAL layer add + temporary flag, 2229 the decode call head) were
 * elided by the doc extraction. */
 2201{
 2202 BMFace *f;
 2203 BMLoop *l;
 2204 BMIter liter, fiter;
 2205
 2207 return false;
 2208 }
 2209
 2211
 2212 /* Create a loop normal layer. */
 2215
 2217 }
 2218
 2219 const int cd_custom_normal_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
 2220 const int cd_normal_offset = CustomData_get_offset(&bm->ldata, CD_NORMAL);
 2221
 /* `l_index` tracks loop order across the face iteration; it is used to look
  * up the matching loop-normal space. */
 2222 int l_index = 0;
 2223 BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
 2224 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
 2225 const short *clnors_data = static_cast<const short *>(
 2226 BM_ELEM_CD_GET_VOID_P(l, cd_custom_normal_offset));
 2227 float *normal = static_cast<float *>(BM_ELEM_CD_GET_VOID_P(l, cd_normal_offset));
 2228
 2230 bm->lnor_spacearr->lspacearr[l_index], clnors_data, normal);
 2231 l_index += 1;
 2232 }
 2233 }
 2234
 2235 return true;
 2236}
2237
/* Reverse of the vector-layer export: reads plain float3 normals from the
 * CD_NORMAL loop layer and encodes them back into the CD_CUSTOMLOOPNORMAL
 * clnor layer via bm_mesh_loops_custom_normals_set(), optionally splitting
 * smooth fans (`add_sharp_edges`). NOTE(review): the signature line (original
 * 2238) and several lines (2240-2241 the layer-presence early-exit test, 2253
 * the custom_normals_set call head with `bm`, 2256 the lnor_spacearr argument,
 * 2263 a trailing cleanup/flag line) were elided by the doc extraction. */
 2239{
 2242 {
 2243 return;
 2244 }
 2245
 2246 const int cd_custom_normal_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
 2247 const int cd_normal_offset = CustomData_get_offset(&bm->ldata, CD_NORMAL);
 2248
 /* Make sure a space-array container exists before computing into it. */
 2249 if (bm->lnor_spacearr == nullptr) {
 2250 bm->lnor_spacearr = MEM_cnew<MLoopNorSpaceArray>(__func__);
 2251 }
 2252
 2254 {},
 2255 {},
 2257 nullptr,
 2258 cd_custom_normal_offset,
 2259 nullptr,
 2260 cd_normal_offset,
 2261 add_sharp_edges);
 2262
 2264}
2265
CustomData interface, see also DNA_customdata_types.h.
int CustomData_get_offset(const CustomData *data, eCustomDataType type)
void CustomData_set_layer_flag(CustomData *data, eCustomDataType type, int flag)
bool CustomData_has_layer(const CustomData *data, eCustomDataType type)
@ G_DEBUG
void BKE_lnor_space_custom_data_to_normal(const MLoopNorSpace *lnor_space, const short clnor_data[2], float r_custom_lnor[3])
MLoopNorSpace * BKE_lnor_space_create(MLoopNorSpaceArray *lnors_spacearr)
@ MLNOR_SPACE_IS_SINGLE
Definition BKE_mesh.h:258
@ MLNOR_SPACEARR_BMLOOP_PTR
Definition BKE_mesh.h:277
void BKE_lnor_space_custom_normal_to_data(const MLoopNorSpace *lnor_space, const float custom_lnor[3], short r_clnor_data[2])
void BKE_lnor_space_add_loop(MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpace *lnor_space, int corner, void *bm_loop, bool is_single)
void BKE_lnor_spacearr_clear(MLoopNorSpaceArray *lnors_spacearr)
void BKE_lnor_spacearr_init(MLoopNorSpaceArray *lnors_spacearr, int numLoops, char data_type)
void BKE_lnor_space_define(MLoopNorSpace *lnor_space, const float lnor[3], const float vec_ref[3], const float vec_other[3], blender::Span< blender::float3 > edge_vectors)
void BKE_lnor_spacearr_free(MLoopNorSpaceArray *lnors_spacearr)
void BKE_lnor_spacearr_tls_join(MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpaceArray *lnors_spacearr_tls)
void BKE_lnor_spacearr_tls_init(MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpaceArray *lnors_spacearr_tls)
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_BITMAP_NEW(_num, _alloc_string)
Definition BLI_bitmap.h:41
#define BLI_BITMAP_TEST(_bitmap, _index)
Definition BLI_bitmap.h:65
#define BLI_BITMAP_ENABLE(_bitmap, _index)
Definition BLI_bitmap.h:82
unsigned int BLI_bitmap
Definition BLI_bitmap.h:17
#define BLI_INLINE
#define LISTBASE_FOREACH_BACKWARD(type, var, list)
#define M_PI
MINLINE int compare_ff(float a, float b, float max_diff)
MINLINE void madd_v3_v3fl(float r[3], const float a[3], float f)
MINLINE void sub_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void mul_v3_fl(float r[3], float f)
MINLINE void copy_v3_v3(float r[3], const float a[3])
MINLINE float dot_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
MINLINE float normalize_v3_v3(float r[3], const float a[3])
MINLINE bool compare_v3v3(const float v1[3], const float v2[3], float limit) ATTR_WARN_UNUSED_RESULT
MINLINE bool is_zero_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void zero_v3(float r[3])
MINLINE void add_v3_v3(float r[3], const float a[3])
MINLINE float normalize_v3(float n[3])
unsigned int uint
struct MempoolIterData MempoolIterData
Definition BLI_task.h:209
void BLI_task_parallel_range(int start, int stop, void *userdata, TaskParallelRangeFunc func, const TaskParallelSettings *settings)
Definition task_range.cc:99
BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *settings)
Definition BLI_task.h:230
BLI_INLINE void BLI_parallel_mempool_settings_defaults(TaskParallelSettings *settings)
Definition BLI_task.h:238
#define UNUSED_VARS_NDEBUG(...)
#define UNLIKELY(x)
#define LIKELY(x)
@ CD_FLAG_TEMPORARY
@ CD_CUSTOMLOOPNORMAL
@ SCE_SELECT_FACE
@ SCE_SELECT_VERTEX
@ SCE_SELECT_EDGE
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
@ BM_LOOP
#define BM_DISK_EDGE_NEXT(e, v)
@ BM_SPACEARR_DIRTY_ALL
@ BM_SPACEARR_DIRTY
@ BM_ELEM_SELECT
@ BM_ELEM_SMOOTH
@ BM_ELEM_TAG
#define BM_FACE_FIRST_LOOP(p)
#define BM_THREAD_LIMIT
#define BM_ELEM_CD_GET_VOID_P(ele, offset)
#define BM_elem_index_get(ele)
#define BM_elem_flag_disable(ele, hflag)
#define BM_elem_flag_set(ele, hflag, val)
#define BM_elem_index_set(ele, index)
#define BM_elem_flag_test(ele, hflag)
#define BM_elem_flag_enable(ele, hflag)
void BM_data_layer_add(BMesh *bm, CustomData *data, int type)
#define BM_ITER_ELEM(ele, iter, data, itype)
#define BM_ITER_MESH(ele, iter, bm, itype)
#define BM_ITER_MESH_INDEX(ele, iter, bm, itype, indexvar)
@ BM_EDGES_OF_MESH
@ BM_VERTS_OF_MESH
@ BM_FACES_OF_MESH
@ BM_LOOPS_OF_VERT
@ BM_LOOPS_OF_EDGE
@ BM_LOOPS_OF_FACE
ATTR_WARN_UNUSED_RESULT BMesh * bm
void BM_mesh_elem_index_ensure(BMesh *bm, const char htype)
static void bm_mesh_loops_assign_normal_data(BMesh *bm, MLoopNorSpaceArray *lnors_spacearr, short(*r_clnors_data)[2], const int cd_loop_clnors_offset, const float(*new_lnors)[3])
void BM_mesh_normals_update_with_partial(BMesh *bm, const BMPartialUpdate *bmpinfo)
static int bm_loop_index_cmp(const void *a, const void *b)
static void bm_vert_calc_normals_cb(void *, MempoolIterData *mp_v, const TaskParallelTLS *__restrict)
#define CLEAR_SPACEARRAY_THRESHOLD(x)
static void bm_vert_calc_normals_with_coords(BMVert *v, BMVertsCalcNormalsWithCoordsData *data)
static void bm_mesh_loops_calc_normals_for_vert_with_clnors(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, MutableSpan< float3 > r_lnos, const short(*clnors_data)[2], const int cd_loop_clnors_offset, const bool do_rebuild, const float split_angle_cos, MLoopNorSpaceArray *r_lnors_spacearr, blender::Vector< blender::float3, 16 > *edge_vectors, BMVert *v)
void BM_lnorspace_update(BMesh *bm)
static void bm_mesh_loops_calc_normals_for_vert_free_fn(const void *__restrict userdata, void *__restrict chunk)
void BM_mesh_normals_update_ex(BMesh *bm, const BMeshNormalsUpdate_Params *params)
BMesh Compute Normals.
#define BM_LNORSPACE_UPDATE
void BM_lnorspace_invalidate(BMesh *bm, const bool do_invalidate_all)
void BM_normals_loops_edges_tag(BMesh *bm, const bool do_edges)
static void bm_mesh_loops_custom_normals_set(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, MLoopNorSpaceArray *r_lnors_spacearr, short(*r_clnors_data)[2], const int cd_loop_clnors_offset, float(*new_lnors)[3], const int cd_new_lnors_offset, bool do_split_fans)
void BM_lnorspace_rebuild(BMesh *bm, bool preserve_clnor)
bool BM_custom_loop_normals_to_vector_layer(BMesh *bm)
static bool bm_mesh_loops_split_lnor_fans(BMesh *bm, MLoopNorSpaceArray *lnors_spacearr, const float(*new_lnors)[3])
void BM_mesh_normals_update_with_partial_ex(BMesh *, const BMPartialUpdate *bmpinfo, const BMeshNormalsUpdate_Params *params)
static void bm_partial_faces_parallel_range_calc_normals_cb(void *userdata, const int iter, const TaskParallelTLS *__restrict)
#define EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS
static void bm_face_calc_normals_cb(void *, MempoolIterData *mp_f, const TaskParallelTLS *__restrict)
static void bm_mesh_loops_calc_normals_for_vert_init_fn(const void *__restrict userdata, void *__restrict chunk)
static void bm_loop_normal_mark_indiv_do_loop(BMLoop *l, BLI_bitmap *loops, MLoopNorSpaceArray *lnor_spacearr, int *totloopsel, const bool do_all_loops_of_vert)
static void bm_mesh_loops_calc_normals__multi_threaded(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, MutableSpan< float3 > r_lnos, MLoopNorSpaceArray *r_lnors_spacearr, const short(*clnors_data)[2], const int cd_loop_clnors_offset, const bool do_rebuild, const float split_angle_cos)
void BM_mesh_normals_update(BMesh *bm)
BMLoopNorEditDataArray * BM_loop_normal_editdata_array_init(BMesh *bm, const bool do_all_loops_of_vert)
static void bm_mesh_loops_calc_normals_no_autosmooth(BMesh *bm, const Span< float3 > vnos, const Span< float3 > fnos, MutableSpan< float3 > r_lnos)
#define LNOR_SPACE_TRIGO_THRESHOLD
void BM_lnorspace_err(BMesh *bm)
static void bm_partial_verts_parallel_range_calc_normal_cb(void *userdata, const int iter, const TaskParallelTLS *__restrict)
BLI_INLINE bool bm_edge_is_smooth_no_angle_test(const BMEdge *e, const BMLoop *l_a, const BMLoop *l_b)
static void bm_edge_tag_from_smooth_and_set_sharp(Span< float3 > fnos, BMEdge *e, const float split_angle_cos)
static void loop_normal_editdata_init(BMesh *bm, BMLoopNorEditData *lnor_ed, BMVert *v, BMLoop *l, const int offset)
void BM_loop_normal_editdata_array_free(BMLoopNorEditDataArray *lnors_ed_arr)
static void bm_vert_calc_normals_with_coords_cb(void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict)
void BM_lnorspacearr_store(BMesh *bm, MutableSpan< float3 > r_lnors)
static int bm_loop_normal_mark_indiv(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert)
static void bm_vert_calc_normals_impl(BMVert *v)
static void bm_mesh_loops_calc_normals__single_threaded(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, MutableSpan< float3 > r_lnos, MLoopNorSpaceArray *r_lnors_spacearr, const short(*clnors_data)[2], const int cd_loop_clnors_offset, const bool do_rebuild, const float split_angle_cos)
static void bm_mesh_loops_calc_normals_for_vert_without_clnors(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, MutableSpan< float3 > r_lnos, const bool do_rebuild, const float split_angle_cos, MLoopNorSpaceArray *r_lnors_spacearr, blender::Vector< blender::float3, 16 > *edge_vectors, BMVert *v)
static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, const short(*clnors_data)[2], const int cd_loop_clnors_offset, const bool has_clnors, blender::Vector< blender::float3, 16 > *edge_vectors, BMLoop *l_curr, MutableSpan< float3 > r_lnos, MLoopNorSpaceArray *r_lnors_spacearr)
static void bm_edge_tag_from_smooth(Span< float3 > fnos, BMEdge *e, const float split_angle_cos)
static void bm_mesh_loops_calc_normals_for_vert_without_clnors_fn(void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict tls)
BLI_INLINE void bm_vert_calc_normals_accum_loop(const BMLoop *l_iter, const float e1diff[3], const float e2diff[3], const float f_no[3], float v_no[3])
static void bm_mesh_verts_calc_normals(BMesh *bm, const Span< float3 > fnos, const Span< float3 > vcos, MutableSpan< float3 > vnos)
static void bm_mesh_loops_calc_normals_for_vert_with_clnors_fn(void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict tls)
bool BM_loop_check_cyclic_smooth_fan(BMLoop *l_curr)
static void bm_mesh_loops_calc_normals_for_vert_reduce_fn(const void *__restrict userdata, void *__restrict, void *__restrict chunk)
static void bm_mesh_loops_calc_normals(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, MutableSpan< float3 > r_lnos, MLoopNorSpaceArray *r_lnors_spacearr, const short(*clnors_data)[2], const int cd_loop_clnors_offset, const bool do_rebuild, const float split_angle_cos)
static void bm_mesh_edges_sharp_tag(BMesh *bm, const Span< float3 > fnos, float split_angle_cos, const bool do_sharp_edges_tag)
void BM_edges_sharp_from_angle_set(BMesh *bm, const float split_angle)
void BM_verts_calc_normal_vcos(BMesh *bm, const Span< float3 > fnos, const Span< float3 > vcos, MutableSpan< float3 > vnos)
void BM_loops_calc_normal_vcos(BMesh *bm, const Span< float3 > vcos, const Span< float3 > vnos, const Span< float3 > fnos, const bool use_split_normals, MutableSpan< float3 > r_lnos, MLoopNorSpaceArray *r_lnors_spacearr, short(*clnors_data)[2], const int cd_loop_clnors_offset, const bool do_rebuild)
void BM_custom_loop_normals_from_vector_layer(BMesh *bm, bool add_sharp_edges)
#define BM_FACE
#define BM_EDGE
#define BM_VERT
float BM_face_calc_normal(const BMFace *f, float r_no[3])
BMESH UPDATE FACE NORMAL.
#define BM_ELEM_API_FLAG_DISABLE(element, f)
#define BM_ELEM_API_FLAG_TEST(element, f)
#define BM_ELEM_API_FLAG_ENABLE(element, f)
BMLoop * BM_vert_step_fan_loop(BMLoop *l, BMEdge **e_step)
bool BM_edge_loop_pair(BMEdge *e, BMLoop **r_la, BMLoop **r_lb)
BMLoop * BM_face_vert_share_loop(BMFace *f, BMVert *v)
Return the Loop Shared by Face and Vertex.
BLI_INLINE BMVert * BM_edge_other_vert(BMEdge *e, const BMVert *v) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
BLI_INLINE bool BM_vert_in_edge(const BMEdge *e, const BMVert *v) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
ATTR_WARN_UNUSED_RESULT const BMLoop * l
ATTR_WARN_UNUSED_RESULT const BMVert const BMEdge * e
ATTR_WARN_UNUSED_RESULT const BMLoop * l_b
ATTR_WARN_UNUSED_RESULT const BMVert * v
constexpr bool is_empty() const
Definition BLI_span.hh:510
constexpr bool is_empty() const
Definition BLI_span.hh:261
void append(const T &value)
bool is_empty() const
local_group_size(16, 16) .push_constant(Type b
#define printf
OperationNode * node
#define cosf(x)
draw_view in_light_buf[] float
static float verts[][3]
#define UINT_MAX
Definition hash_md5.cc:44
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
void *(* MEM_mallocN)(size_t len, const char *str)
Definition mallocn.cc:44
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
#define G(x, y, z)
static void clear(Message &msg)
Definition msgfmt.cc:218
float safe_acos_approx(float x)
Frequency::GEOMETRY nor[]
BMVert * v1
BMVert * v2
struct BMLoop * l
struct BMEditSelection * prev
float no[3]
BMLoopNorEditData ** lidx_to_lnor_editdata
BMLoopNorEditData * lnor_editdata
struct BMVert * v
struct BMEdge * e
struct BMLoop * radial_next
struct BMLoop * prev
struct BMFace * f
struct BMLoop * next
blender::Vector< blender::float3, 16 > * edge_vectors
BMPartialUpdate_Params params
float co[3]
struct BMEdge * e
float no[3]
BMHeader head
int totvert
struct MLoopNorSpaceArray * lnor_spacearr
char elem_index_dirty
int totedge
ListBase selected
int totvertsel
int totloop
short selectmode
char spacearr_dirty
CustomData ldata
void * link
struct LinkNode * next
MLoopNorSpace ** lspacearr
Definition BKE_mesh.h:265
float ref_alpha
Definition BKE_mesh.h:244
float vec_ortho[3]
Definition BKE_mesh.h:242
float ref_beta
Definition BKE_mesh.h:246
float vec_ref[3]
Definition BKE_mesh.h:240
float vec_lnor[3]
Definition BKE_mesh.h:238
struct LinkNode * loops
Definition BKE_mesh.h:251