Blender V5.0
bmesh_mesh_normals.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
12
13#include "MEM_guardedalloc.h"
14
15#include "DNA_scene_types.h"
16
17#include "BLI_array.hh"
18#include "BLI_bitmap.h"
19#include "BLI_linklist_stack.h"
20#include "BLI_listbase.h"
21#include "BLI_math_base.hh"
22#include "BLI_math_vector.h"
23#include "BLI_task.h"
24#include "BLI_task.hh"
25#include "BLI_utildefines.h"
26#include "BLI_vector.hh"
27
28#include "BKE_customdata.hh"
29#include "BKE_editmesh.hh"
30#include "BKE_global.hh"
31#include "BKE_mesh.hh"
32
34
35using blender::Array;
36using blender::float3;
39using blender::Span;
40
41/* Smooth angle to use when tagging edges is disabled entirely. */
42#define EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS -FLT_MAX
43
45 BMEdge *e,
46 const float split_angle_cos);
47static void bm_edge_tag_from_smooth(Span<float3> fnos, BMEdge *e, const float split_angle_cos);
48
49/* -------------------------------------------------------------------- */
52
56
57/* We use that existing internal API flag,
58 * assuming no other tool using it would run concurrently to clnors editing. */
59#define BM_LNORSPACE_UPDATE _FLAG_MF
60
62 /* Read-only data. */
65
66 /* Write data. */
68};
69
71 const float e1diff[3],
72 const float e2diff[3],
73 const float f_no[3],
74 float v_no[3])
75{
76 /* Calculate the dot product of the two edges that meet at the loop's vertex. */
77 /* Edge vectors are calculated from `e->v1` to `e->v2`, so adjust the dot product if one but not
78 * both loops actually runs from `e->v2` to `e->v1`. */
79 float dotprod = dot_v3v3(e1diff, e2diff);
80 if ((l_iter->prev->e->v1 == l_iter->prev->v) ^ (l_iter->e->v1 == l_iter->v)) {
81 dotprod = -dotprod;
82 }
83 const float fac = blender::math::safe_acos_approx(-dotprod);
84 /* Shouldn't happen as normalizing edge-vectors cause degenerate values to be zeroed out. */
85 BLI_assert(!isnan(fac));
86 madd_v3_v3fl(v_no, f_no, fac);
87}
88
90{
91 /* NOTE(@ideasman42): Regarding redundant unit-length edge-vector calculation:
92 *
93 * This function calculates a unit-length edge-vector for every loop edge
94 * in practice this means 2x `sqrt` calls per face-corner connected to each vertex.
95 *
96 * Previously (2.9x and older), the edge vectors were calculated and stored for reuse.
97 * However the overhead of doing so did not perform well (~16% slower - single & multi-threaded)
98 * when compared with calculating the values as they are needed.
99 *
100 * For simple grid topologies this function calculates the edge-vectors 4x times.
101 * There is some room for improved performance by storing the edge-vectors for reuse locally
102 * in this function, reducing the number of redundant `sqrtf` in half (2x instead of 4x).
103 * so face loops that share an edge would not calculate it multiple times.
104 * From my tests the performance improvements are so small they're difficult to measure,
105 * the time saved removing `sqrtf` calls is lost on storing and looking up the information,
106 * even in the case of small inline lookup tables.
107 *
108 * Further, local data structures would need to support cases where
109 * stack memory isn't sufficient - adding additional complexity for corner-cases
110 * (a vertex that has thousands of connected edges for example).
111 * Unless there are important use-cases that benefit from edge-vector caching,
112 * keep this simple and calculate ~4x as many edge-vectors.
113 *
114 * In conclusion, the cost of caching & looking up edge-vectors both globally or per-vertex
115 * doesn't save enough time to make it worthwhile.
116 */
117
118 float *v_no = v->no;
119 zero_v3(v_no);
120
121 BMEdge *e_first = v->e;
122 if (e_first != nullptr) {
123 float e1diff[3], e2diff[3];
124 BMEdge *e_iter = e_first;
125 do {
126 BMLoop *l_first = e_iter->l;
127 if (l_first != nullptr) {
128 sub_v3_v3v3(e2diff, e_iter->v1->co, e_iter->v2->co);
129 normalize_v3(e2diff);
130
131 BMLoop *l_iter = l_first;
132 do {
133 if (l_iter->v == v) {
134 BMEdge *e_prev = l_iter->prev->e;
135 sub_v3_v3v3(e1diff, e_prev->v1->co, e_prev->v2->co);
136 normalize_v3(e1diff);
137
138 bm_vert_calc_normals_accum_loop(l_iter, e1diff, e2diff, l_iter->f->no, v_no);
139 }
140 } while ((l_iter = l_iter->radial_next) != l_first);
141 }
142 } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v)) != e_first);
143
144 if (LIKELY(normalize_v3(v_no) != 0.0f)) {
145 return;
146 }
147 }
148 /* Fallback normal. */
149 normalize_v3_v3(v_no, v->co);
150}
151
/* Parallel mempool-iterator callback: recompute the normal of a single vertex.
 * No user-data or TLS is needed (reads/writes only the vertex itself).
 * NOTE(review): doc line 157 is missing from this extract — presumably the
 * call `bm_vert_calc_normals_impl(v);`; confirm against the upstream file. */
152static void bm_vert_calc_normals_cb(void * /*userdata*/,
153 MempoolIterData *mp_v,
154 const TaskParallelTLS *__restrict /*tls*/)
155{
156 BMVert *v = (BMVert *)mp_v; /* Mempool iterator hands elements over as opaque pointers. */
158}
159
161{
162 /* See #bm_vert_calc_normals_impl note on performance. */
163 float *v_no = data->vnos[BM_elem_index_get(v)];
164 zero_v3(v_no);
165
166 /* Loop over edges. */
167 BMEdge *e_first = v->e;
168 if (e_first != nullptr) {
169 float e1diff[3], e2diff[3];
170 BMEdge *e_iter = e_first;
171 do {
172 BMLoop *l_first = e_iter->l;
173 if (l_first != nullptr) {
174 sub_v3_v3v3(e2diff,
175 data->vcos[BM_elem_index_get(e_iter->v1)],
176 data->vcos[BM_elem_index_get(e_iter->v2)]);
177 normalize_v3(e2diff);
178
179 BMLoop *l_iter = l_first;
180 do {
181 if (l_iter->v == v) {
182 BMEdge *e_prev = l_iter->prev->e;
183 sub_v3_v3v3(e1diff,
184 data->vcos[BM_elem_index_get(e_prev->v1)],
185 data->vcos[BM_elem_index_get(e_prev->v2)]);
186 normalize_v3(e1diff);
187
189 l_iter, e1diff, e2diff, data->fnos[BM_elem_index_get(l_iter->f)], v_no);
190 }
191 } while ((l_iter = l_iter->radial_next) != l_first);
192 }
193 } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v)) != e_first);
194
195 if (LIKELY(normalize_v3(v_no) != 0.0f)) {
196 return;
197 }
198 }
199 /* Fallback normal. */
201}
202
/* Parallel mempool-iterator callback: recompute the normal of a single vertex
 * using externally supplied coordinate/normal arrays carried in `userdata`.
 * NOTE(review): doc lines 207 and 210 are missing from this extract —
 * line 207 presumably casts `userdata` to the coords data struct (the dangling
 * `userdata);` below is the tail of that cast expression), and line 210
 * presumably calls the `..._with_coords` per-vertex implementation.
 * Confirm against the upstream file. */
203static void bm_vert_calc_normals_with_coords_cb(void *userdata,
204 MempoolIterData *mp_v,
205 const TaskParallelTLS *__restrict /*tls*/)
206{
208 userdata);
209 BMVert *v = (BMVert *)mp_v; /* Mempool iterator hands elements over as opaque pointers. */
211}
212
214 const Span<float3> fnos,
215 const Span<float3> vcos,
217{
218 BM_mesh_elem_index_ensure(bm, BM_FACE | ((!vnos.is_empty() || !vcos.is_empty()) ? BM_VERT : 0));
219
220 TaskParallelSettings settings;
222 settings.use_threading = bm->totvert >= BM_THREAD_LIMIT;
223
224 if (vcos.is_empty()) {
225 BM_iter_parallel(bm, BM_VERTS_OF_MESH, bm_vert_calc_normals_cb, nullptr, &settings);
226 }
227 else {
228 BLI_assert(!fnos.is_empty() || !vnos.is_empty());
230 data.fnos = fnos;
231 data.vcos = vcos;
232 data.vnos = vnos;
233 BM_iter_parallel(bm, BM_VERTS_OF_MESH, bm_vert_calc_normals_with_coords_cb, &data, &settings);
234 }
235}
236
237static void bm_face_calc_normals_cb(void * /*userdata*/,
238 MempoolIterData *mp_f,
239 const TaskParallelTLS *__restrict /*tls*/)
240{
241 BMFace *f = (BMFace *)mp_f;
242
243 BM_face_calc_normal(f, f->no);
244}
245
247{
248 if (params->face_normals) {
249 /* Calculate all face normals. */
250 TaskParallelSettings settings;
252 settings.use_threading = bm->totedge >= BM_THREAD_LIMIT;
253
254 BM_iter_parallel(bm, BM_FACES_OF_MESH, bm_face_calc_normals_cb, nullptr, &settings);
255 }
256
257 /* Add weighted face normals to vertices, and normalize vert normals. */
258 bm_mesh_verts_calc_normals(bm, {}, {}, {});
259}
260
267
269
270/* -------------------------------------------------------------------- */
273
275 const BMPartialUpdate *bmpinfo,
277{
278 using namespace blender;
279 BLI_assert(bmpinfo->params.do_normals);
280 /* While harmless, exit early if there is nothing to do. */
281 if (UNLIKELY(bmpinfo->verts.is_empty() && bmpinfo->faces.is_empty())) {
282 return;
283 }
284
285 if (params->face_normals) {
286 threading::parallel_for(bmpinfo->faces.index_range(), 1024, [&](const IndexRange range) {
287 for (const int i : range) {
288 BMFace *f = bmpinfo->faces[i];
289 BM_face_calc_normal(f, f->no);
290 }
291 });
292 }
293
294 threading::parallel_for(bmpinfo->verts.index_range(), 1024, [&](const IndexRange range) {
295 for (const int i : range) {
296 BMVert *v = bmpinfo->verts[i];
297 bm_vert_calc_normals_impl(v);
298 }
299 });
300}
301
308
310
311/* -------------------------------------------------------------------- */
314
316 const Span<float3> fnos,
317 const Span<float3> vcos,
319{
320 /* Add weighted face normals to vertices, and normalize vert normals. */
321 bm_mesh_verts_calc_normals(bm, fnos, vcos, vnos);
322}
323
325
326/* -------------------------------------------------------------------- */
329
/* Tag smooth edges and (re)assign edge/face/loop indices inline, clearing the
 * corresponding dirty-index flags when done.
 * NOTE(review): several doc lines are missing from this extract and should be
 * confirmed against the upstream file:
 * - line 343 (presumably blank),
 * - line 346: the body of the smooth/manifold check below — presumably
 *   `BM_elem_flag_enable(e, BM_ELEM_TAG);`,
 * - line 359: inside the per-face loop over corners — presumably a loop-tag
 *   clear/enable. */
330void BM_normals_loops_edges_tag(BMesh *bm, const bool do_edges)
331{
332 BMFace *f;
333 BMEdge *e;
334 BMIter fiter, eiter;
335 BMLoop *l_curr, *l_first;
336
337 if (do_edges) {
338 int index_edge;
339 BM_ITER_MESH_INDEX (e, &eiter, bm, BM_EDGES_OF_MESH, index_edge) {
340 BMLoop *l_a, *l_b;
341
342 BM_elem_index_set(e, index_edge); /* set_inline */
344 if (BM_edge_loop_pair(e, &l_a, &l_b)) {
/* Manifold edge flagged smooth, with faces winding the same way (`l_a->v != l_b->v`). */
345 if (BM_elem_flag_test(e, BM_ELEM_SMOOTH) && l_a->v != l_b->v) {
347 }
348 }
349 }
350 bm->elem_index_dirty &= ~BM_EDGE;
351 }
352
/* Assign face & loop indices in a single pass over all faces. */
353 int index_face, index_loop = 0;
354 BM_ITER_MESH_INDEX (f, &fiter, bm, BM_FACES_OF_MESH, index_face) {
355 BM_elem_index_set(f, index_face); /* set_inline */
356 l_curr = l_first = BM_FACE_FIRST_LOOP(f);
357 do {
358 BM_elem_index_set(l_curr, index_loop++); /* set_inline */
360 } while ((l_curr = l_curr->next) != l_first);
361 }
362 bm->elem_index_dirty &= ~(BM_FACE | BM_LOOP);
363}
364
369 const Span<float3> fnos,
370 float split_angle_cos,
371 const bool do_sharp_edges_tag)
372{
373 BMIter eiter;
374 BMEdge *e;
375 int i;
376
377 if (!fnos.is_empty()) {
379 }
380
381 if (do_sharp_edges_tag) {
383 BM_elem_index_set(e, i); /* set_inline */
384 if (e->l != nullptr) {
385 bm_edge_tag_from_smooth_and_set_sharp(fnos, e, split_angle_cos);
386 }
387 }
388 }
389 else {
391 BM_elem_index_set(e, i); /* set_inline */
392 if (e->l != nullptr) {
393 bm_edge_tag_from_smooth(fnos, e, split_angle_cos);
394 }
395 }
396 }
397
398 bm->elem_index_dirty &= ~BM_EDGE;
399}
400
401void BM_edges_sharp_from_angle_set(BMesh *bm, const float split_angle)
402{
403 if (split_angle >= float(M_PI)) {
404 /* Nothing to do! */
405 return;
406 }
407
408 bm_mesh_edges_sharp_tag(bm, {}, cosf(split_angle), true);
409}
410
412
413/* -------------------------------------------------------------------- */
416
418{
419 BMLoop *lfan_pivot_next = l_curr;
420 BMEdge *e_next = l_curr->e;
421
422 BLI_assert(!BM_elem_flag_test(lfan_pivot_next, BM_ELEM_TAG));
423 BM_elem_flag_enable(lfan_pivot_next, BM_ELEM_TAG);
424
425 while (true) {
426 /* Much simpler than in sibling code with basic Mesh data! */
427 lfan_pivot_next = BM_vert_step_fan_loop(lfan_pivot_next, &e_next);
428
429 if (!lfan_pivot_next || !BM_elem_flag_test(e_next, BM_ELEM_TAG)) {
430 /* Sharp loop/edge, so not a cyclic smooth fan... */
431 return false;
432 }
433 /* Smooth loop/edge... */
434 if (BM_elem_flag_test(lfan_pivot_next, BM_ELEM_TAG)) {
435 if (lfan_pivot_next == l_curr) {
436 /* We walked around a whole cyclic smooth fan
437 * without finding any already-processed loop,
438 * means we can use initial l_curr/l_prev edge as start for this smooth fan. */
439 return true;
440 }
441 /* ... already checked in some previous looping, we can abort. */
442 return false;
443 }
444 /* ... we can skip it in future, and keep checking the smooth fan. */
445 BM_elem_flag_enable(lfan_pivot_next, BM_ELEM_TAG);
446 }
447}
448
462 const Span<float3> vcos,
463 const Span<float3> fnos,
464 const short (*clnors_data)[2],
465 const int cd_loop_clnors_offset,
466 const bool has_clnors,
467 /* Cache. */
469 /* Iterate. */
470 BMLoop *l_curr,
471 /* Result. */
472 MutableSpan<float3> r_lnos,
473 MLoopNorSpaceArray *r_lnors_spacearr)
474{
475 BLI_assert((bm->elem_index_dirty & BM_LOOP) == 0);
476 BLI_assert(fnos.is_empty() || ((bm->elem_index_dirty & BM_FACE) == 0));
477 BLI_assert(vcos.is_empty() || ((bm->elem_index_dirty & BM_VERT) == 0));
479
480 int handled = 0;
481
482 /* Temp normal stack. */
483 BLI_SMALLSTACK_DECLARE(normal, float *);
484 /* Temp clnors stack. */
485 BLI_SMALLSTACK_DECLARE(clnors, short *);
486 /* Temp edge vectors stack, only used when computing lnor spacearr. */
487
488 /* A smooth edge, we have to check for cyclic smooth fan case.
489 * If we find a new, never-processed cyclic smooth fan, we can do it now using that loop/edge
490 * as 'entry point', otherwise we can skip it. */
491
492 /* NOTE: In theory, we could make bm_mesh_loop_check_cyclic_smooth_fan() store
493 * mlfan_pivot's in a stack, to avoid having to fan again around
494 * the vert during actual computation of clnor & clnorspace. However, this would complicate
495 * the code, add more memory usage, and
496 * BM_vert_step_fan_loop() is quite cheap in term of CPU cycles,
497 * so really think it's not worth it. */
498 if (BM_elem_flag_test(l_curr->e, BM_ELEM_TAG) &&
500 {
501 }
502 else if (!BM_elem_flag_test(l_curr->e, BM_ELEM_TAG) &&
504 {
505 /* Simple case (both edges around that vertex are sharp in related face),
506 * this vertex just takes its face normal.
507 */
508 const int l_curr_index = BM_elem_index_get(l_curr);
509 const float3 &no = !fnos.is_empty() ? fnos[BM_elem_index_get(l_curr->f)] :
510 float3(l_curr->f->no);
511 copy_v3_v3(r_lnos[l_curr_index], no);
512
513 /* If needed, generate this (simple!) lnor space. */
514 if (r_lnors_spacearr) {
515 float vec_curr[3], vec_prev[3];
516 MLoopNorSpace *lnor_space = BKE_lnor_space_create(r_lnors_spacearr);
517
518 {
519 const BMVert *v_pivot = l_curr->v;
520 const float3 &co_pivot = !vcos.is_empty() ? vcos[BM_elem_index_get(v_pivot)] :
521 float3(v_pivot->co);
522 const BMVert *v_1 = l_curr->next->v;
523 const float3 co_1 = !vcos.is_empty() ? vcos[BM_elem_index_get(v_1)] : float3(v_1->co);
524 const BMVert *v_2 = l_curr->prev->v;
525 const float3 co_2 = !vcos.is_empty() ? vcos[BM_elem_index_get(v_2)] : float3(v_2->co);
526
527 BLI_assert(v_1 == BM_edge_other_vert(l_curr->e, v_pivot));
528 BLI_assert(v_2 == BM_edge_other_vert(l_curr->prev->e, v_pivot));
529
530 sub_v3_v3v3(vec_curr, co_1, co_pivot);
531 normalize_v3(vec_curr);
532 sub_v3_v3v3(vec_prev, co_2, co_pivot);
533 normalize_v3(vec_prev);
534 }
535
536 BKE_lnor_space_define(lnor_space, r_lnos[l_curr_index], vec_curr, vec_prev, {});
537 /* We know there is only one loop in this space,
538 * no need to create a linklist in this case... */
539 BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, l_curr_index, l_curr, true);
540
541 if (has_clnors) {
542 const short (*clnor)[2] = clnors_data ?
543 &clnors_data[l_curr_index] :
544 static_cast<const short (*)[2]>(
545 BM_ELEM_CD_GET_VOID_P(l_curr, cd_loop_clnors_offset));
546 BKE_lnor_space_custom_data_to_normal(lnor_space, *clnor, r_lnos[l_curr_index]);
547 }
548 }
549 handled = 1;
550 }
551 /* We *do not need* to check/tag loops as already computed!
552 * Due to the fact a loop only links to one of its two edges,
553 * a same fan *will never be walked more than once!*
554 * Since we consider edges having neighbor faces with inverted (flipped) normals as sharp,
555 * we are sure that no fan will be skipped, even only considering the case
556 * (sharp curr_edge, smooth prev_edge), and not the alternative
557 * (smooth curr_edge, sharp prev_edge).
558 * All this due/thanks to link between normals and loop ordering.
559 */
560 else {
561 /* We have to fan around current vertex, until we find the other non-smooth edge,
562 * and accumulate face normals into the vertex!
563 * Note in case this vertex has only one sharp edge,
564 * this is a waste because the normal is the same as the vertex normal,
565 * but I do not see any easy way to detect that (would need to count number of sharp edges
566 * per vertex, I doubt the additional memory usage would be worth it, especially as it
567 * should not be a common case in real-life meshes anyway).
568 */
569 BMVert *v_pivot = l_curr->v;
570 BMEdge *e_next;
571 const BMEdge *e_org = l_curr->e;
572 BMLoop *lfan_pivot, *lfan_pivot_next;
573 int lfan_pivot_index;
574 float lnor[3] = {0.0f, 0.0f, 0.0f};
575 float vec_curr[3], vec_next[3], vec_org[3];
576
577 /* We validate clnors data on the fly - cheapest way to do! */
578 int clnors_avg[2] = {0, 0};
579 const short (*clnor_ref)[2] = nullptr;
580 int clnors_count = 0;
581 bool clnors_invalid = false;
582
583 const float3 &co_pivot = !vcos.is_empty() ? vcos[BM_elem_index_get(v_pivot)] :
584 float3(v_pivot->co);
585
586 MLoopNorSpace *lnor_space = r_lnors_spacearr ? BKE_lnor_space_create(r_lnors_spacearr) :
587 nullptr;
588
589 BLI_assert((edge_vectors == nullptr) || edge_vectors->is_empty());
590
591 lfan_pivot = l_curr;
592 lfan_pivot_index = BM_elem_index_get(lfan_pivot);
593 e_next = lfan_pivot->e; /* Current edge here, actually! */
594
595 /* Only need to compute previous edge's vector once,
596 * then we can just reuse old current one! */
597 {
598 const BMVert *v_2 = lfan_pivot->next->v;
599 const float3 co_2 = !vcos.is_empty() ? vcos[BM_elem_index_get(v_2)] : float3(v_2->co);
600
601 BLI_assert(v_2 == BM_edge_other_vert(e_next, v_pivot));
602
603 sub_v3_v3v3(vec_org, co_2, co_pivot);
604 normalize_v3(vec_org);
605 copy_v3_v3(vec_curr, vec_org);
606
607 if (r_lnors_spacearr) {
608 edge_vectors->append(vec_org);
609 }
610 }
611
612 while (true) {
613 lfan_pivot_next = BM_vert_step_fan_loop(lfan_pivot, &e_next);
614 if (lfan_pivot_next) {
615 BLI_assert(lfan_pivot_next->v == v_pivot);
616 }
617 else {
618 /* next edge is non-manifold, we have to find it ourselves! */
619 e_next = (lfan_pivot->e == e_next) ? lfan_pivot->prev->e : lfan_pivot->e;
620 }
621
622 /* Compute edge vector.
623 * NOTE: We could pre-compute those into an array, in the first iteration,
624 * instead of computing them twice (or more) here.
625 * However, time gained is not worth memory and time lost,
626 * given the fact that this code should not be called that much in real-life meshes.
627 */
628 {
629 const BMVert *v_2 = BM_edge_other_vert(e_next, v_pivot);
630 const float3 co_2 = !vcos.is_empty() ? vcos[BM_elem_index_get(v_2)] : float3(v_2->co);
631
632 sub_v3_v3v3(vec_next, co_2, co_pivot);
633 normalize_v3(vec_next);
634 }
635
636 {
637 /* Code similar to accumulate_vertex_normals_poly_v3. */
638 /* Calculate angle between the two face edges incident on this vertex. */
639 const BMFace *f = lfan_pivot->f;
640 const float fac = blender::math::safe_acos_approx(dot_v3v3(vec_next, vec_curr));
641 const float3 &no = !fnos.is_empty() ? fnos[BM_elem_index_get(f)] : float3(f->no);
642 /* Accumulate */
643 madd_v3_v3fl(lnor, no, fac);
644
645 if (has_clnors) {
646 /* Accumulate all clnors, if they are not all equal we have to fix that! */
647 const short (*clnor)[2] = clnors_data ?
648 &clnors_data[lfan_pivot_index] :
649 static_cast<const short (*)[2]>(BM_ELEM_CD_GET_VOID_P(
650 lfan_pivot, cd_loop_clnors_offset));
651 if (clnors_count) {
652 clnors_invalid |= ((*clnor_ref)[0] != (*clnor)[0] || (*clnor_ref)[1] != (*clnor)[1]);
653 }
654 else {
655 clnor_ref = clnor;
656 }
657 clnors_avg[0] += (*clnor)[0];
658 clnors_avg[1] += (*clnor)[1];
659 clnors_count++;
660 /* We store here a pointer to all custom lnors processed. */
661 BLI_SMALLSTACK_PUSH(clnors, (short *)*clnor);
662 }
663 }
664
665 /* We store here a pointer to all loop-normals processed. */
666 BLI_SMALLSTACK_PUSH(normal, (float *)r_lnos[lfan_pivot_index]);
667
668 if (r_lnors_spacearr) {
669 /* Assign current lnor space to current 'vertex' loop. */
670 BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, lfan_pivot_index, lfan_pivot, false);
671 if (e_next != e_org) {
672 /* We store here all edges-normalized vectors processed. */
673 edge_vectors->append(vec_next);
674 }
675 }
676
677 handled += 1;
678
679 if (!BM_elem_flag_test(e_next, BM_ELEM_TAG) || (e_next == e_org)) {
680 /* Next edge is sharp, we have finished with this fan of faces around this vert! */
681 break;
682 }
683
684 /* Copy next edge vector to current one. */
685 copy_v3_v3(vec_curr, vec_next);
686 /* Next pivot loop to current one. */
687 lfan_pivot = lfan_pivot_next;
688 lfan_pivot_index = BM_elem_index_get(lfan_pivot);
689 }
690
691 {
692 float lnor_len = normalize_v3(lnor);
693
694 /* If we are generating lnor spacearr, we can now define the one for this fan. */
695 if (r_lnors_spacearr) {
696 if (UNLIKELY(lnor_len == 0.0f)) {
697 /* Use vertex normal as fallback! */
698 copy_v3_v3(lnor, r_lnos[lfan_pivot_index]);
699 lnor_len = 1.0f;
700 }
701
702 BKE_lnor_space_define(lnor_space, lnor, vec_org, vec_next, *edge_vectors);
703 edge_vectors->clear();
704
705 if (has_clnors) {
706 if (clnors_invalid) {
707 short *clnor;
708
709 clnors_avg[0] /= clnors_count;
710 clnors_avg[1] /= clnors_count;
711 /* Fix/update all clnors of this fan with computed average value. */
712
713 /* Prints continuously when merge custom normals, so commenting. */
714 // printf("Invalid clnors in this fan!\n");
715
716 while ((clnor = static_cast<short *>(BLI_SMALLSTACK_POP(clnors)))) {
717 // print_v2("org clnor", clnor);
718 clnor[0] = short(clnors_avg[0]);
719 clnor[1] = short(clnors_avg[1]);
720 }
721 // print_v2("new clnors", clnors_avg);
722 }
723 else {
724 /* We still have to consume the stack! */
725 while (BLI_SMALLSTACK_POP(clnors)) {
726 /* pass */
727 }
728 }
729 BKE_lnor_space_custom_data_to_normal(lnor_space, *clnor_ref, lnor);
730 }
731 }
732
733 /* In case we get a zero normal here, just use vertex normal already set! */
734 if (LIKELY(lnor_len != 0.0f)) {
735 /* Copy back the final computed normal into all related loop-normals. */
736 float *nor;
737
738 while ((nor = static_cast<float *>(BLI_SMALLSTACK_POP(normal)))) {
739 copy_v3_v3(nor, lnor);
740 }
741 }
742 else {
743 /* We still have to consume the stack! */
744 while (BLI_SMALLSTACK_POP(normal)) {
745 /* pass */
746 }
747 }
748 }
749
750 /* Tag related vertex as sharp, to avoid fanning around it again
751 * (in case it was a smooth one). */
752 if (r_lnors_spacearr) {
754 }
755 }
756 return handled;
757}
758
/* `qsort`-style comparator ordering #BMLoop pointers by their loop index
 * (used to sort the per-vertex loop list). Never returns 0 — presumably loop
 * indices are unique within a mesh.
 * NOTE(review): doc lines 761-762 are missing from this extract — presumably
 * the index comparison, e.g.
 * `if (BM_elem_index_get((const BMLoop *)a) < BM_elem_index_get((const BMLoop *)b)) {`;
 * confirm against the upstream file. */
759static int bm_loop_index_cmp(const void *a, const void *b)
760{
763 return -1;
764 }
765 return 1;
766}
767
777 const BMLoop *l_a,
778 const BMLoop *l_b)
779{
780 BLI_assert(l_a->radial_next == l_b);
781 return (
782 /* The face is manifold. */
783 (l_b->radial_next == l_a) &&
784 /* Faces have winding that faces the same way. */
785 (l_a->v != l_b->v) &&
786 /* The edge is smooth. */
788 /* Both faces are smooth. */
790}
791
793 BMEdge *e,
794 const float split_angle_cos)
795{
796 BLI_assert(e->l != nullptr);
797 BMLoop *l_a = e->l, *l_b = l_a->radial_next;
798 bool is_smooth = false;
800 if (split_angle_cos != -1.0f) {
801 const float dot = fnos.is_empty() ? dot_v3v3(l_a->f->no, l_b->f->no) :
802 dot_v3v3(fnos[BM_elem_index_get(l_a->f)],
803 fnos[BM_elem_index_get(l_b->f)]);
804 if (dot >= split_angle_cos) {
805 is_smooth = true;
806 }
807 }
808 else {
809 is_smooth = true;
810 }
811 }
812
813 /* Perform `BM_elem_flag_set(e, BM_ELEM_TAG, is_smooth)`
814 * NOTE: This will be set by multiple threads however it will be set to the same value. */
815
816 /* No need for atomics here as this is a single byte. */
817 char *hflag_p = &e->head.hflag;
818 if (is_smooth) {
819 *hflag_p = *hflag_p | BM_ELEM_TAG;
820 }
821 else {
822 *hflag_p = *hflag_p & ~BM_ELEM_TAG;
823 }
824}
825
834 BMEdge *e,
835 const float split_angle_cos)
836{
837 BLI_assert(e->l != nullptr);
838 BMLoop *l_a = e->l, *l_b = l_a->radial_next;
839 bool is_smooth = false;
841 if (split_angle_cos != -1.0f) {
842 const float dot = fnos.is_empty() ? dot_v3v3(l_a->f->no, l_b->f->no) :
843 dot_v3v3(fnos[BM_elem_index_get(l_a->f)],
844 fnos[BM_elem_index_get(l_b->f)]);
845 if (dot >= split_angle_cos) {
846 is_smooth = true;
847 }
848 else {
849 /* Note that we do not care about the other sharp-edge cases
850 * (sharp face, non-manifold edge, etc.),
851 * only tag edge as sharp when it is due to angle threshold. */
853 }
854 }
855 else {
856 is_smooth = true;
857 }
858 }
859
860 BM_elem_flag_set(e, BM_ELEM_TAG, is_smooth);
861}
862
869 BMesh *bm,
870 const Span<float3> vcos,
871 const Span<float3> fnos,
872 MutableSpan<float3> r_lnos,
873 const short (*clnors_data)[2],
874 const int cd_loop_clnors_offset,
875 const bool do_rebuild,
876 const float split_angle_cos,
877 /* TLS */
878 MLoopNorSpaceArray *r_lnors_spacearr,
880 /* Iterate over. */
881 BMVert *v)
882{
883 /* Respecting face order is necessary so the initial starting loop is consistent
884 * with looping over loops of all faces.
885 *
886 * Logically we could sort the loops by their index & loop over them
887 * however it's faster to use the lowest index of an un-ordered list
888 * since it's common that smooth vertices only ever need to pick one loop
889 * which then handles all the others.
890 *
891 * Sorting is only performed when multiple fans are found. */
892 const bool has_clnors = true;
893 LinkNode *loops_of_vert = nullptr;
894 int loops_of_vert_count = 0;
895 /* When false the caller must have already tagged the edges. */
896 const bool do_edge_tag = (split_angle_cos != EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
897
898 /* The loop with the lowest index. */
899 {
900 LinkNode *link_best;
901 uint index_best = UINT_MAX;
902 BMEdge *e_curr_iter = v->e;
903 do { /* Edges of vertex. */
904 BMLoop *l_curr = e_curr_iter->l;
905 if (l_curr == nullptr) {
906 continue;
907 }
908
909 if (do_edge_tag) {
910 bm_edge_tag_from_smooth(fnos, e_curr_iter, split_angle_cos);
911 }
912
913 do { /* Radial loops. */
914 if (l_curr->v != v) {
915 continue;
916 }
917 if (do_rebuild && !BM_ELEM_API_FLAG_TEST(l_curr, BM_LNORSPACE_UPDATE) &&
918 !(bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL))
919 {
920 continue;
921 }
923 BLI_linklist_prepend_alloca(&loops_of_vert, l_curr);
924 loops_of_vert_count += 1;
925
926 const uint index_test = uint(BM_elem_index_get(l_curr));
927 if (index_best > index_test) {
928 index_best = index_test;
929 link_best = loops_of_vert;
930 }
931 } while ((l_curr = l_curr->radial_next) != e_curr_iter->l);
932 } while ((e_curr_iter = BM_DISK_EDGE_NEXT(e_curr_iter, v)) != v->e);
933
934 if (UNLIKELY(loops_of_vert == nullptr)) {
935 return;
936 }
937
938 /* Immediately pop the best element.
939 * The order doesn't matter, so swap the links as it's simpler than tracking
940 * reference to `link_best`. */
941 if (link_best != loops_of_vert) {
942 std::swap(link_best->link, loops_of_vert->link);
943 }
944 }
945
946 bool loops_of_vert_is_sorted = false;
947
948 /* Keep track of the number of loops that have been assigned. */
949 int loops_of_vert_handled = 0;
950
951 while (loops_of_vert != nullptr) {
952 BMLoop *l_best = static_cast<BMLoop *>(loops_of_vert->link);
953 loops_of_vert = loops_of_vert->next;
954
955 BLI_assert(l_best->v == v);
956 loops_of_vert_handled += bm_mesh_loops_calc_normals_for_loop(bm,
957 vcos,
958 fnos,
959 clnors_data,
960 cd_loop_clnors_offset,
961 has_clnors,
962 edge_vectors,
963 l_best,
964 r_lnos,
965 r_lnors_spacearr);
966
967 /* Check if an early exit is possible without an exhaustive inspection of every loop
968 * where 1 loop's fan extends out to all remaining loops.
969 * This is a common case for smooth vertices. */
970 BLI_assert(loops_of_vert_handled <= loops_of_vert_count);
971 if (loops_of_vert_handled == loops_of_vert_count) {
972 break;
973 }
974
975 /* Note on sorting, in some cases it will be faster to scan for the lowest index each time.
976 * However in the worst case this is `O(N^2)`, so use a single sort call instead. */
977 if (!loops_of_vert_is_sorted) {
978 if (loops_of_vert && loops_of_vert->next) {
979 loops_of_vert = BLI_linklist_sort(loops_of_vert, bm_loop_index_cmp);
980 loops_of_vert_is_sorted = true;
981 }
982 }
983 }
984}
985
991 BMesh *bm,
992 const Span<float3> vcos,
993 const Span<float3> fnos,
994 MutableSpan<float3> r_lnos,
995 const bool do_rebuild,
996 const float split_angle_cos,
997 /* TLS */
998 MLoopNorSpaceArray *r_lnors_spacearr,
1000 /* Iterate over. */
1001 BMVert *v)
1002{
1003 const bool has_clnors = false;
1004 const short (*clnors_data)[2] = nullptr;
1005 /* When false the caller must have already tagged the edges. */
1006 const bool do_edge_tag = (split_angle_cos != EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
1007 const int cd_loop_clnors_offset = -1;
1008
1009 BMEdge *e_curr_iter;
1010
1011 /* Unfortunately a loop is needed just to clear loop-tags. */
1012 e_curr_iter = v->e;
1013 do { /* Edges of vertex. */
1014 BMLoop *l_curr = e_curr_iter->l;
1015 if (l_curr == nullptr) {
1016 continue;
1017 }
1018
1019 if (do_edge_tag) {
1020 bm_edge_tag_from_smooth(fnos, e_curr_iter, split_angle_cos);
1021 }
1022
1023 do { /* Radial loops. */
1024 if (l_curr->v != v) {
1025 continue;
1026 }
1028 } while ((l_curr = l_curr->radial_next) != e_curr_iter->l);
1029 } while ((e_curr_iter = BM_DISK_EDGE_NEXT(e_curr_iter, v)) != v->e);
1030
1031 e_curr_iter = v->e;
1032 do { /* Edges of vertex. */
1033 BMLoop *l_curr = e_curr_iter->l;
1034 if (l_curr == nullptr) {
1035 continue;
1036 }
1037 do { /* Radial loops. */
1038 if (l_curr->v != v) {
1039 continue;
1040 }
1041 if (do_rebuild && !BM_ELEM_API_FLAG_TEST(l_curr, BM_LNORSPACE_UPDATE) &&
1042 !(bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL))
1043 {
1044 continue;
1045 }
1047 vcos,
1048 fnos,
1049 clnors_data,
1050 cd_loop_clnors_offset,
1051 has_clnors,
1052 edge_vectors,
1053 l_curr,
1054 r_lnos,
1055 r_lnors_spacearr);
1056 } while ((l_curr = l_curr->radial_next) != e_curr_iter->l);
1057 } while ((e_curr_iter = BM_DISK_EDGE_NEXT(e_curr_iter, v)) != v->e);
1058}
1059
1069 const Span<float3> vcos,
1070 const Span<float3> fnos,
1071 MutableSpan<float3> r_lnos,
1072 MLoopNorSpaceArray *r_lnors_spacearr,
1073 const short (*clnors_data)[2],
1074 const int cd_loop_clnors_offset,
1075 const bool do_rebuild,
1076 const float split_angle_cos)
1077{
1078 BMIter fiter;
1079 BMFace *f_curr;
1080 const bool has_clnors = clnors_data || (cd_loop_clnors_offset != -1);
1081 /* When false the caller must have already tagged the edges. */
1082 const bool do_edge_tag = (split_angle_cos != EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
1083
1084 MLoopNorSpaceArray _lnors_spacearr = {nullptr};
1085
1086 std::unique_ptr<blender::Vector<blender::float3, 16>> edge_vectors = nullptr;
1087
1088 {
1089 char htype = 0;
1090 if (!vcos.is_empty()) {
1091 htype |= BM_VERT;
1092 }
1093 /* Face/Loop indices are set inline below. */
1095 }
1096
1097 if (!r_lnors_spacearr && has_clnors) {
1098 /* We need to compute lnor spacearr if some custom lnor data are given to us! */
1099 r_lnors_spacearr = &_lnors_spacearr;
1100 }
1101 if (r_lnors_spacearr) {
1102 BKE_lnor_spacearr_init(r_lnors_spacearr, bm->totloop, MLNOR_SPACEARR_BMLOOP_PTR);
1103 edge_vectors = std::make_unique<blender::Vector<blender::float3, 16>>();
1104 }
1105
1106 /* Clear all loops' tags (means none are to be skipped for now). */
1107 int index_face, index_loop = 0;
1108 BM_ITER_MESH_INDEX (f_curr, &fiter, bm, BM_FACES_OF_MESH, index_face) {
1109 BMLoop *l_curr, *l_first;
1110
1111 BM_elem_index_set(f_curr, index_face); /* set_inline */
1112
1113 l_curr = l_first = BM_FACE_FIRST_LOOP(f_curr);
1114 do {
1115 BM_elem_index_set(l_curr, index_loop++); /* set_inline */
1117 } while ((l_curr = l_curr->next) != l_first);
1118 }
1119 bm->elem_index_dirty &= ~(BM_FACE | BM_LOOP);
1120
1121 /* Always tag edges based on winding & sharp edge flag
1122 * (even when the auto-smooth angle doesn't need to be calculated). */
1123 if (do_edge_tag) {
1124 bm_mesh_edges_sharp_tag(bm, fnos, has_clnors ? -1.0f : split_angle_cos, false);
1125 }
1126
1127 /* We now know edges that can be smoothed (they are tagged),
1128 * and edges that will be hard (they aren't).
1129 * Now, time to generate the normals.
1130 */
1131 BM_ITER_MESH (f_curr, &fiter, bm, BM_FACES_OF_MESH) {
1132 BMLoop *l_curr, *l_first;
1133
1134 l_curr = l_first = BM_FACE_FIRST_LOOP(f_curr);
1135 do {
1136 if (do_rebuild && !BM_ELEM_API_FLAG_TEST(l_curr, BM_LNORSPACE_UPDATE) &&
1137 !(bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL))
1138 {
1139 continue;
1140 }
1142 vcos,
1143 fnos,
1144 clnors_data,
1145 cd_loop_clnors_offset,
1146 has_clnors,
1147 edge_vectors.get(),
1148 l_curr,
1149 r_lnos,
1150 r_lnors_spacearr);
1151 } while ((l_curr = l_curr->next) != l_first);
1152 }
1153
1154 if (r_lnors_spacearr) {
1155 if (r_lnors_spacearr == &_lnors_spacearr) {
1156 BKE_lnor_spacearr_free(r_lnors_spacearr);
1157 }
1158 }
1159}
1160
1175
1183
1184static void bm_mesh_loops_calc_normals_for_vert_init_fn(const void *__restrict userdata,
1185 void *__restrict chunk)
1186{
1187 const auto *data = static_cast<const BMLoopsCalcNormalsWithCoordsData *>(userdata);
1188 auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(chunk);
1189 if (data->r_lnors_spacearr) {
1190 tls_data->edge_vectors = MEM_new<blender::Vector<blender::float3, 16>>(__func__);
1191 BKE_lnor_spacearr_tls_init(data->r_lnors_spacearr, &tls_data->lnors_spacearr_buf);
1192 tls_data->lnors_spacearr = &tls_data->lnors_spacearr_buf;
1193 }
1194 else {
1195 tls_data->lnors_spacearr = nullptr;
1196 }
1197}
1198
1199static void bm_mesh_loops_calc_normals_for_vert_reduce_fn(const void *__restrict userdata,
1200 void *__restrict /*chunk_join*/,
1201 void *__restrict chunk)
1202{
1203 const auto *data = static_cast<const BMLoopsCalcNormalsWithCoordsData *>(userdata);
1204 auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(chunk);
1205
1206 if (data->r_lnors_spacearr) {
1207 BKE_lnor_spacearr_tls_join(data->r_lnors_spacearr, tls_data->lnors_spacearr);
1208 }
1209}
1210
1211static void bm_mesh_loops_calc_normals_for_vert_free_fn(const void *__restrict userdata,
1212 void *__restrict chunk)
1213{
1214 const auto *data = static_cast<const BMLoopsCalcNormalsWithCoordsData *>(userdata);
1215 auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(chunk);
1216
1217 if (data->r_lnors_spacearr) {
1218 MEM_delete(tls_data->edge_vectors);
1219 }
1220}
1221
1223 void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict tls)
1224{
1225 BMVert *v = (BMVert *)mp_v;
1226 if (v->e == nullptr) {
1227 return;
1228 }
1229 auto *data = static_cast<BMLoopsCalcNormalsWithCoordsData *>(userdata);
1230 auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(tls->userdata_chunk);
1232 data->vcos,
1233 data->fnos,
1234 data->r_lnos,
1235
1236 data->clnors_data,
1237 data->cd_loop_clnors_offset,
1238 data->do_rebuild,
1239 data->split_angle_cos,
1240 /* Thread local. */
1241 tls_data->lnors_spacearr,
1242 tls_data->edge_vectors,
1243 /* Iterate over. */
1244 v);
1245}
1246
1248 void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict tls)
1249{
1250 BMVert *v = (BMVert *)mp_v;
1251 if (v->e == nullptr) {
1252 return;
1253 }
1254 auto *data = static_cast<BMLoopsCalcNormalsWithCoordsData *>(userdata);
1255 auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(tls->userdata_chunk);
1257 data->vcos,
1258 data->fnos,
1259 data->r_lnos,
1260
1261 data->do_rebuild,
1262 data->split_angle_cos,
1263 /* Thread local. */
1264 tls_data->lnors_spacearr,
1265 tls_data->edge_vectors,
1266 /* Iterate over. */
1267 v);
1268}
1269
1271 const Span<float3> vcos,
1272 const Span<float3> fnos,
1273 MutableSpan<float3> r_lnos,
1274 MLoopNorSpaceArray *r_lnors_spacearr,
1275 const short (*clnors_data)[2],
1276 const int cd_loop_clnors_offset,
1277 const bool do_rebuild,
1278 const float split_angle_cos)
1279{
1280 const bool has_clnors = clnors_data || (cd_loop_clnors_offset != -1);
1281 MLoopNorSpaceArray _lnors_spacearr = {nullptr};
1282
1283 {
1284 char htype = BM_LOOP;
1285 if (!vcos.is_empty()) {
1286 htype |= BM_VERT;
1287 }
1288 if (!fnos.is_empty()) {
1289 htype |= BM_FACE;
1290 }
1291 /* Face/Loop indices are set inline below. */
1293 }
1294
1295 if (!r_lnors_spacearr && has_clnors) {
1296 /* We need to compute lnor spacearr if some custom lnor data are given to us! */
1297 r_lnors_spacearr = &_lnors_spacearr;
1298 }
1299 if (r_lnors_spacearr) {
1300 BKE_lnor_spacearr_init(r_lnors_spacearr, bm->totloop, MLNOR_SPACEARR_BMLOOP_PTR);
1301 }
1302
1303 /* We now know edges that can be smoothed (they are tagged),
1304 * and edges that will be hard (they aren't).
1305 * Now, time to generate the normals.
1306 */
1307
1308 TaskParallelSettings settings;
1310
1311 BMLoopsCalcNormalsWithCoords_TLS tls = {nullptr};
1312
1313 settings.userdata_chunk = &tls;
1314 settings.userdata_chunk_size = sizeof(tls);
1315
1319
1321 data.bm = bm;
1322 data.vcos = vcos;
1323 data.fnos = fnos;
1324 data.r_lnos = r_lnos;
1325 data.r_lnors_spacearr = r_lnors_spacearr;
1326 data.clnors_data = clnors_data;
1327 data.cd_loop_clnors_offset = cd_loop_clnors_offset;
1328 data.do_rebuild = do_rebuild;
1329 data.split_angle_cos = split_angle_cos;
1330
1331 BM_iter_parallel(bm,
1335 &data,
1336 &settings);
1337
1338 if (r_lnors_spacearr) {
1339 if (r_lnors_spacearr == &_lnors_spacearr) {
1340 BKE_lnor_spacearr_free(r_lnors_spacearr);
1341 }
1342 }
1343}
1344
1346 const Span<float3> vcos,
1347 const Span<float3> fnos,
1348 MutableSpan<float3> r_lnos,
1349 MLoopNorSpaceArray *r_lnors_spacearr,
1350 const short (*clnors_data)[2],
1351 const int cd_loop_clnors_offset,
1352 const bool do_rebuild,
1353 const float split_angle_cos)
1354{
1355 if (bm->totloop < BM_THREAD_LIMIT) {
1357 vcos,
1358 fnos,
1359 r_lnos,
1360 r_lnors_spacearr,
1361 clnors_data,
1362 cd_loop_clnors_offset,
1363 do_rebuild,
1364 split_angle_cos);
1365 }
1366 else {
1368 vcos,
1369 fnos,
1370 r_lnos,
1371 r_lnors_spacearr,
1372 clnors_data,
1373 cd_loop_clnors_offset,
1374 do_rebuild,
1375 split_angle_cos);
1376 }
1377}
1378
1379/* This threshold is a bit touchy (usual float precision issue), this value seems OK. */
1380#define LNOR_SPACE_TRIGO_THRESHOLD (1.0f - 1e-4f)
1381
1387 MLoopNorSpaceArray *lnors_spacearr,
1388 const float (*new_lnors)[3])
1389{
1390 BLI_bitmap *done_loops = BLI_BITMAP_NEW(size_t(bm->totloop), __func__);
1391 bool changed = false;
1392
1394
1395 for (int i = 0; i < bm->totloop; i++) {
1396 if (!lnors_spacearr->lspacearr[i]) {
1397 /* This should not happen in theory, but in some rare case (probably ugly geometry)
1398 * we can get some nullptr loopspacearr at this point. :/
1399 * Maybe we should set those loops' edges as sharp?
1400 */
1401 BLI_BITMAP_ENABLE(done_loops, i);
1402 if (G.debug & G_DEBUG) {
1403 printf("WARNING! Getting invalid nullptr loop space for loop %d!\n", i);
1404 }
1405 continue;
1406 }
1407
1408 if (!BLI_BITMAP_TEST(done_loops, i)) {
1409 /* Notes:
1410 * * In case of mono-loop smooth fan, we have nothing to do.
1411 * * Loops in this linklist are ordered (in reversed order compared to how they were
1412 * discovered by bke::mesh::normals_calc_corners(), but this is not a problem).
1413 * Which means if we find a mismatching clnor,
1414 * we know all remaining loops will have to be in a new, different smooth fan/lnor space.
1415 * * In smooth fan case, we compare each clnor against a ref one,
1416 * to avoid small differences adding up into a real big one in the end!
1417 */
1418 if (lnors_spacearr->lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
1419 BLI_BITMAP_ENABLE(done_loops, i);
1420 continue;
1421 }
1422
1423 LinkNode *loops = lnors_spacearr->lspacearr[i]->loops;
1424 BMLoop *prev_ml = nullptr;
1425 const float *org_nor = nullptr;
1426
1427 while (loops) {
1428 BMLoop *ml = static_cast<BMLoop *>(loops->link);
1429 const int lidx = BM_elem_index_get(ml);
1430 const float *nor = new_lnors[lidx];
1431
1432 if (!org_nor) {
1433 org_nor = nor;
1434 }
1435 else if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
1436 /* Current normal differs too much from org one, we have to tag the edge between
1437 * previous loop's face and current's one as sharp.
1438 * We know those two loops do not point to the same edge,
1439 * since we do not allow reversed winding in a same smooth fan.
1440 */
1441 BMEdge *e = (prev_ml->e == ml->prev->e) ? prev_ml->e : ml->e;
1442
1444 changed = true;
1445
1446 org_nor = nor;
1447 }
1448
1449 prev_ml = ml;
1450 loops = loops->next;
1451 BLI_BITMAP_ENABLE(done_loops, lidx);
1452 }
1453
1454 /* We also have to check between last and first loops,
1455 * otherwise we may miss some sharp edges here!
1456 * This is just a simplified version of above while loop.
1457 * See #45984. */
1458 loops = lnors_spacearr->lspacearr[i]->loops;
1459 if (loops && org_nor) {
1460 BMLoop *ml = static_cast<BMLoop *>(loops->link);
1461 const int lidx = BM_elem_index_get(ml);
1462 const float *nor = new_lnors[lidx];
1463
1464 if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
1465 BMEdge *e = (prev_ml->e == ml->prev->e) ? prev_ml->e : ml->e;
1466
1468 changed = true;
1469 }
1470 }
1471 }
1472 }
1473
1474 MEM_freeN(done_loops);
1475 return changed;
1476}
1477
1483 MLoopNorSpaceArray *lnors_spacearr,
1484 short (*r_clnors_data)[2],
1485 const int cd_loop_clnors_offset,
1486 const float (*new_lnors)[3])
1487{
1488 BLI_bitmap *done_loops = BLI_BITMAP_NEW(size_t(bm->totloop), __func__);
1489
1490 BLI_SMALLSTACK_DECLARE(clnors_data, short *);
1491
1493
1494 for (int i = 0; i < bm->totloop; i++) {
1495 if (!lnors_spacearr->lspacearr[i]) {
1496 BLI_BITMAP_ENABLE(done_loops, i);
1497 if (G.debug & G_DEBUG) {
1498 printf("WARNING! Still getting invalid nullptr loop space in second loop for loop %d!\n",
1499 i);
1500 }
1501 continue;
1502 }
1503
1504 if (!BLI_BITMAP_TEST(done_loops, i)) {
1505 /* Note we accumulate and average all custom normals in current smooth fan,
1506 * to avoid getting different clnors data (tiny differences in plain custom normals can
1507 * give rather huge differences in computed 2D factors).
1508 */
1509 LinkNode *loops = lnors_spacearr->lspacearr[i]->loops;
1510
1511 if (lnors_spacearr->lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
1512 BMLoop *ml = (BMLoop *)loops;
1513 const int lidx = BM_elem_index_get(ml);
1514
1515 BLI_assert(lidx == i);
1516
1517 const float *nor = new_lnors[lidx];
1518 short *clnor = static_cast<short *>(r_clnors_data ?
1519 &r_clnors_data[lidx] :
1520 BM_ELEM_CD_GET_VOID_P(ml, cd_loop_clnors_offset));
1521
1522 BKE_lnor_space_custom_normal_to_data(lnors_spacearr->lspacearr[i], nor, clnor);
1523 BLI_BITMAP_ENABLE(done_loops, i);
1524 }
1525 else {
1526 int avg_nor_count = 0;
1527 float avg_nor[3];
1528 short clnor_data_tmp[2], *clnor_data;
1529
1530 zero_v3(avg_nor);
1531
1532 while (loops) {
1533 BMLoop *ml = static_cast<BMLoop *>(loops->link);
1534 const int lidx = BM_elem_index_get(ml);
1535 const float *nor = new_lnors[lidx];
1536 short *clnor = static_cast<short *>(
1537 r_clnors_data ? &r_clnors_data[lidx] :
1538 BM_ELEM_CD_GET_VOID_P(ml, cd_loop_clnors_offset));
1539
1540 avg_nor_count++;
1541 add_v3_v3(avg_nor, nor);
1542 BLI_SMALLSTACK_PUSH(clnors_data, clnor);
1543
1544 loops = loops->next;
1545 BLI_BITMAP_ENABLE(done_loops, lidx);
1546 }
1547
1548 mul_v3_fl(avg_nor, 1.0f / float(avg_nor_count));
1550 lnors_spacearr->lspacearr[i], avg_nor, clnor_data_tmp);
1551
1552 while ((clnor_data = static_cast<short *>(BLI_SMALLSTACK_POP(clnors_data)))) {
1553 clnor_data[0] = clnor_data_tmp[0];
1554 clnor_data[1] = clnor_data_tmp[1];
1555 }
1556 }
1557 }
1558 }
1559
1560 MEM_freeN(done_loops);
1561}
1562
1571 const Span<float3> vcos,
1572 const Span<float3> fnos,
1573 MLoopNorSpaceArray *r_lnors_spacearr,
1574 short (*r_clnors_data)[2],
1575 const int cd_loop_clnors_offset,
1576 float (*new_lnors)[3],
1577 const int cd_new_lnors_offset,
1578 bool do_split_fans)
1579{
1580 BMFace *f;
1581 BMLoop *l;
1582 BMIter liter, fiter;
1583 Array<float3> cur_lnors(bm->totloop);
1584
1585 BKE_lnor_spacearr_clear(r_lnors_spacearr);
1586
1587 /* Tag smooth edges and set lnos from vnos when they might be completely smooth...
1588 * When using custom loop normals, disable the angle feature! */
1589 bm_mesh_edges_sharp_tag(bm, fnos, -1.0f, false);
1590
1591 /* Finish computing lnos by accumulating face normals
1592 * in each fan of faces defined by sharp edges. */
1594 vcos,
1595 fnos,
1596 cur_lnors,
1597 r_lnors_spacearr,
1598 r_clnors_data,
1599 cd_loop_clnors_offset,
1600 false,
1602
1603 /* Extract new normals from the data layer if necessary. */
1604 float (*custom_lnors)[3] = new_lnors;
1605
1606 if (new_lnors == nullptr) {
1607 custom_lnors = static_cast<float (*)[3]>(
1608 MEM_mallocN(sizeof(*new_lnors) * bm->totloop, __func__));
1609
1610 BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
1611 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
1612 const float *normal = static_cast<float *>(BM_ELEM_CD_GET_VOID_P(l, cd_new_lnors_offset));
1613 copy_v3_v3(custom_lnors[BM_elem_index_get(l)], normal);
1614 }
1615 }
1616 }
1617
1618 /* Validate the new normals. */
1619 for (int i = 0; i < bm->totloop; i++) {
1620 if (is_zero_v3(custom_lnors[i])) {
1621 copy_v3_v3(custom_lnors[i], cur_lnors[i]);
1622 }
1623 else {
1624 normalize_v3(custom_lnors[i]);
1625 }
1626 }
1627
1628 /* Now, check each current smooth fan (one lnor space per smooth fan!),
1629 * and if all its matching custom lnors are not equal, add sharp edges as needed. */
1630 if (do_split_fans && bm_mesh_loops_split_lnor_fans(bm, r_lnors_spacearr, custom_lnors)) {
1631 /* If any sharp edges were added, run bm_mesh_loops_calc_normals() again to get lnor
1632 * spacearr/smooth fans matching the given custom lnors. */
1633 BKE_lnor_spacearr_clear(r_lnors_spacearr);
1634
1636 vcos,
1637 fnos,
1638 cur_lnors,
1639 r_lnors_spacearr,
1640 r_clnors_data,
1641 cd_loop_clnors_offset,
1642 false,
1644 }
1645
1646 /* And we just have to convert plain object-space custom normals to our
1647 * lnor space-encoded ones. */
1649 bm, r_lnors_spacearr, r_clnors_data, cd_loop_clnors_offset, custom_lnors);
1650
1651 if (custom_lnors != new_lnors) {
1652 MEM_freeN(custom_lnors);
1653 }
1654}
1655
1657 const Span<float3> vnos,
1658 const Span<float3> fnos,
1659 MutableSpan<float3> r_lnos)
1660{
1661 BMIter fiter;
1662 BMFace *f_curr;
1663
1664 {
1665 char htype = BM_LOOP;
1666 if (!vnos.is_empty()) {
1667 htype |= BM_VERT;
1668 }
1669 if (!fnos.is_empty()) {
1670 htype |= BM_FACE;
1671 }
1673 }
1674
1675 BM_ITER_MESH (f_curr, &fiter, bm, BM_FACES_OF_MESH) {
1676 BMLoop *l_curr, *l_first;
1677 const bool is_face_flat = !BM_elem_flag_test(f_curr, BM_ELEM_SMOOTH);
1678
1679 l_curr = l_first = BM_FACE_FIRST_LOOP(f_curr);
1680 do {
1681 const float3 &no = is_face_flat ? (!fnos.is_empty() ? fnos[BM_elem_index_get(f_curr)] :
1682 float3(f_curr->no)) :
1683 (!vnos.is_empty() ? vnos[BM_elem_index_get(l_curr->v)] :
1684 float3(l_curr->v->no));
1685 copy_v3_v3(r_lnos[BM_elem_index_get(l_curr)], no);
1686
1687 } while ((l_curr = l_curr->next) != l_first);
1688 }
1689}
1690
1692 const Span<float3> vcos,
1693 const Span<float3> vnos,
1694 const Span<float3> fnos,
1695 const bool use_split_normals,
1696 MutableSpan<float3> r_lnos,
1697 MLoopNorSpaceArray *r_lnors_spacearr,
1698 short (*clnors_data)[2],
1699 const int cd_loop_clnors_offset,
1700 const bool do_rebuild)
1701{
1702
1703 if (use_split_normals) {
1705 vcos,
1706 fnos,
1707 r_lnos,
1708 r_lnors_spacearr,
1709 clnors_data,
1710 cd_loop_clnors_offset,
1711 do_rebuild,
1712 -1.0f);
1713 }
1714 else {
1715 BLI_assert(!r_lnors_spacearr);
1716 bm_mesh_loops_calc_normals_no_autosmooth(bm, vnos, fnos, r_lnos);
1717 }
1718}
1719
1721
1722/* -------------------------------------------------------------------- */
1725
1727{
1728 BLI_assert(bm->lnor_spacearr != nullptr);
1729
1730 BM_data_layer_ensure_named(bm, &bm->ldata, CD_PROP_INT16_2D, "custom_normal");
1731
1732 int cd_loop_clnors_offset = CustomData_get_offset_named(
1733 &bm->ldata, CD_PROP_INT16_2D, "custom_normal");
1734
1736 bm, {}, {}, {}, true, r_lnors, bm->lnor_spacearr, nullptr, cd_loop_clnors_offset, false);
1737 bm->spacearr_dirty &= ~(BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL);
1738}
1739
1740#define CLEAR_SPACEARRAY_THRESHOLD(x) ((x) / 2)
1741
1742void BM_lnorspace_invalidate(BMesh *bm, const bool do_invalidate_all)
1743{
1744 if (bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL) {
1745 return;
1746 }
1747 if (do_invalidate_all || bm->totvertsel > CLEAR_SPACEARRAY_THRESHOLD(bm->totvert)) {
1748 bm->spacearr_dirty |= BM_SPACEARR_DIRTY_ALL;
1749 return;
1750 }
1751 if (bm->lnor_spacearr == nullptr) {
1752 bm->spacearr_dirty |= BM_SPACEARR_DIRTY_ALL;
1753 return;
1754 }
1755
1756 BMVert *v;
1757 BMLoop *l;
1758 BMIter viter, liter;
1759 /* NOTE: we could use temp tag of BMItem for that,
1760 * but probably better not use it in such a low-level func?
1761 * --mont29 */
1762 BLI_bitmap *done_verts = BLI_BITMAP_NEW(bm->totvert, __func__);
1763
1765
1766 /* When we affect a given vertex, we may affect following smooth fans:
1767 * - all smooth fans of said vertex;
1768 * - all smooth fans of all immediate loop-neighbors vertices;
1769 * This can be simplified as 'all loops of selected vertices and their immediate neighbors'
1770 * need to be tagged for update.
1771 */
1772 BM_ITER_MESH (v, &viter, bm, BM_VERTS_OF_MESH) {
1774 BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
1776
1777 /* Note that we only handle unselected neighbor vertices here, main loop will take care of
1778 * selected ones. */
1779 if (!BM_elem_flag_test(l->prev->v, BM_ELEM_SELECT) &&
1780 !BLI_BITMAP_TEST(done_verts, BM_elem_index_get(l->prev->v)))
1781 {
1782
1783 BMLoop *l_prev;
1784 BMIter liter_prev;
1785 BM_ITER_ELEM (l_prev, &liter_prev, l->prev->v, BM_LOOPS_OF_VERT) {
1787 }
1788 BLI_BITMAP_ENABLE(done_verts, BM_elem_index_get(l_prev->v));
1789 }
1790
1791 if (!BM_elem_flag_test(l->next->v, BM_ELEM_SELECT) &&
1792 !BLI_BITMAP_TEST(done_verts, BM_elem_index_get(l->next->v)))
1793 {
1794
1795 BMLoop *l_next;
1796 BMIter liter_next;
1797 BM_ITER_ELEM (l_next, &liter_next, l->next->v, BM_LOOPS_OF_VERT) {
1799 }
1800 BLI_BITMAP_ENABLE(done_verts, BM_elem_index_get(l_next->v));
1801 }
1802 }
1803
1805 }
1806 }
1807
1808 MEM_freeN(done_verts);
1809 bm->spacearr_dirty |= BM_SPACEARR_DIRTY;
1810}
1811
1812void BM_lnorspace_rebuild(BMesh *bm, bool preserve_clnor)
1813{
1814 BLI_assert(bm->lnor_spacearr != nullptr);
1815
1816 if (!(bm->spacearr_dirty & (BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL))) {
1817 return;
1818 }
1819 BMFace *f;
1820 BMLoop *l;
1821 BMIter fiter, liter;
1822
1823 Array<float3> r_lnors(bm->totloop, float3(0));
1824 Array<float3> oldnors(preserve_clnor ? bm->totloop : 0, float3(0));
1825
1826 int cd_loop_clnors_offset = CustomData_get_offset_named(
1827 &bm->ldata, CD_PROP_INT16_2D, "custom_normal");
1828
1830
1831 if (preserve_clnor) {
1832 BLI_assert(bm->lnor_spacearr->lspacearr != nullptr);
1833
1834 BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
1835 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
1837 bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL)
1838 {
1839 short (*clnor)[2] = static_cast<short (*)[2]>(
1840 BM_ELEM_CD_GET_VOID_P(l, cd_loop_clnors_offset));
1841 int l_index = BM_elem_index_get(l);
1842
1844 bm->lnor_spacearr->lspacearr[l_index], *clnor, oldnors[l_index]);
1845 }
1846 }
1847 }
1848 }
1849
1850 if (bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL) {
1851 BKE_lnor_spacearr_clear(bm->lnor_spacearr);
1852 }
1854 bm, {}, {}, {}, true, r_lnors, bm->lnor_spacearr, nullptr, cd_loop_clnors_offset, true);
1855
1856 BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
1857 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
1859 bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL)
1860 {
1861 if (preserve_clnor) {
1862 short (*clnor)[2] = static_cast<short (*)[2]>(
1863 BM_ELEM_CD_GET_VOID_P(l, cd_loop_clnors_offset));
1864 int l_index = BM_elem_index_get(l);
1866 bm->lnor_spacearr->lspacearr[l_index], oldnors[l_index], *clnor);
1867 }
1869 }
1870 }
1871 }
1872
1873 bm->spacearr_dirty &= ~(BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL);
1874
1875#ifndef NDEBUG
1877#endif
1878}
1879
1886{
1887 /* Zero values tell the normals calculation code to use the automatic normals (rather than any
1888 * custom normal vector). */
1889 Array<float3> lnors(bm->totloop, float3(0));
1890 const int vert_free_offset = CustomData_get_offset_named(
1891 &bm->vdata, CD_PROP_FLOAT3, "custom_normal");
1892 const int edge_free_offset = CustomData_get_offset_named(
1893 &bm->edata, CD_PROP_FLOAT3, "custom_normal");
1894 const int face_free_offset = CustomData_get_offset_named(
1895 &bm->pdata, CD_PROP_FLOAT3, "custom_normal");
1896 const int loop_free_offset = CustomData_get_offset_named(
1897 &bm->ldata, CD_PROP_FLOAT3, "custom_normal");
1898 if (vert_free_offset != -1) {
1899 int loop_index = 0;
1900 BMFace *f;
1901 BMLoop *l;
1902 BMIter fiter, liter;
1903 BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
1904 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
1905 lnors[loop_index++] = float3(BM_ELEM_CD_GET_FLOAT_P(l->v, vert_free_offset));
1906 }
1907 }
1908 BM_data_layer_free_named(bm, &bm->vdata, "custom_normal");
1909 }
1910 else if (edge_free_offset != -1) {
1911 BM_data_layer_free_named(bm, &bm->edata, "custom_normal");
1912 }
1913 else if (face_free_offset != -1) {
1914 int loop_index = 0;
1915 BMFace *f;
1916 BMLoop *l;
1917 BMIter fiter, liter;
1918 BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
1919 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
1920 lnors[loop_index++] = float3(BM_ELEM_CD_GET_FLOAT_P(f, face_free_offset));
1921 }
1922 }
1923 BM_data_layer_free_named(bm, &bm->pdata, "custom_normal");
1924 }
1925 else if (loop_free_offset != -1) {
1926 int loop_index = 0;
1927 BMFace *f;
1928 BMLoop *l;
1929 BMIter fiter, liter;
1930 BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
1931 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
1932 lnors[loop_index++] = float3(BM_ELEM_CD_GET_FLOAT_P(l, loop_free_offset));
1933 }
1934 }
1935 BM_data_layer_free_named(bm, &bm->ldata, "custom_normal");
1936 }
1937 BM_lnorspacearr_store(bm, lnors);
1938}
1939
1941{
1942 if (bm->lnor_spacearr == nullptr) {
1943 bm->lnor_spacearr = MEM_callocN<MLoopNorSpaceArray>(__func__);
1944 }
1945 if (bm->lnor_spacearr->lspacearr == nullptr) {
1947 }
1948 else if (bm->spacearr_dirty & (BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL)) {
1949 BM_lnorspace_rebuild(bm, false);
1950 }
1951}
1952
1954
1955/* -------------------------------------------------------------------- */
1960
1966#ifndef NDEBUG
1968{
1969 bm->spacearr_dirty |= BM_SPACEARR_DIRTY_ALL;
1970 bool clear = true;
1971
1973 temp->lspacearr = nullptr;
1974
1976
1977 int cd_loop_clnors_offset = CustomData_get_offset_named(
1978 &bm->ldata, CD_PROP_INT16_2D, "custom_normal");
1979 Array<float3> lnors(bm->totloop, float3(0));
1981 bm, {}, {}, {}, true, lnors, temp, nullptr, cd_loop_clnors_offset, true);
1982
1983 for (int i = 0; i < bm->totloop; i++) {
1984 int j = 0;
1985 j += compare_ff(
1986 temp->lspacearr[i]->ref_alpha, bm->lnor_spacearr->lspacearr[i]->ref_alpha, 1e-4f);
1987 j += compare_ff(
1988 temp->lspacearr[i]->ref_beta, bm->lnor_spacearr->lspacearr[i]->ref_beta, 1e-4f);
1989 j += compare_v3v3(
1990 temp->lspacearr[i]->vec_lnor, bm->lnor_spacearr->lspacearr[i]->vec_lnor, 1e-4f);
1991 j += compare_v3v3(
1992 temp->lspacearr[i]->vec_ortho, bm->lnor_spacearr->lspacearr[i]->vec_ortho, 1e-4f);
1993 j += compare_v3v3(
1994 temp->lspacearr[i]->vec_ref, bm->lnor_spacearr->lspacearr[i]->vec_ref, 1e-4f);
1995
1996 if (j != 5) {
1997 clear = false;
1998 break;
1999 }
2000 }
2002 MEM_freeN(temp);
2004
2005 bm->spacearr_dirty &= ~BM_SPACEARR_DIRTY_ALL;
2006}
2007#endif
2008
2010 BLI_bitmap *loops,
2011 MLoopNorSpaceArray *lnor_spacearr,
2012 int *totloopsel,
2013 const bool do_all_loops_of_vert)
2014{
2015 if (l != nullptr) {
2016 const int l_idx = BM_elem_index_get(l);
2017
2018 if (!BLI_BITMAP_TEST(loops, l_idx)) {
2019 /* If vert and face selected share a loop, mark it for editing. */
2020 BLI_BITMAP_ENABLE(loops, l_idx);
2021 (*totloopsel)++;
2022
2023 if (do_all_loops_of_vert) {
2024 /* If required, also mark all loops shared by that vertex.
2025 * This is needed when loop spaces may change
2026 * (i.e. when some faces or edges might change of smooth/sharp status). */
2027 BMIter liter;
2028 BMLoop *lfan;
2029 BM_ITER_ELEM (lfan, &liter, l->v, BM_LOOPS_OF_VERT) {
2030 const int lfan_idx = BM_elem_index_get(lfan);
2031 if (!BLI_BITMAP_TEST(loops, lfan_idx)) {
2032 BLI_BITMAP_ENABLE(loops, lfan_idx);
2033 (*totloopsel)++;
2034 }
2035 }
2036 }
2037 else {
2038 /* Mark all loops in same loop normal space (aka smooth fan). */
2039 if ((lnor_spacearr->lspacearr[l_idx]->flags & MLNOR_SPACE_IS_SINGLE) == 0) {
2040 for (LinkNode *node = lnor_spacearr->lspacearr[l_idx]->loops; node; node = node->next) {
2041 const int lfan_idx = BM_elem_index_get((BMLoop *)node->link);
2042 if (!BLI_BITMAP_TEST(loops, lfan_idx)) {
2043 BLI_BITMAP_ENABLE(loops, lfan_idx);
2044 (*totloopsel)++;
2045 }
2046 }
2047 }
2048 }
2049 }
2050 }
2051}
2052
2054 BLI_bitmap *loops,
2055 const bool do_all_loops_of_vert,
2056 int *totloopsel_p)
2057{
2058 /* Select all loops of selected verts. */
2059 BMLoop *l;
2060 BMVert *v;
2061 BMIter liter, viter;
2062 BM_ITER_MESH (v, &viter, bm, BM_VERTS_OF_MESH) {
2064 BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
2066 l, loops, bm->lnor_spacearr, totloopsel_p, do_all_loops_of_vert);
2067 }
2068 }
2069 }
2070}
2071
2073 BLI_bitmap *loops,
2074 const bool do_all_loops_of_vert,
2075 int *totloopsel_p)
2076{
2077 /* Only select all loops of selected edges. */
2078 BMLoop *l;
2079 BMEdge *e;
2080 BMIter liter, eiter;
2081 BM_ITER_MESH (e, &eiter, bm, BM_EDGES_OF_MESH) {
2083 BM_ITER_ELEM (l, &liter, e, BM_LOOPS_OF_EDGE) {
2085 l, loops, bm->lnor_spacearr, totloopsel_p, do_all_loops_of_vert);
2086 /* Loops actually 'have' two edges, or said otherwise, a selected edge actually selects
2087 * *two* loops in each of its faces. We have to find the other one too. */
2088 if (BM_vert_in_edge(e, l->next->v)) {
2090 l->next, loops, bm->lnor_spacearr, totloopsel_p, do_all_loops_of_vert);
2091 }
2092 else {
2093 BLI_assert(BM_vert_in_edge(e, l->prev->v));
2095 l->prev, loops, bm->lnor_spacearr, totloopsel_p, do_all_loops_of_vert);
2096 }
2097 }
2098 }
2099 }
2100}
2101
2103 BLI_bitmap *loops,
2104 const bool do_all_loops_of_vert,
2105 int *totloopsel_p)
2106{
2107 /* Only select all loops of selected faces. */
2108 BMLoop *l;
2109 BMFace *f;
2110 BMIter liter, fiter;
2111 BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
2113 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
2115 l, loops, bm->lnor_spacearr, totloopsel_p, do_all_loops_of_vert);
2116 }
2117 }
2118 }
2119}
2120
2121static int bm_loop_normal_mark_verts(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert)
2122{
2124 BLI_assert(bm->lnor_spacearr != nullptr);
2125 BLI_assert(bm->lnor_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR);
2126 int totloopsel = 0;
2127 bm_loop_normal_mark_verts_impl(bm, loops, do_all_loops_of_vert, &totloopsel);
2128 return totloopsel;
2129}
2130
2131static int bm_loop_normal_mark_edges(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert)
2132{
2134 BLI_assert(bm->lnor_spacearr != nullptr);
2135 BLI_assert(bm->lnor_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR);
2136 int totloopsel = 0;
2137 bm_loop_normal_mark_edges_impl(bm, loops, do_all_loops_of_vert, &totloopsel);
2138 return totloopsel;
2139}
2140
2141static int bm_loop_normal_mark_faces(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert)
2142{
2144 BLI_assert(bm->lnor_spacearr != nullptr);
2145 BLI_assert(bm->lnor_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR);
2146 int totloopsel = 0;
2147 bm_loop_normal_mark_faces_impl(bm, loops, do_all_loops_of_vert, &totloopsel);
2148 return totloopsel;
2149}
2150
2151/* Mark the individual clnors to be edited, if multiple selection methods are used. */
2152static int bm_loop_normal_mark_indiv(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert)
2153{
2154 int totloopsel = 0;
2155
2156 const bool sel_verts = (bm->selectmode & SCE_SELECT_VERTEX) != 0;
2157 const bool sel_edges = (bm->selectmode & SCE_SELECT_EDGE) != 0;
2158 const bool sel_faces = (bm->selectmode & SCE_SELECT_FACE) != 0;
2159 const bool use_sel_face_history = sel_faces && (sel_edges || sel_verts);
2160
2162
2163 BLI_assert(bm->lnor_spacearr != nullptr);
2164 BLI_assert(bm->lnor_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR);
2165
2166 if (use_sel_face_history) {
2167 /* Using face history allows to select a single loop from a single face...
2168 * Note that this is O(n^2) piece of code,
2169 * but it is not designed to be used with huge selection sets,
2170 * rather with only a few items selected at most. */
2171 /* Goes from last selected to the first selected element. */
2172 LISTBASE_FOREACH_BACKWARD (BMEditSelection *, ese, &bm->selected) {
2173 if (ese->htype == BM_FACE) {
2174 /* If current face is selected,
2175 * then any verts to be edited must have been selected before it. */
2176 for (BMEditSelection *ese_prev = ese->prev; ese_prev; ese_prev = ese_prev->prev) {
2177 if (ese_prev->htype == BM_VERT) {
2179 BM_face_vert_share_loop((BMFace *)ese->ele, (BMVert *)ese_prev->ele),
2180 loops,
2181 bm->lnor_spacearr,
2182 &totloopsel,
2183 do_all_loops_of_vert);
2184 }
2185 else if (ese_prev->htype == BM_EDGE) {
2186 BMEdge *e = (BMEdge *)ese_prev->ele;
2188 loops,
2189 bm->lnor_spacearr,
2190 &totloopsel,
2191 do_all_loops_of_vert);
2192
2194 loops,
2195 bm->lnor_spacearr,
2196 &totloopsel,
2197 do_all_loops_of_vert);
2198 }
2199 }
2200 }
2201 }
2202 }
2203
2204 /* If the selection history could not be used, fall back to regular selection. */
2205 if (totloopsel == 0) {
2206 if (sel_faces) {
2207 bm_loop_normal_mark_faces_impl(bm, loops, do_all_loops_of_vert, &totloopsel);
2208 }
2209 if (sel_edges) {
2210 bm_loop_normal_mark_edges_impl(bm, loops, do_all_loops_of_vert, &totloopsel);
2211 }
2212 if (sel_verts) {
2213 bm_loop_normal_mark_verts_impl(bm, loops, do_all_loops_of_vert, &totloopsel);
2214 }
2215 }
2216
2217 return totloopsel;
2218}
2219
2221 BMesh *bm, BMLoopNorEditData *lnor_ed, BMVert *v, BMLoop *l, const int offset)
2222{
2223 BLI_assert(bm->lnor_spacearr != nullptr);
2224 BLI_assert(bm->lnor_spacearr->lspacearr != nullptr);
2225
2226 const int l_index = BM_elem_index_get(l);
2227 short *clnors_data = static_cast<short *>(BM_ELEM_CD_GET_VOID_P(l, offset));
2228
2229 lnor_ed->loop_index = l_index;
2230 lnor_ed->loop = l;
2231
2232 float custom_normal[3];
2234 bm->lnor_spacearr->lspacearr[l_index], clnors_data, custom_normal);
2235
2236 lnor_ed->clnors_data = clnors_data;
2237 copy_v3_v3(lnor_ed->nloc, custom_normal);
2238 copy_v3_v3(lnor_ed->niloc, custom_normal);
2239
2240 lnor_ed->loc = v->co;
2241}
2242
2244 BMesh *bm, const bool do_all_loops_of_vert, const char htype_override)
2245{
2246 BMLoop *l;
2247 BMVert *v;
2248 BMIter liter, viter;
2249
2250 int totloopsel = 0;
2251
2252 BLI_assert(bm->spacearr_dirty == 0);
2253
2256 __func__);
2257
2258 BM_data_layer_ensure_named(bm, &bm->ldata, CD_PROP_INT16_2D, "custom_normal");
2259 const int cd_custom_normal_offset = CustomData_get_offset_named(
2260 &bm->ldata, CD_PROP_INT16_2D, "custom_normal");
2261
2263
2264 BLI_bitmap *loops = BLI_BITMAP_NEW(bm->totloop, __func__);
2265
2266 /* This function define loop normals to edit, based on selection modes and history. */
2267 if (htype_override != 0) {
2268 BLI_assert(ELEM(htype_override, BM_VERT, BM_EDGE, BM_FACE));
2269 switch (htype_override) {
2270 case BM_VERT: {
2271 totloopsel = bm_loop_normal_mark_verts(bm, loops, do_all_loops_of_vert);
2272 break;
2273 }
2274 case BM_EDGE: {
2275 totloopsel = bm_loop_normal_mark_edges(bm, loops, do_all_loops_of_vert);
2276 break;
2277 }
2278 case BM_FACE: {
2279 totloopsel = bm_loop_normal_mark_faces(bm, loops, do_all_loops_of_vert);
2280 break;
2281 }
2282 }
2283 }
2284 else {
2285 totloopsel = bm_loop_normal_mark_indiv(bm, loops, do_all_loops_of_vert);
2286 }
2287
2288 if (totloopsel) {
2289 BMLoopNorEditData *lnor_ed = lnors_ed_arr->lnor_editdata =
2290 MEM_malloc_arrayN<BMLoopNorEditData>(totloopsel, __func__);
2291
2292 BM_ITER_MESH (v, &viter, bm, BM_VERTS_OF_MESH) {
2293 BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
2294 if (BLI_BITMAP_TEST(loops, BM_elem_index_get(l))) {
2295 loop_normal_editdata_init(bm, lnor_ed, v, l, cd_custom_normal_offset);
2296 lnors_ed_arr->lidx_to_lnor_editdata[BM_elem_index_get(l)] = lnor_ed;
2297 lnor_ed++;
2298 }
2299 }
2300 }
2301 lnors_ed_arr->totloop = totloopsel;
2302 }
2303
2304 MEM_freeN(loops);
2305 lnors_ed_arr->cd_custom_normal_offset = cd_custom_normal_offset;
2306 return lnors_ed_arr;
2307}
2308
2310 const bool do_all_loops_of_vert)
2311{
2312 return BM_loop_normal_editdata_array_init_with_htype(bm, do_all_loops_of_vert, 0);
2313}
2314
2316{
2317 MEM_SAFE_FREE(lnors_ed_arr->lnor_editdata);
2318 MEM_SAFE_FREE(lnors_ed_arr->lidx_to_lnor_editdata);
2319 MEM_freeN(lnors_ed_arr);
2320}
2321
2323
2324/* -------------------------------------------------------------------- */
2327
2329{
2330 BMFace *f;
2331 BMLoop *l;
2332 BMIter liter, fiter;
2333
2334 if (!CustomData_has_layer_named(&bm->ldata, CD_PROP_INT16_2D, "custom_normal")) {
2335 return false;
2336 }
2337
2339
2340 /* Create a loop normal layer. */
2341 if (!CustomData_has_layer(&bm->ldata, CD_NORMAL)) {
2342 BM_data_layer_add(bm, &bm->ldata, CD_NORMAL);
2343
2345 }
2346
2347 const int cd_custom_normal_offset = CustomData_get_offset_named(
2348 &bm->ldata, CD_PROP_INT16_2D, "custom_normal");
2349 const int cd_normal_offset = CustomData_get_offset(&bm->ldata, CD_NORMAL);
2350
2351 int l_index = 0;
2352 BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
2353 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
2354 const short *clnors_data = static_cast<const short *>(
2355 BM_ELEM_CD_GET_VOID_P(l, cd_custom_normal_offset));
2356 float *normal = static_cast<float *>(BM_ELEM_CD_GET_VOID_P(l, cd_normal_offset));
2357
2359 bm->lnor_spacearr->lspacearr[l_index], clnors_data, normal);
2360 l_index += 1;
2361 }
2362 }
2363
2364 return true;
2365}
2366
2368{
2369 const int cd_custom_normal_offset = CustomData_get_offset_named(
2370 &bm->ldata, CD_PROP_INT16_2D, "custom_normal");
2371 if (cd_custom_normal_offset == -1) {
2372 return;
2373 }
2374 const int cd_normal_offset = CustomData_get_offset(&bm->ldata, CD_NORMAL);
2375 if (cd_normal_offset == -1) {
2376 return;
2377 }
2378
2379 if (bm->lnor_spacearr == nullptr) {
2380 bm->lnor_spacearr = MEM_callocN<MLoopNorSpaceArray>(__func__);
2381 }
2382
2384 {},
2385 {},
2386 bm->lnor_spacearr,
2387 nullptr,
2388 cd_custom_normal_offset,
2389 nullptr,
2390 cd_normal_offset,
2391 add_sharp_edges);
2392
2393 bm->spacearr_dirty &= ~(BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL);
2394}
2395
CustomData interface, see also DNA_customdata_types.h.
int CustomData_get_offset(const CustomData *data, eCustomDataType type)
void CustomData_set_layer_flag(CustomData *data, eCustomDataType type, int flag)
bool CustomData_has_layer_named(const CustomData *data, eCustomDataType type, blender::StringRef name)
int CustomData_get_offset_named(const CustomData *data, eCustomDataType type, blender::StringRef name)
bool CustomData_has_layer(const CustomData *data, eCustomDataType type)
@ G_DEBUG
void BKE_lnor_space_custom_data_to_normal(const MLoopNorSpace *lnor_space, const short clnor_data[2], float r_custom_lnor[3])
MLoopNorSpace * BKE_lnor_space_create(MLoopNorSpaceArray *lnors_spacearr)
@ MLNOR_SPACE_IS_SINGLE
Definition BKE_mesh.h:278
void BKE_lnor_space_custom_normal_to_data(const MLoopNorSpace *lnor_space, const float custom_lnor[3], short r_clnor_data[2])
@ MLNOR_SPACEARR_BMLOOP_PTR
Definition BKE_mesh.h:300
void BKE_lnor_space_add_loop(MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpace *lnor_space, int corner, void *bm_loop, bool is_single)
void BKE_lnor_spacearr_clear(MLoopNorSpaceArray *lnors_spacearr)
void BKE_lnor_spacearr_init(MLoopNorSpaceArray *lnors_spacearr, int numLoops, char data_type)
void BKE_lnor_space_define(MLoopNorSpace *lnor_space, const float lnor[3], const float vec_ref[3], const float vec_other[3], blender::Span< blender::float3 > edge_vectors)
void BKE_lnor_spacearr_free(MLoopNorSpaceArray *lnors_spacearr)
void BKE_lnor_spacearr_tls_join(MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpaceArray *lnors_spacearr_tls)
void BKE_lnor_spacearr_tls_init(MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpaceArray *lnors_spacearr_tls)
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_BITMAP_NEW(_num, _alloc_string)
Definition BLI_bitmap.h:37
#define BLI_BITMAP_TEST(_bitmap, _index)
Definition BLI_bitmap.h:61
#define BLI_BITMAP_ENABLE(_bitmap, _index)
Definition BLI_bitmap.h:78
unsigned int BLI_bitmap
Definition BLI_bitmap.h:13
#define BLI_INLINE
#define LISTBASE_FOREACH_BACKWARD(type, var, list)
MINLINE int compare_ff(float a, float b, float max_diff)
#define M_PI
MINLINE void madd_v3_v3fl(float r[3], const float a[3], float f)
MINLINE void sub_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void mul_v3_fl(float r[3], float f)
MINLINE void copy_v3_v3(float r[3], const float a[3])
MINLINE float dot_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
MINLINE float normalize_v3_v3(float r[3], const float a[3])
MINLINE bool compare_v3v3(const float v1[3], const float v2[3], float limit) ATTR_WARN_UNUSED_RESULT
MINLINE bool is_zero_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void zero_v3(float r[3])
MINLINE void add_v3_v3(float r[3], const float a[3])
MINLINE float normalize_v3(float n[3])
unsigned int uint
struct MempoolIterData MempoolIterData
Definition BLI_task.h:200
BLI_INLINE void BLI_parallel_mempool_settings_defaults(TaskParallelSettings *settings)
Definition BLI_task.h:229
#define UNUSED_VARS_NDEBUG(...)
#define UNLIKELY(x)
#define ELEM(...)
#define LIKELY(x)
@ CD_PROP_FLOAT3
@ CD_PROP_INT16_2D
@ CD_FLAG_TEMPORARY
@ SCE_SELECT_FACE
@ SCE_SELECT_VERTEX
@ SCE_SELECT_EDGE
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
@ BM_SPACEARR_DIRTY_ALL
@ BM_SPACEARR_DIRTY
#define BM_DISK_EDGE_NEXT(e, v)
#define BM_FACE_FIRST_LOOP(p)
#define BM_ELEM_CD_GET_FLOAT_P(ele, offset)
@ BM_ELEM_SELECT
@ BM_ELEM_SMOOTH
@ BM_ELEM_TAG
#define BM_THREAD_LIMIT
@ BM_LOOP
#define BM_ELEM_CD_GET_VOID_P(ele, offset)
#define BM_elem_index_get(ele)
#define BM_elem_flag_disable(ele, hflag)
#define BM_elem_flag_set(ele, hflag, val)
#define BM_elem_index_set(ele, index)
#define BM_elem_flag_test(ele, hflag)
#define BM_elem_flag_enable(ele, hflag)
bool BM_data_layer_free_named(BMesh *bm, CustomData *data, StringRef name)
void BM_data_layer_add(BMesh *bm, CustomData *data, int type)
void BM_data_layer_ensure_named(BMesh *bm, CustomData *data, int type, const StringRef name)
#define BM_ITER_ELEM(ele, iter, data, itype)
#define BM_ITER_MESH(ele, iter, bm, itype)
#define BM_ITER_MESH_INDEX(ele, iter, bm, itype, indexvar)
@ BM_EDGES_OF_MESH
@ BM_VERTS_OF_MESH
@ BM_FACES_OF_MESH
@ BM_LOOPS_OF_VERT
@ BM_LOOPS_OF_EDGE
@ BM_LOOPS_OF_FACE
BMesh const char void * data
BMesh * bm
void BM_mesh_elem_index_ensure(BMesh *bm, const char htype)
static void bm_mesh_loops_assign_normal_data(BMesh *bm, MLoopNorSpaceArray *lnors_spacearr, short(*r_clnors_data)[2], const int cd_loop_clnors_offset, const float(*new_lnors)[3])
void BM_mesh_normals_update_with_partial(BMesh *bm, const BMPartialUpdate *bmpinfo)
static int bm_loop_index_cmp(const void *a, const void *b)
static void bm_vert_calc_normals_cb(void *, MempoolIterData *mp_v, const TaskParallelTLS *__restrict)
#define CLEAR_SPACEARRAY_THRESHOLD(x)
static void bm_vert_calc_normals_with_coords(BMVert *v, BMVertsCalcNormalsWithCoordsData *data)
static void bm_mesh_loops_calc_normals_for_vert_with_clnors(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, MutableSpan< float3 > r_lnos, const short(*clnors_data)[2], const int cd_loop_clnors_offset, const bool do_rebuild, const float split_angle_cos, MLoopNorSpaceArray *r_lnors_spacearr, blender::Vector< blender::float3, 16 > *edge_vectors, BMVert *v)
static void bm_loop_normal_mark_edges_impl(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert, int *totloopsel_p)
void BM_lnorspace_update(BMesh *bm)
static void bm_mesh_loops_calc_normals_for_vert_free_fn(const void *__restrict userdata, void *__restrict chunk)
void BM_mesh_normals_update_ex(BMesh *bm, const BMeshNormalsUpdate_Params *params)
BMesh Compute Normals.
#define BM_LNORSPACE_UPDATE
void BM_lnorspace_invalidate(BMesh *bm, const bool do_invalidate_all)
void BM_normals_loops_edges_tag(BMesh *bm, const bool do_edges)
static int bm_loop_normal_mark_verts(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert)
static void bm_mesh_loops_custom_normals_set(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, MLoopNorSpaceArray *r_lnors_spacearr, short(*r_clnors_data)[2], const int cd_loop_clnors_offset, float(*new_lnors)[3], const int cd_new_lnors_offset, bool do_split_fans)
void BM_lnorspace_rebuild(BMesh *bm, bool preserve_clnor)
static int bm_loop_normal_mark_faces(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert)
BMLoopNorEditDataArray * BM_loop_normal_editdata_array_init_with_htype(BMesh *bm, const bool do_all_loops_of_vert, const char htype_override)
static int bm_loop_normal_mark_edges(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert)
bool BM_custom_loop_normals_to_vector_layer(BMesh *bm)
static bool bm_mesh_loops_split_lnor_fans(BMesh *bm, MLoopNorSpaceArray *lnors_spacearr, const float(*new_lnors)[3])
static void bm_lnorspace_ensure_from_free_normals(BMesh *bm)
void BM_mesh_normals_update_with_partial_ex(BMesh *, const BMPartialUpdate *bmpinfo, const BMeshNormalsUpdate_Params *params)
#define EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS
static void bm_face_calc_normals_cb(void *, MempoolIterData *mp_f, const TaskParallelTLS *__restrict)
static void bm_mesh_loops_calc_normals_for_vert_init_fn(const void *__restrict userdata, void *__restrict chunk)
static void bm_loop_normal_mark_faces_impl(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert, int *totloopsel_p)
static void bm_loop_normal_mark_indiv_do_loop(BMLoop *l, BLI_bitmap *loops, MLoopNorSpaceArray *lnor_spacearr, int *totloopsel, const bool do_all_loops_of_vert)
static void bm_mesh_loops_calc_normals__multi_threaded(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, MutableSpan< float3 > r_lnos, MLoopNorSpaceArray *r_lnors_spacearr, const short(*clnors_data)[2], const int cd_loop_clnors_offset, const bool do_rebuild, const float split_angle_cos)
void BM_mesh_normals_update(BMesh *bm)
BMLoopNorEditDataArray * BM_loop_normal_editdata_array_init(BMesh *bm, const bool do_all_loops_of_vert)
static void bm_mesh_loops_calc_normals_no_autosmooth(BMesh *bm, const Span< float3 > vnos, const Span< float3 > fnos, MutableSpan< float3 > r_lnos)
void BM_lnorspace_err(BMesh *bm)
BLI_INLINE bool bm_edge_is_smooth_no_angle_test(const BMEdge *e, const BMLoop *l_a, const BMLoop *l_b)
static void bm_edge_tag_from_smooth_and_set_sharp(Span< float3 > fnos, BMEdge *e, const float split_angle_cos)
static void loop_normal_editdata_init(BMesh *bm, BMLoopNorEditData *lnor_ed, BMVert *v, BMLoop *l, const int offset)
void BM_loop_normal_editdata_array_free(BMLoopNorEditDataArray *lnors_ed_arr)
static void bm_vert_calc_normals_with_coords_cb(void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict)
void BM_lnorspacearr_store(BMesh *bm, MutableSpan< float3 > r_lnors)
static int bm_loop_normal_mark_indiv(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert)
static void bm_vert_calc_normals_impl(BMVert *v)
static void bm_mesh_loops_calc_normals__single_threaded(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, MutableSpan< float3 > r_lnos, MLoopNorSpaceArray *r_lnors_spacearr, const short(*clnors_data)[2], const int cd_loop_clnors_offset, const bool do_rebuild, const float split_angle_cos)
static void bm_mesh_loops_calc_normals_for_vert_without_clnors(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, MutableSpan< float3 > r_lnos, const bool do_rebuild, const float split_angle_cos, MLoopNorSpaceArray *r_lnors_spacearr, blender::Vector< blender::float3, 16 > *edge_vectors, BMVert *v)
static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, const short(*clnors_data)[2], const int cd_loop_clnors_offset, const bool has_clnors, blender::Vector< blender::float3, 16 > *edge_vectors, BMLoop *l_curr, MutableSpan< float3 > r_lnos, MLoopNorSpaceArray *r_lnors_spacearr)
static void bm_edge_tag_from_smooth(Span< float3 > fnos, BMEdge *e, const float split_angle_cos)
static void bm_loop_normal_mark_verts_impl(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert, int *totloopsel_p)
static void bm_mesh_loops_calc_normals_for_vert_without_clnors_fn(void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict tls)
BLI_INLINE void bm_vert_calc_normals_accum_loop(const BMLoop *l_iter, const float e1diff[3], const float e2diff[3], const float f_no[3], float v_no[3])
static void bm_mesh_verts_calc_normals(BMesh *bm, const Span< float3 > fnos, const Span< float3 > vcos, MutableSpan< float3 > vnos)
static void bm_mesh_loops_calc_normals_for_vert_with_clnors_fn(void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict tls)
bool BM_loop_check_cyclic_smooth_fan(BMLoop *l_curr)
static void bm_mesh_loops_calc_normals_for_vert_reduce_fn(const void *__restrict userdata, void *__restrict, void *__restrict chunk)
static void bm_mesh_loops_calc_normals(BMesh *bm, const Span< float3 > vcos, const Span< float3 > fnos, MutableSpan< float3 > r_lnos, MLoopNorSpaceArray *r_lnors_spacearr, const short(*clnors_data)[2], const int cd_loop_clnors_offset, const bool do_rebuild, const float split_angle_cos)
static void bm_mesh_edges_sharp_tag(BMesh *bm, const Span< float3 > fnos, float split_angle_cos, const bool do_sharp_edges_tag)
void BM_edges_sharp_from_angle_set(BMesh *bm, const float split_angle)
void BM_verts_calc_normal_vcos(BMesh *bm, const Span< float3 > fnos, const Span< float3 > vcos, MutableSpan< float3 > vnos)
void BM_loops_calc_normal_vcos(BMesh *bm, const Span< float3 > vcos, const Span< float3 > vnos, const Span< float3 > fnos, const bool use_split_normals, MutableSpan< float3 > r_lnos, MLoopNorSpaceArray *r_lnors_spacearr, short(*clnors_data)[2], const int cd_loop_clnors_offset, const bool do_rebuild)
void BM_custom_loop_normals_from_vector_layer(BMesh *bm, bool add_sharp_edges)
#define BM_FACE
#define BM_EDGE
#define BM_VERT
float BM_face_calc_normal(const BMFace *f, float r_no[3])
BMESH UPDATE FACE NORMAL.
#define BM_ELEM_API_FLAG_DISABLE(element, f)
#define BM_ELEM_API_FLAG_TEST(element, f)
#define BM_ELEM_API_FLAG_ENABLE(element, f)
BMLoop * BM_vert_step_fan_loop(BMLoop *l, BMEdge **e_step)
bool BM_edge_loop_pair(BMEdge *e, BMLoop **r_la, BMLoop **r_lb)
BMLoop * BM_face_vert_share_loop(BMFace *f, BMVert *v)
Return the Loop Shared by Face and Vertex.
BLI_INLINE BMVert * BM_edge_other_vert(BMEdge *e, const BMVert *v) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
BLI_INLINE bool BM_vert_in_edge(const BMEdge *e, const BMVert *v) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
ATTR_WARN_UNUSED_RESULT const BMLoop * l
ATTR_WARN_UNUSED_RESULT const BMVert const BMEdge * e
ATTR_WARN_UNUSED_RESULT const BMLoop * l_b
ATTR_WARN_UNUSED_RESULT const BMVert * v
bool is_empty() const
IndexRange index_range() const
constexpr bool is_empty() const
Definition BLI_span.hh:509
constexpr bool is_empty() const
Definition BLI_span.hh:260
void append(const T &value)
bool is_empty() const
nullptr float
dot(value.rgb, luminance_coefficients)") DEFINE_VALUE("REDUCE(lhs
uint nor
#define printf(...)
#define isnan
#define UINT_MAX
Definition hash_md5.cc:44
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
void * MEM_mallocN(size_t len, const char *str)
Definition mallocn.cc:128
void * MEM_calloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:123
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void * MEM_malloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:133
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
#define G(x, y, z)
#define LNOR_SPACE_TRIGO_THRESHOLD
static void clear(Message &msg)
Definition msgfmt.cc:213
float safe_acos_approx(float x)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
VecBase< float, 3 > float3
#define cosf
BMVert * v1
BMVert * v2
struct BMLoop * l
struct BMEditSelection * prev
float no[3]
BMLoopNorEditData ** lidx_to_lnor_editdata
BMLoopNorEditData * lnor_editdata
struct BMVert * v
struct BMEdge * e
struct BMLoop * radial_next
struct BMLoop * prev
struct BMFace * f
struct BMLoop * next
blender::Vector< blender::float3, 16 > * edge_vectors
blender::Vector< BMFace * > faces
blender::Vector< BMVert * > verts
BMPartialUpdate_Params params
float co[3]
float no[3]
void * link
struct LinkNode * next
MLoopNorSpace ** lspacearr
Definition BKE_mesh.h:286
float ref_alpha
Definition BKE_mesh.h:256
float vec_ortho[3]
Definition BKE_mesh.h:248
float ref_beta
Definition BKE_mesh.h:265
float vec_ref[3]
Definition BKE_mesh.h:246
float vec_lnor[3]
Definition BKE_mesh.h:241
struct LinkNode * loops
Definition BKE_mesh.h:271
TaskParallelReduceFunc func_reduce
Definition BLI_task.h:176
TaskParallelFreeFunc func_free
Definition BLI_task.h:178
TaskParallelInitFunc func_init
Definition BLI_task.h:171
size_t userdata_chunk_size
Definition BLI_task.h:164
i
Definition text_draw.cc:230