Blender V4.5
mesh_normals.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2001-2002 NaN Holding BV. All rights reserved.
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
12
13#include <climits>
14
15#include "BLI_math_geom.h"
16#include "BLI_math_vector.h"
17
18#include "BLI_array_utils.hh"
19#include "BLI_bit_vector.hh"
20#include "BLI_linklist.h"
21#include "BLI_math_base.hh"
22#include "BLI_math_vector.hh"
23#include "BLI_memarena.h"
24#include "BLI_span.hh"
25#include "BLI_task.hh"
26#include "BLI_utildefines.h"
27
28#include "BKE_attribute.hh"
29#include "BKE_global.hh"
30#include "BKE_mesh.hh"
31#include "BKE_mesh_mapping.hh"
32
33// #define DEBUG_TIME
34
35#ifdef DEBUG_TIME
36# include "BLI_timeit.hh"
37#endif
38
39/* -------------------------------------------------------------------- */
44
45namespace blender::bke {
46
48{
49 mesh.runtime->vert_normals_true_cache.ensure(
50 [&](Vector<float3> &r_data) { r_data = vert_normals; });
51}
52
54{
55 mesh.runtime->vert_normals_true_cache.ensure(
56 [&](Vector<float3> &r_data) { r_data = std::move(vert_normals); });
57}
58
60{
61 if (auto *vector = std::get_if<Vector<float3>>(&this->data)) {
62 vector->resize(size);
63 }
64 else {
65 this->data = Vector<float3>(size);
66 }
67 return std::get<Vector<float3>>(this->data).as_mutable_span();
68}
69
71{
72 if (const auto *vector = std::get_if<Vector<float3>>(&this->data)) {
73 return vector->as_span();
74 }
75 return std::get<Span<float3>>(this->data);
76}
77
79{
80 if (data.is_span()) {
81 this->data = data.get_internal_span();
82 }
83 else {
84 data.materialize(this->ensure_vector_size(data.size()));
85 }
86}
87
89{
90 this->data = std::move(data);
91}
92
93} // namespace blender::bke
94
96{
97 return mesh->runtime->vert_normals_cache.is_dirty();
98}
99
101{
102 return mesh->runtime->face_normals_cache.is_dirty();
103}
104
106
107namespace blender::bke::mesh {
108
109/* -------------------------------------------------------------------- */
112
113/*
114 * COMPUTE POLY NORMAL
115 *
116 * Computes the normal of a planar
117 * face See Graphics Gems for
118 * computing newell normal.
119 */
120static float3 normal_calc_ngon(const Span<float3> vert_positions, const Span<int> face_verts)
121{
122 float3 normal(0);
123
124 /* Newell's Method */
125 const float *v_prev = vert_positions[face_verts.last()];
126 for (const int i : face_verts.index_range()) {
127 const float *v_curr = vert_positions[face_verts[i]];
128 add_newell_cross_v3_v3v3(normal, v_prev, v_curr);
129 v_prev = v_curr;
130 }
131
132 if (UNLIKELY(normalize_v3(normal) == 0.0f)) {
133 /* Other axis are already set to zero. */
134 normal[2] = 1.0f;
135 }
136
137 return normal;
138}
139
140float3 face_normal_calc(const Span<float3> vert_positions, const Span<int> face_verts)
141{
142 float3 normal;
143 if (face_verts.size() == 4) {
144 normal_quad_v3(normal,
145 vert_positions[face_verts[0]],
146 vert_positions[face_verts[1]],
147 vert_positions[face_verts[2]],
148 vert_positions[face_verts[3]]);
149 }
150 else if (face_verts.size() == 3) {
151 normal = math::normal_tri(vert_positions[face_verts[0]],
152 vert_positions[face_verts[1]],
153 vert_positions[face_verts[2]]);
154 }
155 else {
156 BLI_assert(face_verts.size() > 4);
157 normal = normal_calc_ngon(vert_positions, face_verts);
158 }
159
160 if (UNLIKELY(math::is_zero(normal))) {
161 normal.z = 1.0f;
162 }
163
164 BLI_ASSERT_UNIT_V3(normal);
165 return normal;
166}
167
169
170/* -------------------------------------------------------------------- */
176
177void normals_calc_faces(const Span<float3> positions,
179 const Span<int> corner_verts,
180 MutableSpan<float3> face_normals)
181{
182 BLI_assert(faces.size() == face_normals.size());
183 threading::parallel_for(faces.index_range(), 1024, [&](const IndexRange range) {
184 for (const int i : range) {
185 face_normals[i] = normal_calc_ngon(positions, corner_verts.slice(faces[i]));
186 }
187 });
188}
189
190void normals_calc_verts(const Span<float3> vert_positions,
192 const Span<int> corner_verts,
193 const GroupedSpan<int> vert_to_face_map,
194 const Span<float3> face_normals,
195 MutableSpan<float3> vert_normals)
196{
197 const Span<float3> positions = vert_positions;
198 threading::parallel_for(positions.index_range(), 1024, [&](const IndexRange range) {
199 for (const int vert : range) {
200 const Span<int> vert_faces = vert_to_face_map[vert];
201 if (vert_faces.is_empty()) {
202 vert_normals[vert] = math::normalize(positions[vert]);
203 continue;
204 }
205
206 float3 vert_normal(0);
207 for (const int face : vert_faces) {
208 const int2 adjacent_verts = face_find_adjacent_verts(faces[face], corner_verts, vert);
209 const float3 dir_prev = math::normalize(positions[adjacent_verts[0]] - positions[vert]);
210 const float3 dir_next = math::normalize(positions[adjacent_verts[1]] - positions[vert]);
211 const float factor = math::safe_acos_approx(math::dot(dir_prev, dir_next));
212
213 vert_normal += face_normals[face] * factor;
214 }
215
216 vert_normals[vert] = math::normalize(vert_normal);
217 }
218 });
219}
220
222
223static void mix_normals_corner_to_vert(const Span<float3> vert_positions,
225 const Span<int> corner_verts,
226 const GroupedSpan<int> vert_to_face_map,
227 const Span<float3> corner_normals,
228 MutableSpan<float3> vert_normals)
229{
230 const Span<float3> positions = vert_positions;
231 threading::parallel_for(positions.index_range(), 1024, [&](const IndexRange range) {
232 for (const int vert : range) {
233 const Span<int> vert_faces = vert_to_face_map[vert];
234 if (vert_faces.is_empty()) {
235 vert_normals[vert] = math::normalize(positions[vert]);
236 continue;
237 }
238
239 float3 vert_normal(0);
240 for (const int face : vert_faces) {
241 const int corner = mesh::face_find_corner_from_vert(faces[face], corner_verts, vert);
242 const int2 adjacent_verts{corner_verts[mesh::face_corner_prev(faces[face], corner)],
243 corner_verts[mesh::face_corner_next(faces[face], corner)]};
244
245 const float3 dir_prev = math::normalize(positions[adjacent_verts[0]] - positions[vert]);
246 const float3 dir_next = math::normalize(positions[adjacent_verts[1]] - positions[vert]);
247 const float factor = math::safe_acos_approx(math::dot(dir_prev, dir_next));
248
249 vert_normal += corner_normals[corner] * factor;
250 }
251
252 vert_normals[vert] = math::normalize(vert_normal);
253 }
254 });
255}
256
258 const Span<int> corner_verts,
259 const Span<float3> vert_normals,
260 MutableSpan<float3> face_normals)
261{
262 threading::parallel_for(faces.index_range(), 1024, [&](const IndexRange range) {
263 for (const int face : range) {
264 float3 sum(0);
265 for (const int vert : corner_verts.slice(faces[face])) {
266 sum += vert_normals[vert];
267 }
268 face_normals[face] = math::normalize(sum);
269 }
270 });
271}
272
274 const Span<float3> corner_normals,
275 MutableSpan<float3> face_normals)
276{
277 threading::parallel_for(faces.index_range(), 1024, [&](const IndexRange range) {
278 for (const int face : range) {
279 const Span<float3> face_corner_normals = corner_normals.slice(faces[face]);
280 const float3 sum = std::accumulate(
281 face_corner_normals.begin(), face_corner_normals.end(), float3(0));
282 face_normals[face] = math::normalize(sum);
283 }
284 });
285}
286
287} // namespace blender::bke::mesh
288
289/* -------------------------------------------------------------------- */
292
293blender::bke::MeshNormalDomain Mesh::normals_domain(const bool support_sharp_face) const
294{
295 using namespace blender;
296 using namespace blender::bke;
297 const bke::AttributeAccessor attributes = this->attributes();
298 if (const std::optional<AttributeMetaData> custom = attributes.lookup_meta_data("custom_normal"))
299 {
300 switch (custom->domain) {
303 case AttrDomain::Edge:
304 break;
305 case AttrDomain::Face:
309 default:
311 }
312 }
313
314 const VArray<bool> sharp_faces = *attributes.lookup_or_default<bool>(
315 "sharp_face", AttrDomain::Face, false);
316
317 const array_utils::BooleanMix face_mix = array_utils::booleans_mix_calc(sharp_faces);
318 if (face_mix == array_utils::BooleanMix::AllTrue) {
319 return MeshNormalDomain::Face;
320 }
321
322 const VArray<bool> sharp_edges = *attributes.lookup_or_default<bool>(
323 "sharp_edge", AttrDomain::Edge, false);
324 const array_utils::BooleanMix edge_mix = array_utils::booleans_mix_calc(sharp_edges);
325 if (edge_mix == array_utils::BooleanMix::AllTrue) {
326 return MeshNormalDomain::Face;
327 }
328
329 if (edge_mix == array_utils::BooleanMix::AllFalse &&
330 (face_mix == array_utils::BooleanMix::AllFalse || support_sharp_face))
331 {
332 return MeshNormalDomain::Point;
333 }
334
335 return MeshNormalDomain::Corner;
336}
337
338blender::Span<blender::float3> Mesh::vert_normals() const
339{
340 using namespace blender;
341 using namespace blender::bke;
342 this->runtime->vert_normals_cache.ensure([&](NormalsCache &r_data) {
343 if (const GAttributeReader custom = this->attributes().lookup("custom_normal")) {
344 if (custom.varray.type().is<float3>()) {
345 if (custom.domain == AttrDomain::Point) {
346 r_data.store_varray(custom.varray.typed<float3>());
347 return;
348 }
349 if (custom.domain == AttrDomain::Face) {
350 mesh::normals_calc_verts(this->vert_positions(),
351 this->faces(),
352 this->corner_verts(),
353 this->vert_to_face_map(),
354 VArraySpan<float3>(custom.varray.typed<float3>()),
355 r_data.ensure_vector_size(this->verts_num));
356
357 return;
358 }
359 if (custom.domain == AttrDomain::Corner) {
360 mesh::mix_normals_corner_to_vert(this->vert_positions(),
361 this->faces(),
362 this->corner_verts(),
363 this->vert_to_face_map(),
364 VArraySpan<float3>(custom.varray.typed<float3>()),
365 r_data.ensure_vector_size(this->verts_num));
366 return;
367 }
368 }
369 else if (custom.varray.type().is<short2>() && custom.domain == AttrDomain::Corner) {
370 mesh::mix_normals_corner_to_vert(this->vert_positions(),
371 this->faces(),
372 this->corner_verts(),
373 this->vert_to_face_map(),
374 this->corner_normals(),
375 r_data.ensure_vector_size(this->verts_num));
376 return;
377 }
378 }
379 r_data.data = NormalsCache::UseTrueCache();
380 });
381 if (std::holds_alternative<NormalsCache::UseTrueCache>(
382 this->runtime->vert_normals_cache.data().data))
383 {
384 return this->vert_normals_true();
385 }
386
387 return this->runtime->vert_normals_cache.data().get_span();
388}
389
390blender::Span<blender::float3> Mesh::vert_normals_true() const
391{
392 using namespace blender;
393 using namespace blender::bke;
394 this->runtime->vert_normals_true_cache.ensure([&](Vector<float3> &r_data) {
395 r_data.reinitialize(this->verts_num);
396 mesh::normals_calc_verts(this->vert_positions(),
397 this->faces(),
398 this->corner_verts(),
399 this->vert_to_face_map(),
400 this->face_normals_true(),
401 r_data);
402 });
403 return this->runtime->vert_normals_true_cache.data();
404}
405
406blender::Span<blender::float3> Mesh::face_normals() const
407{
408 using namespace blender;
409 using namespace blender::bke;
410 this->runtime->face_normals_cache.ensure([&](NormalsCache &r_data) {
411 if (const GAttributeReader custom = this->attributes().lookup("custom_normal")) {
412 if (custom.varray.type().is<float3>()) {
413 if (custom.domain == AttrDomain::Face) {
414 r_data.store_varray(custom.varray.typed<float3>());
415 return;
416 }
417 if (custom.domain == AttrDomain::Point) {
418 mesh::mix_normals_vert_to_face(this->faces(),
419 this->corner_verts(),
420 VArraySpan<float3>(custom.varray.typed<float3>()),
421 r_data.ensure_vector_size(this->faces_num));
422 return;
423 }
424 if (custom.domain == AttrDomain::Corner) {
425 mesh::mix_normals_corner_to_face(this->faces(),
426 VArraySpan<float3>(custom.varray.typed<float3>()),
427 r_data.ensure_vector_size(this->faces_num));
428 return;
429 }
430 }
431 else if (custom.varray.type().is<short2>() && custom.domain == AttrDomain::Corner) {
432 mesh::mix_normals_corner_to_face(
433 this->faces(), this->corner_normals(), r_data.ensure_vector_size(this->faces_num));
434 return;
435 }
436 }
437 r_data.data = NormalsCache::UseTrueCache();
438 });
439 if (std::holds_alternative<NormalsCache::UseTrueCache>(
440 this->runtime->face_normals_cache.data().data))
441 {
442 return this->face_normals_true();
443 }
444 return this->runtime->face_normals_cache.data().get_span();
445}
446
447blender::Span<blender::float3> Mesh::face_normals_true() const
448{
449 using namespace blender;
450 using namespace blender::bke;
451 this->runtime->face_normals_true_cache.ensure([&](Vector<float3> &r_data) {
452 r_data.reinitialize(this->faces_num);
453 mesh::normals_calc_faces(this->vert_positions(), this->faces(), this->corner_verts(), r_data);
454 });
455 return this->runtime->face_normals_true_cache.data();
456}
457
458blender::Span<blender::float3> Mesh::corner_normals() const
459{
460 using namespace blender;
461 using namespace blender::bke;
462 this->runtime->corner_normals_cache.ensure([&](NormalsCache &r_data) {
463 const OffsetIndices<int> faces = this->faces();
464 switch (this->normals_domain()) {
465 case MeshNormalDomain::Point: {
466 MutableSpan<float3> data = r_data.ensure_vector_size(this->corners_num);
467 array_utils::gather(this->vert_normals(), this->corner_verts(), data);
468 break;
469 }
470 case MeshNormalDomain::Face: {
471 MutableSpan<float3> data = r_data.ensure_vector_size(this->corners_num);
472 const Span<float3> face_normals = this->face_normals();
473 array_utils::gather_to_groups(faces, faces.index_range(), face_normals, data);
474 break;
475 }
476 case MeshNormalDomain::Corner: {
477 const AttributeAccessor attributes = this->attributes();
478 const GAttributeReader custom = attributes.lookup("custom_normal");
479 if (custom && custom.varray.type().is<float3>()) {
480 if (custom.domain == bke::AttrDomain::Corner) {
481 r_data.store_varray(custom.varray.typed<float3>());
482 }
483 return;
484 }
485 MutableSpan<float3> data = r_data.ensure_vector_size(this->corners_num);
486 const VArraySpan sharp_edges = *attributes.lookup<bool>("sharp_edge", AttrDomain::Edge);
487 const VArraySpan sharp_faces = *attributes.lookup<bool>("sharp_face", AttrDomain::Face);
488 mesh::normals_calc_corners(this->vert_positions(),
489 this->faces(),
490 this->corner_verts(),
491 this->corner_edges(),
492 this->vert_to_face_map(),
493 this->face_normals_true(),
494 sharp_edges,
495 sharp_faces,
497 nullptr,
498 data);
499 }
500 }
501 });
502 return this->runtime->corner_normals_cache.data().get_span();
503}
504
506 const int numLoops,
507 const char data_type)
508{
509 if (!(lnors_spacearr->lspacearr && lnors_spacearr->loops_pool)) {
510 MemArena *mem;
511
512 if (!lnors_spacearr->mem) {
513 lnors_spacearr->mem = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
514 }
515 mem = lnors_spacearr->mem;
516 if (numLoops > 0) {
517 lnors_spacearr->lspacearr = (MLoopNorSpace **)BLI_memarena_calloc(
518 mem, sizeof(MLoopNorSpace *) * size_t(numLoops));
519 lnors_spacearr->loops_pool = (LinkNode *)BLI_memarena_alloc(
520 mem, sizeof(LinkNode) * size_t(numLoops));
521 }
522 else {
523 lnors_spacearr->lspacearr = nullptr;
524 lnors_spacearr->loops_pool = nullptr;
525 }
526
527 lnors_spacearr->spaces_num = 0;
528 }
530 lnors_spacearr->data_type = data_type;
531}
532
534 MLoopNorSpaceArray *lnors_spacearr_tls)
535{
536 *lnors_spacearr_tls = *lnors_spacearr;
537 lnors_spacearr_tls->mem = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
538}
539
541 MLoopNorSpaceArray *lnors_spacearr_tls)
542{
543 BLI_assert(lnors_spacearr->data_type == lnors_spacearr_tls->data_type);
544 BLI_assert(lnors_spacearr->mem != lnors_spacearr_tls->mem);
545 lnors_spacearr->spaces_num += lnors_spacearr_tls->spaces_num;
546 BLI_memarena_merge(lnors_spacearr->mem, lnors_spacearr_tls->mem);
547 BLI_memarena_free(lnors_spacearr_tls->mem);
548 lnors_spacearr_tls->mem = nullptr;
549 BKE_lnor_spacearr_clear(lnors_spacearr_tls);
550}
551
553{
554 lnors_spacearr->spaces_num = 0;
555 lnors_spacearr->lspacearr = nullptr;
556 lnors_spacearr->loops_pool = nullptr;
557 if (lnors_spacearr->mem != nullptr) {
558 BLI_memarena_clear(lnors_spacearr->mem);
559 }
560}
561
563{
564 lnors_spacearr->spaces_num = 0;
565 lnors_spacearr->lspacearr = nullptr;
566 lnors_spacearr->loops_pool = nullptr;
567 BLI_memarena_free(lnors_spacearr->mem);
568 lnors_spacearr->mem = nullptr;
569}
570
572{
573 lnors_spacearr->spaces_num++;
574 return (MLoopNorSpace *)BLI_memarena_calloc(lnors_spacearr->mem, sizeof(MLoopNorSpace));
575}
576
577/* This threshold is a bit touchy (usual float precision issue), this value seems OK. */
578#define LNOR_SPACE_TRIGO_THRESHOLD (1.0f - 1e-4f)
579
580namespace blender::bke::mesh {
581
583 const float3 &vec_ref,
584 const float3 &vec_other,
585 const Span<float3> edge_vectors)
586{
587 CornerNormalSpace lnor_space{};
588 const float pi2 = float(M_PI) * 2.0f;
589 const float dtp_ref = math::dot(vec_ref, lnor);
590 const float dtp_other = math::dot(vec_other, lnor);
591
592 if (UNLIKELY(std::abs(dtp_ref) >= LNOR_SPACE_TRIGO_THRESHOLD ||
593 std::abs(dtp_other) >= LNOR_SPACE_TRIGO_THRESHOLD))
594 {
595 /* If vec_ref or vec_other are too much aligned with lnor, we can't build lnor space,
596 * tag it as invalid and abort. */
597 lnor_space.ref_alpha = lnor_space.ref_beta = 0.0f;
598 return lnor_space;
599 }
600
601 lnor_space.vec_lnor = lnor;
602
603 /* Compute ref alpha, average angle of all available edge vectors to lnor. */
604 if (!edge_vectors.is_empty()) {
605 float alpha = 0.0f;
606 for (const float3 &vec : edge_vectors) {
607 alpha += math::safe_acos_approx(math::dot(vec, lnor));
608 }
609 /* This piece of code shall only be called for more than one loop. */
610 /* NOTE: In theory, this could be `count > 2`,
611 * but there is one case where we only have two edges for two loops:
612 * a smooth vertex with only two edges and two faces (our Monkey's nose has that, e.g.).
613 */
614 BLI_assert(edge_vectors.size() >= 2);
615 lnor_space.ref_alpha = alpha / float(edge_vectors.size());
616 }
617 else {
618 lnor_space.ref_alpha = (math::safe_acos_approx(math::dot(vec_ref, lnor)) +
619 math::safe_acos_approx(math::dot(vec_other, lnor))) /
620 2.0f;
621 }
622
623 /* Project vec_ref on lnor's ortho plane. */
624 lnor_space.vec_ref = math::normalize(vec_ref - lnor * dtp_ref);
625 lnor_space.vec_ortho = math::normalize(math::cross(lnor, lnor_space.vec_ref));
626
627 /* Project vec_other on lnor's ortho plane. */
628 const float3 vec_other_proj = math::normalize(vec_other - lnor * dtp_other);
629
630 /* Beta is angle between ref_vec and other_vec, around lnor. */
631 const float dtp = math::dot(lnor_space.vec_ref, vec_other_proj);
633 const float beta = math::safe_acos_approx(dtp);
634 lnor_space.ref_beta = (math::dot(lnor_space.vec_ortho, vec_other_proj) < 0.0f) ? pi2 - beta :
635 beta;
636 }
637 else {
638 lnor_space.ref_beta = pi2;
639 }
640
641 return lnor_space;
642}
643
644} // namespace blender::bke::mesh
645
647 const float lnor[3],
648 const float vec_ref[3],
649 const float vec_other[3],
650 const blender::Span<blender::float3> edge_vectors)
651{
652 using namespace blender::bke::mesh;
653 const CornerNormalSpace space = corner_fan_space_define(lnor, vec_ref, vec_other, edge_vectors);
654 copy_v3_v3(lnor_space->vec_lnor, space.vec_lnor);
655 copy_v3_v3(lnor_space->vec_ref, space.vec_ref);
656 copy_v3_v3(lnor_space->vec_ortho, space.vec_ortho);
657 lnor_space->ref_alpha = space.ref_alpha;
658 lnor_space->ref_beta = space.ref_beta;
659}
660
662 MLoopNorSpace *lnor_space,
663 const int corner,
664 void *bm_loop,
665 const bool is_single)
666{
667 BLI_assert((lnors_spacearr->data_type == MLNOR_SPACEARR_LOOP_INDEX && bm_loop == nullptr) ||
668 (lnors_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR && bm_loop != nullptr));
669
670 lnors_spacearr->lspacearr[corner] = lnor_space;
671 if (bm_loop == nullptr) {
672 bm_loop = POINTER_FROM_INT(corner);
673 }
674 if (is_single) {
675 BLI_assert(lnor_space->loops == nullptr);
676 lnor_space->flags |= MLNOR_SPACE_IS_SINGLE;
677 lnor_space->loops = (LinkNode *)bm_loop;
678 }
679 else {
680 BLI_assert((lnor_space->flags & MLNOR_SPACE_IS_SINGLE) == 0);
681 BLI_linklist_prepend_nlink(&lnor_space->loops, bm_loop, &lnors_spacearr->loops_pool[corner]);
682 }
683}
684
685MINLINE float unit_short_to_float(const short val)
686{
687 return float(val) / float(SHRT_MAX);
688}
689
690MINLINE short unit_float_to_short(const float val)
691{
692 /* Rounding. */
693 return short(floorf(val * float(SHRT_MAX) + 0.5f));
694}
695
696namespace blender::bke::mesh {
697
699 const short2 clnor_data)
700{
701 /* NOP custom normal data or invalid lnor space, return. */
702 if (clnor_data[0] == 0 || lnor_space.ref_alpha == 0.0f || lnor_space.ref_beta == 0.0f) {
703 return lnor_space.vec_lnor;
704 }
705
706 float3 r_custom_lnor;
707
708 /* TODO: Check whether using #sincosf() gives any noticeable benefit
709 * (could not even get it working under linux though)! */
710 const float pi2 = float(M_PI * 2.0);
711 const float alphafac = unit_short_to_float(clnor_data[0]);
712 const float alpha = (alphafac > 0.0f ? lnor_space.ref_alpha : pi2 - lnor_space.ref_alpha) *
713 alphafac;
714 const float betafac = unit_short_to_float(clnor_data[1]);
715
716 mul_v3_v3fl(r_custom_lnor, lnor_space.vec_lnor, cosf(alpha));
717
718 if (betafac == 0.0f) {
719 madd_v3_v3fl(r_custom_lnor, lnor_space.vec_ref, sinf(alpha));
720 }
721 else {
722 const float sinalpha = sinf(alpha);
723 const float beta = (betafac > 0.0f ? lnor_space.ref_beta : pi2 - lnor_space.ref_beta) *
724 betafac;
725 madd_v3_v3fl(r_custom_lnor, lnor_space.vec_ref, sinalpha * cosf(beta));
726 madd_v3_v3fl(r_custom_lnor, lnor_space.vec_ortho, sinalpha * sinf(beta));
727 }
728
729 return r_custom_lnor;
730}
731
732} // namespace blender::bke::mesh
733
735 const short clnor_data[2],
736 float r_custom_lnor[3])
737{
738 using namespace blender::bke::mesh;
739 CornerNormalSpace space;
740 space.vec_lnor = lnor_space->vec_lnor;
741 space.vec_ref = lnor_space->vec_ref;
742 space.vec_ortho = lnor_space->vec_ortho;
743 space.ref_alpha = lnor_space->ref_alpha;
744 space.ref_beta = lnor_space->ref_beta;
745 copy_v3_v3(r_custom_lnor, corner_space_custom_data_to_normal(space, clnor_data));
746}
747
748namespace blender::bke::mesh {
749
751 const float3 &custom_lnor)
752{
753 /* We use zero vector as NOP custom normal (can be simpler than giving auto-computed `lnor`). */
754 if (is_zero_v3(custom_lnor) || compare_v3v3(lnor_space.vec_lnor, custom_lnor, 1e-4f)) {
755 return short2(0);
756 }
757
758 short2 r_clnor_data;
759
760 const float pi2 = float(M_PI * 2.0);
761 const float cos_alpha = math::dot(lnor_space.vec_lnor, custom_lnor);
762
763 const float alpha = math::safe_acos_approx(cos_alpha);
764 if (alpha > lnor_space.ref_alpha) {
765 /* Note we could stick to [0, pi] range here,
766 * but makes decoding more complex, not worth it. */
767 r_clnor_data[0] = unit_float_to_short(-(pi2 - alpha) / (pi2 - lnor_space.ref_alpha));
768 }
769 else {
770 r_clnor_data[0] = unit_float_to_short(alpha / lnor_space.ref_alpha);
771 }
772
773 /* Project custom lnor on (vec_ref, vec_ortho) plane. */
774 const float3 vec = math::normalize(lnor_space.vec_lnor * -cos_alpha + custom_lnor);
775
776 const float cos_beta = math::dot(lnor_space.vec_ref, vec);
777
778 if (cos_beta < LNOR_SPACE_TRIGO_THRESHOLD) {
779 float beta = math::safe_acos_approx(cos_beta);
780 if (math::dot(lnor_space.vec_ortho, vec) < 0.0f) {
781 beta = pi2 - beta;
782 }
783
784 if (beta > lnor_space.ref_beta) {
785 r_clnor_data[1] = unit_float_to_short(-(pi2 - beta) / (pi2 - lnor_space.ref_beta));
786 }
787 else {
788 r_clnor_data[1] = unit_float_to_short(beta / lnor_space.ref_beta);
789 }
790 }
791 else {
792 r_clnor_data[1] = 0;
793 }
794
795 return r_clnor_data;
796}
797
798} // namespace blender::bke::mesh
799
801 const float custom_lnor[3],
802 short r_clnor_data[2])
803{
804 using namespace blender::bke::mesh;
805 CornerNormalSpace space;
806 space.vec_lnor = lnor_space->vec_lnor;
807 space.vec_ref = lnor_space->vec_ref;
808 space.vec_ortho = lnor_space->vec_ortho;
809 space.ref_alpha = lnor_space->ref_alpha;
810 space.ref_beta = lnor_space->ref_beta;
811 copy_v2_v2_short(r_clnor_data, corner_space_custom_normal_to_data(space, custom_lnor));
812}
813
814namespace blender::bke {
815
816namespace mesh {
817
818#define INDEX_UNSET INT_MIN
819#define INDEX_INVALID -1
820/* See comment about edge_to_corners below. */
821#define IS_EDGE_SHARP(_e2l) ELEM((_e2l)[1], INDEX_UNSET, INDEX_INVALID)
822
824 const Span<int> corner_verts,
825 const Span<int> corner_edges,
826 const Span<int> corner_to_face_map,
827 const Span<float3> face_normals,
828 const Span<bool> sharp_faces,
829 const Span<bool> sharp_edges,
830 const float split_angle,
831 MutableSpan<int2> edge_to_corners,
832 MutableSpan<bool> r_sharp_edges)
833{
834 const float split_angle_cos = cosf(split_angle);
835 auto face_is_smooth = [&](const int face_i) {
836 return sharp_faces.is_empty() || !sharp_faces[face_i];
837 };
838
839 for (const int face_i : faces.index_range()) {
840 for (const int corner : faces[face_i]) {
841 const int vert = corner_verts[corner];
842 const int edge = corner_edges[corner];
843
844 int2 &e2l = edge_to_corners[edge];
845
846 /* Check whether current edge might be smooth or sharp */
847 if ((e2l[0] | e2l[1]) == 0) {
848 /* 'Empty' edge until now, set e2l[0] (and e2l[1] to INDEX_UNSET to tag it as unset). */
849 e2l[0] = corner;
850 /* We have to check this here too, else we might miss some flat faces!!! */
851 e2l[1] = face_is_smooth(face_i) ? INDEX_UNSET : INDEX_INVALID;
852 }
853 else if (e2l[1] == INDEX_UNSET) {
854 const bool is_angle_sharp = math::dot(face_normals[corner_to_face_map[e2l[0]]],
855 face_normals[face_i]) < split_angle_cos;
856
857 /* Second corner using this edge, time to test its sharpness.
858 * An edge is sharp if it is tagged as such, or its face is not smooth,
859 * or both faces have opposed (flipped) normals, i.e. both corners on the same edge share
860 * the same vertex, or angle between both its faces' normals is above split_angle value. */
861 if (!face_is_smooth(face_i) || (!sharp_edges.is_empty() && sharp_edges[edge]) ||
862 vert == corner_verts[e2l[0]] || is_angle_sharp)
863 {
864 /* NOTE: we are sure that corner != 0 here ;). */
865 e2l[1] = INDEX_INVALID;
866
867 /* We want to avoid tagging edges as sharp when it is already defined as such by
868 * other causes than angle threshold. */
869 if (is_angle_sharp) {
870 r_sharp_edges[edge] = true;
871 }
872 }
873 else {
874 e2l[1] = corner;
875 }
876 }
877 else if (!IS_EDGE_SHARP(e2l)) {
878 /* More than two corners using this edge, tag as sharp if not yet done. */
879 e2l[1] = INDEX_INVALID;
880
881 /* We want to avoid tagging edges as sharp when it is already defined as such by
882 * other causes than angle threshold. */
883 r_sharp_edges[edge] = false;
884 }
885 /* Else, edge is already 'disqualified' (i.e. sharp)! */
886 }
887 }
888}
889
891 const Span<int> corner_verts,
892 const Span<int> corner_edges,
893 const Span<float3> face_normals,
894 const Span<int> corner_to_face,
895 const Span<bool> sharp_faces,
896 const float split_angle,
897 MutableSpan<bool> sharp_edges)
898{
899 if (split_angle >= float(M_PI)) {
900 /* Nothing to do! */
901 return;
902 }
903
904 /* Mapping edge -> corners. */
905 Array<int2> edge_to_corners(sharp_edges.size(), int2(0));
906
908 corner_verts,
909 corner_edges,
910 corner_to_face,
911 face_normals,
912 sharp_faces,
913 sharp_edges,
914 split_angle,
915 edge_to_corners,
916 sharp_edges);
917}
918
929
938 const Span<int> corner_verts,
939 const Span<int> vert_faces,
940 const int vert,
941 MutableSpan<VertCornerInfo> r_corner_infos)
942{
943 for (const int i : vert_faces.index_range()) {
944 const int face = vert_faces[i];
945 r_corner_infos[i].face = face;
946 r_corner_infos[i].corner = face_find_corner_from_vert(faces[face], corner_verts, vert);
947 r_corner_infos[i].corner_prev = face_corner_prev(faces[face], r_corner_infos[i].corner);
948 r_corner_infos[i].corner_next = face_corner_next(faces[face], r_corner_infos[i].corner);
949 r_corner_infos[i].vert_prev = corner_verts[r_corner_infos[i].corner_prev];
950 r_corner_infos[i].vert_next = corner_verts[r_corner_infos[i].corner_next];
951 }
952}
953
955using EdgeUninitialized = std::monostate;
956
966
975
981struct EdgeSharp {};
982
983using VertEdgeInfo = std::variant<EdgeUninitialized, EdgeOneCorner, EdgeTwoCorners, EdgeSharp>;
984
985static void add_corner_to_edge(const Span<int> corner_edges,
986 const Span<bool> sharp_edges,
987 const int local_corner,
988 const int corner,
989 const int other_corner,
990 const bool winding_torwards_vert,
991 VertEdgeInfo &info)
992{
993 if (std::holds_alternative<EdgeUninitialized>(info)) {
994 if (!sharp_edges.is_empty()) {
995 /* The first time we encounter the edge, we check if it is marked sharp. In that case corner
996 * fans shouldn't propagate past it. To find the edge we need to check if the current corner
997 * references the edge connected to `other_corner` or if `other_corner` uses the edge. */
998 if (sharp_edges[corner_edges[winding_torwards_vert ? other_corner : corner]]) {
999 info = EdgeSharp{};
1000 return;
1001 }
1002 }
1003 info = EdgeOneCorner{local_corner, winding_torwards_vert};
1004 }
1005 else if (const EdgeOneCorner *info_one_edge = std::get_if<EdgeOneCorner>(&info)) {
1006 /* If the edge ends up being used by faces, we still have to check if the winding direction
1007 * changes. Though it's an undesirable situation for the mesh to be in, we shouldn't propagate
1008 * smooth normals across edges facing opposite directions. Breaking the flow on these winding
1009 * direction changes also simplifies the fan traversal later on; without it the we couldn't
1010 * traverse by just continuing to use the next/previous corner. */
1011 if (info_one_edge->winding_torwards_vert == winding_torwards_vert) {
1012 info = EdgeSharp{};
1013 return;
1014 }
1015 info = EdgeTwoCorners{info_one_edge->local_corner_1, local_corner};
1016 }
1017 else {
1018 /* The edge is either already sharp, or we're trying to add a third corner. */
1019 info = EdgeSharp{};
1020 }
1021}
1022
1025 16,
1031
1038 LocalEdgeVectorSet &r_other_vert_to_edge)
1039{
1040 r_other_vert_to_edge.reserve(corner_infos.size());
1041 for (VertCornerInfo &info : corner_infos) {
1042 info.local_edge_prev = r_other_vert_to_edge.index_of_or_add(info.vert_prev);
1043 info.local_edge_next = r_other_vert_to_edge.index_of_or_add(info.vert_next);
1044 }
1045}
1046
1047static void calc_connecting_edge_info(const Span<int> corner_edges,
1048 const Span<bool> sharp_edges,
1049 const Span<bool> sharp_faces,
1050 const Span<VertCornerInfo> corner_infos,
1051 MutableSpan<VertEdgeInfo> edge_infos)
1052{
1053 for (const int local_corner : corner_infos.index_range()) {
1054 const VertCornerInfo &info = corner_infos[local_corner];
1055 if (!sharp_faces.is_empty() && sharp_faces[info.face]) {
1056 /* Sharp faces implicitly cause sharp edges. */
1057 edge_infos[info.local_edge_prev] = EdgeSharp{};
1058 edge_infos[info.local_edge_next] = EdgeSharp{};
1059 continue;
1060 }
1061 /* The "previous" edge is winding towards the vertex, the "next" edge is winding away. */
1062 add_corner_to_edge(corner_edges,
1063 sharp_edges,
1064 local_corner,
1065 info.corner,
1066 info.corner_prev,
1067 true,
1068 edge_infos[info.local_edge_prev]);
1069 add_corner_to_edge(corner_edges,
1070 sharp_edges,
1071 local_corner,
1072 info.corner,
1073 info.corner_next,
1074 false,
1075 edge_infos[info.local_edge_next]);
1076 }
1077}
1078
/* NOTE(review): extraction dropped lines 1078-1084 (doc comment and the signature start
 * `static void traverse_fan_local_corners(const Span<VertCornerInfo> corner_infos,` —
 * confirm against the original source). Walks the "smooth fan" of corners connected to
 * `start_local_corner` through two-corner smooth edges, appending the visited local
 * corner indices to `result_fan` in a consistent order. */
1085                                       const Span<VertEdgeInfo> edge_infos,
1086                                       const int start_local_corner,
1087                                       Vector<int, 16> &result_fan)
1088{
1089  result_fan.append(start_local_corner);
1090  {
1091    /* Travel around the vertex in a right-handed clockwise direction (based on the normal). The
1092     * corners found in this traversal are reversed so the direction matches with the next
1093     * traversal (or so that the next traversal doesn't have to be added at the beginning of the
1094     * vector). */
1095    int current = start_local_corner;
1096    int local_edge = corner_infos[current].local_edge_next;
1097    bool found_cyclic_fan = false;
    /* The loop stops when the edge is not `EdgeTwoCorners` (sharp, single-corner, or
     * uninitialized), since `std::get_if` returns null for the other variant alternatives. */
1098    while (const EdgeTwoCorners *edge = std::get_if<EdgeTwoCorners>(&edge_infos[local_edge])) {
      /* Step across the edge to whichever of its two corners is not the current one. */
1099      current = current == edge->local_corner_1 ? edge->local_corner_2 : edge->local_corner_1;
1100      if (current == start_local_corner) {
1101        found_cyclic_fan = true;
1102        break;
1103      }
1104      result_fan.append(current);
1105      local_edge = corner_infos[current].local_edge_next;
1106    }
1107    /* Reverse the corners added so the final order is consistent with the next traversal. */
1108    result_fan.as_mutable_span().reverse();
1109
1110    if (found_cyclic_fan) {
1111      /* To match behavior from the previous implementation of face corner normal calculation, the
1112       * final fan is rotated so that the smallest face corner index comes first. */
1113      int *fan_first_corner = std::min_element(
1114          result_fan.begin(), result_fan.end(), [&](const int a, const int b) {
1115            return corner_infos[a].corner < corner_infos[b].corner;
1116          });
1117      std::rotate(result_fan.begin(), fan_first_corner, result_fan.end());
1118      return;
1119    }
1120  }
1121
1122  /* Travel in the other direction. */
1123  int current = start_local_corner;
1124  int local_edge = corner_infos[current].local_edge_prev;
1125  while (const EdgeTwoCorners *edge = std::get_if<EdgeTwoCorners>(&edge_infos[local_edge])) {
1126    current = current == edge->local_corner_1 ? edge->local_corner_2 : edge->local_corner_1;
1127    /* Cyclic fans have already been found, so there's no need to check for them here. */
1128    result_fan.append(current);
1129    local_edge = corner_infos[current].local_edge_prev;
1130  }
1131}
1132
1138static void calc_edge_directions(const Span<float3> vert_positions,
1139 const Span<int> local_edge_by_vert,
1140 const float3 &vert_position,
1141 MutableSpan<float3> edge_dirs)
1142{
1143 for (const int i : local_edge_by_vert.index_range()) {
1144 edge_dirs[i] = math::normalize(vert_positions[local_edge_by_vert[i]] - vert_position);
1145 }
1146}
1147
/* NOTE(review): extraction dropped lines 1148-1149 (the signature start
 * `static float3 accumulate_fan_normal(const Span<VertCornerInfo> corner_infos,` —
 * confirm against the original source). Returns the angle-weighted average of the
 * face normals of all corners in the smooth fan. */
1150                                    const Span<float3> edge_dirs,
1151                                    const Span<float3> face_normals,
1152                                    const Span<int> local_corners_in_fan)
1153{
1154  if (local_corners_in_fan.size() == 1) {
1155    /* Logically this special case is unnecessary, but due to floating point precision it is
1156     * required for the output to be the same as previous versions of the algorithm. */
1157    return face_normals[corner_infos[local_corners_in_fan.first()].face];
1158  }
1159  float3 fan_normal(0);
1160  for (const int local_corner : local_corners_in_fan) {
1161    const VertCornerInfo &info = corner_infos[local_corner];
1162    const float3 &dir_prev = edge_dirs[info.local_edge_prev];
1163    const float3 &dir_next = edge_dirs[info.local_edge_next];
    /* Weight each face normal by the corner's opening angle (the angle between its two
     * edge directions), so wide corners influence the result more than narrow ones. */
1164    const float factor = math::safe_acos_approx(math::dot(dir_prev, dir_next));
1165    fan_normal += face_normals[info.face] * factor;
1166  }
1167  return math::normalize(fan_normal);
1168}
1169
1170/* Don't inline this function to simplify the code path without custom normals. */
/* NOTE(review): extraction dropped line 1171, the signature start
 * `static BLI_NOINLINE void handle_fan_result_and_custom_normals(` — confirm against the
 * original source. Defines the corner normal space for the fan, optionally replaces
 * `fan_normal` with the decoded average custom normal, and records the space in
 * `r_fan_spaces` when requested. */
1172                                                     const Span<short2> custom_normals,
1173                                                     const Span<VertCornerInfo> corner_infos,
1174                                                     const Span<float3> edge_dirs,
1175                                                     const Span<int> local_corners_in_fan,
1176                                                     float3 &fan_normal,
1177                                                     CornerNormalSpaceArray *r_fan_spaces)
1178{
1179  const int local_edge_first = corner_infos[local_corners_in_fan.first()].local_edge_next;
1180  const int local_edge_last = corner_infos[local_corners_in_fan.last()].local_edge_prev;
1181
  /* Gather the "next" edge direction of every corner in the fan (plus the final "prev"
   * edge when it differs), used to define the reference frame of the normal space. */
1182  Vector<float3, 16> fan_edge_dirs;
1183  if (local_corners_in_fan.size() > 1) {
1184    fan_edge_dirs.reserve(local_corners_in_fan.size() + 1);
1185    for (const int local_corner : local_corners_in_fan) {
1186      const VertCornerInfo &info = corner_infos[local_corner];
1187      fan_edge_dirs.append_unchecked(edge_dirs[info.local_edge_next]);
1188    }
1189    if (local_edge_last != local_edge_first) {
1190      fan_edge_dirs.append_unchecked(edge_dirs[local_edge_last]);
1191    }
1192  }
1193
  /* NOTE(review): extraction dropped line 1194, presumably
   * `const CornerNormalSpace fan_space = corner_fan_space_define(` — confirm; `fan_space`
   * is used below. */
1195      fan_normal, edge_dirs[local_edge_first], edge_dirs[local_edge_last], fan_edge_dirs);
1196
1197  if (!custom_normals.is_empty()) {
    /* Average the encoded custom normals in integer space (widened to avoid overflow of
     * the short components), then decode the average in the fan's normal space. */
1198    int2 average_custom_normal(0);
1199    for (const int local_corner : local_corners_in_fan) {
1200      const VertCornerInfo &info = corner_infos[local_corner];
1201      average_custom_normal += int2(custom_normals[info.corner]);
1202    }
1203    average_custom_normal /= local_corners_in_fan.size();
1204    fan_normal = corner_space_custom_data_to_normal(fan_space, short2(average_custom_normal));
1205  }
1206
1207  if (r_fan_spaces) {
    /* The result array is shared between all threads; guard the appends with the mutex
     * (callers reduce parallelism in this case — see grain size in normals_calc_corners). */
1208    std::lock_guard lock(r_fan_spaces->build_mutex);
1209    r_fan_spaces->spaces.append(fan_space);
1210    const int fan_space_index = r_fan_spaces->spaces.size() - 1;
1211    for (const int local_corner : local_corners_in_fan) {
1212      const VertCornerInfo &info = corner_infos[local_corner];
1213      r_fan_spaces->corner_space_indices[info.corner] = fan_space_index;
1214    }
1215    if (r_fan_spaces->create_corners_by_space) {
1216      Array<int> corners_in_space(local_corners_in_fan.size());
1217      for (const int i : local_corners_in_fan.index_range()) {
1218        const VertCornerInfo &info = corner_infos[local_corners_in_fan[i]];
1219        corners_in_space[i] = info.corner;
1220      }
1221      r_fan_spaces->corners_by_space.append(std::move(corners_in_space));
1222    }
1223  }
1224}
1225
/* Compute smooth ("split") corner normals by partitioning the corners around each vertex
 * into smooth fans separated by sharp edges/faces, averaging face normals per fan. */
1226 void normals_calc_corners(const Span<float3> vert_positions,
/* NOTE(review): extraction dropped line 1227, presumably the parameter
 * `const OffsetIndices<int> faces,` (it is passed to collect_corner_info below) —
 * confirm against the original source. */
1228                           const Span<int> corner_verts,
1229                           const Span<int> corner_edges,
1230                           const GroupedSpan<int> vert_to_face_map,
1231                           const Span<float3> face_normals,
1232                           const Span<bool> sharp_edges,
1233                           const Span<bool> sharp_faces,
1234                           const Span<short2> custom_normals,
1235                           CornerNormalSpaceArray *r_fan_spaces,
1236                           MutableSpan<float3> r_corner_normals)
1237{
1238  if (r_fan_spaces) {
1239    /* These are potentially-wasteful over-allocations. */
1240    r_fan_spaces->spaces.reserve(corner_verts.size());
1241    r_fan_spaces->corner_space_indices.reinitialize(corner_verts.size());
1242    if (r_fan_spaces->create_corners_by_space) {
1243      r_fan_spaces->corners_by_space.reserve(corner_verts.size());
1244    }
1245  }
1246
1247  int64_t grain_size = 256;
1248  /* Decrease parallelism in case where lock is used to avoid contention. */
1249  if (!custom_normals.is_empty() || r_fan_spaces) {
1250    grain_size = std::max(int64_t(16384), vert_positions.size() / 2);
1251  }
1252
1253  threading::parallel_for(vert_positions.index_range(), grain_size, [&](const IndexRange range) {
    /* Scratch buffers hoisted out of the per-vertex loop so their capacity is reused;
     * the inline capacity of 16 covers typical vertex valences without allocating. */
1254    Vector<VertCornerInfo, 16> corner_infos;
1255    LocalEdgeVectorSet local_edge_by_vert;
1256    Vector<VertEdgeInfo, 16> edge_infos;
1257    Vector<float3, 16> edge_dirs;
1258    Vector<bool, 16> local_corner_visited;
1259    Vector<int, 16> corners_in_fan;
1260    for (const int vert : range) {
1261      const float3 vert_position = vert_positions[vert];
1262      const Span<int> vert_faces = vert_to_face_map[vert];
1263
1264      /* Because we're iterating over vertices in order to batch work for their connected face
1265       * corners, we have to handle loose vertices and vertices not used by faces. */
1266      if (vert_faces.is_empty()) {
1267        continue;
1268      }
1269
1270      corner_infos.resize(vert_faces.size());
1271      collect_corner_info(faces, corner_verts, vert_faces, vert, corner_infos);
1272
1273      local_edge_by_vert.clear_and_keep_capacity();
1274      calc_local_edge_indices(corner_infos, local_edge_by_vert);
1275
1276      edge_infos.clear();
1277      edge_infos.resize(local_edge_by_vert.size());
1278      calc_connecting_edge_info(corner_edges, sharp_edges, sharp_faces, corner_infos, edge_infos);
1279
1280      edge_dirs.resize(edge_infos.size());
1281      calc_edge_directions(vert_positions, local_edge_by_vert, vert_position, edge_dirs);
1282
1283      /* Though we are protected from traversing to the same corner twice by the fact that 3-way
1284       * connections are marked sharp, we need to maintain the "visited" status of each corner so
1285       * we can find the next start corner for each subsequent fan traversal. Keeping track of the
1286       * number of visited corners is a quick way to avoid this book keeping for the final fan (and
1287       * there are usually just two, so that should be worth it). */
1288      int visited_count = 0;
1289      local_corner_visited.resize(vert_faces.size());
1290      local_corner_visited.fill(false);
1291
1292      int start_local_corner = 0;
1293      while (start_local_corner != -1) {
1294        corners_in_fan.clear();
1295        traverse_fan_local_corners(corner_infos, edge_infos, start_local_corner, corners_in_fan);
1296
1297        float3 fan_normal = accumulate_fan_normal(
1298            corner_infos, edge_dirs, face_normals, corners_in_fan);
1299
1300        if (!custom_normals.is_empty() || r_fan_spaces) {
1301          handle_fan_result_and_custom_normals(
1302              custom_normals, corner_infos, edge_dirs, corners_in_fan, fan_normal, r_fan_spaces);
1303        }
1304
        /* Every corner in the fan shares the same normal. */
1305        for (const int local_corner : corners_in_fan) {
1306          const VertCornerInfo &info = corner_infos[local_corner];
1307          r_corner_normals[info.corner] = fan_normal;
1308        }
1309
1310        visited_count += corners_in_fan.size();
1311        if (visited_count == corner_infos.size()) {
1312          break;
1313        }
1314        local_corner_visited.as_mutable_span().fill_indices(corners_in_fan.as_span(), true);
1315        start_local_corner = local_corner_visited.first_index_of_try(false);
1316      }
1317      BLI_assert(visited_count == corner_infos.size());
1318    }
1319  });
1320}
1321
1322#undef INDEX_UNSET
1323#undef INDEX_INVALID
1324#undef IS_EDGE_SHARP
1325
1335
/* NOTE(review): extraction dropped lines 1325-1337 here, including the doc comment and the
 * signature start `static void mesh_normals_corner_custom_set(const Span<float3> positions,
 * const OffsetIndices<int> faces,` (matching the static declaration elsewhere in this
 * file) — confirm against the original source. Converts plain object-space custom normals
 * (per corner, or per vertex when `use_vertices` is true) into the compact `short2`
 * encoding in `r_clnors_data`, adding sharp edges where the custom normals split fans. */
1338                                           const Span<int> corner_verts,
1339                                           const Span<int> corner_edges,
1340                                           const GroupedSpan<int> vert_to_face_map,
1341                                           const Span<float3> vert_normals,
1342                                           const Span<float3> face_normals,
1343                                           const Span<bool> sharp_faces,
1344                                           const bool use_vertices,
1345                                           MutableSpan<float3> r_custom_corner_normals,
1346                                           MutableSpan<bool> sharp_edges,
1347                                           MutableSpan<short2> r_clnors_data)
1348{
1349  /* We *may* make that poor #bke::mesh::normals_calc_corners() even more complex by making it
1350   * handling that feature too, would probably be more efficient in absolute. However, this
1351   * function *is not* performance-critical, since it is mostly expected to be called by IO add-ons
1352   * when importing custom normals, and modifier (and perhaps from some editing tools later?). So
1353   * better to keep some simplicity here, and just call #bke::mesh::normals_calc_corners() twice!
1354   */
1355  CornerNormalSpaceArray lnors_spacearr;
1356  lnors_spacearr.create_corners_by_space = true;
1357  BitVector<> done_corners(corner_verts.size(), false);
1358  Array<float3> corner_normals(corner_verts.size());
1359  const Array<int> corner_to_face = build_corner_to_face_map(faces);
1360
1361  /* Compute current lnor spacearr. */
1362  normals_calc_corners(positions,
1363                       faces,
1364                       corner_verts,
1365                       corner_edges,
1366                       vert_to_face_map,
1367                       face_normals,
1368                       sharp_edges,
1369                       sharp_faces,
1370                       r_clnors_data,
1371                       &lnors_spacearr,
1372                       corner_normals);
1373
1374  /* Set all given zero vectors to their default value. */
1375  if (use_vertices) {
1376    for (const int i : positions.index_range()) {
1377      if (is_zero_v3(r_custom_corner_normals[i])) {
1378        copy_v3_v3(r_custom_corner_normals[i], vert_normals[i]);
1379      }
1380    }
1381  }
1382  else {
1383    for (const int i : corner_verts.index_range()) {
1384      if (is_zero_v3(r_custom_corner_normals[i])) {
1385        copy_v3_v3(r_custom_corner_normals[i], corner_normals[i]);
1386      }
1387    }
1388  }
1389
1390  /* Now, check each current smooth fan (one lnor space per smooth fan!),
1391   * and if all its matching custom corner_normals are not (enough) equal, add sharp edges as
1392   * needed. This way, next time we run bke::mesh::normals_calc_corners(), we'll get lnor
1393   * spacearr/smooth fans matching given custom corner_normals. Note this code *will never* unsharp
1394   * edges! And quite obviously, when we set custom normals per vertices, running this is
1395   * absolutely useless. */
1396  if (use_vertices) {
1397    done_corners.fill(true);
1398  }
1399  else {
1400    for (const int i : corner_verts.index_range()) {
1401      if (lnors_spacearr.corner_space_indices[i] == -1) {
1402        /* This should not happen in theory, but in some rare case (probably ugly geometry)
1403         * we can get some missing loopspacearr at this point. :/
1404         * Maybe we should set those corners' edges as sharp? */
1405        done_corners[i].set();
1406        if (G.debug & G_DEBUG) {
1407          printf("WARNING! Getting invalid nullptr corner space for corner %d!\n", i);
1408        }
1409        continue;
1410      }
1411      if (done_corners[i]) {
1412        continue;
1413      }
1414
1415      const int space_index = lnors_spacearr.corner_space_indices[i];
1416      const Span<int> fan_corners = lnors_spacearr.corners_by_space[space_index];
1417
1418      /* Notes:
1419       * - In case of mono-corner smooth fan, we have nothing to do.
1420       * - Loops in this linklist are ordered (in reversed order compared to how they were
1421       *   discovered by bke::mesh::normals_calc_corners(), but this is not a problem).
1422       *   Which means if we find a mismatching clnor,
1423       *   we know all remaining corners will have to be in a new, different smooth fan/lnor space.
1424       * - In smooth fan case, we compare each clnor against a ref one,
1425       *   to avoid small differences adding up into a real big one in the end!
1426       */
1427      if (fan_corners.is_empty()) {
1428        done_corners[i].set();
1429        continue;
1430      }
1431
1432      int prev_corner = -1;
1433      const float *org_nor = nullptr;
1434
      /* NOTE(review): this inner `i` shadows the outer loop's `i`; behavior is correct
       * but worth renaming at the next refactor. */
1435      for (int i = fan_corners.index_range().last(); i >= 0; i--) {
1436        const int corner = fan_corners[i];
1437        float *nor = r_custom_corner_normals[corner];
1438
1439        if (!org_nor) {
1440          org_nor = nor;
1441        }
1442        else if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
1443          /* Current normal differs too much from org one, we have to tag the edge between
1444           * previous corner's face and current's one as sharp.
1445           * We know those two corners do not point to the same edge,
1446           * since we do not allow reversed winding in a same smooth fan. */
1447          const IndexRange face = faces[corner_to_face[corner]];
1448          const int corner_prev = face_corner_prev(face, corner);
1449          const int edge = corner_edges[corner];
1450          const int edge_prev = corner_edges[corner_prev];
1451          const int prev_edge = corner_edges[prev_corner];
1452          sharp_edges[prev_edge == edge_prev ? prev_edge : edge] = true;
1453
1454          org_nor = nor;
1455        }
1456
1457        prev_corner = corner;
1458        done_corners[corner].set();
1459      }
1460
1461      /* We also have to check between last and first corners,
1462       * otherwise we may miss some sharp edges here!
1463       * This is just a simplified version of above while loop.
1464       * See #45984. */
1465      if (fan_corners.size() > 1 && org_nor) {
1466        const int corner = fan_corners.last();
1467        float *nor = r_custom_corner_normals[corner];
1468
1469        if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
1470          const IndexRange face = faces[corner_to_face[corner]];
1471          const int corner_prev = face_corner_prev(face, corner);
1472          const int edge = corner_edges[corner];
1473          const int edge_prev = corner_edges[corner_prev];
1474          const int prev_edge = corner_edges[prev_corner];
1475          sharp_edges[prev_edge == edge_prev ? prev_edge : edge] = true;
1476        }
1477      }
1478    }
1479
1480    /* And now, recompute our new auto `corner_normals` and lnor spacearr! */
1481    normals_calc_corners(positions,
1482                         faces,
1483                         corner_verts,
1484                         corner_edges,
1485                         vert_to_face_map,
1486                         face_normals,
1487                         sharp_edges,
1488                         sharp_faces,
1489                         r_clnors_data,
1490                         &lnors_spacearr,
1491                         corner_normals);
1492  }
1493
1494  /* And we just have to convert plain object-space custom normals to our
1495   * lnor space-encoded ones. */
1496  for (const int i : corner_verts.index_range()) {
1497    if (lnors_spacearr.corner_space_indices[i] == -1) {
1498      done_corners[i].reset();
1499      if (G.debug & G_DEBUG) {
1500        printf("WARNING! Still getting invalid nullptr corner space in second for loop %d!\n", i);
1501      }
1502      continue;
1503    }
1504    if (!done_corners[i]) {
1505      continue;
1506    }
1507
1508    const int space_index = lnors_spacearr.corner_space_indices[i];
1509    const Span<int> fan_corners = lnors_spacearr.corners_by_space[space_index];
1510
1511    /* Note we accumulate and average all custom normals in current smooth fan,
1512     * to avoid getting different clnors data (tiny differences in plain custom normals can
1513     * give rather huge differences in computed 2D factors). */
1514    if (fan_corners.size() < 2) {
1515      const int nidx = use_vertices ? corner_verts[i] : i;
1516      r_clnors_data[i] = corner_space_custom_normal_to_data(lnors_spacearr.spaces[space_index],
1517                                                            r_custom_corner_normals[nidx]);
1518      done_corners[i].reset();
1519    }
1520    else {
1521      float3 avg_nor(0.0f);
1522      for (const int corner : fan_corners) {
1523        const int nidx = use_vertices ? corner_verts[corner] : corner;
1524        avg_nor += r_custom_corner_normals[nidx];
1525        done_corners[corner].reset();
1526      }
1527
1528      mul_v3_fl(avg_nor, 1.0f / float(fan_corners.size()));
      /* NOTE(review): extraction dropped line 1529, presumably
       * `const short2 clnor_data_tmp = corner_space_custom_normal_to_data(` — confirm;
       * `clnor_data_tmp` is used below. */
1530          lnors_spacearr.spaces[space_index], avg_nor);
1531
1532      r_clnors_data.fill_indices(fan_corners, clnor_data_tmp);
1533    }
1534  }
1535}
1536
/* Thin public wrapper: set custom normals provided per face corner
 * (`use_vertices = false`). */
1537 void normals_corner_custom_set(const Span<float3> vert_positions,
/* NOTE(review): extraction dropped line 1538, presumably the parameter
 * `const OffsetIndices<int> faces,` (forwarded below) — confirm against the original
 * source. */
1539                               const Span<int> corner_verts,
1540                               const Span<int> corner_edges,
1541                               const GroupedSpan<int> vert_to_face_map,
1542                               const Span<float3> vert_normals,
1543                               const Span<float3> face_normals,
1544                               const Span<bool> sharp_faces,
1545                               MutableSpan<bool> sharp_edges,
1546                               MutableSpan<float3> r_custom_corner_normals,
1547                               MutableSpan<short2> r_clnors_data)
1548{
1549  mesh_normals_corner_custom_set(vert_positions,
1550                                 faces,
1551                                 corner_verts,
1552                                 corner_edges,
1553                                 vert_to_face_map,
1554                                 vert_normals,
1555                                 face_normals,
1556                                 sharp_faces,
1557                                 false,
1558                                 r_custom_corner_normals,
1559                                 sharp_edges,
1560                                 r_clnors_data);
1561}
1562
/* NOTE(review): extraction dropped lines 1563-1564, the signature start
 * `void normals_corner_custom_set_from_verts(const Span<float3> vert_positions,
 * const OffsetIndices<int> faces,` — confirm against the original source.
 * Thin public wrapper: set custom normals provided per vertex (`use_vertices = true`). */
1565                                           const Span<int> corner_verts,
1566                                           const Span<int> corner_edges,
1567                                           const GroupedSpan<int> vert_to_face_map,
1568                                           const Span<float3> vert_normals,
1569                                           const Span<float3> face_normals,
1570                                           const Span<bool> sharp_faces,
1571                                           MutableSpan<bool> sharp_edges,
1572                                           MutableSpan<float3> r_custom_vert_normals,
1573                                           MutableSpan<short2> r_clnors_data)
1574{
1575  mesh_normals_corner_custom_set(vert_positions,
1576                                 faces,
1577                                 corner_verts,
1578                                 corner_edges,
1579                                 vert_to_face_map,
1580                                 vert_normals,
1581                                 face_normals,
1582                                 sharp_faces,
1583                                 true,
1584                                 r_custom_vert_normals,
1585                                 sharp_edges,
1586                                 r_clnors_data);
1587}
1588
/* NOTE(review): extraction dropped line 1589, the signature start
 * `static void mesh_set_custom_normals(Mesh &mesh,` — confirm against the original
 * source. Writes custom normals into the mesh's "custom_normal" corner attribute,
 * creating it (and "sharp_edge") as needed. */
1590                                    MutableSpan<float3> r_custom_nors,
1591                                    const bool use_vertices)
1592{
1593  MutableAttributeAccessor attributes = mesh.attributes_for_write();
1594  SpanAttributeWriter custom_normals = attributes.lookup_or_add_for_write_span<short2>(
1595      "custom_normal", AttrDomain::Corner);
  /* The writer can be invalid (e.g. if the attribute exists with an incompatible
   * type/domain); in that case there is nothing to write to. */
1596  if (!custom_normals) {
1597    return;
1598  }
1599  SpanAttributeWriter<bool> sharp_edges = attributes.lookup_or_add_for_write_span<bool>(
1600      "sharp_edge", AttrDomain::Edge);
1601  const VArraySpan sharp_faces = *attributes.lookup<bool>("sharp_face", AttrDomain::Face);
1602
1603  mesh_normals_corner_custom_set(mesh.vert_positions(),
1604                                 mesh.faces(),
1605                                 mesh.corner_verts(),
1606                                 mesh.corner_edges(),
1607                                 mesh.vert_to_face_map(),
1608                                 mesh.vert_normals_true(),
1609                                 mesh.face_normals_true(),
1610                                 sharp_faces,
1611                                 use_vertices,
1612                                 r_custom_nors,
1613                                 sharp_edges.span,
1614                                 custom_normals.span);
1615
  /* Commit both attribute writers so caches are invalidated properly. */
1616  sharp_edges.finish();
1617  custom_normals.finish();
1618}
1619
1620} // namespace mesh
1621
/* NOTE(review): extraction dropped line 1622, the signature
 * `static void normalize_vecs(MutableSpan<float3> normals)` — confirm against the
 * original source. Normalizes every vector in place, in parallel. */
1623{
1624  threading::parallel_for(normals.index_range(), 4096, [&](const IndexRange range) {
1625    for (const int i : range) {
1626      normals[i] = math::normalize(normals[i]);
1627    }
1628  });
1629}
1630
/* NOTE(review): extraction dropped line 1631, presumably the signature of
 * `mesh_set_custom_normals_normalized(Mesh &mesh, MutableSpan<float3> corner_normals)` —
 * confirm. Normalizes the inputs before applying them as per-corner custom normals. */
1632{
1633  normalize_vecs(corner_normals);
1634  mesh::mesh_set_custom_normals(mesh, corner_normals, false);
1635}
1636
/* NOTE(review): extraction dropped line 1637, presumably the signature of
 * `mesh_set_custom_normals(Mesh &mesh, MutableSpan<float3> corner_normals)` — confirm.
 * Applies per-corner custom normals without normalizing them first. */
1638{
1639  mesh::mesh_set_custom_normals(mesh, corner_normals, false);
1640}
1641
/* NOTE(review): extraction dropped line 1642, presumably the signature of
 * `mesh_set_custom_normals_from_verts_normalized(Mesh &mesh, MutableSpan<float3>
 * vert_normals)` — confirm. Normalizes the inputs before applying them as per-vertex
 * custom normals. */
1643{
1644  normalize_vecs(vert_normals);
1645  mesh::mesh_set_custom_normals(mesh, vert_normals, true);
1646}
1647
1652
1653} // namespace blender::bke
1654
1655#undef LNOR_SPACE_TRIGO_THRESHOLD
1656
@ G_DEBUG
void BKE_lnor_space_custom_data_to_normal(const MLoopNorSpace *lnor_space, const short clnor_data[2], float r_custom_lnor[3])
MLoopNorSpace * BKE_lnor_space_create(MLoopNorSpaceArray *lnors_spacearr)
@ MLNOR_SPACEARR_LOOP_INDEX
Definition BKE_mesh.h:292
@ MLNOR_SPACEARR_BMLOOP_PTR
Definition BKE_mesh.h:293
void BKE_lnor_space_custom_normal_to_data(const MLoopNorSpace *lnor_space, const float custom_lnor[3], short r_clnor_data[2])
bool BKE_mesh_vert_normals_are_dirty(const Mesh *mesh)
void BKE_lnor_space_add_loop(MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpace *lnor_space, int corner, void *bm_loop, bool is_single)
void BKE_lnor_spacearr_clear(MLoopNorSpaceArray *lnors_spacearr)
void BKE_lnor_spacearr_init(MLoopNorSpaceArray *lnors_spacearr, int numLoops, char data_type)
bool BKE_mesh_face_normals_are_dirty(const Mesh *mesh)
@ MLNOR_SPACE_IS_SINGLE
Definition BKE_mesh.h:271
void BKE_lnor_space_define(MLoopNorSpace *lnor_space, const float lnor[3], const float vec_ref[3], const float vec_other[3], blender::Span< blender::float3 > edge_vectors)
void BKE_lnor_spacearr_free(MLoopNorSpaceArray *lnors_spacearr)
void BKE_lnor_spacearr_tls_join(MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpaceArray *lnors_spacearr_tls)
void BKE_lnor_spacearr_tls_init(MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpaceArray *lnors_spacearr_tls)
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_NOINLINE
#define BLI_ASSERT_UNIT_V3(v)
#define M_PI
float normal_quad_v3(float n[3], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
Definition math_geom.cc:58
#define MINLINE
MINLINE void madd_v3_v3fl(float r[3], const float a[3], float f)
MINLINE void add_newell_cross_v3_v3v3(float n[3], const float v_prev[3], const float v_curr[3])
MINLINE void mul_v3_fl(float r[3], float f)
MINLINE void copy_v3_v3(float r[3], const float a[3])
MINLINE float dot_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void copy_v2_v2_short(short r[2], const short a[2])
MINLINE bool compare_v3v3(const float v1[3], const float v2[3], float limit) ATTR_WARN_UNUSED_RESULT
MINLINE bool is_zero_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void mul_v3_v3fl(float r[3], const float a[3], float f)
MINLINE float normalize_v3(float n[3])
void BLI_memarena_merge(MemArena *ma_dst, MemArena *ma_src)
#define BLI_MEMARENA_STD_BUFSIZE
MemArena * BLI_memarena_new(size_t bufsize, const char *name) ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL ATTR_NONNULL(2) ATTR_MALLOC
void * BLI_memarena_calloc(MemArena *ma, size_t size) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1) ATTR_MALLOC ATTR_ALLOC_SIZE(2)
void BLI_memarena_free(MemArena *ma) ATTR_NONNULL(1)
void * BLI_memarena_alloc(MemArena *ma, size_t size) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1) ATTR_MALLOC ATTR_ALLOC_SIZE(2)
void BLI_memarena_clear(MemArena *ma) ATTR_NONNULL(1)
#define POINTER_FROM_INT(i)
#define UNLIKELY(x)
#define ELEM(...)
#define LIKELY(x)
float[3] Vector
volatile int lock
BMesh const char void * data
long long int int64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:398
void reinitialize(const int64_t new_size)
bool is() const
constexpr int64_t last(const int64_t n=0) const
constexpr int64_t size() const
Definition BLI_span.hh:493
constexpr void fill_indices(Span< IndexT > indices, const T &value) const
Definition BLI_span.hh:526
constexpr const T & first() const
Definition BLI_span.hh:315
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr const T & last(const int64_t n=0) const
Definition BLI_span.hh:325
constexpr IndexRange index_range() const
Definition BLI_span.hh:401
constexpr bool is_empty() const
Definition BLI_span.hh:260
int64_t index_of_or_add(const Key &key)
void reserve(const int64_t n)
void append(const T &value)
MutableSpan< T > as_mutable_span()
void append_unchecked(const T &value)
void reserve(const int64_t min_capacity)
void fill(const bool value)
GAttributeReader lookup(const StringRef attribute_id) const
GAttributeReader lookup_or_default(StringRef attribute_id, AttrDomain domain, eCustomDataType data_type, const void *default_value=nullptr) const
std::optional< AttributeMetaData > lookup_meta_data(StringRef attribute_id) const
GSpanAttributeWriter lookup_or_add_for_write_span(StringRef attribute_id, AttrDomain domain, eCustomDataType data_type, const AttributeInit &initializer=AttributeInitDefaultValue())
void store_varray(const VArray< float3 > &data)
std::variant< UseTrueCache, Vector< float3 >, Span< float3 > > data
MutableSpan< float3 > ensure_vector_size(const int size)
Span< float3 > get_span() const
void store_vector(Vector< float3 > &&data)
#define sinf(x)
#define cosf(x)
#define floorf(x)
static float normals[][3]
uint nor
VecBase< int, 2 > int2
VecBase< short, 2 > short2
#define this
#define printf(...)
ccl_device_inline float beta(const float x, const float y)
Definition math_base.h:651
static char faces[256]
#define G(x, y, z)
#define INDEX_INVALID
MINLINE short unit_float_to_short(const float val)
#define INDEX_UNSET
#define LNOR_SPACE_TRIGO_THRESHOLD
MINLINE float unit_short_to_float(const short val)
#define IS_EDGE_SHARP(_e2l)
void gather_to_groups(const OffsetIndices< int > dst_offsets, const IndexMask &src_selection, const Span< T > src, MutableSpan< T > dst)
BooleanMix booleans_mix_calc(const VArray< bool > &varray, IndexRange range_to_check)
void gather(const GVArray &src, const IndexMask &indices, GMutableSpan dst, int64_t grain_size=4096)
GAttributeReader lookup(const void *owner, const StringRef name)
std::monostate EdgeUninitialized
static CornerNormalSpace corner_fan_space_define(const float3 &lnor, const float3 &vec_ref, const float3 &vec_other, const Span< float3 > edge_vectors)
static float3 normal_calc_ngon(const Span< float3 > vert_positions, const Span< int > face_verts)
static void mesh_normals_corner_custom_set(const Span< float3 > positions, const OffsetIndices< int > faces, const Span< int > corner_verts, const Span< int > corner_edges, const GroupedSpan< int > vert_to_face_map, const Span< float3 > vert_normals, const Span< float3 > face_normals, const Span< bool > sharp_faces, const bool use_vertices, MutableSpan< float3 > r_custom_corner_normals, MutableSpan< bool > sharp_edges, MutableSpan< short2 > r_clnors_data)
static float3 corner_space_custom_data_to_normal(const CornerNormalSpace &lnor_space, const short2 clnor_data)
float3 face_normal_calc(Span< float3 > vert_positions, Span< int > face_verts)
static void calc_edge_directions(const Span< float3 > vert_positions, const Span< int > local_edge_by_vert, const float3 &vert_position, MutableSpan< float3 > edge_dirs)
static void mix_normals_vert_to_face(const OffsetIndices< int > faces, const Span< int > corner_verts, const Span< float3 > vert_normals, MutableSpan< float3 > face_normals)
int face_corner_prev(const IndexRange face, const int corner)
Definition BKE_mesh.hh:290
static void mix_normals_corner_to_vert(const Span< float3 > vert_positions, const OffsetIndices< int > faces, const Span< int > corner_verts, const GroupedSpan< int > vert_to_face_map, const Span< float3 > corner_normals, MutableSpan< float3 > vert_normals)
static BLI_NOINLINE void handle_fan_result_and_custom_normals(const Span< short2 > custom_normals, const Span< VertCornerInfo > corner_infos, const Span< float3 > edge_dirs, const Span< int > local_corners_in_fan, float3 &fan_normal, CornerNormalSpaceArray *r_fan_spaces)
int face_find_corner_from_vert(const IndexRange face, const Span< int > corner_verts, const int vert)
Definition BKE_mesh.hh:311
static void calc_local_edge_indices(MutableSpan< VertCornerInfo > corner_infos, LocalEdgeVectorSet &r_other_vert_to_edge)
static void mesh_set_custom_normals(Mesh &mesh, MutableSpan< float3 > r_custom_nors, const bool use_vertices)
static void traverse_fan_local_corners(const Span< VertCornerInfo > corner_infos, const Span< VertEdgeInfo > edge_infos, const int start_local_corner, Vector< int, 16 > &result_fan)
static float3 accumulate_fan_normal(const Span< VertCornerInfo > corner_infos, const Span< float3 > edge_dirs, const Span< float3 > face_normals, const Span< int > local_corners_in_fan)
static void calc_connecting_edge_info(const Span< int > corner_edges, const Span< bool > sharp_edges, const Span< bool > sharp_faces, const Span< VertCornerInfo > corner_infos, MutableSpan< VertEdgeInfo > edge_infos)
Array< int > build_corner_to_face_map(OffsetIndices< int > faces)
static void add_corner_to_edge(const Span< int > corner_edges, const Span< bool > sharp_edges, const int local_corner, const int corner, const int other_corner, const bool winding_torwards_vert, VertEdgeInfo &info)
static void mix_normals_corner_to_face(const OffsetIndices< int > faces, const Span< float3 > corner_normals, MutableSpan< float3 > face_normals)
void normals_calc_corners(Span< float3 > vert_positions, OffsetIndices< int > faces, Span< int > corner_verts, Span< int > corner_edges, GroupedSpan< int > vert_to_face_map, Span< float3 > face_normals, Span< bool > sharp_edges, Span< bool > sharp_faces, Span< short2 > custom_normals, CornerNormalSpaceArray *r_fan_spaces, MutableSpan< float3 > r_corner_normals)
void normals_corner_custom_set_from_verts(Span< float3 > vert_positions, OffsetIndices< int > faces, Span< int > corner_verts, Span< int > corner_edges, GroupedSpan< int > vert_to_face_map, Span< float3 > vert_normals, Span< float3 > face_normals, Span< bool > sharp_faces, MutableSpan< bool > sharp_edges, MutableSpan< float3 > r_custom_vert_normals, MutableSpan< short2 > r_clnors_data)
VectorSet< int, 16, DefaultProbingStrategy, DefaultHash< int >, DefaultEquality< int >, SimpleVectorSetSlot< int, int >, GuardedAllocator > LocalEdgeVectorSet
static void collect_corner_info(const OffsetIndices< int > faces, const Span< int > corner_verts, const Span< int > vert_faces, const int vert, MutableSpan< VertCornerInfo > r_corner_infos)
short2 corner_space_custom_normal_to_data(const CornerNormalSpace &lnor_space, const float3 &custom_lnor)
void edges_sharp_from_angle_set(OffsetIndices< int > faces, Span< int > corner_verts, Span< int > corner_edges, Span< float3 > face_normals, Span< int > corner_to_face, Span< bool > sharp_faces, const float split_angle, MutableSpan< bool > sharp_edges)
int face_corner_next(const IndexRange face, const int corner)
Definition BKE_mesh.hh:299
void normals_calc_verts(Span< float3 > vert_positions, OffsetIndices< int > faces, Span< int > corner_verts, GroupedSpan< int > vert_to_face_map, Span< float3 > face_normals, MutableSpan< float3 > vert_normals)
static void mesh_edges_sharp_tag(const OffsetIndices< int > faces, const Span< int > corner_verts, const Span< int > corner_edges, const Span< int > corner_to_face_map, const Span< float3 > face_normals, const Span< bool > sharp_faces, const Span< bool > sharp_edges, const float split_angle, MutableSpan< int2 > edge_to_corners, MutableSpan< bool > r_sharp_edges)
void normals_corner_custom_set(Span< float3 > vert_positions, OffsetIndices< int > faces, Span< int > corner_verts, Span< int > corner_edges, GroupedSpan< int > vert_to_face_map, Span< float3 > vert_normals, Span< float3 > face_normals, Span< bool > sharp_faces, MutableSpan< bool > sharp_edges, MutableSpan< float3 > r_custom_corner_normals, MutableSpan< short2 > r_clnors_data)
std::variant< EdgeUninitialized, EdgeOneCorner, EdgeTwoCorners, EdgeSharp > VertEdgeInfo
void normals_calc_faces(Span< float3 > vert_positions, OffsetIndices< int > faces, Span< int > corner_verts, MutableSpan< float3 > face_normals)
static void normalize_vecs(MutableSpan< float3 > normals)
void mesh_set_custom_normals_normalized(Mesh &mesh, MutableSpan< float3 > corner_normals)
void mesh_set_custom_normals_from_verts(Mesh &mesh, MutableSpan< float3 > vert_normals)
void mesh_vert_normals_assign(Mesh &mesh, Span< float3 > vert_normals)
void mesh_set_custom_normals_from_verts_normalized(Mesh &mesh, MutableSpan< float3 > vert_normals)
void mesh_set_custom_normals(Mesh &mesh, MutableSpan< float3 > corner_normals)
VecBase< T, 3 > normal_tri(const VecBase< T, 3 > &v1, const VecBase< T, 3 > &v2, const VecBase< T, 3 > &v3)
float safe_acos_approx(float x)
T dot(const QuaternionBase< T > &a, const QuaternionBase< T > &b)
bool is_zero(const T &a)
AxisSigned cross(const AxisSigned a, const AxisSigned b)
MatBase< T, NumCol, NumRow > normalize(const MatBase< T, NumCol, NumRow > &a)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
VecBase< int32_t, 2 > int2
PythonProbingStrategy<> DefaultProbingStrategy
VecBase< float, 3 > float3
blender::VecBase< int16_t, 2 > short2
struct LinkNode * loops_pool
Definition BKE_mesh.h:281
struct MemArena * mem
Definition BKE_mesh.h:286
MLoopNorSpace ** lspacearr
Definition BKE_mesh.h:279
float ref_alpha
Definition BKE_mesh.h:249
float vec_ortho[3]
Definition BKE_mesh.h:241
float ref_beta
Definition BKE_mesh.h:258
float vec_ref[3]
Definition BKE_mesh.h:239
float vec_lnor[3]
Definition BKE_mesh.h:234
struct LinkNode * loops
Definition BKE_mesh.h:264
MeshRuntimeHandle * runtime
Vector< Array< int > > corners_by_space
Definition BKE_mesh.hh:178
Vector< CornerNormalSpace > spaces
Definition BKE_mesh.hh:166
i
Definition text_draw.cc:230