30 if (uv_a[0] == uv_b[0] && uv_a[1] == uv_b[1]) {
47 const float diff_abs = 1e-12f;
48 const int diff_ulp = 12;
60 if (loops_for_vert.
size() <= 1) {
66 for (
float2 *mloopuv : mloopuv_layers) {
69 while (loops_merge.
size() > 1) {
71 const float *uv_src = mloopuv[loops_merge[0]];
72 for (
uint i = 1; i <= i_last;) {
73 float *uv_dst = mloopuv[loops_merge[i]];
76 uv_dst[0] = uv_src[0];
77 uv_dst[1] = uv_src[1];
81 loops_merge[i] = loops_merge[i_last--];
95 loops_merge[0] = loops_merge[i_last];
96 loops_merge.resize(i_last);
104 if (mesh->corners_num == 0) {
108 if (mloopuv_layers_num == 0) {
115 mloopuv_layers.
reserve(mloopuv_layers_num);
116 for (
int a = 0; a < mloopuv_layers_num; a++) {
125 for (const int64_t v_index : range) {
126 merge_uvs_for_vertex(vert_to_corner[v_index], mloopuv_layers_as_span);
CustomData interface, see also DNA_customdata_types.h.
int CustomData_number_of_layers(const CustomData *data, eCustomDataType type)
void * CustomData_get_layer_n_for_write(CustomData *data, eCustomDataType type, int n, int totelem)
#define BLI_assert_unreachable()
MINLINE int compare_ff_relative(float a, float b, float max_diff, int max_ulps)
Guarded memory (de)allocation.
constexpr int64_t size() const
void extend_unchecked(Span< T > array)
void append_unchecked(const T &value)
void reserve(const int64_t min_capacity)
Span< T > as_span() const
static int compare_v2_classify(const float uv_a[2], const float uv_b[2])
void BKE_mesh_merge_customdata_for_apply_modifier(Mesh *mesh)
static void merge_uvs_for_vertex(const Span< int > loops_for_vert, Span< float2 * > mloopuv_layers)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))