Blender V5.0
bmesh_interp.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2007 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
10
11#include "MEM_guardedalloc.h"
12
13#include "DNA_meshdata_types.h"
14
15#include "BLI_alloca.h"
16#include "BLI_linklist.h"
17#include "BLI_math_geom.h"
18#include "BLI_math_matrix.h"
19#include "BLI_math_vector.h"
20#include "BLI_memarena.h"
21#include "BLI_task.h"
22
23#include "BKE_attribute.h"
24#include "BKE_attribute.hh"
26#include "BKE_customdata.hh"
27#include "BKE_multires.hh"
28
29#include "bmesh.hh"
31
33
34/* edge and vertex share, currently there's no need to have different logic */
35static void bm_data_interp_from_elem(CustomData *data_layer,
36 const BMElem *ele_src_1,
37 const BMElem *ele_src_2,
38 BMElem *ele_dst,
39 const float fac)
40{
41 if (ele_src_1->head.data && ele_src_2->head.data) {
42 /* first see if we can avoid interpolation */
43 if (fac <= 0.0f) {
44 if (ele_src_1 == ele_dst) {
45 /* do nothing */
46 }
47 else {
48 CustomData_bmesh_copy_block(*data_layer, ele_src_1->head.data, &ele_dst->head.data);
49 }
50 }
51 else if (fac >= 1.0f) {
52 if (ele_src_2 == ele_dst) {
53 /* do nothing */
54 }
55 else {
56 CustomData_bmesh_copy_block(*data_layer, ele_src_2->head.data, &ele_dst->head.data);
57 }
58 }
59 else {
60 const void *src[2];
61 float w[2];
62
63 src[0] = ele_src_1->head.data;
64 src[1] = ele_src_2->head.data;
65 w[0] = 1.0f - fac;
66 w[1] = fac;
67 CustomData_bmesh_interp(data_layer, src, w, 2, ele_dst->head.data);
68 }
69 }
70}
71
73 BMesh *bm, const BMVert *v_src_1, const BMVert *v_src_2, BMVert *v_dst, const float fac)
74{
76 &bm->vdata, (const BMElem *)v_src_1, (const BMElem *)v_src_2, (BMElem *)v_dst, fac);
77}
78
80 BMesh *bm, const BMEdge *e_src_1, const BMEdge *e_src_2, BMEdge *e_dst, const float fac)
81{
83 &bm->edata, (const BMElem *)e_src_1, (const BMElem *)e_src_2, (BMElem *)e_dst, fac);
84}
85
/**
 * Placeholder for averaging vertex custom-data over a face; the body was
 * never implemented (only a commented-out iterator remains), and the
 * function is wrapped in UNUSED_FUNCTION to silence unused warnings.
 */
static void UNUSED_FUNCTION(BM_Data_Vert_Average)(BMesh * /*bm*/, BMFace * /*f*/)
{
  // BMIter iter;
}
96
98 const BMVert *v_src_1,
99 const BMVert * /*v_src_2*/,
100 BMVert *v,
101 BMEdge *e,
102 const float fac)
103{
104 float w[2];
105 BMLoop *l_v1 = nullptr, *l_v = nullptr, *l_v2 = nullptr;
106 BMLoop *l_iter = nullptr;
107
108 if (!e->l) {
109 return;
110 }
111
112 w[1] = 1.0f - fac;
113 w[0] = fac;
114
115 l_iter = e->l;
116 do {
117 if (l_iter->v == v_src_1) {
118 l_v1 = l_iter;
119 l_v = l_v1->next;
120 l_v2 = l_v->next;
121 }
122 else if (l_iter->v == v) {
123 l_v1 = l_iter->next;
124 l_v = l_iter;
125 l_v2 = l_iter->prev;
126 }
127
128 if (!l_v1 || !l_v2) {
129 return;
130 }
131
132 const void *src[2];
133 src[0] = l_v1->head.data;
134 src[1] = l_v2->head.data;
135
136 CustomData_bmesh_interp(&bm->ldata, src, w, 2, l_v->head.data);
137 } while ((l_iter = l_iter->radial_next) != e->l);
138}
139
141 BMFace *f_dst,
142 const BMFace *f_src,
143 const bool do_vertex,
144 const void **blocks_l,
145 const void **blocks_v,
146 float (*cos_2d)[2],
147 float axis_mat[3][3])
148{
149 BMLoop *l_iter;
150 BMLoop *l_first;
151
152 float *w = static_cast<float *>(BLI_array_alloca(w, f_src->len));
153 float co[2];
154
155 /* interpolate */
156 l_iter = l_first = BM_FACE_FIRST_LOOP(f_dst);
157 do {
158 mul_v2_m3v3(co, axis_mat, l_iter->v->co);
159 interp_weights_poly_v2(w, cos_2d, f_src->len, co);
160 CustomData_bmesh_interp(&bm->ldata, blocks_l, w, f_src->len, l_iter->head.data);
161 if (do_vertex) {
162 CustomData_bmesh_interp(&bm->vdata, blocks_v, w, f_src->len, l_iter->v->head.data);
163 }
164 } while ((l_iter = l_iter->next) != l_first);
165}
166
/**
 * Interpolate loop custom-data (and optionally vertex custom-data) of `f_dst`
 * from `f_src`: the source face is projected to 2D along its normal, and each
 * destination loop is blended from the source corners with polygon
 * interpolation weights (see #BM_face_interp_from_face_ex).
 */
void BM_face_interp_from_face(BMesh *bm, BMFace *f_dst, const BMFace *f_src, const bool do_vertex)
{
  BMLoop *l_iter;
  BMLoop *l_first;

  /* Per source corner: its loop custom-data block and (optionally) its vertex block. */
  const void **blocks_l = static_cast<const void **>(BLI_array_alloca(blocks_l, f_src->len));
  const void **blocks_v = do_vertex ?
                              static_cast<const void **>(BLI_array_alloca(blocks_v, f_src->len)) :
                              nullptr;
  /* Projected 2D positions of the source face corners. */
  float (*cos_2d)[2] = static_cast<float (*)[2]>(BLI_array_alloca(cos_2d, f_src->len));
  float axis_mat[3][3]; /* use normal to transform into 2d xy coords */
  int i;

  /* convert the 3d coords into 2d for projection */
  /* NOTE(review): an assertion validating `f_src->no` is elided in this view — confirm. */
  axis_dominant_v3_to_m3(axis_mat, f_src->no);

  /* Gather 2D coords and data blocks from every source corner. */
  i = 0;
  l_iter = l_first = BM_FACE_FIRST_LOOP(f_src);
  do {
    mul_v2_m3v3(cos_2d[i], axis_mat, l_iter->v->co);
    blocks_l[i] = l_iter->head.data;
    if (do_vertex) {
      blocks_v[i] = l_iter->v->head.data;
    }
  } while ((void)i++, (l_iter = l_iter->next) != l_first);

  BM_face_interp_from_face_ex(bm, f_dst, f_src, do_vertex, blocks_l, blocks_v, cos_2d, axis_mat);
}
196
/**
 * Build the quad of the multires grid associated with loop `l`:
 * the face center (`v1`), the midpoint of the previous edge (`v2`), the
 * loop's own vertex (`v3`) and the midpoint of the next edge (`v4`).
 * `e1`/`e2` receive the two edges grid rows are interpolated between.
 *
 * \param l_f_center: pre-computed center of `l->f` (checked in debug builds).
 * \return always 1.
 */
static int compute_mdisp_quad(const BMLoop *l,
                              const float l_f_center[3],
                              float v1[3],
                              float v2[3],
                              float v3[3],
                              float v4[3],
                              float e1[3],
                              float e2[3])
{
  float n[3], p[3];

#ifndef NDEBUG
  {
    float cent[3];
    /* compute center */
    /* NOTE(review): the call computing `cent` is elided in this view — confirm. */
    BLI_assert(equals_v3v3(cent, l_f_center));
  }
#endif

  /* Edge midpoints on either side of the loop's vertex. */
  mid_v3_v3v3(p, l->prev->v->co, l->v->co);
  mid_v3_v3v3(n, l->next->v->co, l->v->co);

  copy_v3_v3(v1, l_f_center);
  copy_v3_v3(v2, p);
  copy_v3_v3(v3, l->v->co);
  copy_v3_v3(v4, n);

  /* Interpolation edges: v1->v2 and v4->v3. */
  sub_v3_v3v3(e1, v2, v1);
  sub_v3_v3v3(e2, v3, v4);

  return 1;
}
244
245static bool quad_co(const float v1[3],
246 const float v2[3],
247 const float v3[3],
248 const float v4[3],
249 const float p[3],
250 const float n[3],
251 float r_uv[2])
252{
253 float projverts[5][3], n2[3];
254 const float origin[2] = {0.0f, 0.0f};
255 int i;
256
257 /* project points into 2d along normal */
258 copy_v3_v3(projverts[0], v1);
259 copy_v3_v3(projverts[1], v2);
260 copy_v3_v3(projverts[2], v3);
261 copy_v3_v3(projverts[3], v4);
262 copy_v3_v3(projverts[4], p);
263
264 normal_quad_v3(n2, projverts[0], projverts[1], projverts[2], projverts[3]);
265
266 if (dot_v3v3(n, n2) < -FLT_EPSILON) {
267 return false;
268 }
269
270 /* rotate */
271 poly_rotate_plane(n, projverts, 5);
272
273 /* subtract origin */
274 for (i = 0; i < 4; i++) {
275 sub_v2_v2(projverts[i], projverts[4]);
276 }
277
278 if (!isect_point_quad_v2(origin, projverts[0], projverts[1], projverts[2], projverts[3])) {
279 return false;
280 }
281
282 resolve_quad_uv_v2(r_uv, origin, projverts[0], projverts[3], projverts[2], projverts[1]);
283
284 return true;
285}
286
287static void mdisp_axis_from_quad(const float v1[3],
288 const float v2[3],
289 float /*v3*/[3],
290 const float v4[3],
291 float r_axis_x[3],
292 float r_axis_y[3])
293{
294 sub_v3_v3v3(r_axis_x, v4, v1);
295 sub_v3_v3v3(r_axis_y, v2, v1);
296
297 normalize_v3(r_axis_x);
298 normalize_v3(r_axis_y);
299}
300
/**
 * Test whether point `p` falls inside the (slightly expanded) mdisp quad of
 * `l_dst`. On success, output that quad's grid axes and the grid-space UV of
 * `p` (unit UV scaled by `res - 1`).
 *
 * \return true when `p` maps into the quad.
 */
static bool mdisp_in_mdispquad(BMLoop *l_src,
                               BMLoop *l_dst,
                               const float l_dst_f_center[3],
                               const float p[3],
                               int res,
                               float r_axis_x[3],
                               float r_axis_y[3],
                               float r_uv[2])
{
  float v1[3], v2[3], c[3], v3[3], v4[3], e1[3], e2[3];
  float eps = FLT_EPSILON * 4000;

  /* NOTE(review): the handling inside these zero-normal branches is elided in
   * this view — presumably the vertex normals are recomputed; confirm. */
  if (is_zero_v3(l_src->v->no)) {
  }
  if (is_zero_v3(l_dst->v->no)) {
  }

  compute_mdisp_quad(l_dst, l_dst_f_center, v1, v2, v3, v4, e1, e2);

  /* expand quad a bit: scale corners away from the quad center so points on a
   * shared boundary are not rejected due to float precision. */
  mid_v3_v3v3v3v3(c, v1, v2, v3, v4);

  sub_v3_v3(v1, c);
  sub_v3_v3(v2, c);
  sub_v3_v3(v3, c);
  sub_v3_v3(v4, c);
  mul_v3_fl(v1, 1.0f + eps);
  mul_v3_fl(v2, 1.0f + eps);
  mul_v3_fl(v3, 1.0f + eps);
  mul_v3_fl(v4, 1.0f + eps);
  add_v3_v3(v1, c);
  add_v3_v3(v2, c);
  add_v3_v3(v3, c);
  add_v3_v3(v4, c);

  if (!quad_co(v1, v2, v3, v4, p, l_src->v->no, r_uv)) {
    return false;
  }

  /* Scale the unit UV into grid-cell coordinates. */
  mul_v2_fl(r_uv, float(res - 1));

  mdisp_axis_from_quad(v1, v2, v3, v4, r_axis_x, r_axis_y);

  return true;
}
354
355static float bm_loop_flip_equotion(float mat[2][2],
356 float b[2],
357 const float target_axis_x[3],
358 const float target_axis_y[3],
359 const float coord[3],
360 int i,
361 int j)
362{
363 mat[0][0] = target_axis_x[i];
364 mat[0][1] = target_axis_y[i];
365 mat[1][0] = target_axis_x[j];
366 mat[1][1] = target_axis_y[j];
367 b[0] = coord[i];
368 b[1] = coord[j];
369
370 return cross_v2v2(mat[0], mat[1]);
371}
372
/**
 * Re-express a multires displacement `disp` (given in the source grid's axes)
 * in the target grid's axes, writing the result back into `disp[0..1]`.
 *
 * The displacement is first projected onto the target grid plane, then a 2x2
 * system is solved — trying the (x,y), (x,z) and finally (y,z) component
 * pairs until a non-degenerate determinant is found.
 */
static void bm_loop_flip_disp(const float source_axis_x[3],
                              const float source_axis_y[3],
                              const float target_axis_x[3],
                              const float target_axis_y[3],
                              float disp[3])
{
  float vx[3], vy[3], coord[3];
  float n[3], vec[3];
  float b[2], mat[2][2], d;

  /* Displacement as a 3D vector built from the source axes. */
  mul_v3_v3fl(vx, source_axis_x, disp[0]);
  mul_v3_v3fl(vy, source_axis_y, disp[1]);
  add_v3_v3v3(coord, vx, vy);

  /* project displacement from source grid plane onto target grid plane */
  cross_v3_v3v3(n, target_axis_x, target_axis_y);
  project_v3_v3v3(vec, coord, n);
  sub_v3_v3v3(coord, coord, vec);

  d = bm_loop_flip_equotion(mat, b, target_axis_x, target_axis_y, coord, 0, 1);

  if (fabsf(d) < 1e-4f) {
    d = bm_loop_flip_equotion(mat, b, target_axis_x, target_axis_y, coord, 0, 2);
    if (fabsf(d) < 1e-4f) {
      /* Last resort: this pair is used even if its determinant is also tiny,
       * so `d` can still be near zero here (long-standing behavior). */
      d = bm_loop_flip_equotion(mat, b, target_axis_x, target_axis_y, coord, 1, 2);
    }
  }

  /* Cramer's rule. */
  disp[0] = (b[0] * mat[1][1] - mat[0][1] * b[1]) / d;
  disp[1] = (mat[0][0] * b[1] - b[0] * mat[1][0]) / d;
}
404
409
411 const float *f_src_center;
412
413 float *axis_x, *axis_y;
414 float *v1, *v4;
415 float *e1, *e2;
416
417 int res;
418 float d;
419};
420
/**
 * Task-parallel callback: fill one column (`ix`) of the destination loop's
 * multires grid.
 *
 * For each row, the corresponding 3D point on the destination quad is
 * computed, the source loop whose mdisp quad contains that point is found,
 * the source grid is sampled bilinearly, and the sampled displacement is
 * converted from the source grid axes into the destination grid axes.
 */
static void loop_interp_multires_cb(void *__restrict userdata,
                                    const int ix,
                                    const TaskParallelTLS *__restrict /*tls*/)
{
  BMLoopInterpMultiresData *data = static_cast<BMLoopInterpMultiresData *>(userdata);

  BMLoop *l_first = data->l_src_first;
  BMLoop *l_dst = data->l_dst;
  const int cd_loop_mdisp_offset = data->cd_loop_mdisp_offset;

  MDisps *md_dst = data->md_dst;
  const float *f_src_center = data->f_src_center;

  /* Grid axes of the destination quad. */
  float *axis_x = data->axis_x;
  float *axis_y = data->axis_y;

  /* Destination quad corner/edge vectors (see #compute_mdisp_quad). */
  float *v1 = data->v1;
  float *v4 = data->v4;
  float *e1 = data->e1;
  float *e2 = data->e2;

  /* NOTE(review): `res` is used for both source and destination grids here,
   * i.e. they are assumed to share a resolution — confirm. */
  const int res = data->res;
  const float d = data->d;

  float x = d * ix, y;
  int iy;
  for (y = 0.0f, iy = 0; iy < res; y += d, iy++) {
    BMLoop *l_iter = l_first;
    float co1[3], co2[3], co[3];

    /* Bilinear point on the destination quad for grid cell (ix, iy). */
    madd_v3_v3v3fl(co1, v1, e1, y);
    madd_v3_v3v3fl(co2, v4, e2, y);
    interp_v3_v3v3(co, co1, co2, x);

    /* Walk the source face's loops until one's mdisp quad contains `co`. */
    do {
      MDisps *md_src;
      float src_axis_x[3], src_axis_y[3];
      float uv[2];

      md_src = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l_iter, cd_loop_mdisp_offset));

      if (mdisp_in_mdispquad(l_dst, l_iter, f_src_center, co, res, src_axis_x, src_axis_y, uv)) {
        old_mdisps_bilinear(md_dst->disps[iy * res + ix], md_src->disps, res, uv[0], uv[1]);
        bm_loop_flip_disp(src_axis_x, src_axis_y, axis_x, axis_y, md_dst->disps[iy * res + ix]);

        break;
      }
    } while ((l_iter = l_iter->next) != l_first);
  }
}
471
473 BMLoop *l_dst,
474 const BMFace *f_src,
475 const float f_dst_center[3],
476 const float f_src_center[3],
477 const int cd_loop_mdisp_offset)
478{
479 MDisps *md_dst;
480 float v1[3], v2[3], v3[3], v4[3] = {0.0f, 0.0f, 0.0f}, e1[3], e2[3];
481 float axis_x[3], axis_y[3];
482
483 /* ignore 2-edged faces */
484 if (UNLIKELY(l_dst->f->len < 3)) {
485 return;
486 }
487
488 md_dst = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l_dst, cd_loop_mdisp_offset));
489 compute_mdisp_quad(l_dst, f_dst_center, v1, v2, v3, v4, e1, e2);
490
491 /* if no disps data allocate a new grid, the size of the first grid in f_src. */
492 if (!md_dst->totdisp) {
493 const MDisps *md_src = static_cast<const MDisps *>(
494 BM_ELEM_CD_GET_VOID_P(BM_FACE_FIRST_LOOP(f_src), cd_loop_mdisp_offset));
495
496 md_dst->totdisp = md_src->totdisp;
497 md_dst->level = md_src->level;
498 if (md_dst->totdisp) {
499 md_dst->disps = static_cast<float (*)[3]>(
500 MEM_callocN(sizeof(float[3]) * md_dst->totdisp, __func__));
501 }
502 else {
503 return;
504 }
505 }
506
507 mdisp_axis_from_quad(v1, v2, v3, v4, axis_x, axis_y);
508
509 const int res = int(sqrt(md_dst->totdisp));
511 data.l_dst = l_dst;
512 data.l_src_first = BM_FACE_FIRST_LOOP(f_src);
513 data.cd_loop_mdisp_offset = cd_loop_mdisp_offset;
514 data.md_dst = md_dst;
515 data.f_src_center = f_src_center;
516 data.axis_x = axis_x;
517 data.axis_y = axis_y;
518 data.v1 = v1;
519 data.v4 = v4;
520 data.e1 = e1;
521 data.e2 = e2;
522 data.res = res;
523 data.d = 1.0f / float(res - 1);
524
525 TaskParallelSettings settings;
527 settings.use_threading = (res > 5);
529}
530
531void BM_loop_interp_multires(BMesh *bm, BMLoop *l_dst, const BMFace *f_src)
532{
533 const int cd_loop_mdisp_offset = CustomData_get_offset(&bm->ldata, CD_MDISPS);
534
535 if (cd_loop_mdisp_offset != -1) {
536 float f_dst_center[3];
537 float f_src_center[3];
538
539 BM_face_calc_center_median(l_dst->f, f_dst_center);
540 BM_face_calc_center_median(f_src, f_src_center);
541
542 BM_loop_interp_multires_ex(bm, l_dst, f_src, f_dst_center, f_src_center, cd_loop_mdisp_offset);
543 }
544}
545
547 BMFace *f_dst,
548 const BMFace *f_src,
549 const float f_dst_center[3],
550 const float f_src_center[3],
551 const int cd_loop_mdisp_offset)
552{
553 BMLoop *l_iter, *l_first;
554 l_iter = l_first = BM_FACE_FIRST_LOOP(f_dst);
555 do {
557 bm, l_iter, f_src, f_dst_center, f_src_center, cd_loop_mdisp_offset);
558 } while ((l_iter = l_iter->next) != l_first);
559}
560
561void BM_face_interp_multires(BMesh *bm, BMFace *f_dst, const BMFace *f_src)
562{
563 const int cd_loop_mdisp_offset = CustomData_get_offset(&bm->ldata, CD_MDISPS);
564
565 if (cd_loop_mdisp_offset != -1) {
566 float f_dst_center[3];
567 float f_src_center[3];
568
569 BM_face_calc_center_median(f_dst, f_dst_center);
570 BM_face_calc_center_median(f_src, f_src_center);
571
572 BM_face_interp_multires_ex(bm, f_dst, f_src, f_dst_center, f_src_center, cd_loop_mdisp_offset);
573 }
574}
575
577{
578 const int cd_loop_mdisp_offset = CustomData_get_offset(&bm->ldata, CD_MDISPS);
579 BMLoop *l;
580 BMIter liter;
581
582 if (cd_loop_mdisp_offset == -1) {
583 return;
584 }
585
586 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
587 MDisps *mdp = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l->prev, cd_loop_mdisp_offset));
588 MDisps *mdl = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l, cd_loop_mdisp_offset));
589 MDisps *mdn = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l->next, cd_loop_mdisp_offset));
590 float co1[3];
591 int sides;
592 int y;
593
608
609 sides = int(sqrt(mdp->totdisp));
610 for (y = 0; y < sides; y++) {
611 mid_v3_v3v3(co1, mdn->disps[y * sides], mdl->disps[y]);
612
613 copy_v3_v3(mdn->disps[y * sides], co1);
614 copy_v3_v3(mdl->disps[y], co1);
615 }
616 }
617
618 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
619 MDisps *mdl1 = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l, cd_loop_mdisp_offset));
620 MDisps *mdl2;
621 float co1[3], co2[3], co[3];
622 int sides;
623 int y;
624
639
640 if (l->radial_next == l) {
641 continue;
642 }
643
644 if (l->radial_next->v == l->v) {
645 mdl2 = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l->radial_next, cd_loop_mdisp_offset));
646 }
647 else {
648 mdl2 = static_cast<MDisps *>(
649 BM_ELEM_CD_GET_VOID_P(l->radial_next->next, cd_loop_mdisp_offset));
650 }
651
652 sides = int(sqrt(mdl1->totdisp));
653 for (y = 0; y < sides; y++) {
654 int a1, a2, o1, o2;
655
656 if (l->v != l->radial_next->v) {
657 a1 = sides * y + sides - 2;
658 a2 = (sides - 2) * sides + y;
659
660 o1 = sides * y + sides - 1;
661 o2 = (sides - 1) * sides + y;
662 }
663 else {
664 a1 = sides * y + sides - 2;
665 a2 = sides * y + sides - 2;
666 o1 = sides * y + sides - 1;
667 o2 = sides * y + sides - 1;
668 }
669
670 /* magic blending numbers, hardcoded! */
671 add_v3_v3v3(co1, mdl1->disps[a1], mdl2->disps[a2]);
672 mul_v3_fl(co1, 0.18);
673
674 add_v3_v3v3(co2, mdl1->disps[o1], mdl2->disps[o2]);
675 mul_v3_fl(co2, 0.32);
676
677 add_v3_v3v3(co, co1, co2);
678
679 copy_v3_v3(mdl1->disps[o1], co);
680 copy_v3_v3(mdl2->disps[o2], co);
681 }
682 }
683}
684
686 BMesh *bm, BMLoop *l_dst, const BMFace *f_src, const bool do_vertex, const bool do_multires)
687{
688 BMLoop *l_iter;
689 BMLoop *l_first;
690 const void **vblocks = do_vertex ?
691 static_cast<const void **>(BLI_array_alloca(vblocks, f_src->len)) :
692 nullptr;
693 const void **blocks = static_cast<const void **>(BLI_array_alloca(blocks, f_src->len));
694 float (*cos_2d)[2] = static_cast<float (*)[2]>(BLI_array_alloca(cos_2d, f_src->len));
695 float *w = static_cast<float *>(BLI_array_alloca(w, f_src->len));
696 float axis_mat[3][3]; /* use normal to transform into 2d xy coords */
697 float co[2];
698
699 /* Convert the 3d coords into 2d for projection. */
700 float axis_dominant[3];
701 if (!is_zero_v3(f_src->no)) {
702 BLI_assert(BM_face_is_normal_valid(f_src));
703 copy_v3_v3(axis_dominant, f_src->no);
704 }
705 else {
706 /* Rare case in which all the vertices of the face are aligned.
707 * Get a random axis that is orthogonal to the tangent. */
708 float vec[3];
709 BM_face_calc_tangent_auto(f_src, vec);
710 ortho_v3_v3(axis_dominant, vec);
711 normalize_v3(axis_dominant);
712 }
713 axis_dominant_v3_to_m3(axis_mat, axis_dominant);
714
715 int i = 0;
716 l_iter = l_first = BM_FACE_FIRST_LOOP(f_src);
717 do {
718 mul_v2_m3v3(cos_2d[i], axis_mat, l_iter->v->co);
719 blocks[i] = l_iter->head.data;
720
721 if (do_vertex) {
722 vblocks[i] = l_iter->v->head.data;
723 }
724 } while ((void)i++, (l_iter = l_iter->next) != l_first);
725
726 mul_v2_m3v3(co, axis_mat, l_dst->v->co);
727
728 /* interpolate */
729 interp_weights_poly_v2(w, cos_2d, f_src->len, co);
730 CustomData_bmesh_interp(&bm->ldata, blocks, w, f_src->len, l_dst->head.data);
731 if (do_vertex) {
732 CustomData_bmesh_interp(&bm->vdata, vblocks, w, f_src->len, l_dst->v->head.data);
733 }
734
735 if (do_multires) {
736 BM_loop_interp_multires(bm, l_dst, f_src);
737 }
738}
739
/**
 * Interpolate vertex custom-data of `v_dst` from the corners of `f_src`:
 * the face is projected to 2D along its normal and the vertex data is
 * blended with polygon interpolation weights.
 */
void BM_vert_interp_from_face(BMesh *bm, BMVert *v_dst, const BMFace *f_src)
{
  BMLoop *l_iter;
  BMLoop *l_first;
  /* Per source corner: vertex data block, projected 2D position, and weight. */
  const void **blocks = static_cast<const void **>(BLI_array_alloca(blocks, f_src->len));
  float (*cos_2d)[2] = static_cast<float (*)[2]>(BLI_array_alloca(cos_2d, f_src->len));
  float *w = static_cast<float *>(BLI_array_alloca(w, f_src->len));
  float axis_mat[3][3]; /* use normal to transform into 2d xy coords */
  float co[2];

  /* convert the 3d coords into 2d for projection */
  /* NOTE(review): an assertion validating `f_src->no` is elided in this view — confirm. */
  axis_dominant_v3_to_m3(axis_mat, f_src->no);

  int i = 0;
  l_iter = l_first = BM_FACE_FIRST_LOOP(f_src);
  do {
    mul_v2_m3v3(cos_2d[i], axis_mat, l_iter->v->co);
    blocks[i] = l_iter->v->head.data;
  } while ((void)i++, (l_iter = l_iter->next) != l_first);

  mul_v2_m3v3(co, axis_mat, v_dst->co);

  /* interpolate */
  interp_weights_poly_v2(w, cos_2d, f_src->len, co);
  CustomData_bmesh_interp(&bm->vdata, blocks, w, f_src->len, v_dst->head.data);
}
767
769{
771
772 BMIter iter;
773 BLI_mempool *oldpool = olddata->pool;
774 void *block;
775
776 if (data == &bm->vdata) {
777 BMVert *eve;
778
780
781 BM_ITER_MESH (eve, &iter, bm, BM_VERTS_OF_MESH) {
782 block = nullptr;
783 CustomData_bmesh_copy_block(*data, cd_map, eve->head.data, &block);
784 CustomData_bmesh_free_block(olddata, &eve->head.data);
785 eve->head.data = block;
786 }
787 }
788 else if (data == &bm->edata) {
789 BMEdge *eed;
790
792
793 BM_ITER_MESH (eed, &iter, bm, BM_EDGES_OF_MESH) {
794 block = nullptr;
795 CustomData_bmesh_copy_block(*data, cd_map, eed->head.data, &block);
796 CustomData_bmesh_free_block(olddata, &eed->head.data);
797 eed->head.data = block;
798 }
799 }
800 else if (data == &bm->ldata) {
801 BMIter liter;
802 BMFace *efa;
803 BMLoop *l;
804
806 BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
807 BM_ITER_ELEM (l, &liter, efa, BM_LOOPS_OF_FACE) {
808 block = nullptr;
809 CustomData_bmesh_copy_block(*data, cd_map, l->head.data, &block);
810 CustomData_bmesh_free_block(olddata, &l->head.data);
811 l->head.data = block;
812 }
813 }
814 }
815 else if (data == &bm->pdata) {
816 BMFace *efa;
817
819
820 BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
821 block = nullptr;
822 CustomData_bmesh_copy_block(*data, cd_map, efa->head.data, &block);
823 CustomData_bmesh_free_block(olddata, &efa->head.data);
824 efa->head.data = block;
825 }
826 }
827 else {
828 /* should never reach this! */
829 BLI_assert(0);
830 }
831
832 if (oldpool) {
833 /* this should never happen but can when dissolve fails - #28960. */
834 BLI_assert(data->pool != oldpool);
835
836 BLI_mempool_destroy(oldpool);
837 }
838}
839
841{
842 CustomData olddata = *data;
843 olddata.layers = (olddata.layers) ?
844 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
845 nullptr;
846 /* The pool is now owned by `olddata` and must not be shared. */
847 data->pool = nullptr;
848
850
851 update_data_blocks(bm, &olddata, data);
852 if (olddata.layers) {
853 MEM_freeN(olddata.layers);
854 }
855}
856
858{
859 CustomData olddata = *data;
860 olddata.layers = (olddata.layers) ?
861 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
862 nullptr;
863 /* The pool is now owned by `olddata` and must not be shared. */
864 data->pool = nullptr;
865
867
868 update_data_blocks(bm, &olddata, data);
869 if (olddata.layers) {
870 MEM_freeN(olddata.layers);
871 }
872}
873
880
882{
883 const int nr_uv_layers = CustomData_number_of_layers(&bm->ldata, CD_PROP_FLOAT2);
884 for (int l = 0; l < nr_uv_layers; l++) {
885 /* NOTE: you can't re-use the return-value of #CustomData_get_layer_name()
886 * because adding layers can invalidate that. */
889 bm,
890 &bm->ldata,
893 }
894}
895
902
/**
 * Return true when the UV map `uv_map_name` has an associated pin layer
 * (a #CD_PROP_BOOL loop layer looked up by its derived pin-layer name).
 */
bool BM_uv_map_attr_pin_exists(const BMesh *bm, const StringRef uv_map_name)
{
  /* NOTE(review): the name buffer declaration and the start of the return
   * expression are elided in this view. */
      &bm->ldata, CD_PROP_BOOL, BKE_uv_map_pin_name_get(uv_map_name, name)) != -1);
}
909
911{
912 CustomData olddata = *data;
913 olddata.layers = (olddata.layers) ?
914 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
915 nullptr;
916 /* The pool is now owned by `olddata` and must not be shared. */
917 data->pool = nullptr;
918
919 const bool had_layer = CustomData_free_layer_active(data, eCustomDataType(type));
920 /* Assert because its expensive to realloc - better not do if layer isn't present. */
921 BLI_assert(had_layer != false);
922 UNUSED_VARS_NDEBUG(had_layer);
923
924 update_data_blocks(bm, &olddata, data);
925 if (olddata.layers) {
926 MEM_freeN(olddata.layers);
927 }
928}
929
931{
932 CustomData olddata = *data;
933 olddata.layers = (olddata.layers) ?
934 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
935 nullptr;
936 /* The pool is now owned by `olddata` and must not be shared. */
937 data->pool = nullptr;
938
939 const bool had_layer = CustomData_free_layer_named(data, name);
940
941 if (had_layer) {
942 update_data_blocks(bm, &olddata, data);
943 }
944 else {
945 /* Move pool ownership back to BMesh CustomData, no block reallocation. */
946 data->pool = olddata.pool;
947 }
948
949 if (olddata.layers) {
950 MEM_freeN(olddata.layers);
951 }
952
953 return had_layer;
954}
955
956void BM_data_layer_free_n(BMesh *bm, CustomData *data, int type, int n)
957{
958 CustomData olddata = *data;
959 olddata.layers = (olddata.layers) ?
960 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
961 nullptr;
962 /* The pool is now owned by `olddata` and must not be shared. */
963 data->pool = nullptr;
964
965 const bool had_layer = CustomData_free_layer(
967 /* Assert because its expensive to realloc - better not do if layer isn't present. */
968 BLI_assert(had_layer != false);
969 UNUSED_VARS_NDEBUG(had_layer);
970
971 update_data_blocks(bm, &olddata, data);
972 if (olddata.layers) {
973 MEM_freeN(olddata.layers);
974 }
975}
976
/**
 * Copy the values of layer `src_n` into layer `dst_n` (both of custom-data
 * `type`) for every element of whichever domain `data` belongs to — the
 * verts, edges, faces or loops of `bm`. Asserts when `data` is none of the
 * mesh's four CustomData structs.
 */
void BM_data_layer_copy(BMesh *bm, CustomData *data, int type, int src_n, int dst_n)
{
  BMIter iter;

  if (&bm->vdata == data) {
    BMVert *eve;

    BM_ITER_MESH (eve, &iter, bm, BM_VERTS_OF_MESH) {
      void *ptr = CustomData_bmesh_get_n(data, eve->head.data, eCustomDataType(type), src_n);
      /* NOTE(review): the matching `CustomData_bmesh_set_n` call is elided in this view. */
    }
  }
  else if (&bm->edata == data) {
    BMEdge *eed;

    BM_ITER_MESH (eed, &iter, bm, BM_EDGES_OF_MESH) {
      void *ptr = CustomData_bmesh_get_n(data, eed->head.data, eCustomDataType(type), src_n);
      /* NOTE(review): the matching `CustomData_bmesh_set_n` call is elided in this view. */
    }
  }
  else if (&bm->pdata == data) {
    BMFace *efa;

    BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
      void *ptr = CustomData_bmesh_get_n(data, efa->head.data, eCustomDataType(type), src_n);
      /* NOTE(review): the matching `CustomData_bmesh_set_n` call is elided in this view. */
    }
  }
  else if (&bm->ldata == data) {
    BMIter liter;
    BMFace *efa;
    BMLoop *l;

    BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
      BM_ITER_ELEM (l, &liter, efa, BM_LOOPS_OF_FACE) {
        void *ptr = CustomData_bmesh_get_n(data, l->head.data, eCustomDataType(type), src_n);
        CustomData_bmesh_set_n(data, l->head.data, eCustomDataType(type), dst_n, ptr);
      }
    }
  }
  else {
    /* should never reach this! */
    BLI_assert(0);
  }
}
1022
/**
 * Return the float value stored in the custom-data layer of `type` for
 * `element`, or 0.0f when the lookup yields no value.
 */
float BM_elem_float_data_get(CustomData *cd, void *element, int type)
{
  /* NOTE(review): the custom-data lookup call inside this cast is elided in this view. */
  const float *f = static_cast<const float *>(
  return f ? *f : 0.0f;
}
1029
/**
 * Store `val` in the custom-data layer of `type` for `element`;
 * does nothing when the lookup yields no value.
 */
void BM_elem_float_data_set(CustomData *cd, void *element, int type, const float val)
{
  /* NOTE(review): the custom-data lookup call inside this cast is elided in this view. */
  float *f = static_cast<float *>(
  if (f) {
    *f = val;
  }
}
1038
1040{
1041 using namespace blender;
1042 for (const CustomDataLayer &layer : Span(bm.vdata.layers, bm.vdata.totlayer)) {
1043 if (layer.name == name) {
1044 return {layer.offset,
1047 }
1048 }
1049 for (const CustomDataLayer &layer : Span(bm.edata.layers, bm.edata.totlayer)) {
1050 if (layer.name == name) {
1051 return {layer.offset,
1054 }
1055 }
1056 for (const CustomDataLayer &layer : Span(bm.pdata.layers, bm.pdata.totlayer)) {
1057 if (layer.name == name) {
1058 return {layer.offset,
1061 }
1062 }
1063 for (const CustomDataLayer &layer : Span(bm.ldata.layers, bm.ldata.totlayer)) {
1064 if (layer.name == name) {
1065 return {layer.offset,
1068 }
1069 }
1070 return {};
1071}
1072
1073/* -------------------------------------------------------------------- */
1091
1093 /* same for all groups */
1094 int type;
1096 const float *loop_weights;
1098
1099 /* --- Per loop fan vars --- */
1100
1101 /* reference for this contiguous fan */
1102 const void *data_ref;
1104
1105 /* accumulate 'LoopGroupCD.weight' to make unit length */
1107
1108 /* both arrays the size of the 'BM_vert_face_count(v)'
1109 * each contiguous fan gets a slide of these arrays */
1113};
1114
1115/* Store vars to pass into 'CustomData_bmesh_interp' */
1117 /* direct customdata pointer array */
1118 void **data;
1119 /* weights (aligned with 'data') */
1121 /* index-in-face */
1123 /* number of loops in the fan */
1125};
1126
1128{
1129 const int i = BM_elem_index_get(l);
1130 const float w = lwc->loop_weights[i];
1133 lwc->data_index_array[lwc->data_len] = i;
1134 lwc->weight_array[lwc->data_len] = w;
1135 lwc->weight_accum += w;
1136
1137 lwc->data_len += 1;
1138}
1139
1145static void bm_loop_walk_data(LoopWalkCtx *lwc, BMLoop *l_walk)
1146{
1147 int i;
1148
1150 lwc->data_ref,
1151 BM_ELEM_CD_GET_VOID_P(l_walk, lwc->cd_layer_offset)));
1153
1154 bm_loop_walk_add(lwc, l_walk);
1155
1156 /* recurse around this loop-fan (in both directions) */
1157 for (i = 0; i < 2; i++) {
1158 BMLoop *l_other = ((i == 0) ? l_walk : l_walk->prev)->radial_next;
1159 if (l_other->radial_next != l_other) {
1160 if (l_other->v != l_walk->v) {
1161 l_other = l_other->next;
1162 }
1163 BLI_assert(l_other->v == l_walk->v);
1166 lwc->data_ref,
1167 BM_ELEM_CD_GET_VOID_P(l_other, lwc->cd_layer_offset)))
1168 {
1169 bm_loop_walk_data(lwc, l_other);
1170 }
1171 }
1172 }
1173 }
1174}
1175
1177 BMesh *bm, BMVert *v, const int layer_n, const float *loop_weights, MemArena *arena)
1178{
1179 LoopWalkCtx lwc;
1180 LinkNode *groups = nullptr;
1181 BMLoop *l;
1182 BMIter liter;
1183 int loop_num;
1184
1185 lwc.type = bm->ldata.layers[layer_n].type;
1186 lwc.cd_layer_offset = bm->ldata.layers[layer_n].offset;
1187 lwc.loop_weights = loop_weights;
1188 lwc.arena = arena;
1189
1190 /* Enable 'BM_ELEM_INTERNAL_TAG', leaving the flag clean on completion. */
1191 loop_num = 0;
1192 BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
1194 BM_elem_index_set(l, loop_num); /* set_dirty! */
1195 loop_num++;
1196 }
1197 bm->elem_index_dirty |= BM_LOOP;
1198
1199 lwc.data_len = 0;
1200 lwc.data_array = static_cast<void **>(BLI_memarena_alloc(lwc.arena, sizeof(void *) * loop_num));
1201 lwc.data_index_array = static_cast<int *>(BLI_memarena_alloc(lwc.arena, sizeof(int) * loop_num));
1202 lwc.weight_array = static_cast<float *>(BLI_memarena_alloc(lwc.arena, sizeof(float) * loop_num));
1203
1204 BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
1206 LoopGroupCD *lf = static_cast<LoopGroupCD *>(BLI_memarena_alloc(lwc.arena, sizeof(*lf)));
1207 int len_prev = lwc.data_len;
1208
1210
1211 /* assign len-last */
1212 lf->data = &lwc.data_array[lwc.data_len];
1213 lf->data_index = &lwc.data_index_array[lwc.data_len];
1214 lf->data_weights = &lwc.weight_array[lwc.data_len];
1215 lwc.weight_accum = 0.0f;
1216
1217 /* new group */
1218 bm_loop_walk_data(&lwc, l);
1219 lf->data_len = lwc.data_len - len_prev;
1220
1221 if (LIKELY(lwc.weight_accum != 0.0f)) {
1222 mul_vn_fl(lf->data_weights, lf->data_len, 1.0f / lwc.weight_accum);
1223 }
1224 else {
1225 copy_vn_fl(lf->data_weights, lf->data_len, 1.0f / float(lf->data_len));
1226 }
1227
1228 BLI_linklist_prepend_arena(&groups, lf, lwc.arena);
1229 }
1230 }
1231
1232 BLI_assert(lwc.data_len == loop_num);
1233
1234 return groups;
1235}
1236
1238 void *lf_p,
1239 int layer_n,
1240 void *data_tmp)
1241{
1242 LoopGroupCD *lf = static_cast<LoopGroupCD *>(lf_p);
1243 const int type = bm->ldata.layers[layer_n].type;
1244 int i;
1245 const float *data_weights;
1246
1247 data_weights = lf->data_weights;
1248
1250 &bm->ldata, (const void **)lf->data, data_weights, lf->data_len, data_tmp, layer_n);
1251
1252 for (i = 0; i < lf->data_len; i++) {
1253 CustomData_copy_elements(eCustomDataType(type), data_tmp, lf->data[i], 1);
1254 }
1255}
1256
1258 BMesh *bm, void *lf_p, const int layer_n, void *data_tmp, const float *loop_weights)
1259{
1260 LoopGroupCD *lf = static_cast<LoopGroupCD *>(lf_p);
1261 const int type = bm->ldata.layers[layer_n].type;
1262 int i;
1263 const float *data_weights;
1264
1265 /* re-weight */
1266 float *temp_weights = static_cast<float *>(BLI_array_alloca(temp_weights, lf->data_len));
1267 float weight_accum = 0.0f;
1268
1269 for (i = 0; i < lf->data_len; i++) {
1270 float w = loop_weights[lf->data_index[i]] * lf->data_weights[i];
1271 temp_weights[i] = w;
1272 weight_accum += w;
1273 }
1274
1275 if (LIKELY(weight_accum != 0.0f)) {
1276 mul_vn_fl(temp_weights, lf->data_len, 1.0f / weight_accum);
1277 data_weights = temp_weights;
1278 }
1279 else {
1280 data_weights = lf->data_weights;
1281 }
1282
1284 &bm->ldata, (const void **)lf->data, data_weights, lf->data_len, data_tmp, layer_n);
1285
1286 for (i = 0; i < lf->data_len; i++) {
1287 CustomData_copy_elements(eCustomDataType(type), data_tmp, lf->data[i], 1);
1288 }
1289}
1290
1291void BM_vert_loop_groups_data_layer_merge(BMesh *bm, LinkNode *groups, const int layer_n)
1292{
1293 const int type = bm->ldata.layers[layer_n].type;
1294 const int size = CustomData_sizeof(eCustomDataType(type));
1295 void *data_tmp = alloca(size);
1296
1297 do {
1298 bm_vert_loop_groups_data_layer_merge__single(bm, groups->link, layer_n, data_tmp);
1299 } while ((groups = groups->next));
1300}
1301
1303 LinkNode *groups,
1304 const int layer_n,
1305 const float *loop_weights)
1306{
1307 const int type = bm->ldata.layers[layer_n].type;
1308 const int size = CustomData_sizeof(eCustomDataType(type));
1309 void *data_tmp = alloca(size);
1310
1311 do {
1313 bm, groups->link, layer_n, data_tmp, loop_weights);
1314 } while ((groups = groups->next));
1315}
1316
Generic geometry attributes built on CustomData.
blender::StringRef BKE_uv_map_pin_name_get(blender::StringRef uv_map_name, char *buffer)
CustomData interface, see also DNA_customdata_types.h.
int CustomData_sizeof(eCustomDataType type)
int CustomData_get_offset(const CustomData *data, eCustomDataType type)
int CustomData_get_layer_index_n(const CustomData *data, eCustomDataType type, int n)
@ CD_SET_DEFAULT
void CustomData_bmesh_free_block(CustomData *data, void **block)
void * CustomData_add_layer_named(CustomData *data, eCustomDataType type, eCDAllocType alloctype, int totelem, blender::StringRef name)
BMCustomDataCopyMap CustomData_bmesh_copy_map_calc(const CustomData &src, const CustomData &dst, eCustomDataMask mask_exclude=0)
void CustomData_bmesh_init_pool(CustomData *data, int totelem, char htype)
void * CustomData_bmesh_get_n(const CustomData *data, void *block, eCustomDataType type, int n)
bool CustomData_free_layer(CustomData *data, eCustomDataType type, int index)
int CustomData_get_named_layer_index(const CustomData *data, eCustomDataType type, blender::StringRef name)
const char * CustomData_get_layer_name(const CustomData *data, eCustomDataType type, int n)
void * CustomData_bmesh_get(const CustomData *data, void *block, eCustomDataType type)
void CustomData_copy_elements(eCustomDataType type, const void *src_data, void *dst_data, int count)
void CustomData_bmesh_set_n(CustomData *data, void *block, eCustomDataType type, int n, const void *source)
void CustomData_bmesh_copy_block(CustomData &data, void *src_block, void **dst_block)
void CustomData_bmesh_interp_n(CustomData *data, const void **src_blocks, const float *weights, int count, void *dst_block_ofs, int n)
void * CustomData_add_layer(CustomData *data, eCustomDataType type, eCDAllocType alloctype, int totelem)
bool CustomData_data_equals(eCustomDataType type, const void *data1, const void *data2)
int CustomData_number_of_layers(const CustomData *data, eCustomDataType type)
bool CustomData_free_layer_named(CustomData *data, blender::StringRef name)
bool CustomData_free_layer_active(CustomData *data, eCustomDataType type)
void CustomData_bmesh_interp(CustomData *data, const void **src_blocks, const float *weights, int count, void *dst_block)
void old_mdisps_bilinear(float out[3], float(*disps)[3], int st, float u, float v)
Definition multires.cc:568
#define BLI_array_alloca(arr, realsize)
Definition BLI_alloca.h:18
#define BLI_assert(a)
Definition BLI_assert.h:46
int isect_point_quad_v2(const float p[2], const float v1[2], const float v2[2], const float v3[2], const float v4[2])
float normal_quad_v3(float n[3], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
Definition math_geom.cc:58
void resolve_quad_uv_v2(float r_uv[2], const float st[2], const float st0[2], const float st1[2], const float st2[2], const float st3[2])
void axis_dominant_v3_to_m3(float r_mat[3][3], const float normal[3])
Normal to x,y matrix.
void interp_weights_poly_v2(float w[], float v[][2], int n, const float co[2])
void mul_v2_m3v3(float r[2], const float M[3][3], const float a[3])
void mid_v3_v3v3v3v3(float v[3], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
void mul_vn_fl(float *array_tar, int size, float f)
MINLINE void sub_v2_v2(float r[2], const float a[2])
MINLINE void sub_v3_v3(float r[3], const float a[3])
MINLINE bool equals_v3v3(const float v1[3], const float v2[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void sub_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void mul_v2_fl(float r[2], float f)
MINLINE void mul_v3_fl(float r[3], float f)
MINLINE void copy_v3_v3(float r[3], const float a[3])
void copy_vn_fl(float *array_tar, int size, float val)
void project_v3_v3v3(float out[3], const float p[3], const float v_proj[3])
MINLINE float dot_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
void interp_v3_v3v3(float r[3], const float a[3], const float b[3], float t)
MINLINE void add_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void cross_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE float cross_v2v2(const float a[2], const float b[2]) ATTR_WARN_UNUSED_RESULT
MINLINE bool is_zero_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT
void mid_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void madd_v3_v3v3fl(float r[3], const float a[3], const float b[3], float f)
MINLINE void mul_v3_v3fl(float r[3], const float a[3], float f)
MINLINE void add_v3_v3(float r[3], const float a[3])
MINLINE float normalize_v3(float n[3])
void * BLI_memarena_alloc(MemArena *ma, size_t size) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1) ATTR_MALLOC ATTR_ALLOC_SIZE(2)
void BLI_mempool_destroy(BLI_mempool *pool) ATTR_NONNULL(1)
void BLI_task_parallel_range(int start, int stop, void *userdata, TaskParallelRangeFunc func, const TaskParallelSettings *settings)
Definition task_range.cc:99
BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *settings)
Definition BLI_task.h:221
#define UNUSED_FUNCTION(x)
#define UNUSED_VARS_NDEBUG(...)
#define UNLIKELY(x)
#define LIKELY(x)
#define MAX_CUSTOMDATA_LAYER_NAME
@ CD_PROP_FLOAT2
Read Guarded memory(de)allocation.
#define BM_FACE_FIRST_LOOP(p)
@ BM_ELEM_INTERNAL_TAG
@ BM_LOOP
#define BM_ELEM_CD_GET_VOID_P(ele, offset)
#define BM_elem_index_get(ele)
#define BM_elem_flag_disable(ele, hflag)
#define BM_elem_index_set(ele, index)
#define BM_elem_flag_test(ele, hflag)
#define BM_elem_flag_enable(ele, hflag)
void BM_face_interp_multires(BMesh *bm, BMFace *f_dst, const BMFace *f_src)
static int compute_mdisp_quad(const BMLoop *l, const float l_f_center[3], float v1[3], float v2[3], float v3[3], float v4[3], float e1[3], float e2[3])
Multires Interpolation.
bool BM_data_layer_free_named(BMesh *bm, CustomData *data, StringRef name)
static void bm_loop_walk_add(LoopWalkCtx *lwc, BMLoop *l)
BMDataLayerLookup BM_data_layer_lookup(const BMesh &bm, const blender::StringRef name)
void BM_data_layer_free_n(BMesh *bm, CustomData *data, int type, int n)
void BM_data_layer_free(BMesh *bm, CustomData *data, int type)
float BM_elem_float_data_get(CustomData *cd, void *element, int type)
void BM_vert_interp_from_face(BMesh *bm, BMVert *v_dst, const BMFace *f_src)
static void loop_interp_multires_cb(void *__restrict userdata, const int ix, const TaskParallelTLS *__restrict)
bool BM_uv_map_attr_pin_exists(const BMesh *bm, const StringRef uv_map_name)
void BM_face_interp_multires_ex(BMesh *bm, BMFace *f_dst, const BMFace *f_src, const float f_dst_center[3], const float f_src_center[3], const int cd_loop_mdisp_offset)
void BM_data_layer_copy(BMesh *bm, CustomData *data, int type, int src_n, int dst_n)
void BM_vert_loop_groups_data_layer_merge(BMesh *bm, LinkNode *groups, const int layer_n)
static void bm_vert_loop_groups_data_layer_merge__single(BMesh *bm, void *lf_p, int layer_n, void *data_tmp)
void BM_vert_loop_groups_data_layer_merge_weights(BMesh *bm, LinkNode *groups, const int layer_n, const float *loop_weights)
void BM_data_layer_add_named(BMesh *bm, CustomData *data, int type, const StringRef name)
void BM_face_interp_from_face_ex(BMesh *bm, BMFace *f_dst, const BMFace *f_src, const bool do_vertex, const void **blocks_l, const void **blocks_v, float(*cos_2d)[2], float axis_mat[3][3])
Data Interpolate From Face.
static void update_data_blocks(BMesh *bm, CustomData *olddata, CustomData *data)
static bool mdisp_in_mdispquad(BMLoop *l_src, BMLoop *l_dst, const float l_dst_f_center[3], const float p[3], int res, float r_axis_x[3], float r_axis_y[3], float r_uv[2])
void BM_loop_interp_multires(BMesh *bm, BMLoop *l_dst, const BMFace *f_src)
void BM_data_layer_add(BMesh *bm, CustomData *data, int type)
void BM_data_interp_from_edges(BMesh *bm, const BMEdge *e_src_1, const BMEdge *e_src_2, BMEdge *e_dst, const float fac)
Data, Interpolate From Edges.
void BM_face_interp_from_face(BMesh *bm, BMFace *f_dst, const BMFace *f_src, const bool do_vertex)
void BM_uv_map_attr_pin_ensure_for_all_layers(BMesh *bm)
void BM_face_multires_bounds_smooth(BMesh *bm, BMFace *f)
void BM_uv_map_attr_pin_ensure_named(BMesh *bm, const StringRef uv_map_name)
static void bm_loop_walk_data(LoopWalkCtx *lwc, BMLoop *l_walk)
void BM_loop_interp_from_face(BMesh *bm, BMLoop *l_dst, const BMFace *f_src, const bool do_vertex, const bool do_multires)
void BM_data_interp_face_vert_edge(BMesh *bm, const BMVert *v_src_1, const BMVert *, BMVert *v, BMEdge *e, const float fac)
Data Face-Vert Edge Interpolate.
static bool quad_co(const float v1[3], const float v2[3], const float v3[3], const float v4[3], const float p[3], const float n[3], float r_uv[2])
static void mdisp_axis_from_quad(const float v1[3], const float v2[3], float[3], const float v4[3], float r_axis_x[3], float r_axis_y[3])
LinkNode * BM_vert_loop_groups_data_layer_create(BMesh *bm, BMVert *v, const int layer_n, const float *loop_weights, MemArena *arena)
static void bm_vert_loop_groups_data_layer_merge_weights__single(BMesh *bm, void *lf_p, const int layer_n, void *data_tmp, const float *loop_weights)
static float bm_loop_flip_equotion(float mat[2][2], float b[2], const float target_axis_x[3], const float target_axis_y[3], const float coord[3], int i, int j)
void BM_loop_interp_multires_ex(BMesh *, BMLoop *l_dst, const BMFace *f_src, const float f_dst_center[3], const float f_src_center[3], const int cd_loop_mdisp_offset)
static void UNUSED_FUNCTION BM_Data_Vert_Average(BMesh *, BMFace *)
Data Vert Average.
void BM_elem_float_data_set(CustomData *cd, void *element, int type, const float val)
void BM_data_layer_ensure_named(BMesh *bm, CustomData *data, int type, const StringRef name)
static void bm_loop_flip_disp(const float source_axis_x[3], const float source_axis_y[3], const float target_axis_x[3], const float target_axis_y[3], float disp[3])
static void bm_data_interp_from_elem(CustomData *data_layer, const BMElem *ele_src_1, const BMElem *ele_src_2, BMElem *ele_dst, const float fac)
void BM_data_interp_from_verts(BMesh *bm, const BMVert *v_src_1, const BMVert *v_src_2, BMVert *v_dst, const float fac)
Data, Interpolate From Verts.
#define BM_ITER_ELEM(ele, iter, data, itype)
#define BM_ITER_MESH(ele, iter, bm, itype)
@ BM_EDGES_OF_MESH
@ BM_VERTS_OF_MESH
@ BM_FACES_OF_MESH
@ BM_LOOPS_OF_VERT
@ BM_LOOPS_OF_FACE
BMesh const char void * data
BMesh * bm
#define BM_FACE
#define BM_EDGE
#define BM_VERT
ATTR_WARN_UNUSED_RESULT const void * element
void poly_rotate_plane(const float normal[3], float(*verts)[3], const uint nverts)
POLY ROTATE PLANE.
void BM_face_calc_center_median(const BMFace *f, float r_cent[3])
void BM_vert_normal_update_all(BMVert *v)
bool BM_face_is_normal_valid(const BMFace *f)
ATTR_WARN_UNUSED_RESULT const BMVert * v2
ATTR_WARN_UNUSED_RESULT const BMLoop * l
ATTR_WARN_UNUSED_RESULT const BMVert const BMEdge * e
ATTR_WARN_UNUSED_RESULT const BMVert * v
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition btQuadWord.h:119
nullptr float
#define sqrt
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void * MEM_dupallocN(const void *vmemh)
Definition mallocn.cc:143
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
std::optional< AttrType > custom_data_type_to_attr_type(eCustomDataType data_type)
const btScalar eps
Definition poly34.cpp:11
const char * name
#define fabsf
BMHeader head
BMHeader head
BMHeader head
float no[3]
void * data
BMHeader head
struct BMVert * v
struct BMLoop * radial_next
struct BMLoop * prev
struct BMFace * f
struct BMLoop * next
float co[3]
float no[3]
BMHeader head
struct BLI_mempool * pool
CustomDataLayer * layers
void * link
struct LinkNode * next
float * data_weights
const float * loop_weights
float * weight_array
const void * data_ref
int * data_index_array
void ** data_array
MemArena * arena
float(* disps)[3]
i
Definition text_draw.cc:230
PointerRNA * ptr
Definition wm_files.cc:4238