Blender V4.3
bmesh_interp.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2007 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
11#include "MEM_guardedalloc.h"
12
13#include "DNA_meshdata_types.h"
14
15#include "BLI_alloca.h"
16#include "BLI_linklist.h"
17#include "BLI_math_geom.h"
18#include "BLI_math_matrix.h"
19#include "BLI_math_vector.h"
20#include "BLI_memarena.h"
21#include "BLI_task.h"
22
23#include "BKE_attribute.hh"
24#include "BKE_customdata.hh"
25#include "BKE_multires.hh"
26
27#include "bmesh.hh"
29
30/* edge and vertex share, currently there's no need to have different logic */
31static void bm_data_interp_from_elem(CustomData *data_layer,
32 const BMElem *ele_src_1,
33 const BMElem *ele_src_2,
34 BMElem *ele_dst,
35 const float fac)
36{
37 if (ele_src_1->head.data && ele_src_2->head.data) {
38 /* first see if we can avoid interpolation */
39 if (fac <= 0.0f) {
40 if (ele_src_1 == ele_dst) {
41 /* do nothing */
42 }
43 else {
44 CustomData_bmesh_copy_block(*data_layer, ele_src_1->head.data, &ele_dst->head.data);
45 }
46 }
47 else if (fac >= 1.0f) {
48 if (ele_src_2 == ele_dst) {
49 /* do nothing */
50 }
51 else {
52 CustomData_bmesh_copy_block(*data_layer, ele_src_2->head.data, &ele_dst->head.data);
53 }
54 }
55 else {
56 const void *src[2];
57 float w[2];
58
59 src[0] = ele_src_1->head.data;
60 src[1] = ele_src_2->head.data;
61 w[0] = 1.0f - fac;
62 w[1] = fac;
63 CustomData_bmesh_interp(data_layer, src, w, nullptr, 2, ele_dst->head.data);
64 }
65 }
66}
67
69 BMesh *bm, const BMVert *v_src_1, const BMVert *v_src_2, BMVert *v_dst, const float fac)
70{
72 &bm->vdata, (const BMElem *)v_src_1, (const BMElem *)v_src_2, (BMElem *)v_dst, fac);
73}
74
76 BMesh *bm, const BMEdge *e_src_1, const BMEdge *e_src_2, BMEdge *e_dst, const float fac)
77{
79 &bm->edata, (const BMElem *)e_src_1, (const BMElem *)e_src_2, (BMElem *)e_dst, fac);
80}
81
/* Unused stub: intended to average vertex data across a face's vertices. */
static void UNUSED_FUNCTION(BM_Data_Vert_Average)(BMesh * /*bm*/, BMFace * /*f*/)
{
  // BMIter iter;
}
92
94 const BMVert *v_src_1,
95 const BMVert * /*v_src_2*/,
96 BMVert *v,
97 BMEdge *e,
98 const float fac)
99{
100 float w[2];
101 BMLoop *l_v1 = nullptr, *l_v = nullptr, *l_v2 = nullptr;
102 BMLoop *l_iter = nullptr;
103
104 if (!e->l) {
105 return;
106 }
107
108 w[1] = 1.0f - fac;
109 w[0] = fac;
110
111 l_iter = e->l;
112 do {
113 if (l_iter->v == v_src_1) {
114 l_v1 = l_iter;
115 l_v = l_v1->next;
116 l_v2 = l_v->next;
117 }
118 else if (l_iter->v == v) {
119 l_v1 = l_iter->next;
120 l_v = l_iter;
121 l_v2 = l_iter->prev;
122 }
123
124 if (!l_v1 || !l_v2) {
125 return;
126 }
127
128 const void *src[2];
129 src[0] = l_v1->head.data;
130 src[1] = l_v2->head.data;
131
132 CustomData_bmesh_interp(&bm->ldata, src, w, nullptr, 2, l_v->head.data);
133 } while ((l_iter = l_iter->radial_next) != e->l);
134}
135
137 BMFace *f_dst,
138 const BMFace *f_src,
139 const bool do_vertex,
140 const void **blocks_l,
141 const void **blocks_v,
142 float (*cos_2d)[2],
143 float axis_mat[3][3])
144{
145 BMLoop *l_iter;
146 BMLoop *l_first;
147
148 float *w = static_cast<float *>(BLI_array_alloca(w, f_src->len));
149 float co[2];
150
151 /* interpolate */
152 l_iter = l_first = BM_FACE_FIRST_LOOP(f_dst);
153 do {
154 mul_v2_m3v3(co, axis_mat, l_iter->v->co);
155 interp_weights_poly_v2(w, cos_2d, f_src->len, co);
156 CustomData_bmesh_interp(&bm->ldata, blocks_l, w, nullptr, f_src->len, l_iter->head.data);
157 if (do_vertex) {
158 CustomData_bmesh_interp(&bm->vdata, blocks_v, w, nullptr, f_src->len, l_iter->v->head.data);
159 }
160 } while ((l_iter = l_iter->next) != l_first);
161}
162
163void BM_face_interp_from_face(BMesh *bm, BMFace *f_dst, const BMFace *f_src, const bool do_vertex)
164{
165 BMLoop *l_iter;
166 BMLoop *l_first;
167
168 const void **blocks_l = static_cast<const void **>(BLI_array_alloca(blocks_l, f_src->len));
169 const void **blocks_v = do_vertex ?
170 static_cast<const void **>(BLI_array_alloca(blocks_v, f_src->len)) :
171 nullptr;
172 float(*cos_2d)[2] = static_cast<float(*)[2]>(BLI_array_alloca(cos_2d, f_src->len));
173 float axis_mat[3][3]; /* use normal to transform into 2d xy coords */
174 int i;
175
176 /* convert the 3d coords into 2d for projection */
178 axis_dominant_v3_to_m3(axis_mat, f_src->no);
179
180 i = 0;
181 l_iter = l_first = BM_FACE_FIRST_LOOP(f_src);
182 do {
183 mul_v2_m3v3(cos_2d[i], axis_mat, l_iter->v->co);
184 blocks_l[i] = l_iter->head.data;
185 if (do_vertex) {
186 blocks_v[i] = l_iter->v->head.data;
187 }
188 } while ((void)i++, (l_iter = l_iter->next) != l_first);
189
190 BM_face_interp_from_face_ex(bm, f_dst, f_src, do_vertex, blocks_l, blocks_v, cos_2d, axis_mat);
191}
192
207static int compute_mdisp_quad(const BMLoop *l,
208 const float l_f_center[3],
209 float v1[3],
210 float v2[3],
211 float v3[3],
212 float v4[3],
213 float e1[3],
214 float e2[3])
215{
216 float n[3], p[3];
217
218#ifndef NDEBUG
219 {
220 float cent[3];
221 /* computer center */
223 BLI_assert(equals_v3v3(cent, l_f_center));
224 }
225#endif
226
227 mid_v3_v3v3(p, l->prev->v->co, l->v->co);
228 mid_v3_v3v3(n, l->next->v->co, l->v->co);
229
230 copy_v3_v3(v1, l_f_center);
231 copy_v3_v3(v2, p);
232 copy_v3_v3(v3, l->v->co);
233 copy_v3_v3(v4, n);
234
235 sub_v3_v3v3(e1, v2, v1);
236 sub_v3_v3v3(e2, v3, v4);
237
238 return 1;
239}
240
241static bool quad_co(const float v1[3],
242 const float v2[3],
243 const float v3[3],
244 const float v4[3],
245 const float p[3],
246 const float n[3],
247 float r_uv[2])
248{
249 float projverts[5][3], n2[3];
250 const float origin[2] = {0.0f, 0.0f};
251 int i;
252
253 /* project points into 2d along normal */
254 copy_v3_v3(projverts[0], v1);
255 copy_v3_v3(projverts[1], v2);
256 copy_v3_v3(projverts[2], v3);
257 copy_v3_v3(projverts[3], v4);
258 copy_v3_v3(projverts[4], p);
259
260 normal_quad_v3(n2, projverts[0], projverts[1], projverts[2], projverts[3]);
261
262 if (dot_v3v3(n, n2) < -FLT_EPSILON) {
263 return false;
264 }
265
266 /* rotate */
267 poly_rotate_plane(n, projverts, 5);
268
269 /* subtract origin */
270 for (i = 0; i < 4; i++) {
271 sub_v2_v2(projverts[i], projverts[4]);
272 }
273
274 if (!isect_point_quad_v2(origin, projverts[0], projverts[1], projverts[2], projverts[3])) {
275 return false;
276 }
277
278 resolve_quad_uv_v2(r_uv, origin, projverts[0], projverts[3], projverts[2], projverts[1]);
279
280 return true;
281}
282
283static void mdisp_axis_from_quad(const float v1[3],
284 const float v2[3],
285 float[3] /*v3[3]*/,
286 const float v4[3],
287 float r_axis_x[3],
288 float r_axis_y[3])
289{
290 sub_v3_v3v3(r_axis_x, v4, v1);
291 sub_v3_v3v3(r_axis_y, v2, v1);
292
293 normalize_v3(r_axis_x);
294 normalize_v3(r_axis_y);
295}
296
303static bool mdisp_in_mdispquad(BMLoop *l_src,
304 BMLoop *l_dst,
305 const float l_dst_f_center[3],
306 const float p[3],
307 int res,
308 float r_axis_x[3],
309 float r_axis_y[3],
310 float r_uv[2])
311{
312 float v1[3], v2[3], c[3], v3[3], v4[3], e1[3], e2[3];
313 float eps = FLT_EPSILON * 4000;
314
315 if (is_zero_v3(l_src->v->no)) {
317 }
318 if (is_zero_v3(l_dst->v->no)) {
320 }
321
322 compute_mdisp_quad(l_dst, l_dst_f_center, v1, v2, v3, v4, e1, e2);
323
324 /* expand quad a bit */
325 mid_v3_v3v3v3v3(c, v1, v2, v3, v4);
326
327 sub_v3_v3(v1, c);
328 sub_v3_v3(v2, c);
329 sub_v3_v3(v3, c);
330 sub_v3_v3(v4, c);
331 mul_v3_fl(v1, 1.0f + eps);
332 mul_v3_fl(v2, 1.0f + eps);
333 mul_v3_fl(v3, 1.0f + eps);
334 mul_v3_fl(v4, 1.0f + eps);
335 add_v3_v3(v1, c);
336 add_v3_v3(v2, c);
337 add_v3_v3(v3, c);
338 add_v3_v3(v4, c);
339
340 if (!quad_co(v1, v2, v3, v4, p, l_src->v->no, r_uv)) {
341 return false;
342 }
343
344 mul_v2_fl(r_uv, float(res - 1));
345
346 mdisp_axis_from_quad(v1, v2, v3, v4, r_axis_x, r_axis_y);
347
348 return true;
349}
350
351static float bm_loop_flip_equotion(float mat[2][2],
352 float b[2],
353 const float target_axis_x[3],
354 const float target_axis_y[3],
355 const float coord[3],
356 int i,
357 int j)
358{
359 mat[0][0] = target_axis_x[i];
360 mat[0][1] = target_axis_y[i];
361 mat[1][0] = target_axis_x[j];
362 mat[1][1] = target_axis_y[j];
363 b[0] = coord[i];
364 b[1] = coord[j];
365
366 return cross_v2v2(mat[0], mat[1]);
367}
368
/**
 * Re-express a tangential displacement given in the source grid's axes as
 * coefficients of the target grid's axes (used when copying multires
 * displacements between differently-oriented grids).
 */
static void bm_loop_flip_disp(const float source_axis_x[3],
                              const float source_axis_y[3],
                              const float target_axis_x[3],
                              const float target_axis_y[3],
                              float disp[3])
{
  float vx[3], vy[3], coord[3];
  float n[3], vec[3];
  float b[2], mat[2][2], d;

  /* World-space offset encoded by the source grid's displacement. */
  mul_v3_v3fl(vx, source_axis_x, disp[0]);
  mul_v3_v3fl(vy, source_axis_y, disp[1]);
  add_v3_v3v3(coord, vx, vy);

  /* project displacement from source grid plane onto target grid plane */
  cross_v3_v3v3(n, target_axis_x, target_axis_y);
  project_v3_v3v3(vec, coord, n);
  sub_v3_v3v3(coord, coord, vec);

  /* Solve the 2x2 system, retrying with other coordinate pairs when the
   * determinant is near zero (degenerate projection).
   * NOTE(review): if all three determinants are tiny this still divides by
   * `d` — presumably inputs avoid that case; confirm before relying on it. */
  d = bm_loop_flip_equotion(mat, b, target_axis_x, target_axis_y, coord, 0, 1);

  if (fabsf(d) < 1e-4f) {
    d = bm_loop_flip_equotion(mat, b, target_axis_x, target_axis_y, coord, 0, 2);
    if (fabsf(d) < 1e-4f) {
      d = bm_loop_flip_equotion(mat, b, target_axis_x, target_axis_y, coord, 1, 2);
    }
  }

  /* Cramer's rule. */
  disp[0] = (b[0] * mat[1][1] - mat[0][1] * b[1]) / d;
  disp[1] = (mat[0][0] * b[1] - b[0] * mat[1][0]) / d;
}
400
405
407 const float *f_src_center;
408
409 float *axis_x, *axis_y;
410 float *v1, *v4;
411 float *e1, *e2;
412
413 int res;
414 float d;
415};
416
/**
 * Task callback: fill one column (`ix`) of the destination multires grid by
 * locating, for each grid point, the source loop whose grid quad contains it
 * and sampling that grid bilinearly.
 */
static void loop_interp_multires_cb(void *__restrict userdata,
                                    const int ix,
                                    const TaskParallelTLS *__restrict /*tls*/)
{
  BMLoopInterpMultiresData *data = static_cast<BMLoopInterpMultiresData *>(userdata);

  BMLoop *l_first = data->l_src_first;
  BMLoop *l_dst = data->l_dst;
  const int cd_loop_mdisp_offset = data->cd_loop_mdisp_offset;

  MDisps *md_dst = data->md_dst;
  const float *f_src_center = data->f_src_center;

  float *axis_x = data->axis_x;
  float *axis_y = data->axis_y;

  float *v1 = data->v1;
  float *v4 = data->v4;
  float *e1 = data->e1;
  float *e2 = data->e2;

  const int res = data->res;
  const float d = data->d;

  float x = d * ix, y;
  int iy;
  for (y = 0.0f, iy = 0; iy < res; y += d, iy++) {
    BMLoop *l_iter = l_first;
    float co1[3], co2[3], co[3];

    /* Bilinear 3D position of this grid point on the destination quad. */
    madd_v3_v3v3fl(co1, v1, e1, y);
    madd_v3_v3v3fl(co2, v4, e2, y);
    interp_v3_v3v3(co, co1, co2, x);

    /* Find the first source loop whose grid quad contains `co`. */
    do {
      MDisps *md_src;
      float src_axis_x[3], src_axis_y[3];
      float uv[2];

      md_src = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l_iter, cd_loop_mdisp_offset));

      if (mdisp_in_mdispquad(l_dst, l_iter, f_src_center, co, res, src_axis_x, src_axis_y, uv)) {
        /* Sample the source grid, then re-orient the displacement to the
         * destination grid's tangent axes. */
        old_mdisps_bilinear(md_dst->disps[iy * res + ix], md_src->disps, res, uv[0], uv[1]);
        bm_loop_flip_disp(src_axis_x, src_axis_y, axis_x, axis_y, md_dst->disps[iy * res + ix]);

        break;
      }
    } while ((l_iter = l_iter->next) != l_first);
  }
}
467
469 BMLoop *l_dst,
470 const BMFace *f_src,
471 const float f_dst_center[3],
472 const float f_src_center[3],
473 const int cd_loop_mdisp_offset)
474{
475 MDisps *md_dst;
476 float v1[3], v2[3], v3[3], v4[3] = {0.0f, 0.0f, 0.0f}, e1[3], e2[3];
477 float axis_x[3], axis_y[3];
478
479 /* ignore 2-edged faces */
480 if (UNLIKELY(l_dst->f->len < 3)) {
481 return;
482 }
483
484 md_dst = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l_dst, cd_loop_mdisp_offset));
485 compute_mdisp_quad(l_dst, f_dst_center, v1, v2, v3, v4, e1, e2);
486
487 /* if no disps data allocate a new grid, the size of the first grid in f_src. */
488 if (!md_dst->totdisp) {
489 const MDisps *md_src = static_cast<const MDisps *>(
490 BM_ELEM_CD_GET_VOID_P(BM_FACE_FIRST_LOOP(f_src), cd_loop_mdisp_offset));
491
492 md_dst->totdisp = md_src->totdisp;
493 md_dst->level = md_src->level;
494 if (md_dst->totdisp) {
495 md_dst->disps = static_cast<float(*)[3]>(
496 MEM_callocN(sizeof(float[3]) * md_dst->totdisp, __func__));
497 }
498 else {
499 return;
500 }
501 }
502
503 mdisp_axis_from_quad(v1, v2, v3, v4, axis_x, axis_y);
504
505 const int res = int(sqrt(md_dst->totdisp));
506 BMLoopInterpMultiresData data = {};
507 data.l_dst = l_dst;
508 data.l_src_first = BM_FACE_FIRST_LOOP(f_src);
509 data.cd_loop_mdisp_offset = cd_loop_mdisp_offset;
510 data.md_dst = md_dst;
511 data.f_src_center = f_src_center;
512 data.axis_x = axis_x;
513 data.axis_y = axis_y;
514 data.v1 = v1;
515 data.v4 = v4;
516 data.e1 = e1;
517 data.e2 = e2;
518 data.res = res;
519 data.d = 1.0f / float(res - 1);
520
521 TaskParallelSettings settings;
523 settings.use_threading = (res > 5);
524 BLI_task_parallel_range(0, res, &data, loop_interp_multires_cb, &settings);
525}
526
527void BM_loop_interp_multires(BMesh *bm, BMLoop *l_dst, const BMFace *f_src)
528{
529 const int cd_loop_mdisp_offset = CustomData_get_offset(&bm->ldata, CD_MDISPS);
530
531 if (cd_loop_mdisp_offset != -1) {
532 float f_dst_center[3];
533 float f_src_center[3];
534
535 BM_face_calc_center_median(l_dst->f, f_dst_center);
536 BM_face_calc_center_median(f_src, f_src_center);
537
538 BM_loop_interp_multires_ex(bm, l_dst, f_src, f_dst_center, f_src_center, cd_loop_mdisp_offset);
539 }
540}
541
543 BMFace *f_dst,
544 const BMFace *f_src,
545 const float f_dst_center[3],
546 const float f_src_center[3],
547 const int cd_loop_mdisp_offset)
548{
549 BMLoop *l_iter, *l_first;
550 l_iter = l_first = BM_FACE_FIRST_LOOP(f_dst);
551 do {
553 bm, l_iter, f_src, f_dst_center, f_src_center, cd_loop_mdisp_offset);
554 } while ((l_iter = l_iter->next) != l_first);
555}
556
557void BM_face_interp_multires(BMesh *bm, BMFace *f_dst, const BMFace *f_src)
558{
559 const int cd_loop_mdisp_offset = CustomData_get_offset(&bm->ldata, CD_MDISPS);
560
561 if (cd_loop_mdisp_offset != -1) {
562 float f_dst_center[3];
563 float f_src_center[3];
564
565 BM_face_calc_center_median(f_dst, f_dst_center);
566 BM_face_calc_center_median(f_src, f_src_center);
567
568 BM_face_interp_multires_ex(bm, f_dst, f_src, f_dst_center, f_src_center, cd_loop_mdisp_offset);
569 }
570}
571
573{
574 const int cd_loop_mdisp_offset = CustomData_get_offset(&bm->ldata, CD_MDISPS);
575 BMLoop *l;
576 BMIter liter;
577
578 if (cd_loop_mdisp_offset == -1) {
579 return;
580 }
581
582 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
583 MDisps *mdp = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l->prev, cd_loop_mdisp_offset));
584 MDisps *mdl = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l, cd_loop_mdisp_offset));
585 MDisps *mdn = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l->next, cd_loop_mdisp_offset));
586 float co1[3];
587 int sides;
588 int y;
589
605 sides = int(sqrt(mdp->totdisp));
606 for (y = 0; y < sides; y++) {
607 mid_v3_v3v3(co1, mdn->disps[y * sides], mdl->disps[y]);
608
609 copy_v3_v3(mdn->disps[y * sides], co1);
610 copy_v3_v3(mdl->disps[y], co1);
611 }
612 }
613
614 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
615 MDisps *mdl1 = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l, cd_loop_mdisp_offset));
616 MDisps *mdl2;
617 float co1[3], co2[3], co[3];
618 int sides;
619 int y;
620
636 if (l->radial_next == l) {
637 continue;
638 }
639
640 if (l->radial_next->v == l->v) {
641 mdl2 = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l->radial_next, cd_loop_mdisp_offset));
642 }
643 else {
644 mdl2 = static_cast<MDisps *>(
645 BM_ELEM_CD_GET_VOID_P(l->radial_next->next, cd_loop_mdisp_offset));
646 }
647
648 sides = int(sqrt(mdl1->totdisp));
649 for (y = 0; y < sides; y++) {
650 int a1, a2, o1, o2;
651
652 if (l->v != l->radial_next->v) {
653 a1 = sides * y + sides - 2;
654 a2 = (sides - 2) * sides + y;
655
656 o1 = sides * y + sides - 1;
657 o2 = (sides - 1) * sides + y;
658 }
659 else {
660 a1 = sides * y + sides - 2;
661 a2 = sides * y + sides - 2;
662 o1 = sides * y + sides - 1;
663 o2 = sides * y + sides - 1;
664 }
665
666 /* magic blending numbers, hardcoded! */
667 add_v3_v3v3(co1, mdl1->disps[a1], mdl2->disps[a2]);
668 mul_v3_fl(co1, 0.18);
669
670 add_v3_v3v3(co2, mdl1->disps[o1], mdl2->disps[o2]);
671 mul_v3_fl(co2, 0.32);
672
673 add_v3_v3v3(co, co1, co2);
674
675 copy_v3_v3(mdl1->disps[o1], co);
676 copy_v3_v3(mdl2->disps[o2], co);
677 }
678 }
679}
680
682 BMesh *bm, BMLoop *l_dst, const BMFace *f_src, const bool do_vertex, const bool do_multires)
683{
684 BMLoop *l_iter;
685 BMLoop *l_first;
686 const void **vblocks = do_vertex ?
687 static_cast<const void **>(BLI_array_alloca(vblocks, f_src->len)) :
688 nullptr;
689 const void **blocks = static_cast<const void **>(BLI_array_alloca(blocks, f_src->len));
690 float(*cos_2d)[2] = static_cast<float(*)[2]>(BLI_array_alloca(cos_2d, f_src->len));
691 float *w = static_cast<float *>(BLI_array_alloca(w, f_src->len));
692 float axis_mat[3][3]; /* use normal to transform into 2d xy coords */
693 float co[2];
694
695 /* Convert the 3d coords into 2d for projection. */
696 float axis_dominant[3];
697 if (!is_zero_v3(f_src->no)) {
699 copy_v3_v3(axis_dominant, f_src->no);
700 }
701 else {
702 /* Rare case in which all the vertices of the face are aligned.
703 * Get a random axis that is orthogonal to the tangent. */
704 float vec[3];
705 BM_face_calc_tangent_auto(f_src, vec);
706 ortho_v3_v3(axis_dominant, vec);
707 normalize_v3(axis_dominant);
708 }
709 axis_dominant_v3_to_m3(axis_mat, axis_dominant);
710
711 int i = 0;
712 l_iter = l_first = BM_FACE_FIRST_LOOP(f_src);
713 do {
714 mul_v2_m3v3(cos_2d[i], axis_mat, l_iter->v->co);
715 blocks[i] = l_iter->head.data;
716
717 if (do_vertex) {
718 vblocks[i] = l_iter->v->head.data;
719 }
720 } while ((void)i++, (l_iter = l_iter->next) != l_first);
721
722 mul_v2_m3v3(co, axis_mat, l_dst->v->co);
723
724 /* interpolate */
725 interp_weights_poly_v2(w, cos_2d, f_src->len, co);
726 CustomData_bmesh_interp(&bm->ldata, blocks, w, nullptr, f_src->len, l_dst->head.data);
727 if (do_vertex) {
728 CustomData_bmesh_interp(&bm->vdata, vblocks, w, nullptr, f_src->len, l_dst->v->head.data);
729 }
730
731 if (do_multires) {
732 BM_loop_interp_multires(bm, l_dst, f_src);
733 }
734}
735
736void BM_vert_interp_from_face(BMesh *bm, BMVert *v_dst, const BMFace *f_src)
737{
738 BMLoop *l_iter;
739 BMLoop *l_first;
740 const void **blocks = static_cast<const void **>(BLI_array_alloca(blocks, f_src->len));
741 float(*cos_2d)[2] = static_cast<float(*)[2]>(BLI_array_alloca(cos_2d, f_src->len));
742 float *w = static_cast<float *>(BLI_array_alloca(w, f_src->len));
743 float axis_mat[3][3]; /* use normal to transform into 2d xy coords */
744 float co[2];
745
746 /* convert the 3d coords into 2d for projection */
748 axis_dominant_v3_to_m3(axis_mat, f_src->no);
749
750 int i = 0;
751 l_iter = l_first = BM_FACE_FIRST_LOOP(f_src);
752 do {
753 mul_v2_m3v3(cos_2d[i], axis_mat, l_iter->v->co);
754 blocks[i] = l_iter->v->head.data;
755 } while ((void)i++, (l_iter = l_iter->next) != l_first);
756
757 mul_v2_m3v3(co, axis_mat, v_dst->co);
758
759 /* interpolate */
760 interp_weights_poly_v2(w, cos_2d, f_src->len, co);
761 CustomData_bmesh_interp(&bm->vdata, blocks, w, nullptr, f_src->len, v_dst->head.data);
762}
763
764static void update_data_blocks(BMesh *bm, CustomData *olddata, CustomData *data)
765{
766 const BMCustomDataCopyMap cd_map = CustomData_bmesh_copy_map_calc(*olddata, *data);
767
768 BMIter iter;
769 BLI_mempool *oldpool = olddata->pool;
770 void *block;
771
772 if (data == &bm->vdata) {
773 BMVert *eve;
774
776
777 BM_ITER_MESH (eve, &iter, bm, BM_VERTS_OF_MESH) {
778 block = nullptr;
779 CustomData_bmesh_copy_block(*data, cd_map, eve->head.data, &block);
780 CustomData_bmesh_free_block(olddata, &eve->head.data);
781 eve->head.data = block;
782 }
783 }
784 else if (data == &bm->edata) {
785 BMEdge *eed;
786
788
789 BM_ITER_MESH (eed, &iter, bm, BM_EDGES_OF_MESH) {
790 block = nullptr;
791 CustomData_bmesh_copy_block(*data, cd_map, eed->head.data, &block);
792 CustomData_bmesh_free_block(olddata, &eed->head.data);
793 eed->head.data = block;
794 }
795 }
796 else if (data == &bm->ldata) {
797 BMIter liter;
798 BMFace *efa;
799 BMLoop *l;
800
802 BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
803 BM_ITER_ELEM (l, &liter, efa, BM_LOOPS_OF_FACE) {
804 block = nullptr;
805 CustomData_bmesh_copy_block(*data, cd_map, l->head.data, &block);
807 l->head.data = block;
808 }
809 }
810 }
811 else if (data == &bm->pdata) {
812 BMFace *efa;
813
815
816 BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
817 block = nullptr;
818 CustomData_bmesh_copy_block(*data, cd_map, efa->head.data, &block);
819 CustomData_bmesh_free_block(olddata, &efa->head.data);
820 efa->head.data = block;
821 }
822 }
823 else {
824 /* should never reach this! */
825 BLI_assert(0);
826 }
827
828 if (oldpool) {
829 /* this should never happen but can when dissolve fails - #28960. */
830 BLI_assert(data->pool != oldpool);
831
832 BLI_mempool_destroy(oldpool);
833 }
834}
835
836void BM_data_layer_add(BMesh *bm, CustomData *data, int type)
837{
838 CustomData olddata = *data;
839 olddata.layers = (olddata.layers) ?
840 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
841 nullptr;
842 /* The pool is now owned by `olddata` and must not be shared. */
843 data->pool = nullptr;
844
846
847 update_data_blocks(bm, &olddata, data);
848 if (olddata.layers) {
849 MEM_freeN(olddata.layers);
850 }
851}
852
853void BM_data_layer_add_named(BMesh *bm, CustomData *data, int type, const char *name)
854{
855 CustomData olddata = *data;
856 olddata.layers = (olddata.layers) ?
857 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
858 nullptr;
859 /* The pool is now owned by `olddata` and must not be shared. */
860 data->pool = nullptr;
861
863
864 update_data_blocks(bm, &olddata, data);
865 if (olddata.layers) {
866 MEM_freeN(olddata.layers);
867 }
868}
869
870void BM_data_layer_ensure_named(BMesh *bm, CustomData *data, int type, const char *name)
871{
872 if (CustomData_get_named_layer_index(data, eCustomDataType(type), name) == -1) {
873 BM_data_layer_add_named(bm, data, type, name);
874 }
875}
876
878{
879 const int nr_uv_layers = CustomData_number_of_layers(&bm->ldata, CD_PROP_FLOAT2);
880 for (int l = 0; l < nr_uv_layers; l++) {
881 /* NOTE: you can't re-use the return-value of #CustomData_get_layer_name()
882 * because adding layers can invalidate that. */
883 char name[MAX_CUSTOMDATA_LAYER_NAME];
885 bm,
886 &bm->ldata,
889 name));
891 bm,
892 &bm->ldata,
895 name));
897 bm,
898 &bm->ldata,
901 }
902}
903
904void BM_uv_map_ensure_vert_select_attr(BMesh *bm, const char *uv_map_name)
905{
906 char name[MAX_CUSTOMDATA_LAYER_NAME];
908 bm, &bm->ldata, CD_PROP_BOOL, BKE_uv_map_vert_select_name_get(uv_map_name, name));
909}
910
911void BM_uv_map_ensure_edge_select_attr(BMesh *bm, const char *uv_map_name)
912{
913 char name[MAX_CUSTOMDATA_LAYER_NAME];
915 bm, &bm->ldata, CD_PROP_BOOL, BKE_uv_map_edge_select_name_get(uv_map_name, name));
916}
917
918void BM_uv_map_ensure_pin_attr(BMesh *bm, const char *uv_map_name)
919{
920 char name[MAX_CUSTOMDATA_LAYER_NAME];
922 bm, &bm->ldata, CD_PROP_BOOL, BKE_uv_map_pin_name_get(uv_map_name, name));
923}
924
925void BM_data_layer_free(BMesh *bm, CustomData *data, int type)
926{
927 CustomData olddata = *data;
928 olddata.layers = (olddata.layers) ?
929 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
930 nullptr;
931 /* The pool is now owned by `olddata` and must not be shared. */
932 data->pool = nullptr;
933
934 const bool had_layer = CustomData_free_layer_active(data, eCustomDataType(type), 0);
935 /* Assert because its expensive to realloc - better not do if layer isn't present. */
936 BLI_assert(had_layer != false);
937 UNUSED_VARS_NDEBUG(had_layer);
938
939 update_data_blocks(bm, &olddata, data);
940 if (olddata.layers) {
941 MEM_freeN(olddata.layers);
942 }
943}
944
945bool BM_data_layer_free_named(BMesh *bm, CustomData *data, const char *name)
946{
947 CustomData olddata = *data;
948 olddata.layers = (olddata.layers) ?
949 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
950 nullptr;
951 /* The pool is now owned by `olddata` and must not be shared. */
952 data->pool = nullptr;
953
954 const bool had_layer = CustomData_free_layer_named(data, name, 0);
955
956 if (had_layer) {
957 update_data_blocks(bm, &olddata, data);
958 }
959 else {
960 /* Move pool ownership back to BMesh CustomData, no block reallocation. */
961 data->pool = olddata.pool;
962 }
963
964 if (olddata.layers) {
965 MEM_freeN(olddata.layers);
966 }
967
968 return had_layer;
969}
970
971void BM_data_layer_free_n(BMesh *bm, CustomData *data, int type, int n)
972{
973 CustomData olddata = *data;
974 olddata.layers = (olddata.layers) ?
975 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
976 nullptr;
977 /* The pool is now owned by `olddata` and must not be shared. */
978 data->pool = nullptr;
979
980 const bool had_layer = CustomData_free_layer(
981 data,
982 eCustomDataType(type),
983 0,
985 /* Assert because its expensive to realloc - better not do if layer isn't present. */
986 BLI_assert(had_layer != false);
987 UNUSED_VARS_NDEBUG(had_layer);
988
989 update_data_blocks(bm, &olddata, data);
990 if (olddata.layers) {
991 MEM_freeN(olddata.layers);
992 }
993}
994
995void BM_data_layer_copy(BMesh *bm, CustomData *data, int type, int src_n, int dst_n)
996{
997 BMIter iter;
998
999 if (&bm->vdata == data) {
1000 BMVert *eve;
1001
1002 BM_ITER_MESH (eve, &iter, bm, BM_VERTS_OF_MESH) {
1003 void *ptr = CustomData_bmesh_get_n(data, eve->head.data, eCustomDataType(type), src_n);
1004 CustomData_bmesh_set_n(data, eve->head.data, eCustomDataType(type), dst_n, ptr);
1005 }
1006 }
1007 else if (&bm->edata == data) {
1008 BMEdge *eed;
1009
1010 BM_ITER_MESH (eed, &iter, bm, BM_EDGES_OF_MESH) {
1011 void *ptr = CustomData_bmesh_get_n(data, eed->head.data, eCustomDataType(type), src_n);
1012 CustomData_bmesh_set_n(data, eed->head.data, eCustomDataType(type), dst_n, ptr);
1013 }
1014 }
1015 else if (&bm->pdata == data) {
1016 BMFace *efa;
1017
1018 BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
1019 void *ptr = CustomData_bmesh_get_n(data, efa->head.data, eCustomDataType(type), src_n);
1020 CustomData_bmesh_set_n(data, efa->head.data, eCustomDataType(type), dst_n, ptr);
1021 }
1022 }
1023 else if (&bm->ldata == data) {
1024 BMIter liter;
1025 BMFace *efa;
1026 BMLoop *l;
1027
1028 BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
1029 BM_ITER_ELEM (l, &liter, efa, BM_LOOPS_OF_FACE) {
1030 void *ptr = CustomData_bmesh_get_n(data, l->head.data, eCustomDataType(type), src_n);
1031 CustomData_bmesh_set_n(data, l->head.data, eCustomDataType(type), dst_n, ptr);
1032 }
1033 }
1034 }
1035 else {
1036 /* should never reach this! */
1037 BLI_assert(0);
1038 }
1039}
1040
1041float BM_elem_float_data_get(CustomData *cd, void *element, int type)
1042{
1043 const float *f = static_cast<const float *>(
1044 CustomData_bmesh_get(cd, ((BMHeader *)element)->data, eCustomDataType(type)));
1045 return f ? *f : 0.0f;
1046}
1047
1048void BM_elem_float_data_set(CustomData *cd, void *element, int type, const float val)
1049{
1050 float *f = static_cast<float *>(
1051 CustomData_bmesh_get(cd, ((BMHeader *)element)->data, eCustomDataType(type)));
1052 if (f) {
1053 *f = val;
1054 }
1055}
1056
1057/* -------------------------------------------------------------------- */
1077 /* same for all groups */
1078 int type;
1080 const float *loop_weights;
1082
1083 /* --- Per loop fan vars --- */
1084
1085 /* reference for this contiguous fan */
1086 const void *data_ref;
1088
1089 /* accumulate 'LoopGroupCD.weight' to make unit length */
1091
1092 /* both arrays the size of the 'BM_vert_face_count(v)'
1093 * each contiguous fan gets a slide of these arrays */
1097};
1098
/* Store vars to pass into 'CustomData_bmesh_interp'.
 * (Reconstructs the struct header & members dropped by the documentation
 * scrape; fields match their usage in the merge functions.) */
struct LoopGroupCD {
  /* direct customdata pointer array */
  void **data;
  /* weights (aligned with 'data') */
  float *data_weights;
  /* index-in-face */
  int *data_index;
  /* number of loops in the fan */
  int data_len;
};
1110
1112{
1113 const int i = BM_elem_index_get(l);
1114 const float w = lwc->loop_weights[i];
1117 lwc->data_index_array[lwc->data_len] = i;
1118 lwc->weight_array[lwc->data_len] = w;
1119 lwc->weight_accum += w;
1120
1121 lwc->data_len += 1;
1122}
1123
1129static void bm_loop_walk_data(LoopWalkCtx *lwc, BMLoop *l_walk)
1130{
1131 int i;
1132
1134 lwc->data_ref,
1135 BM_ELEM_CD_GET_VOID_P(l_walk, lwc->cd_layer_offset)));
1137
1138 bm_loop_walk_add(lwc, l_walk);
1139
1140 /* recurse around this loop-fan (in both directions) */
1141 for (i = 0; i < 2; i++) {
1142 BMLoop *l_other = ((i == 0) ? l_walk : l_walk->prev)->radial_next;
1143 if (l_other->radial_next != l_other) {
1144 if (l_other->v != l_walk->v) {
1145 l_other = l_other->next;
1146 }
1147 BLI_assert(l_other->v == l_walk->v);
1150 lwc->data_ref,
1151 BM_ELEM_CD_GET_VOID_P(l_other, lwc->cd_layer_offset)))
1152 {
1153 bm_loop_walk_data(lwc, l_other);
1154 }
1155 }
1156 }
1157 }
1158}
1159
1161 BMesh *bm, BMVert *v, const int layer_n, const float *loop_weights, MemArena *arena)
1162{
1163 LoopWalkCtx lwc;
1164 LinkNode *groups = nullptr;
1165 BMLoop *l;
1166 BMIter liter;
1167 int loop_num;
1168
1169 lwc.type = bm->ldata.layers[layer_n].type;
1170 lwc.cd_layer_offset = bm->ldata.layers[layer_n].offset;
1171 lwc.loop_weights = loop_weights;
1172 lwc.arena = arena;
1173
1174 /* Enable 'BM_ELEM_INTERNAL_TAG', leaving the flag clean on completion. */
1175 loop_num = 0;
1176 BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
1178 BM_elem_index_set(l, loop_num); /* set_dirty! */
1179 loop_num++;
1180 }
1182
1183 lwc.data_len = 0;
1184 lwc.data_array = static_cast<void **>(BLI_memarena_alloc(lwc.arena, sizeof(void *) * loop_num));
1185 lwc.data_index_array = static_cast<int *>(BLI_memarena_alloc(lwc.arena, sizeof(int) * loop_num));
1186 lwc.weight_array = static_cast<float *>(BLI_memarena_alloc(lwc.arena, sizeof(float) * loop_num));
1187
1188 BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
1190 LoopGroupCD *lf = static_cast<LoopGroupCD *>(BLI_memarena_alloc(lwc.arena, sizeof(*lf)));
1191 int len_prev = lwc.data_len;
1192
1194
1195 /* assign len-last */
1196 lf->data = &lwc.data_array[lwc.data_len];
1197 lf->data_index = &lwc.data_index_array[lwc.data_len];
1198 lf->data_weights = &lwc.weight_array[lwc.data_len];
1199 lwc.weight_accum = 0.0f;
1200
1201 /* new group */
1202 bm_loop_walk_data(&lwc, l);
1203 lf->data_len = lwc.data_len - len_prev;
1204
1205 if (LIKELY(lwc.weight_accum != 0.0f)) {
1206 mul_vn_fl(lf->data_weights, lf->data_len, 1.0f / lwc.weight_accum);
1207 }
1208 else {
1209 copy_vn_fl(lf->data_weights, lf->data_len, 1.0f / float(lf->data_len));
1210 }
1211
1212 BLI_linklist_prepend_arena(&groups, lf, lwc.arena);
1213 }
1214 }
1215
1216 BLI_assert(lwc.data_len == loop_num);
1217
1218 return groups;
1219}
1220
1222 void *lf_p,
1223 int layer_n,
1224 void *data_tmp)
1225{
1226 LoopGroupCD *lf = static_cast<LoopGroupCD *>(lf_p);
1227 const int type = bm->ldata.layers[layer_n].type;
1228 int i;
1229 const float *data_weights;
1230
1231 data_weights = lf->data_weights;
1232
1234 &bm->ldata, (const void **)lf->data, data_weights, nullptr, lf->data_len, data_tmp, layer_n);
1235
1236 for (i = 0; i < lf->data_len; i++) {
1237 CustomData_copy_elements(eCustomDataType(type), data_tmp, lf->data[i], 1);
1238 }
1239}
1240
1242 BMesh *bm, void *lf_p, const int layer_n, void *data_tmp, const float *loop_weights)
1243{
1244 LoopGroupCD *lf = static_cast<LoopGroupCD *>(lf_p);
1245 const int type = bm->ldata.layers[layer_n].type;
1246 int i;
1247 const float *data_weights;
1248
1249 /* re-weight */
1250 float *temp_weights = static_cast<float *>(BLI_array_alloca(temp_weights, lf->data_len));
1251 float weight_accum = 0.0f;
1252
1253 for (i = 0; i < lf->data_len; i++) {
1254 float w = loop_weights[lf->data_index[i]] * lf->data_weights[i];
1255 temp_weights[i] = w;
1256 weight_accum += w;
1257 }
1258
1259 if (LIKELY(weight_accum != 0.0f)) {
1260 mul_vn_fl(temp_weights, lf->data_len, 1.0f / weight_accum);
1261 data_weights = temp_weights;
1262 }
1263 else {
1264 data_weights = lf->data_weights;
1265 }
1266
1268 &bm->ldata, (const void **)lf->data, data_weights, nullptr, lf->data_len, data_tmp, layer_n);
1269
1270 for (i = 0; i < lf->data_len; i++) {
1271 CustomData_copy_elements(eCustomDataType(type), data_tmp, lf->data[i], 1);
1272 }
1273}
1274
1275void BM_vert_loop_groups_data_layer_merge(BMesh *bm, LinkNode *groups, const int layer_n)
1276{
1277 const int type = bm->ldata.layers[layer_n].type;
1278 const int size = CustomData_sizeof(eCustomDataType(type));
1279 void *data_tmp = alloca(size);
1280
1281 do {
1282 bm_vert_loop_groups_data_layer_merge__single(bm, groups->link, layer_n, data_tmp);
1283 } while ((groups = groups->next));
1284}
1285
1287 LinkNode *groups,
1288 const int layer_n,
1289 const float *loop_weights)
1290{
1291 const int type = bm->ldata.layers[layer_n].type;
1292 const int size = CustomData_sizeof(eCustomDataType(type));
1293 void *data_tmp = alloca(size);
1294
1295 do {
1297 bm, groups->link, layer_n, data_tmp, loop_weights);
1298 } while ((groups = groups->next));
1299}
1300
const char * BKE_uv_map_pin_name_get(const char *uv_map_name, char *buffer)
const char * BKE_uv_map_vert_select_name_get(const char *uv_map_name, char *buffer)
const char * BKE_uv_map_edge_select_name_get(const char *uv_map_name, char *buffer)
CustomData interface, see also DNA_customdata_types.h.
int CustomData_sizeof(eCustomDataType type)
int CustomData_get_offset(const CustomData *data, eCustomDataType type)
int CustomData_get_layer_index_n(const CustomData *data, eCustomDataType type, int n)
bool CustomData_free_layer_named(CustomData *data, blender::StringRef name, const int totelem)
@ CD_SET_DEFAULT
void CustomData_copy_elements(eCustomDataType type, void *src_data_ofs, void *dst_data_ofs, int count)
void CustomData_bmesh_free_block(CustomData *data, void **block)
void * CustomData_add_layer_named(CustomData *data, eCustomDataType type, eCDAllocType alloctype, int totelem, blender::StringRef name)
BMCustomDataCopyMap CustomData_bmesh_copy_map_calc(const CustomData &src, const CustomData &dst, eCustomDataMask mask_exclude=0)
void CustomData_bmesh_init_pool(CustomData *data, int totelem, char htype)
void * CustomData_bmesh_get_n(const CustomData *data, void *block, eCustomDataType type, int n)
bool CustomData_free_layer_active(CustomData *data, eCustomDataType type, int totelem)
int CustomData_get_named_layer_index(const CustomData *data, eCustomDataType type, blender::StringRef name)
const char * CustomData_get_layer_name(const CustomData *data, eCustomDataType type, int n)
bool CustomData_free_layer(CustomData *data, eCustomDataType type, int totelem, int index)
void * CustomData_bmesh_get(const CustomData *data, void *block, eCustomDataType type)
void CustomData_bmesh_interp(CustomData *data, const void **src_blocks, const float *weights, const float *sub_weights, int count, void *dst_block)
void CustomData_bmesh_set_n(CustomData *data, void *block, eCustomDataType type, int n, const void *source)
void CustomData_bmesh_copy_block(CustomData &data, void *src_block, void **dst_block)
void * CustomData_add_layer(CustomData *data, eCustomDataType type, eCDAllocType alloctype, int totelem)
bool CustomData_data_equals(eCustomDataType type, const void *data1, const void *data2)
int CustomData_number_of_layers(const CustomData *data, eCustomDataType type)
void CustomData_bmesh_interp_n(CustomData *data, const void **src_blocks, const float *weights, const float *sub_weights, int count, void *dst_block_ofs, int n)
void old_mdisps_bilinear(float out[3], float(*disps)[3], int st, float u, float v)
Definition multires.cc:1290
#define BLI_array_alloca(arr, realsize)
Definition BLI_alloca.h:25
#define BLI_assert(a)
Definition BLI_assert.h:50
sqrt(x)+1/max(0
int isect_point_quad_v2(const float p[2], const float v1[2], const float v2[2], const float v3[2], const float v4[2])
float normal_quad_v3(float n[3], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
Definition math_geom.cc:56
void resolve_quad_uv_v2(float r_uv[2], const float st[2], const float st0[2], const float st1[2], const float st2[2], const float st3[2])
void axis_dominant_v3_to_m3(float r_mat[3][3], const float normal[3])
Normal to x,y matrix.
void interp_weights_poly_v2(float w[], float v[][2], int n, const float co[2])
void mul_v2_m3v3(float r[2], const float M[3][3], const float a[3])
void mid_v3_v3v3v3v3(float v[3], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
void mul_vn_fl(float *array_tar, int size, float f)
MINLINE void sub_v2_v2(float r[2], const float a[2])
MINLINE void sub_v3_v3(float r[3], const float a[3])
MINLINE bool equals_v3v3(const float v1[3], const float v2[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void sub_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void mul_v2_fl(float r[2], float f)
MINLINE void mul_v3_fl(float r[3], float f)
MINLINE void copy_v3_v3(float r[3], const float a[3])
void copy_vn_fl(float *array_tar, int size, float val)
void project_v3_v3v3(float out[3], const float p[3], const float v_proj[3])
MINLINE float dot_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
void interp_v3_v3v3(float r[3], const float a[3], const float b[3], float t)
Definition math_vector.c:36
MINLINE void add_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void cross_v3_v3v3(float r[3], const float a[3], const float b[3])
void ortho_v3_v3(float out[3], const float v[3])
MINLINE float cross_v2v2(const float a[2], const float b[2]) ATTR_WARN_UNUSED_RESULT
MINLINE bool is_zero_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT
void mid_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void madd_v3_v3v3fl(float r[3], const float a[3], const float b[3], float f)
MINLINE void mul_v3_v3fl(float r[3], const float a[3], float f)
MINLINE void add_v3_v3(float r[3], const float a[3])
MINLINE float normalize_v3(float n[3])
void * BLI_memarena_alloc(struct MemArena *ma, size_t size) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1) ATTR_MALLOC ATTR_ALLOC_SIZE(2)
void BLI_mempool_destroy(BLI_mempool *pool) ATTR_NONNULL(1)
void BLI_task_parallel_range(int start, int stop, void *userdata, TaskParallelRangeFunc func, const TaskParallelSettings *settings)
Definition task_range.cc:99
BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *settings)
Definition BLI_task.h:230
#define UNUSED_FUNCTION(x)
#define UNUSED_VARS_NDEBUG(...)
#define UNLIKELY(x)
#define LIKELY(x)
#define MAX_CUSTOMDATA_LAYER_NAME
@ CD_PROP_FLOAT2
Read Guarded memory(de)allocation.
@ BM_LOOP
@ BM_ELEM_INTERNAL_TAG
#define BM_FACE_FIRST_LOOP(p)
#define BM_ELEM_CD_GET_VOID_P(ele, offset)
#define BM_elem_index_get(ele)
#define BM_elem_flag_disable(ele, hflag)
#define BM_elem_index_set(ele, index)
#define BM_elem_flag_test(ele, hflag)
#define BM_elem_flag_enable(ele, hflag)
void BM_face_interp_multires(BMesh *bm, BMFace *f_dst, const BMFace *f_src)
static int compute_mdisp_quad(const BMLoop *l, const float l_f_center[3], float v1[3], float v2[3], float v3[3], float v4[3], float e1[3], float e2[3])
Multires Interpolation.
static void bm_loop_walk_add(LoopWalkCtx *lwc, BMLoop *l)
void BM_data_layer_free_n(BMesh *bm, CustomData *data, int type, int n)
void BM_data_layer_free(BMesh *bm, CustomData *data, int type)
float BM_elem_float_data_get(CustomData *cd, void *element, int type)
void BM_vert_interp_from_face(BMesh *bm, BMVert *v_dst, const BMFace *f_src)
static void loop_interp_multires_cb(void *__restrict userdata, const int ix, const TaskParallelTLS *__restrict)
void BM_face_interp_multires_ex(BMesh *bm, BMFace *f_dst, const BMFace *f_src, const float f_dst_center[3], const float f_src_center[3], const int cd_loop_mdisp_offset)
void BM_data_layer_copy(BMesh *bm, CustomData *data, int type, int src_n, int dst_n)
void BM_vert_loop_groups_data_layer_merge(BMesh *bm, LinkNode *groups, const int layer_n)
static void bm_vert_loop_groups_data_layer_merge__single(BMesh *bm, void *lf_p, int layer_n, void *data_tmp)
void BM_vert_loop_groups_data_layer_merge_weights(BMesh *bm, LinkNode *groups, const int layer_n, const float *loop_weights)
bool BM_data_layer_free_named(BMesh *bm, CustomData *data, const char *name)
void BM_data_layer_ensure_named(BMesh *bm, CustomData *data, int type, const char *name)
void BM_face_interp_from_face_ex(BMesh *bm, BMFace *f_dst, const BMFace *f_src, const bool do_vertex, const void **blocks_l, const void **blocks_v, float(*cos_2d)[2], float axis_mat[3][3])
Data Interpolate From Face.
static void update_data_blocks(BMesh *bm, CustomData *olddata, CustomData *data)
static bool mdisp_in_mdispquad(BMLoop *l_src, BMLoop *l_dst, const float l_dst_f_center[3], const float p[3], int res, float r_axis_x[3], float r_axis_y[3], float r_uv[2])
void BM_loop_interp_multires(BMesh *bm, BMLoop *l_dst, const BMFace *f_src)
void BM_data_layer_add(BMesh *bm, CustomData *data, int type)
void BM_data_layer_add_named(BMesh *bm, CustomData *data, int type, const char *name)
void BM_uv_map_ensure_pin_attr(BMesh *bm, const char *uv_map_name)
void BM_data_interp_from_edges(BMesh *bm, const BMEdge *e_src_1, const BMEdge *e_src_2, BMEdge *e_dst, const float fac)
Data, Interpolate From Edges.
void BM_face_interp_from_face(BMesh *bm, BMFace *f_dst, const BMFace *f_src, const bool do_vertex)
void BM_face_multires_bounds_smooth(BMesh *bm, BMFace *f)
static void bm_loop_walk_data(LoopWalkCtx *lwc, BMLoop *l_walk)
void BM_loop_interp_from_face(BMesh *bm, BMLoop *l_dst, const BMFace *f_src, const bool do_vertex, const bool do_multires)
void BM_data_interp_face_vert_edge(BMesh *bm, const BMVert *v_src_1, const BMVert *, BMVert *v, BMEdge *e, const float fac)
Data Face-Vert Edge Interpolate.
static bool quad_co(const float v1[3], const float v2[3], const float v3[3], const float v4[3], const float p[3], const float n[3], float r_uv[2])
static void mdisp_axis_from_quad(const float v1[3], const float v2[3], float[3], const float v4[3], float r_axis_x[3], float r_axis_y[3])
LinkNode * BM_vert_loop_groups_data_layer_create(BMesh *bm, BMVert *v, const int layer_n, const float *loop_weights, MemArena *arena)
static void bm_vert_loop_groups_data_layer_merge_weights__single(BMesh *bm, void *lf_p, const int layer_n, void *data_tmp, const float *loop_weights)
static float bm_loop_flip_equotion(float mat[2][2], float b[2], const float target_axis_x[3], const float target_axis_y[3], const float coord[3], int i, int j)
void BM_uv_map_ensure_vert_select_attr(BMesh *bm, const char *uv_map_name)
void BM_uv_map_ensure_edge_select_attr(BMesh *bm, const char *uv_map_name)
void BM_loop_interp_multires_ex(BMesh *, BMLoop *l_dst, const BMFace *f_src, const float f_dst_center[3], const float f_src_center[3], const int cd_loop_mdisp_offset)
static void UNUSED_FUNCTION BM_Data_Vert_Average(BMesh *, BMFace *)
Data Vert Average.
void BM_elem_float_data_set(CustomData *cd, void *element, int type, const float val)
static void bm_loop_flip_disp(const float source_axis_x[3], const float source_axis_y[3], const float target_axis_x[3], const float target_axis_y[3], float disp[3])
static void bm_data_interp_from_elem(CustomData *data_layer, const BMElem *ele_src_1, const BMElem *ele_src_2, BMElem *ele_dst, const float fac)
void BM_uv_map_ensure_select_and_pin_attrs(BMesh *bm)
void BM_data_interp_from_verts(BMesh *bm, const BMVert *v_src_1, const BMVert *v_src_2, BMVert *v_dst, const float fac)
Data, Interpolate From Verts.
#define BM_ITER_ELEM(ele, iter, data, itype)
#define BM_ITER_MESH(ele, iter, bm, itype)
@ BM_EDGES_OF_MESH
@ BM_VERTS_OF_MESH
@ BM_FACES_OF_MESH
@ BM_LOOPS_OF_VERT
@ BM_LOOPS_OF_FACE
ATTR_WARN_UNUSED_RESULT BMesh * bm
#define BM_FACE
#define BM_EDGE
#define BM_VERT
void poly_rotate_plane(const float normal[3], float(*verts)[3], const uint nverts)
POLY ROTATE PLANE.
void BM_face_calc_center_median(const BMFace *f, float r_cent[3])
void BM_face_calc_tangent_auto(const BMFace *f, float r_tangent[3])
void BM_vert_normal_update_all(BMVert *v)
bool BM_face_is_normal_valid(const BMFace *f)
ATTR_WARN_UNUSED_RESULT const BMVert * v2
ATTR_WARN_UNUSED_RESULT const BMLoop * l
ATTR_WARN_UNUSED_RESULT const BMVert const BMEdge * e
ATTR_WARN_UNUSED_RESULT const BMVert * v
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition btQuadWord.h:119
local_group_size(16, 16) .push_constant(Type b
#define fabsf(x)
draw_view in_light_buf[] float
draw_view push_constant(Type::INT, "radiance_src") .push_constant(Type capture_info_buf storage_buf(1, Qualifier::READ, "ObjectBounds", "bounds_buf[]") .push_constant(Type draw_view int
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
void *(* MEM_callocN)(size_t len, const char *str)
Definition mallocn.cc:42
void *(* MEM_dupallocN)(const void *vmemh)
Definition mallocn.cc:39
const btScalar eps
Definition poly34.cpp:11
BMHeader head
BMHeader head
BMHeader head
float no[3]
void * data
BMHeader head
struct BMVert * v
struct BMLoop * radial_next
struct BMLoop * prev
struct BMFace * f
struct BMLoop * next
float co[3]
float no[3]
BMHeader head
int totvert
char elem_index_dirty
CustomData vdata
int totedge
CustomData edata
int totloop
CustomData pdata
CustomData ldata
int totface
struct BLI_mempool * pool
CustomDataLayer * layers
float * data_weights
const float * loop_weights
float * weight_array
const void * data_ref
int * data_index_array
void ** data_array
MemArena * arena
float(* disps)[3]
PointerRNA * ptr
Definition wm_files.cc:4126