Blender V4.3
bmesh_intersect_edges.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2019 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
9#include "MEM_guardedalloc.h"
10
11#include "BLI_math_geom.h"
12#include "BLI_math_vector.h"
13#include "BLI_sort.h"
14#include "BLI_stack.h"
15#include "BLI_vector.hh"
16
17#include "BKE_bvhutils.hh"
18
19#include "atomic_ops.h"
20
21#include "bmesh.hh"
22
23#include "bmesh_intersect_edges.hh" /* own include */
24
25// #define INTERSECT_EDGES_DEBUG
26
27#define KDOP_TREE_TYPE 4
28#define KDOP_AXIS_LEN 14
29#define BLI_STACK_PAIR_LEN (2 * KDOP_TREE_TYPE)
30
31/* -------------------------------------------------------------------- */
37/* Callbacks for `BM_vert_pair_shared_face_cb` */
38
50
/**
 * Callback for #BM_vert_pair_shared_face_cb.
 * Tracks, across the faces shared by the two loops, the face whose edge-net
 * projection onto the face normal spans the smallest range, storing the
 * winner in #EDBMSplitBestFaceData.
 * NOTE(review): the line declaring this callback
 * (`bm_vert_pair_share_best_splittable_face_cb`, taking `BMFace *f` first)
 * was dropped by the extraction of this file — confirm against upstream.
 * Always returns false so every shared face is visited.
 */
52 BMLoop *l_a,
53 BMLoop *l_b,
54 void *userdata)
55{
56 EDBMSplitBestFaceData *data = static_cast<EDBMSplitBestFaceData *>(userdata);
57 float no[3];
58 copy_v3_v3(no, f->no);
59
/* Seed the projected range from the two query loops' vertices. */
60 float min = dot_v3v3(l_a->v->co, no);
61 float max = dot_v3v3(l_b->v->co, no);
62 if (min > max) {
63 std::swap(min, max);
64 }
65
/* Pick the edge-net's starting vertex: the endpoint of the first edge that is
 * NOT shared with the second edge. */
66 BMEdge **e_iter = &data->edgenet[0];
67 BMEdge *e_next = data->edgenet[1];
68 BMVert *v_test = ELEM((*e_iter)->v1, e_next->v1, e_next->v2) ? (*e_iter)->v2 : (*e_iter)->v1;
69
/* Walk the edge-net; reject the face as soon as any net vertex falls outside
 * it, otherwise grow the projected range. */
70 int verts_len = data->edgenet_len - 1;
71 for (int i = verts_len; i--; e_iter++) {
72 v_test = BM_edge_other_vert(*e_iter, v_test);
73 BLI_assert(v_test != nullptr);
74 if (!BM_face_point_inside_test(f, v_test->co)) {
75 return false;
76 }
77 float dot = dot_v3v3(v_test->co, no);
78 if (dot < min) {
79 min = dot;
80 }
81 if (dot > max) {
82 max = dot;
83 }
84 }
85
/* Keep the face with the tightest edge-net range along its normal. */
86 const float test_edgenet_range_on_face_normal = max - min;
87 if (test_edgenet_range_on_face_normal < data->best_edgenet_range_on_face_normal) {
88 data->best_edgenet_range_on_face_normal = test_edgenet_range_on_face_normal;
89 data->r_best_face = f;
90 }
91
92 return false;
93}
94
95/* find the best splittable face between the two vertices. */
/*
 * Callback for #BM_vert_pair_shared_face_cb, used when the edge-net is a
 * single edge.
 * NOTE(review): the signature line (`bm_vert_pair_share_splittable_face_cb`)
 * is missing from this extraction — confirm against upstream.
 * `userdata` is a `float[2][3]`: a ray origin and direction spanning the
 * vertex pair (set up in the `edgenet_len == 1` branch of the best-face
 * lookup below). Returns true when that ray crosses the segment between a
 * loop's previous and next vertices at a factor within [0, 1] (expanded by
 * FLT_EPSILON), i.e. the shared face can be split by the connecting edge.
 */
97 BMLoop *l_a,
98 BMLoop *l_b,
99 void *userdata)
100{
101 float(*data)[3] = static_cast<float(*)[3]>(userdata);
102 float *v_a_co = data[0];
103 float *v_a_b_dir = data[1];
104 const float range_min = -FLT_EPSILON;
105 const float range_max = 1.0f + FLT_EPSILON;
106
107 float co[3];
108 float dir[3];
109 float lambda_b;
110
/* Test the segment spanning l_a's previous/next vertices first, then l_b's. */
111 copy_v3_v3(co, l_a->prev->v->co);
112 sub_v3_v3v3(dir, l_a->next->v->co, co);
113 if (isect_ray_ray_v3(v_a_co, v_a_b_dir, co, dir, nullptr, &lambda_b)) {
114 if (IN_RANGE(lambda_b, range_min, range_max)) {
115 return true;
116 }
117 copy_v3_v3(co, l_b->prev->v->co);
118 sub_v3_v3v3(dir, l_b->next->v->co, co);
119 if (isect_ray_ray_v3(v_a_co, v_a_b_dir, co, dir, nullptr, &lambda_b)) {
120 return IN_RANGE(lambda_b, range_min, range_max);
121 }
122 }
123 return false;
124}
125
/**
 * Find the best face to split between the two vertices.
 * NOTE(review): the line naming this function (`bm_vert_pair_best_face_get`)
 * is missing from this extraction; two further dropped lines are flagged
 * inline below.
 * \param epsilon: distance used to expand the face's own projected range
 * before comparing it against the edge-net's range.
 * \return The chosen face, or null when no shared face is splittable.
 */
127 BMVert *v_a, BMVert *v_b, BMEdge **edgenet, const int edgenet_len, const float epsilon)
128{
129 BMFace *r_best_face = nullptr;
130
131 BLI_assert(v_a != v_b);
132
133 BMLoop *dummy;
134 if (edgenet_len == 1) {
/* Single-edge net: a ray test between the two vertices is enough. */
135 float data[2][3];
136 copy_v3_v3(data[0], v_b->co);
137 sub_v3_v3v3(data[1], v_a->co, data[0]);
138 r_best_face = BM_vert_pair_shared_face_cb(
139 v_a, v_b, false, bm_vert_pair_share_splittable_face_cb, &data, &dummy, &dummy);
140 BLI_assert(!r_best_face || BM_edge_in_face(edgenet[0], r_best_face) == false);
141 }
142 else {
/* NOTE(review): the declaration of `data` (an #EDBMSplitBestFaceData,
 * doxygen line 143) is missing from this extraction. */
144 data.edgenet = edgenet;
145 data.edgenet_len = edgenet_len;
146 data.best_edgenet_range_on_face_normal = FLT_MAX;
147 data.r_best_face = nullptr;
148
/* NOTE(review): the call line `BM_vert_pair_shared_face_cb(` (doxygen
 * line 149) is missing from this extraction. */
150 v_a, v_b, true, bm_vert_pair_share_best_splittable_face_cb, &data, &dummy, &dummy);
151
152 if (data.r_best_face) {
153 /* Check if the edgenet's range is smaller than the face's range. */
154 float no[3], min = FLT_MAX, max = -FLT_MAX;
155 copy_v3_v3(no, data.r_best_face->no);
156 BMVert *v_test;
157 BMIter f_iter;
158 BM_ITER_ELEM (v_test, &f_iter, data.r_best_face, BM_VERTS_OF_FACE) {
159 float dot = dot_v3v3(v_test->co, no);
160 if (dot < min) {
161 min = dot;
162 }
163 if (dot > max) {
164 max = dot;
165 }
166 }
/* Reject the candidate when the face itself (expanded by epsilon) is flatter
 * than the edge-net along the face normal. */
167 float face_range_on_normal = max - min + 2 * epsilon;
168 if (face_range_on_normal < data.best_edgenet_range_on_face_normal) {
169 data.r_best_face = nullptr;
170 }
171 }
172 r_best_face = data.r_best_face;
173 }
174
175 return r_best_face;
176}
177
180/* -------------------------------------------------------------------- */
/* One element of an intersection pair: either a vertex, or an edge plus the
 * cut factor (`lambda`) along it.
 * NOTE(review): the union's pointer members were dropped by the extraction.
 * Usage elsewhere in this file assigns `->vert`, `->edge` + `->lambda`, and
 * reads `->elem->head.htype`, so the union overlays those pointer views —
 * confirm member names against upstream. */
187 union {
190 struct {
192 float lambda;
193 };
194 };
195};
196
197/* -------------------------------------------------------------------- */
198/* Overlap Callbacks */
199
207
208/* Utils */
209
/* Store a vertex into one element of an intersection pair.
 * NOTE(review): the signature line is missing from this extraction; per the
 * symbol index this is
 * `bm_vert_pair_elem_setup_ex(BMVert *v, EDBMSplitElem *r_pair_elem)`. */
211{
212 r_pair_elem->vert = v;
213}
214
/* Store an edge and its cut factor into one element of an intersection pair,
 * counting each distinct cut edge exactly once in `r_data_cut_edges_len`
 * (edges start with `head.index == 0`, see the caller's setup).
 * NOTE(review): the first signature line (`bm_edge_pair_elem_setup(BMEdge *e,`)
 * is missing from this extraction. */
216 float lambda,
217 int *r_data_cut_edges_len,
218 EDBMSplitElem *r_pair_elem)
219{
220 r_pair_elem->edge = e;
221 r_pair_elem->lambda = lambda;
222
223 /* Even though we have multiple atomic operations, this is fine here, since
224 * there is no dependency on order.
225 * The `e->head.index` check + atomic increment will ever be true once, as
226 * expected. We don't care which instance of the code actually ends up
227 * incrementing `r_data_cut_edge_len`, so there is no race condition here. */
228 if (atomic_fetch_and_add_int32(&e->head.index, 1) == 0) {
229 atomic_fetch_and_add_int32(r_data_cut_edges_len, 1);
230 }
231}
232
233/* Util for Vert x Edge and Edge x Edge callbacks */
/* Shared implementation for the Vert x Edge overlap callback.
 * NOTE(review): the first signature line (`bm_edgexvert_isect_impl(BMVert *v,`)
 * is missing from this extraction.
 * Returns true (filling `r_pair` with the edge and the vertex) when `v`
 * projects onto `e` at factor `lambda` within [0, 1], is not within merge
 * distance of the nearest endpoint (that case is handled as Vert x Vert),
 * and lies within `data_dist_sq` of the edge. */
235 BMEdge *e,
236 const float co[3],
237 const float dir[3],
238 float lambda,
239 float data_dist_sq,
240 int *data_cut_edges_len,
241 EDBMSplitElem r_pair[2])
242{
243 BMVert *e_v;
244 float dist_sq_vert_factor;
245
246 if (!IN_RANGE_INCL(lambda, 0.0f, 1.0f)) {
247 /* Vert x Vert is already handled elsewhere. */
248 return false;
249 }
250
/* Pick the endpoint nearest to the projection to rule out endpoint merges. */
251 if (lambda < 0.5f) {
252 e_v = e->v1;
253 dist_sq_vert_factor = lambda;
254 }
255 else {
256 e_v = e->v2;
257 dist_sq_vert_factor = 1.0f - lambda;
258 }
259
260 if (v != e_v) {
261 float dist_sq_vert = square_f(dist_sq_vert_factor) * len_squared_v3(dir);
262 if (dist_sq_vert < data_dist_sq) {
263 /* Vert x Vert is already handled elsewhere. */
264 return false;
265 }
266
/* Distance from the vertex to its closest point on the edge. */
267 float closest[3];
268 madd_v3_v3v3fl(closest, co, dir, lambda);
269
270 float dist_sq = len_squared_v3v3(v->co, closest);
271 if (dist_sq < data_dist_sq) {
272 bm_edge_pair_elem_setup(e, lambda, data_cut_edges_len, &r_pair[0]);
273 bm_vert_pair_elem_setup_ex(v, &r_pair[1]);
274 return true;
275 }
276 }
277
278 return false;
279}
280
281/* Vertex x Vertex Callback */
282
283static bool bm_vertxvert_isect_cb(void *userdata, int index_a, int index_b, int thread)
284{
285 EDBMSplitData *data = static_cast<EDBMSplitData *>(userdata);
286 BMVert *v_a = BM_vert_at_index(data->bm, index_a);
287 BMVert *v_b = BM_vert_at_index(data->bm, index_b);
288
289 EDBMSplitElem *pair = static_cast<EDBMSplitElem *>(BLI_stack_push_r(data->pair_stack[thread]));
290
291 bm_vert_pair_elem_setup_ex(v_a, &pair[0]);
292 bm_vert_pair_elem_setup_ex(v_b, &pair[1]);
293
294 return true;
295}
296
297static bool bm_vertxvert_self_isect_cb(void *userdata, int index_a, int index_b, int thread)
298{
299 if (index_a < index_b) {
300 return bm_vertxvert_isect_cb(userdata, index_a, index_b, thread);
301 }
302 return false;
303}
304
305/* Vertex x Edge and Edge x Vertex Callbacks */
306
/* Vert x Edge overlap callback: compute the vertex's projection factor along
 * the edge and push an (edge, vert) pair when they are close enough. */
307static bool bm_edgexvert_isect_cb(void *userdata, int index_a, int index_b, int thread)
308{
309 EDBMSplitData *data = static_cast<EDBMSplitData *>(userdata);
310 BMEdge *e = BM_edge_at_index(data->bm, index_a);
311 BMVert *v = BM_vert_at_index(data->bm, index_b);
312
/* Projection factor of `v` on the ray (co, dir); -1.0f is the fallback for a
 * degenerate edge. */
313 float co[3], dir[3], lambda;
314 copy_v3_v3(co, e->v1->co);
315 sub_v3_v3v3(dir, e->v2->co, co);
316 lambda = ray_point_factor_v3_ex(v->co, co, dir, 0.0f, -1.0f);
317
318 EDBMSplitElem pair_tmp[2];
/* NOTE(review): the call line invoking #bm_edgexvert_isect_impl (doxygen
 * line 319) is missing from this extraction. */
320 v, e, co, dir, lambda, data->dist_sq, &data->cut_edges_len, pair_tmp))
321 {
322 EDBMSplitElem *pair = static_cast<EDBMSplitElem *>(BLI_stack_push_r(data->pair_stack[thread]));
323 pair[0] = pair_tmp[0];
324 pair[1] = pair_tmp[1];
325 }
326
327 /* Always return false with edges. */
328 return false;
329}
330
331/* Edge x Edge Callbacks */
332
/* Shared implementation for the Edge x Edge overlap callback.
 * NOTE(review): the first signature line
 * (`bm_edgexedge_isect_impl(EDBMSplitData *data,`) is missing from this
 * extraction.
 * Fills `r_pair` with both edges and their nearest-point factors when the
 * edges pass within `data->dist_sq` of each other strictly inside both
 * spans; endpoint-proximity cases are left to the Vert x Edge pass. */
334 BMEdge *e_a,
335 BMEdge *e_b,
336 const float co_a[3],
337 const float dir_a[3],
338 const float co_b[3],
339 const float dir_b[3],
340 float lambda_a,
341 float lambda_b,
342 EDBMSplitElem r_pair[2])
343{
/* For each edge, find the endpoint nearest to the crossing point. */
344 float dist_sq_va_factor, dist_sq_vb_factor;
345 BMVert *e_a_v, *e_b_v;
346 if (lambda_a < 0.5f) {
347 e_a_v = e_a->v1;
348 dist_sq_va_factor = lambda_a;
349 }
350 else {
351 e_a_v = e_a->v2;
352 dist_sq_va_factor = 1.0f - lambda_a;
353 }
354
355 if (lambda_b < 0.5f) {
356 e_b_v = e_b->v1;
357 dist_sq_vb_factor = lambda_b;
358 }
359 else {
360 e_b_v = e_b->v2;
361 dist_sq_vb_factor = 1.0f - lambda_b;
362 }
363
364 if (e_a_v != e_b_v) {
365 if (!IN_RANGE_INCL(lambda_a, 0.0f, 1.0f) || !IN_RANGE_INCL(lambda_b, 0.0f, 1.0f)) {
366 /* Vert x Edge is already handled elsewhere. */
367 return false;
368 }
369
/* Skip crossings that fall within merge distance of an endpoint. */
370 float dist_sq_va = square_f(dist_sq_va_factor) * len_squared_v3(dir_a);
371 float dist_sq_vb = square_f(dist_sq_vb_factor) * len_squared_v3(dir_b);
372
373 if (dist_sq_va < data->dist_sq || dist_sq_vb < data->dist_sq) {
374 /* Vert x Edge is already handled elsewhere. */
375 return false;
376 }
377
/* Distance between the mutual nearest points on the two edges. */
378 float near_a[3], near_b[3];
379 madd_v3_v3v3fl(near_a, co_a, dir_a, lambda_a);
380 madd_v3_v3v3fl(near_b, co_b, dir_b, lambda_b);
381
382 float dist_sq = len_squared_v3v3(near_a, near_b);
383 if (dist_sq < data->dist_sq) {
384 bm_edge_pair_elem_setup(e_a, lambda_a, &data->cut_edges_len, &r_pair[0]);
385 bm_edge_pair_elem_setup(e_b, lambda_b, &data->cut_edges_len, &r_pair[1]);
386 return true;
387 }
388 }
389 return false;
390}
391
/* Edge x Edge overlap callback: intersect the supporting rays of the two
 * edges and push an (edge, edge) pair when they cross within tolerance. */
392static bool bm_edgexedge_isect_cb(void *userdata, int index_a, int index_b, int thread)
393{
394 EDBMSplitData *data = static_cast<EDBMSplitData *>(userdata);
395 BMEdge *e_a = BM_edge_at_index(data->bm, index_a);
396 BMEdge *e_b = BM_edge_at_index(data->bm, index_b);
397
398 if (BM_edge_share_vert_check(e_a, e_b)) {
399 /* The other vertices may intersect but Vert x Edge is already handled elsewhere. */
400 return false;
401 }
402
403 float co_a[3], dir_a[3], co_b[3], dir_b[3];
404 copy_v3_v3(co_a, e_a->v1->co);
405 sub_v3_v3v3(dir_a, e_a->v2->co, co_a);
406
407 copy_v3_v3(co_b, e_b->v1->co);
408 sub_v3_v3v3(dir_b, e_b->v2->co, co_b);
409
410 float lambda_a, lambda_b;
411 /* Using with dist^4 as `epsilon` is not the best solution, but it fits in most cases. */
412 if (isect_ray_ray_epsilon_v3(co_a, dir_a, co_b, dir_b, data->dist_sq_sq, &lambda_a, &lambda_b)) {
413 EDBMSplitElem pair_tmp[2];
/* NOTE(review): the call line invoking #bm_edgexedge_isect_impl (doxygen
 * line 414) is missing from this extraction. */
415 data, e_a, e_b, co_a, dir_a, co_b, dir_b, lambda_a, lambda_b, pair_tmp))
416 {
417 EDBMSplitElem *pair = static_cast<EDBMSplitElem *>(
418 BLI_stack_push_r(data->pair_stack[thread]));
419 pair[0] = pair_tmp[0];
420 pair[1] = pair_tmp[1];
421 }
422 }
423
424 /* Edge x Edge returns always false. */
425 return false;
426}
427
428static bool bm_edgexedge_self_isect_cb(void *userdata, int index_a, int index_b, int thread)
429{
430 if (index_a < index_b) {
431 return bm_edgexedge_isect_cb(userdata, index_a, index_b, thread);
432 }
433 return false;
434}
435
436/* -------------------------------------------------------------------- */
437/* BVHTree Overlap Function */
438
/* Run a threaded BVH-tree overlap query, lazily creating one result stack per
 * worker thread before handing `pair_stack` to the split data.
 * NOTE(review): the `BVHTree_OverlapCallback callback` parameter line
 * (doxygen line 441) is missing from this extraction; `callback` is used in
 * the call below. */
439static void bm_elemxelem_bvhtree_overlap(const BVHTree *tree1,
440 const BVHTree *tree2,
442 EDBMSplitData *data,
443 BLI_Stack **pair_stack)
444{
445 int parallel_tasks_num = BLI_bvhtree_overlap_thread_num(tree1);
446 for (int i = 0; i < parallel_tasks_num; i++) {
447 if (pair_stack[i] == nullptr) {
448 pair_stack[i] = BLI_stack_new(sizeof(const EDBMSplitElem[2]), __func__);
449 }
450 }
451 data->pair_stack = pair_stack;
/* Max one interaction per element pair; results go through `callback`. */
452 BLI_bvhtree_overlap_ex(tree1, tree2, nullptr, callback, data, 1, BVH_OVERLAP_USE_THREADING);
453}
454
455/* -------------------------------------------------------------------- */
456/* Callbacks for `BLI_qsort_r` */
457
458static int sort_cmp_by_lambda_cb(const void *index1_v, const void *index2_v, void *keys_v)
459{
460 const EDBMSplitElem *pair_flat = static_cast<const EDBMSplitElem *>(keys_v);
461 const int index1 = *(int *)index1_v;
462 const int index2 = *(int *)index2_v;
463
464 if (pair_flat[index1].lambda > pair_flat[index2].lambda) {
465 return 1;
466 }
467 return -1;
468}
469
470/* -------------------------------------------------------------------- */
471/* Main API */
472
473#define INTERSECT_EDGES
474
/**
 * Merge close vertices and intersect edges of the mesh (main API,
 * #BM_mesh_intersect_edges per the symbol index).
 * NOTE(review): the signature line and roughly forty interior lines were
 * dropped by the extraction of this file (several conditions, macro-iterator
 * headers and call lines); the comments below only describe what the
 * remaining lines show — verify details against upstream.
 *
 * \param hflag: Flag marking the "active" verts/edges tested against the rest.
 * \param dist: Maximum distance for two elements to be considered intersecting.
 * \param split_faces: Also split the faces crossed by the resulting edge-nets.
 * \param r_targetmap: Optional ghash filled with vert -> vert merge targets.
 * \return true when `r_targetmap` was given and populated.
 */
476 BMesh *bm, const char hflag, const float dist, const bool split_faces, GHash *r_targetmap)
477{
478 bool ok = false;
479
480 BMIter iter;
481 BMVert *v;
482 BMEdge *e;
483 int i;
484
485 /* Store all intersections in this array. */
486 EDBMSplitElem(*pair_iter)[2], (*pair_array)[2] = nullptr;
487 int pair_len = 0;
488
/* Per-thread result stacks: first half for vert x vert, second half for
 * edge x (edge|vert) pairs. */
489 BLI_Stack *pair_stack[BLI_STACK_PAIR_LEN] = {nullptr};
490 BLI_Stack **pair_stack_vertxvert = pair_stack;
491 BLI_Stack **pair_stack_edgexelem = &pair_stack[KDOP_TREE_TYPE];
492
493 const float dist_sq = square_f(dist);
494 const float dist_half = dist / 2;
495
496 EDBMSplitData data{};
497 data.bm = bm;
498 data.pair_stack = pair_stack;
499 data.cut_edges_len = 0;
500 data.dist_sq = dist_sq;
501 data.dist_sq_sq = square_f(dist_sq);
502
/* NOTE(review): a setup call on doxygen line 503 is missing from this
 * extraction. */
504
505 /* tag and count the verts to be tested. */
506 int verts_act_len = 0, verts_remain_len = 0;
507 BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) {
508 if (BM_elem_flag_test(v, hflag)) {
510 verts_act_len++;
511 }
512 else {
515 verts_remain_len++;
516 }
517 }
518
519 /* The index will indicate which cut in pair_array this vertex belongs to. */
520 BM_elem_index_set(v, -1);
521 }
523
524 /* Start the creation of BVHTrees. */
525 BVHTree *tree_verts_act = nullptr, *tree_verts_remain = nullptr;
526 if (verts_act_len) {
527 tree_verts_act = BLI_bvhtree_new(verts_act_len, dist_half, KDOP_TREE_TYPE, KDOP_AXIS_LEN);
528 }
529 if (verts_remain_len) {
530 tree_verts_remain = BLI_bvhtree_new(
531 verts_remain_len, dist_half, KDOP_TREE_TYPE, KDOP_AXIS_LEN);
532 }
533
/* Phase 1: Vert x Vert overlap between active and remaining vertices. */
534 if (tree_verts_act || tree_verts_remain) {
537 if (tree_verts_act) {
538 BLI_bvhtree_insert(tree_verts_act, i, v->co, 1);
539 }
540 }
541 else if (tree_verts_remain && !BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
542 BLI_bvhtree_insert(tree_verts_remain, i, v->co, 1);
543 }
544 }
545
546 if (tree_verts_act) {
547 BLI_bvhtree_balance(tree_verts_act);
548 /* First pair search. */
550 tree_verts_act, tree_verts_act, bm_vertxvert_self_isect_cb, &data, pair_stack_vertxvert);
551 }
552
553 if (tree_verts_remain) {
554 BLI_bvhtree_balance(tree_verts_remain);
555 }
556
557 if (tree_verts_act && tree_verts_remain) {
559 tree_verts_remain, tree_verts_act, bm_vertxvert_isect_cb, &data, pair_stack_vertxvert);
560 }
561 }
562
563 for (i = KDOP_TREE_TYPE; i--;) {
564 if (pair_stack_vertxvert[i]) {
565 pair_len += BLI_stack_count(pair_stack_vertxvert[i]);
566 }
567 }
568
/* Phase 2: Edge x Edge and Edge x Vert intersections. */
569#ifdef INTERSECT_EDGES
570 uint vertxvert_pair_len = pair_len;
571
572# define EDGE_ACT_TO_TEST 1
573# define EDGE_REMAIN_TO_TEST 2
574 /* Tag and count the edges. */
575 int edges_act_len = 0, edges_remain_len = 0;
576 BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
577 if (BM_elem_flag_test(e, BM_ELEM_HIDDEN) || (len_squared_v3v3(e->v1->co, e->v2->co) < dist_sq))
578 {
579 /* Don't test hidden edges or smaller than the minimum distance.
580 * These have already been handled in the vertices overlap. */
582 if (split_faces) {
583 /* Tag to be ignored. */
585 }
586 continue;
587 }
588
591 edges_act_len++;
592 }
593 else {
595 edges_remain_len++;
596 if (split_faces) {
597 /* Tag to be ignored. */
599 }
600 }
601 }
602
603 BVHTree *tree_edges_act = nullptr, *tree_edges_remain = nullptr;
604 if (edges_act_len) {
605 tree_edges_act = BLI_bvhtree_new(edges_act_len, dist_half, KDOP_TREE_TYPE, KDOP_AXIS_LEN);
606 }
607
608 if (edges_remain_len && (tree_edges_act || tree_verts_act)) {
609 tree_edges_remain = BLI_bvhtree_new(
610 edges_remain_len, dist_half, KDOP_TREE_TYPE, KDOP_AXIS_LEN);
611 }
612
613 if (tree_edges_act || tree_edges_remain) {
615 int edge_test = BM_elem_index_get(e);
616 float co[2][3];
617 if (edge_test == EDGE_ACT_TO_TEST) {
618 BLI_assert(tree_edges_act);
619 e->head.index = 0;
620 copy_v3_v3(co[0], e->v1->co);
621 copy_v3_v3(co[1], e->v2->co);
622 BLI_bvhtree_insert(tree_edges_act, i, co[0], 2);
623 }
624 else if (edge_test == EDGE_REMAIN_TO_TEST) {
625 BLI_assert(tree_edges_remain);
626 e->head.index = 0;
627 copy_v3_v3(co[0], e->v1->co);
628 copy_v3_v3(co[1], e->v2->co);
629 BLI_bvhtree_insert(tree_edges_remain, i, co[0], 2);
630 }
631# ifdef INTERSECT_EDGES_DEBUG
632 else {
633 e->head.index = 0;
634 }
635# endif
636 /* Tag used when converting pairs to vert x vert. */
638 }
639# undef EDGE_ACT_TO_TEST
640# undef EDGE_REMAIN_TO_TEST
641
642 /* Use `e->head.index` to count intersections. */
644
645 if (tree_edges_act) {
646 BLI_bvhtree_balance(tree_edges_act);
647 }
648
649 if (tree_edges_remain) {
650 BLI_bvhtree_balance(tree_edges_remain);
651 }
652
653 int edgexedge_pair_len = 0;
654 if (tree_edges_act) {
655 /* Edge x Edge */
657 tree_edges_act, tree_edges_act, bm_edgexedge_self_isect_cb, &data, pair_stack_edgexelem);
658
659 if (tree_edges_remain) {
661 tree_edges_remain, tree_edges_act, bm_edgexedge_isect_cb, &data, pair_stack_edgexelem);
662 }
663
664 for (i = KDOP_TREE_TYPE; i--;) {
665 if (pair_stack_edgexelem[i]) {
666 edgexedge_pair_len += BLI_stack_count(pair_stack_edgexelem[i]);
667 }
668 }
669
670 if (tree_verts_act) {
671 /* Edge v Vert */
673 tree_edges_act, tree_verts_act, bm_edgexvert_isect_cb, &data, pair_stack_edgexelem);
674 }
675
676 if (tree_verts_remain) {
677 /* Edge v Vert */
679 tree_edges_act, tree_verts_remain, bm_edgexvert_isect_cb, &data, pair_stack_edgexelem);
680 }
681
682 BLI_bvhtree_free(tree_edges_act);
683 }
684
685 if (tree_verts_act && tree_edges_remain) {
686 /* Edge v Vert */
688 tree_edges_remain, tree_verts_act, bm_edgexvert_isect_cb, &data, pair_stack_edgexelem);
689 }
690
691 BLI_bvhtree_free(tree_edges_remain);
692
693 int edgexelem_pair_len = 0;
694 for (i = KDOP_TREE_TYPE; i--;) {
695 if (pair_stack_edgexelem[i]) {
696 edgexelem_pair_len += BLI_stack_count(pair_stack_edgexelem[i]);
697 }
698 }
699
700 pair_len += edgexelem_pair_len;
701 int edgexvert_pair_len = edgexelem_pair_len - edgexedge_pair_len;
702
/* Flatten all per-thread stacks into `pair_array`, then split every cut
 * edge so each pair becomes Vert x Vert. */
703 if (edgexelem_pair_len) {
704 pair_array = static_cast<EDBMSplitElem(*)[2]>(
705 MEM_mallocN(sizeof(*pair_array) * pair_len, __func__));
706
707 pair_iter = pair_array;
708 for (i = 0; i < BLI_STACK_PAIR_LEN; i++) {
709 if (pair_stack[i]) {
710 uint count = uint(BLI_stack_count(pair_stack[i]));
711 BLI_stack_pop_n_reverse(pair_stack[i], pair_iter, count);
712 pair_iter += count;
713 }
714 }
715
716 /* Map intersections per edge. */
717 union EdgeIntersectionsMap {
718 struct {
719 int cuts_len;
720 int cuts_index[];
721 };
722 int as_int[0];
723 } *e_map_iter, *e_map;
724
725# ifdef INTERSECT_EDGES_DEBUG
726 int cut_edges_len = 0;
727 BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
728 if (e->head.index != 0) {
729 cut_edges_len++;
730 }
731 }
732 BLI_assert(cut_edges_len == data.cut_edges_len);
733# endif
734
735 size_t e_map_size = (data.cut_edges_len * sizeof(*e_map)) +
736 ((size_t(2) * edgexedge_pair_len + edgexvert_pair_len) *
737 sizeof(*(e_map->cuts_index)));
738
739 e_map = static_cast<EdgeIntersectionsMap *>(MEM_mallocN(e_map_size, __func__));
740 int map_len = 0;
741
742 /* Convert every pair to Vert x Vert. */
743
744 /* The list of pairs starts with [vert x vert] followed by [edge x edge]
745 * and finally [edge x vert].
746 * Ignore the [vert x vert] pairs */
747 EDBMSplitElem *pair_flat, *pair_flat_iter;
748 pair_flat = (EDBMSplitElem *)&pair_array[vertxvert_pair_len];
749 pair_flat_iter = &pair_flat[0];
750 uint pair_flat_len = 2 * edgexelem_pair_len;
751 for (i = 0; i < pair_flat_len; i++, pair_flat_iter++) {
752 if (pair_flat_iter->elem->head.htype != BM_EDGE) {
753 continue;
754 }
755
756 e = pair_flat_iter->edge;
/* NOTE(review): the condition line opening this branch (doxygen lines
 * 757-758) is missing from this extraction. */
759 int e_cuts_len = e->head.index;
760
761 e_map_iter = (EdgeIntersectionsMap *)&e_map->as_int[map_len];
762 e_map_iter->cuts_len = e_cuts_len;
763 e_map_iter->cuts_index[0] = i;
764
765 /* Use `e->head.index` to indicate which slot to fill with the `cut` index. */
766 e->head.index = map_len + 1;
767 map_len += 1 + e_cuts_len;
768 }
769 else {
770 e_map->as_int[++e->head.index] = i;
771 }
772 }
773
774 /* Split Edges A to set all Vert x Edge. */
775 for (i = 0; i < map_len;
776 e_map_iter = (EdgeIntersectionsMap *)&e_map->as_int[i], i += 1 + e_map_iter->cuts_len)
777 {
778
779 /* sort by lambda. */
780 BLI_qsort_r(e_map_iter->cuts_index,
781 e_map_iter->cuts_len,
782 sizeof(*(e_map->cuts_index)),
784 pair_flat);
785
/* Successive splits re-normalize lambda to the remaining edge span. */
786 float lambda, lambda_prev = 0.0f;
787 for (int j = 0; j < e_map_iter->cuts_len; j++) {
788 uint index = e_map_iter->cuts_index[j];
789
790 EDBMSplitElem *pair_elem = &pair_flat[index];
791 lambda = (pair_elem->lambda - lambda_prev) / (1.0f - lambda_prev);
792 lambda_prev = pair_elem->lambda;
793 e = pair_elem->edge;
794 if (split_faces) {
795 /* Tagged edges are ignored when split faces.
796 * Un-tag these. */
798 }
799
800 BMVert *v_new = BM_edge_split(bm, e, e->v1, nullptr, lambda);
801 pair_elem->vert = v_new;
802 }
803 }
804
805 MEM_freeN(e_map);
806 }
807 }
808#endif
809
810 BLI_bvhtree_free(tree_verts_act);
811 BLI_bvhtree_free(tree_verts_remain);
812
/* Phase 3: report merge targets and optionally split the crossed faces. */
813 if (r_targetmap) {
814 if (pair_len && pair_array == nullptr) {
815 pair_array = static_cast<EDBMSplitElem(*)[2]>(
816 MEM_mallocN(sizeof(*pair_array) * pair_len, __func__));
817 pair_iter = pair_array;
818 for (i = 0; i < BLI_STACK_PAIR_LEN; i++) {
819 if (pair_stack[i]) {
820 uint count = uint(BLI_stack_count(pair_stack[i]));
821 BLI_stack_pop_n_reverse(pair_stack[i], pair_iter, count);
822 pair_iter += count;
823 }
824 }
825 }
826
827 if (pair_array) {
828 BMVert *v_key, *v_val;
829 pair_iter = &pair_array[0];
830 for (i = 0; i < pair_len; i++, pair_iter++) {
831 BLI_assert((*pair_iter)[0].elem->head.htype == BM_VERT);
832 BLI_assert((*pair_iter)[1].elem->head.htype == BM_VERT);
833 BLI_assert((*pair_iter)[0].elem != (*pair_iter)[1].elem);
834 v_key = (*pair_iter)[0].vert;
835 v_val = (*pair_iter)[1].vert;
836 BLI_ghash_insert(r_targetmap, v_key, v_val);
837 }
838
/* Collapse chained targets (a->b, b->c) so each key maps to its final
 * destination vertex.
 * NOTE(review): comment lines 839-846 are missing from this extraction. */
847 pair_iter = &pair_array[0];
848 for (i = 0; i < pair_len; i++, pair_iter++) {
849 v_key = (*pair_iter)[0].vert;
850 v_val = (*pair_iter)[1].vert;
851 BMVert *v_target;
852 while ((v_target = static_cast<BMVert *>(BLI_ghash_lookup(r_targetmap, v_val)))) {
853 v_val = v_target;
854 }
855 if (v_val != (*pair_iter)[1].vert) {
856 BMVert **v_val_p = (BMVert **)BLI_ghash_lookup_p(r_targetmap, v_key);
857 *v_val_p = (*pair_iter)[1].vert = v_val;
858 }
859 if (split_faces) {
860 /* The vertex index indicates its position in the pair_array flat. */
861 BM_elem_index_set(v_key, i * 2);
862 BM_elem_index_set(v_val, i * 2 + 1);
863 }
864 }
865
866 if (split_faces) {
867 BMEdge **edgenet = nullptr;
868 int edgenet_alloc_len = 0;
869
870 EDBMSplitElem *pair_flat = (EDBMSplitElem *)&pair_array[0];
871 BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
873 /* Edge out of context or already tested. */
874 continue;
875 }
876
877 BMVert *va, *vb, *va_dest = nullptr;
878 va = e->v1;
879 vb = e->v2;
880
881 int v_cut = BM_elem_index_get(va);
882 int v_cut_other = BM_elem_index_get(vb);
883 if (v_cut == -1 && v_cut_other == -1) {
885 /* Edge out of context. */
887 }
888 continue;
889 }
890
891 /* Tag to avoid testing again. */
893
894 if (v_cut == -1) {
895 std::swap(va, vb);
896 v_cut = v_cut_other;
897 v_cut_other = -1;
898 }
899
900 /* `v_cut` indicates the other vertex within the `pair_array`. */
901 v_cut += v_cut % 2 ? -1 : 1;
902 va_dest = pair_flat[v_cut].vert;
903
904 if (BM_vert_pair_share_face_check(va, va_dest)) {
905 /* Vert par acts on the same face.
906 * Although there are cases like this where the face can be split,
907 * for efficiency it is better to ignore then. */
908 continue;
909 }
910
/* Grow an edge-net starting from this edge until a splittable face is
 * found or the walk dead-ends. */
911 BMFace *best_face = nullptr;
912 BMVert *v_other_dest, *v_other = vb;
913 BMEdge *e_net = e;
914 int edgenet_len = 0;
915 while (true) {
916 if (v_cut_other != -1) {
917 v_cut_other += v_cut_other % 2 ? -1 : 1;
918 v_other_dest = pair_flat[v_cut_other].vert;
919
920 if (BM_vert_pair_share_face_check(v_other, v_other_dest)) {
921 /* Vert par acts on the same face.
922 * Although there are cases like this where the face can be split,
923 * for efficiency and to avoid complications, it is better to ignore these cases.
924 */
925 break;
926 }
927 }
928 else {
929 v_other_dest = v_other;
930 }
931
932 if (va_dest == v_other_dest) {
933 /* Edge/Edge-net to vertex - we can't split the face. */
934 break;
935 }
936 if (edgenet_len == 0 && BM_edge_exists(va_dest, v_other_dest)) {
937 /* Edge to edge - no need to detect face. */
938 break;
939 }
940
941 if (edgenet_alloc_len == edgenet_len) {
942 edgenet_alloc_len = (edgenet_alloc_len + 1) * 2;
943 edgenet = static_cast<BMEdge **>(
944 MEM_reallocN(edgenet, (edgenet_alloc_len) * sizeof(*edgenet)));
945 }
946 edgenet[edgenet_len++] = e_net;
947
948 best_face = bm_vert_pair_best_face_get(
949 va_dest, v_other_dest, edgenet, edgenet_len, dist);
950
951 if (best_face) {
/* Bridge the net's ends to their destination verts so the edge-net is
 * connected before the face split.
 * NOTE(review): comment lines 953-959 are missing from this extraction. */
952 if ((va_dest != va) && !BM_edge_exists(va_dest, va)) {
960 e_net = edgenet[0];
961 if (edgenet_len > 1) {
962 vb = BM_edge_other_vert(e_net, va);
963 }
964 else {
965 vb = v_other_dest;
966 }
967 edgenet[0] = BM_edge_create(bm, va_dest, vb, e_net, BM_CREATE_NOP);
968 }
969 if ((edgenet_len > 1) && (v_other_dest != v_other) &&
970 !BM_edge_exists(v_other_dest, v_other))
971 {
979 e_net = edgenet[edgenet_len - 1];
980 edgenet[edgenet_len - 1] = BM_edge_create(
981 bm, v_other_dest, BM_edge_other_vert(e_net, v_other), e_net, BM_CREATE_NOP);
982 }
983 break;
984 }
985
/* Advance to the next candidate edge around `v_other` in the disk cycle. */
986 BMEdge *e_test = e_net, *e_next = nullptr;
987 while ((e_test = BM_DISK_EDGE_NEXT(e_test, v_other)) != (e_net)) {
988 if (!BM_edge_is_wire(e_test)) {
989 if (BM_elem_flag_test(e_test, BM_ELEM_TAG)) {
990 continue;
991 }
992 if (!BM_elem_flag_test(e_test->v1, BM_ELEM_TAG) &&
994 {
995 continue;
996 }
997 /* Avoids endless loop. */
999 }
1000 else if (!BM_edge_is_wire(e_net)) {
1001 continue;
1002 }
1003 e_next = e_test;
1004 break;
1005 }
1006
1007 if (e_next == nullptr) {
1008 break;
1009 }
1010
1011 e_net = e_next;
1012 v_other = BM_edge_other_vert(e_net, v_other);
1013 if (v_other == va) {
1014 /* Endless loop. */
1015 break;
1016 }
1017 v_cut_other = BM_elem_index_get(v_other);
1018 }
1019
1020 if (best_face) {
1022 BM_face_split_edgenet(bm, best_face, edgenet, edgenet_len, &face_arr);
1023 /* Update the new faces normal.
1024 * Normal is necessary to obtain the best face for edgenet */
1025 for (BMFace *face : face_arr) {
1027 }
1028 }
1029 }
1030
1031 if (edgenet) {
1032 MEM_freeN(edgenet);
1033 }
1034 }
1035 ok = true;
1036 }
1037 }
1038
/* Cleanup: free all per-thread stacks and the flattened pair array. */
1039 for (i = BLI_STACK_PAIR_LEN; i--;) {
1040 if (pair_stack[i]) {
1041 BLI_stack_free(pair_stack[i]);
1042 }
1043 }
1044 if (pair_array) {
1045 MEM_freeN(pair_array);
1046 }
1047
1048 return ok;
1049}
1050
#define BLI_assert(a)
Definition BLI_assert.h:50
void ** BLI_ghash_lookup_p(GHash *gh, const void *key) ATTR_WARN_UNUSED_RESULT
Definition BLI_ghash.c:745
void * BLI_ghash_lookup(const GHash *gh, const void *key) ATTR_WARN_UNUSED_RESULT
Definition BLI_ghash.c:731
void BLI_ghash_insert(GHash *gh, void *key, void *val)
Definition BLI_ghash.c:707
BVHTree * BLI_bvhtree_new(int maxsize, float epsilon, char tree_type, char axis)
@ BVH_OVERLAP_USE_THREADING
Definition BLI_kdopbvh.h:77
void BLI_bvhtree_balance(BVHTree *tree)
void BLI_bvhtree_free(BVHTree *tree)
void BLI_bvhtree_insert(BVHTree *tree, int index, const float co[3], int numpoints)
BVHTreeOverlap * BLI_bvhtree_overlap_ex(const BVHTree *tree1, const BVHTree *tree2, uint *r_overlap_num, BVHTree_OverlapCallback callback, void *userdata, uint max_interactions, int flag)
int BLI_bvhtree_overlap_thread_num(const BVHTree *tree)
bool(* BVHTree_OverlapCallback)(void *userdata, int index_a, int index_b, int thread)
MINLINE float square_f(float a)
bool isect_ray_ray_v3(const float ray_origin_a[3], const float ray_direction_a[3], const float ray_origin_b[3], const float ray_direction_b[3], float *r_lambda_a, float *r_lambda_b)
float ray_point_factor_v3_ex(const float p[3], const float ray_origin[3], const float ray_direction[3], float epsilon, float fallback)
bool isect_ray_ray_epsilon_v3(const float ray_origin_a[3], const float ray_direction_a[3], const float ray_origin_b[3], const float ray_direction_b[3], float epsilon, float *r_lambda_a, float *r_lambda_b)
MINLINE float len_squared_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT
MINLINE float len_squared_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void sub_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void copy_v3_v3(float r[3], const float a[3])
MINLINE float dot_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void madd_v3_v3v3fl(float r[3], const float a[3], const float b[3], float f)
void BLI_qsort_r(void *a, size_t n, size_t es, BLI_sort_cmp_t cmp, void *thunk)
Definition sort.c:76
void BLI_stack_pop_n_reverse(BLI_Stack *stack, void *dst, unsigned int n) ATTR_NONNULL()
Definition stack.c:156
size_t BLI_stack_count(const BLI_Stack *stack) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
Definition stack.c:227
void BLI_stack_free(BLI_Stack *stack) ATTR_NONNULL()
Definition stack.c:96
void * BLI_stack_push_r(BLI_Stack *stack) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
Definition stack.c:103
#define BLI_stack_new(esize, descr)
unsigned int uint
#define IN_RANGE(a, b, c)
#define ELEM(...)
#define IN_RANGE_INCL(a, b, c)
Read Guarded memory(de)allocation.
#define MEM_reallocN(vmemh, len)
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE int32_t atomic_fetch_and_add_int32(int32_t *p, int32_t x)
#define BM_DISK_EDGE_NEXT(e, v)
@ BM_ELEM_HIDDEN
@ BM_ELEM_TAG
BMEdge * BM_edge_create(BMesh *bm, BMVert *v1, BMVert *v2, const BMEdge *e_example, const eBMCreateFlag create_flag)
Main function for creating a new edge.
@ BM_CREATE_NOP
Definition bmesh_core.hh:24
#define BM_elem_index_get(ele)
#define BM_elem_flag_disable(ele, hflag)
#define BM_elem_index_set(ele, index)
#define BM_elem_flag_test(ele, hflag)
#define BM_elem_flag_enable(ele, hflag)
#define KDOP_TREE_TYPE
bool BM_mesh_intersect_edges(BMesh *bm, const char hflag, const float dist, const bool split_faces, GHash *r_targetmap)
static bool bm_vertxvert_self_isect_cb(void *userdata, int index_a, int index_b, int thread)
static bool bm_vert_pair_share_best_splittable_face_cb(BMFace *f, BMLoop *l_a, BMLoop *l_b, void *userdata)
static bool bm_edgexvert_isect_impl(BMVert *v, BMEdge *e, const float co[3], const float dir[3], float lambda, float data_dist_sq, int *data_cut_edges_len, EDBMSplitElem r_pair[2])
static bool bm_edgexedge_isect_impl(EDBMSplitData *data, BMEdge *e_a, BMEdge *e_b, const float co_a[3], const float dir_a[3], const float co_b[3], const float dir_b[3], float lambda_a, float lambda_b, EDBMSplitElem r_pair[2])
static void bm_edge_pair_elem_setup(BMEdge *e, float lambda, int *r_data_cut_edges_len, EDBMSplitElem *r_pair_elem)
static void bm_elemxelem_bvhtree_overlap(const BVHTree *tree1, const BVHTree *tree2, BVHTree_OverlapCallback callback, EDBMSplitData *data, BLI_Stack **pair_stack)
static bool bm_edgexedge_self_isect_cb(void *userdata, int index_a, int index_b, int thread)
static BMFace * bm_vert_pair_best_face_get(BMVert *v_a, BMVert *v_b, BMEdge **edgenet, const int edgenet_len, const float epsilon)
#define KDOP_AXIS_LEN
static int sort_cmp_by_lambda_cb(const void *index1_v, const void *index2_v, void *keys_v)
#define EDGE_REMAIN_TO_TEST
#define BLI_STACK_PAIR_LEN
static bool bm_vertxvert_isect_cb(void *userdata, int index_a, int index_b, int thread)
#define EDGE_ACT_TO_TEST
static bool bm_vert_pair_share_splittable_face_cb(BMFace *, BMLoop *l_a, BMLoop *l_b, void *userdata)
static void bm_vert_pair_elem_setup_ex(BMVert *v, EDBMSplitElem *r_pair_elem)
static bool bm_edgexedge_isect_cb(void *userdata, int index_a, int index_b, int thread)
static bool bm_edgexvert_isect_cb(void *userdata, int index_a, int index_b, int thread)
#define BM_ITER_ELEM(ele, iter, data, itype)
#define BM_ITER_MESH(ele, iter, bm, itype)
#define BM_ITER_MESH_INDEX(ele, iter, bm, itype, indexvar)
@ BM_EDGES_OF_MESH
@ BM_VERTS_OF_MESH
@ BM_VERTS_OF_FACE
ATTR_WARN_UNUSED_RESULT BMesh * bm
void BM_mesh_elem_table_ensure(BMesh *bm, const char htype)
BLI_INLINE BMEdge * BM_edge_at_index(BMesh *bm, const int index)
BLI_INLINE BMVert * BM_vert_at_index(BMesh *bm, const int index)
BMVert * BM_edge_split(BMesh *bm, BMEdge *e, BMVert *v, BMEdge **r_e, float fac)
Edge Split.
#define BM_EDGE
#define BM_VERT
bool BM_face_point_inside_test(const BMFace *f, const float co[3])
void BM_face_normal_update(BMFace *f)
bool BM_face_split_edgenet(BMesh *bm, BMFace *f, BMEdge **edge_net, const int edge_net_len, blender::Vector< BMFace * > *r_face_arr)
bool BM_vert_pair_share_face_check(BMVert *v_a, BMVert *v_b)
bool BM_edge_in_face(const BMEdge *e, const BMFace *f)
bool BM_edge_share_vert_check(BMEdge *e1, BMEdge *e2)
BMEdge * BM_edge_exists(BMVert *v_a, BMVert *v_b)
BMFace * BM_vert_pair_shared_face_cb(BMVert *v_a, BMVert *v_b, const bool allow_adjacent, bool(*callback)(BMFace *, BMLoop *, BMLoop *, void *userdata), void *user_data, BMLoop **r_l_a, BMLoop **r_l_b)
BLI_INLINE BMVert * BM_edge_other_vert(BMEdge *e, const BMVert *v) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
BLI_INLINE bool BM_edge_is_wire(const BMEdge *e) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
ATTR_WARN_UNUSED_RESULT const BMVert const BMEdge * e
ATTR_WARN_UNUSED_RESULT const BMLoop * l_b
ATTR_WARN_UNUSED_RESULT const BMVert * v
additional_info("compositor_sum_squared_difference_float_shared") .push_constant(Type output_img float dot(value.rgb, luminance_coefficients)") .define("LOAD(value)"
DEGForeachIDComponentCallback callback
draw_view in_light_buf[] float
int count
void *(* MEM_mallocN)(size_t len, const char *str)
Definition mallocn.cc:44
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
#define min(a, b)
Definition sort.c:32
#define FLT_MAX
Definition stdcycles.h:14
BMVert * v1
BMVert * v2
BMHeader head
float no[3]
struct BMVert * v
struct BMLoop * prev
struct BMLoop * next
float co[3]
BMHeader head
char elem_index_dirty
ccl_device_inline int as_int(uint i)
Definition util/math.h:224