Blender V5.0
mask_rasterize.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2012 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
50
51#include <algorithm> /* For `min/max`. */
52
53#include "CLG_log.h"
54
55#include "MEM_guardedalloc.h"
56
57#include "DNA_mask_types.h"
58#include "DNA_scene_types.h"
59#include "DNA_vec_types.h"
60
61#include "BLI_math_geom.h"
62#include "BLI_math_vector.h"
63#include "BLI_memarena.h"
64#include "BLI_scanfill.h"
65#include "BLI_utildefines.h"
66
67#include "BLI_linklist.h"
68#include "BLI_listbase.h"
69#include "BLI_rect.h"
70#include "BLI_task.h"
71
72#include "BKE_mask.h"
73
74#include "BLI_strict_flags.h" /* IWYU pragma: keep. Keep last. */
75
/* This is rather an annoying hack; use a define to isolate it.
 * The problem is caused by scan-fill removing edges on us. */
78#define USE_SCANFILL_EDGE_WORKAROUND
79
80#define SPLINE_RESOL_CAP_PER_PIXEL 2
81#define SPLINE_RESOL_CAP_MIN 8
82#define SPLINE_RESOL_CAP_MAX 64
83
84/* found this gives best performance for high detail masks, values between 2 and 8 work best */
85#define BUCKET_PIXELS_PER_CELL 4
86
87#define SF_EDGE_IS_BOUNDARY 0xff
88#define SF_KEYINDEX_TEMP_ID uint(-1)
89
90#define TRI_TERMINATOR_ID uint(-1)
91#define TRI_VERT uint(-1)
92
/* For debugging: assert every vertex index of a tri/quad `face` is in range.
 * Quads store a real index in slot 3; triangles mark it with #TRI_VERT. */
#ifndef NDEBUG
/* To print the face being checked, add:
 * `printf("%u %u %u %u\n", _t[0], _t[1], _t[2], _t[3]);` */
#  define FACE_ASSERT(face, vert_max) \
    { \
      uint *_t = face; \
      BLI_assert(_t[0] < vert_max); \
      BLI_assert(_t[1] < vert_max); \
      BLI_assert(_t[2] < vert_max); \
      BLI_assert(_t[3] < vert_max || _t[3] == TRI_VERT); \
    } \
    (void)0
#else
/* Do nothing in release builds. */
#  define FACE_ASSERT(face, vert_max)
#endif
109
110static CLG_LogRef LOG = {"mask.rasterize"};
111
/**
 * Rotate `p` around `cent` by `angle` (radians), writing the result to `r_p`.
 * The rotation is performed in aspect-normalized space (divide by `asp`
 * before rotating, multiply back after) so it stays circular on
 * non-square pixels.
 */
static void rotate_point_v2(
    float r_p[2], const float p[2], const float cent[2], const float angle, const float asp[2])
{
  const float sin_a = sinf(angle);
  const float cos_a = cosf(angle);

  /* Move into origin-centered, aspect-normalized space. */
  const float x = (p[0] - cent[0]) / asp[0];
  const float y = (p[1] - cent[1]) / asp[1];

  /* Rotate, restore the aspect, then translate back to `cent`. */
  r_p[0] = ((x * cos_a) - (y * sin_a)) * asp[0] + cent[0];
  r_p[1] = ((x * sin_a) + (y * cos_a)) * asp[1] + cent[1];
}
131
133{
134 return v < min ? min : (v > max ? max : v);
135}
136
138 const float co_xy[2],
139 const float co_z)
140{
141 const float co[3] = {co_xy[0], co_xy[1], co_z};
142 return BLI_scanfill_vert_add(sf_ctx, co);
143}
144
145/* --------------------------------------------------------------------- */
146/* local structs for mask rasterizing */
147/* --------------------------------------------------------------------- */
148
158 /* geometry */
160 uint (*face_array)[4]; /* access coords tri/quad */
161 float (*face_coords)[3]; /* xy, z 0-1 (1.0 == filled) */
162
163 /* 2d bounds (to quickly skip bucket lookup) */
165
166 /* buckets */
168 /* cache divide and subtract */
169 float buckets_xy_scalar[2]; /* (1.0 / (buckets_width + FLT_EPSILON)) * buckets_x */
172
173 /* copied direct from #MaskLayer.--- */
174 /* blending options */
175 float alpha;
176 char blend;
179};
180
182 /* body of the spline */
185
186 /* capping for non-filled, non cyclic splines */
189
191};
192
199
200 /* 2d bounds (to quickly skip bucket lookup) */
202};
203
204/* --------------------------------------------------------------------- */
205/* alloc / free functions */
206/* --------------------------------------------------------------------- */
207
209{
210 MaskRasterHandle *mr_handle;
211
212 mr_handle = MEM_callocN<MaskRasterHandle>("MaskRasterHandle");
213
214 return mr_handle;
215}
216
218{
219 const uint layers_tot = mr_handle->layers_tot;
220 MaskRasterLayer *layer = mr_handle->layers;
221
222 for (uint i = 0; i < layers_tot; i++, layer++) {
223
224 if (layer->face_array) {
225 MEM_freeN(layer->face_array);
226 }
227
228 if (layer->face_coords) {
229 MEM_freeN(layer->face_coords);
230 }
231
232 if (layer->buckets_face) {
233 const uint bucket_tot = layer->buckets_x * layer->buckets_y;
234 uint bucket_index;
235 for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) {
236 uint *face_index = layer->buckets_face[bucket_index];
237 if (face_index) {
238 MEM_freeN(face_index);
239 }
240 }
241
242 MEM_freeN(layer->buckets_face);
243 }
244 }
245
246 MEM_freeN(mr_handle->layers);
247 MEM_freeN(mr_handle);
248}
249
/**
 * Offset each feather point outward from its matching spline point.
 *
 * For every index `k`, `diff_feather_points[k]` is set to `diff_points[k]`
 * pushed by `ofs` along the perpendicular of the averaged directions of the
 * two edges adjacent to that point. The points are treated as a closed loop
 * (indices wrap at both ends).
 *
 * \param do_test: When true, only overwrite feather points that currently lie
 * closer than `ofs` to their spline point (squared-distance compare);
 * used to enforce a minimum feather where one already exists.
 *
 * \note Assumes `tot_diff_point >= 2` (the wrap-around index init below
 * underflows otherwise); callers gate on larger point counts -- TODO confirm.
 */
static void maskrasterize_spline_differentiate_point_outset(float (*diff_feather_points)[2],
                                                            float (*diff_points)[2],
                                                            const uint tot_diff_point,
                                                            const float ofs,
                                                            const bool do_test)
{
  /* Start with the last edge so `d_prev` is valid on the first iteration. */
  uint k_prev = tot_diff_point - 2;
  uint k_curr = tot_diff_point - 1;
  uint k_next = 0;

  uint k;

  float d_prev[2]; /* Direction of the edge entering the current point. */
  float d_next[2]; /* Direction of the edge leaving the current point. */
  float d[2];      /* Averaged direction, offset applied perpendicular to it. */

  const float *co_prev;
  const float *co_curr;
  const float *co_next;

  const float ofs_squared = ofs * ofs;

  co_prev = diff_points[k_prev];
  co_curr = diff_points[k_curr];
  co_next = diff_points[k_next];

  /* precalc */
  sub_v2_v2v2(d_prev, co_prev, co_curr);
  normalize_v2(d_prev);

  for (k = 0; k < tot_diff_point; k++) {

    // co_prev = diff_points[k_prev]; /* Precalculate. */
    co_curr = diff_points[k_curr];
    co_next = diff_points[k_next];

    // sub_v2_v2v2(d_prev, co_prev, co_curr); /* Precalculate. */
    sub_v2_v2v2(d_next, co_curr, co_next);

    // normalize_v2(d_prev); /* precalc */
    normalize_v2(d_next);

    if ((do_test == false) ||
        (len_squared_v2v2(diff_feather_points[k], diff_points[k]) < ofs_squared))
    {
      /* Average the adjacent edge directions and offset along its
       * perpendicular `(d[1], -d[0])`. */
      add_v2_v2v2(d, d_prev, d_next);

      normalize_v2(d);

      diff_feather_points[k][0] = diff_points[k][0] + (d[1] * ofs);
      diff_feather_points[k][1] = diff_points[k][1] + (-d[0] * ofs);
    }

    /* use next iter */
    copy_v2_v2(d_prev, d_next);

    // k_prev = k_curr; /* Precalculate. */
    k_curr = k_next;
    k_next++;
  }
}
312
313/* this function is not exact, sometimes it returns false positives,
314 * the main point of it is to clear out _almost_ all bucket/face non-intersections,
315 * returning true in corner cases is ok but missing an intersection is NOT.
316 *
317 * method used
318 * - check if the center of the buckets bounding box is intersecting the face
319 * - if not get the max radius to a corner of the bucket and see how close we
320 * are to any of the triangle edges.
321 */
323 uint face_index,
324 const uint bucket_x,
325 const uint bucket_y,
326 const float bucket_size_x,
327 const float bucket_size_y,
328 const float bucket_max_rad_squared)
329{
330 uint *face = layer->face_array[face_index];
331 float (*cos)[3] = layer->face_coords;
332
333 const float xmin = layer->bounds.xmin + (bucket_size_x * float(bucket_x));
334 const float ymin = layer->bounds.ymin + (bucket_size_y * float(bucket_y));
335 const float xmax = xmin + bucket_size_x;
336 const float ymax = ymin + bucket_size_y;
337
338 const float cent[2] = {(xmin + xmax) * 0.5f, (ymin + ymax) * 0.5f};
339
340 if (face[3] == TRI_VERT) {
341 const float *v1 = cos[face[0]];
342 const float *v2 = cos[face[1]];
343 const float *v3 = cos[face[2]];
344
345 if (isect_point_tri_v2(cent, v1, v2, v3)) {
346 return true;
347 }
348
349 if ((dist_squared_to_line_segment_v2(cent, v1, v2) < bucket_max_rad_squared) ||
350 (dist_squared_to_line_segment_v2(cent, v2, v3) < bucket_max_rad_squared) ||
351 (dist_squared_to_line_segment_v2(cent, v3, v1) < bucket_max_rad_squared))
352 {
353 return true;
354 }
355
356 // printf("skip tri\n");
357 return false;
358 }
359
360 const float *v1 = cos[face[0]];
361 const float *v2 = cos[face[1]];
362 const float *v3 = cos[face[2]];
363 const float *v4 = cos[face[3]];
364
365 if (isect_point_tri_v2(cent, v1, v2, v3)) {
366 return true;
367 }
368 if (isect_point_tri_v2(cent, v1, v3, v4)) {
369 return true;
370 }
371
372 if ((dist_squared_to_line_segment_v2(cent, v1, v2) < bucket_max_rad_squared) ||
373 (dist_squared_to_line_segment_v2(cent, v2, v3) < bucket_max_rad_squared) ||
374 (dist_squared_to_line_segment_v2(cent, v3, v4) < bucket_max_rad_squared) ||
375 (dist_squared_to_line_segment_v2(cent, v4, v1) < bucket_max_rad_squared))
376 {
377 return true;
378 }
379
380 // printf("skip quad\n");
381 return false;
382}
383
385{
386 layer->face_tot = 0;
387 layer->face_coords = nullptr;
388 layer->face_array = nullptr;
389
390 layer->buckets_x = 0;
391 layer->buckets_y = 0;
392
393 layer->buckets_xy_scalar[0] = 0.0f;
394 layer->buckets_xy_scalar[1] = 0.0f;
395
396 layer->buckets_face = nullptr;
397
398 BLI_rctf_init(&layer->bounds, -1.0f, -1.0f, -1.0f, -1.0f);
399}
400
/**
 * Build the layer's 2D spatial acceleration structure: a grid of buckets
 * over the layer bounds where each bucket holds a #TRI_TERMINATOR_ID
 * terminated array of indices of faces that (potentially) intersect the
 * cell. Per-pixel sampling then only tests faces stored in the pixel's
 * bucket instead of every face in the layer.
 */
static void layer_bucket_init(MaskRasterLayer *layer, const float pixel_size)
{
  /* Arena for the temporary per-bucket linked lists, freed at the end. */
  MemArena *arena = BLI_memarena_new(MEM_SIZE_OPTIMAL(1 << 16), __func__);

  const float bucket_dim_x = BLI_rctf_size_x(&layer->bounds);
  const float bucket_dim_y = BLI_rctf_size_y(&layer->bounds);

  /* Aim for roughly #BUCKET_PIXELS_PER_CELL pixels per bucket on each axis. */
  layer->buckets_x = uint((bucket_dim_x / pixel_size) / float(BUCKET_PIXELS_PER_CELL));
  layer->buckets_y = uint((bucket_dim_y / pixel_size) / float(BUCKET_PIXELS_PER_CELL));

  // printf("bucket size %ux%u\n", layer->buckets_x, layer->buckets_y);

  CLAMP(layer->buckets_x, 8, 512);
  CLAMP(layer->buckets_y, 8, 512);

  /* Cache the divide so coordinate -> bucket lookup is a multiply;
   * FLT_EPSILON keeps coordinates exactly on the max bound in range. */
  layer->buckets_xy_scalar[0] = (1.0f / (bucket_dim_x + FLT_EPSILON)) * float(layer->buckets_x);
  layer->buckets_xy_scalar[1] = (1.0f / (bucket_dim_y + FLT_EPSILON)) * float(layer->buckets_y);

  {
    /* width and height of each bucket */
    const float bucket_size_x = (bucket_dim_x + FLT_EPSILON) / float(layer->buckets_x);
    const float bucket_size_y = (bucket_dim_y + FLT_EPSILON) / float(layer->buckets_y);
    /* Center-to-corner radius of a bucket (with margin), the distance
     * threshold used by the conservative bucket/face intersection test. */
    const float bucket_max_rad = (max_ff(bucket_size_x, bucket_size_y) * float(M_SQRT2)) +
                                 FLT_EPSILON;
    const float bucket_max_rad_squared = bucket_max_rad * bucket_max_rad;

    uint *face = &layer->face_array[0][0];
    float (*cos)[3] = layer->face_coords;

    const uint bucket_tot = layer->buckets_x * layer->buckets_y;
    /* Temporary per-bucket linked lists, flattened to arrays below. */
    LinkNode **bucketstore = MEM_calloc_arrayN<LinkNode *>(bucket_tot, __func__);
    uint *bucketstore_tot = MEM_calloc_arrayN<uint>(bucket_tot, __func__);

    uint face_index;

    for (face_index = 0; face_index < layer->face_tot; face_index++, face += 4) {
      float xmin;
      float xmax;
      float ymin;
      float ymax;

      /* 2D bounding box of the face (triangles mark slot 3 with TRI_VERT). */
      if (face[3] == TRI_VERT) {
        const float *v1 = cos[face[0]];
        const float *v2 = cos[face[1]];
        const float *v3 = cos[face[2]];

        xmin = min_ff(v1[0], min_ff(v2[0], v3[0]));
        xmax = max_ff(v1[0], max_ff(v2[0], v3[0]));
        ymin = min_ff(v1[1], min_ff(v2[1], v3[1]));
        ymax = max_ff(v1[1], max_ff(v2[1], v3[1]));
      }
      else {
        const float *v1 = cos[face[0]];
        const float *v2 = cos[face[1]];
        const float *v3 = cos[face[2]];
        const float *v4 = cos[face[3]];

        xmin = min_ff(v1[0], min_ff(v2[0], min_ff(v3[0], v4[0])));
        xmax = max_ff(v1[0], max_ff(v2[0], max_ff(v3[0], v4[0])));
        ymin = min_ff(v1[1], min_ff(v2[1], min_ff(v3[1], v4[1])));
        ymax = max_ff(v1[1], max_ff(v2[1], max_ff(v3[1], v4[1])));
      }

      /* not essential but may as well skip any faces outside the view */
      if (!((xmax < 0.0f) || (ymax < 0.0f) || (xmin > 1.0f) || (ymin > 1.0f))) {

        CLAMP(xmin, 0.0f, 1.0f);
        CLAMP(ymin, 0.0f, 1.0f);
        CLAMP(xmax, 0.0f, 1.0f);
        CLAMP(ymax, 0.0f, 1.0f);

        {
          /* Bucket index range covered by the face's bounding box. */
          uint xi_min = uint((xmin - layer->bounds.xmin) * layer->buckets_xy_scalar[0]);
          uint xi_max = uint((xmax - layer->bounds.xmin) * layer->buckets_xy_scalar[0]);
          uint yi_min = uint((ymin - layer->bounds.ymin) * layer->buckets_xy_scalar[1]);
          uint yi_max = uint((ymax - layer->bounds.ymin) * layer->buckets_xy_scalar[1]);
          void *face_index_void = POINTER_FROM_UINT(face_index);

          uint xi, yi;

          /* this should _almost_ never happen but since it can in extreme cases,
           * we have to clamp the values or we overrun the buffer and crash */
          if (xi_min >= layer->buckets_x) {
            xi_min = layer->buckets_x - 1;
          }
          if (xi_max >= layer->buckets_x) {
            xi_max = layer->buckets_x - 1;
          }
          if (yi_min >= layer->buckets_y) {
            yi_min = layer->buckets_y - 1;
          }
          if (yi_max >= layer->buckets_y) {
            yi_max = layer->buckets_y - 1;
          }

          for (yi = yi_min; yi <= yi_max; yi++) {
            uint bucket_index = (layer->buckets_x * yi) + xi_min;
            for (xi = xi_min; xi <= xi_max; xi++, bucket_index++) {
              /* correct but do in outer loop */
              // uint bucket_index = (layer->buckets_x * yi) + xi;

              BLI_assert(xi < layer->buckets_x);
              BLI_assert(yi < layer->buckets_y);
              BLI_assert(bucket_index < bucket_tot);

              /* Check if the bucket intersects with the face. */
              /* NOTE: there is a trade off here since checking box/tri intersections isn't as
               * optimal as it could be, but checking pixels against faces they will never
               * intersect with is likely the greater slowdown here -
               * so check if the cell intersects the face. */
              if (layer_bucket_isect_test(layer,
                                          face_index,
                                          xi,
                                          yi,
                                          bucket_size_x,
                                          bucket_size_y,
                                          bucket_max_rad_squared))
              {
                BLI_linklist_prepend_arena(&bucketstore[bucket_index], face_index_void, arena);
                bucketstore_tot[bucket_index]++;
              }
            }
          }
        }
      }
    }

    if (true) {
      /* Now convert link-nodes into arrays for faster per pixel access. */
      uint **buckets_face = MEM_calloc_arrayN<uint *>(bucket_tot, __func__);
      uint bucket_index;

      for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) {
        if (bucketstore_tot[bucket_index]) {
          /* `+ 1` for the TRI_TERMINATOR_ID sentinel. */
          uint *bucket = MEM_calloc_arrayN<uint>((bucketstore_tot[bucket_index] + 1), __func__);
          LinkNode *bucket_node;

          buckets_face[bucket_index] = bucket;

          for (bucket_node = bucketstore[bucket_index]; bucket_node;
               bucket_node = bucket_node->next)
          {
            *bucket = POINTER_AS_UINT(bucket_node->link);
            bucket++;
          }
          *bucket = TRI_TERMINATOR_ID;
        }
        else {
          buckets_face[bucket_index] = nullptr;
        }
      }

      layer->buckets_face = buckets_face;
    }

    MEM_freeN(bucketstore);
    MEM_freeN(bucketstore_tot);
  }

  BLI_memarena_free(arena);
}
562
564 Mask *mask,
565 const int width,
566 const int height,
567 const bool do_aspect_correct,
568 const bool do_mask_aa,
569 const bool do_feather)
570{
571 const rctf default_bounds = {0.0f, 1.0f, 0.0f, 1.0f};
572 const float pixel_size = 1.0f / float(min_ii(width, height));
573 const float asp_xy[2] = {
574 (do_aspect_correct && width > height) ? float(height) / float(width) : 1.0f,
575 (do_aspect_correct && width < height) ? float(width) / float(height) : 1.0f};
576
577 const float zvec[3] = {0.0f, 0.0f, -1.0f};
578 MaskLayer *masklay;
579 uint masklay_index;
580 MemArena *sf_arena;
581
582 mr_handle->layers_tot = uint(BLI_listbase_count(&mask->masklayers));
583 mr_handle->layers = MEM_calloc_arrayN<MaskRasterLayer>(mr_handle->layers_tot, "MaskRasterLayer");
584 BLI_rctf_init_minmax(&mr_handle->bounds);
585
586 sf_arena = BLI_memarena_new(BLI_SCANFILL_ARENA_SIZE, __func__);
587
588 for (masklay = static_cast<MaskLayer *>(mask->masklayers.first), masklay_index = 0; masklay;
589 masklay = masklay->next, masklay_index++)
590 {
591 /* we need to store vertex ranges for open splines for filling */
592 uint tot_splines;
593 MaskRasterSplineInfo *open_spline_ranges;
594 uint open_spline_index = 0;
595
596 /* scanfill */
597 ScanFillContext sf_ctx;
598 ScanFillVert *sf_vert = nullptr;
599 ScanFillVert *sf_vert_next = nullptr;
600 ScanFillFace *sf_tri;
601
602 uint sf_vert_tot = 0;
603 uint tot_feather_quads = 0;
604
605#ifdef USE_SCANFILL_EDGE_WORKAROUND
606 uint tot_boundary_used = 0;
607 uint tot_boundary_found = 0;
608#endif
609
610 if (masklay->visibility_flag & MASK_HIDE_RENDER) {
611 /* skip the layer */
612 mr_handle->layers_tot--;
613 masklay_index--;
614 continue;
615 }
616
617 tot_splines = uint(BLI_listbase_count(&masklay->splines));
618 open_spline_ranges = MEM_calloc_arrayN<MaskRasterSplineInfo>(tot_splines, __func__);
619
620 BLI_scanfill_begin_arena(&sf_ctx, sf_arena);
621
622 LISTBASE_FOREACH (MaskSpline *, spline, &masklay->splines) {
623 const bool is_cyclic = (spline->flag & MASK_SPLINE_CYCLIC) != 0;
624 const bool is_fill = (spline->flag & MASK_SPLINE_NOFILL) == 0;
625
626 float (*diff_points)[2];
627 uint tot_diff_point;
628
629 float (*diff_feather_points)[2];
630 float (*diff_feather_points_flip)[2];
631 uint tot_diff_feather_points;
632
633 const uint resol_a = uint(BKE_mask_spline_resolution(spline, width, height) / 4);
634 const uint resol_b = BKE_mask_spline_feather_resolution(spline, width, height) / 4;
635 const uint resol = std::clamp(std::max(resol_a, resol_b), 4u, 512u);
636
637 diff_points = BKE_mask_spline_differentiate_with_resolution(spline, resol, &tot_diff_point);
638
639 if (do_feather) {
641 spline, resol, false, &tot_diff_feather_points);
642 BLI_assert(diff_feather_points);
643 }
644 else {
645 tot_diff_feather_points = 0;
646 diff_feather_points = nullptr;
647 }
648
649 if (tot_diff_point > 3) {
650 ScanFillVert *sf_vert_prev;
651 uint j;
652
653 sf_ctx.poly_nr++;
654
655 if (do_aspect_correct) {
656 if (width != height) {
657 float *fp;
658 float *ffp;
659 float asp;
660
661 if (width < height) {
662 fp = &diff_points[0][0];
663 ffp = tot_diff_feather_points ? &diff_feather_points[0][0] : nullptr;
664 asp = float(width) / float(height);
665 }
666 else {
667 fp = &diff_points[0][1];
668 ffp = tot_diff_feather_points ? &diff_feather_points[0][1] : nullptr;
669 asp = float(height) / float(width);
670 }
671
672 for (uint i = 0; i < tot_diff_point; i++, fp += 2) {
673 (*fp) = (((*fp) - 0.5f) / asp) + 0.5f;
674 }
675
676 if (tot_diff_feather_points) {
677 for (uint i = 0; i < tot_diff_feather_points; i++, ffp += 2) {
678 (*ffp) = (((*ffp) - 0.5f) / asp) + 0.5f;
679 }
680 }
681 }
682 }
683
684 /* fake aa, using small feather */
685 if (do_mask_aa == true) {
686 if (do_feather == false) {
687 tot_diff_feather_points = tot_diff_point;
688 diff_feather_points = MEM_calloc_arrayN<float[2]>(tot_diff_feather_points, __func__);
689 /* add single pixel feather */
691 diff_feather_points, diff_points, tot_diff_point, pixel_size, false);
692 }
693 else {
694 /* ensure single pixel feather, on any zero feather areas */
696 diff_feather_points, diff_points, tot_diff_point, pixel_size, true);
697 }
698 }
699
700 if (is_fill) {
701 /* Apply intersections depending on fill settings. */
702 if (spline->flag & MASK_SPLINE_NOINTERSECT) {
704 spline, diff_feather_points, tot_diff_feather_points);
705 }
706
707 sf_vert_prev = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_points[0], 0.0f);
708 sf_vert_prev->tmp.u = sf_vert_tot;
709
710 /* Absolute index of feather vert. */
711 sf_vert_prev->keyindex = sf_vert_tot + tot_diff_point;
712
713 sf_vert_tot++;
714
715 for (j = 1; j < tot_diff_point; j++) {
716 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_points[j], 0.0f);
717 sf_vert->tmp.u = sf_vert_tot;
718 sf_vert->keyindex = sf_vert_tot + tot_diff_point; /* absolute index of feather vert */
719 sf_vert_tot++;
720 }
721
722 sf_vert = sf_vert_prev;
723 sf_vert_prev = static_cast<ScanFillVert *>(sf_ctx.fillvertbase.last);
724
725 for (j = 0; j < tot_diff_point; j++) {
726 ScanFillEdge *sf_edge = BLI_scanfill_edge_add(&sf_ctx, sf_vert_prev, sf_vert);
727
728#ifdef USE_SCANFILL_EDGE_WORKAROUND
729 if (diff_feather_points) {
730 sf_edge->tmp.c = SF_EDGE_IS_BOUNDARY;
731 tot_boundary_used++;
732 }
733#else
734 (void)sf_edge;
735#endif
736 sf_vert_prev = sf_vert;
737 sf_vert = sf_vert->next;
738 }
739
740 if (diff_feather_points) {
741 BLI_assert(tot_diff_feather_points == tot_diff_point);
742
743 /* NOTE: only added for convenience, we don't in fact use these to scan-fill,
744 * only to create feather faces after scan-fill. */
745 for (j = 0; j < tot_diff_feather_points; j++) {
746 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_feather_points[j], 1.0f);
747 sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
748 sf_vert_tot++;
749 }
750
751 tot_feather_quads += tot_diff_point;
752 }
753 }
754 else {
755 /* unfilled spline */
756 if (diff_feather_points) {
757
758 if (spline->flag & MASK_SPLINE_NOINTERSECT) {
759 diff_feather_points_flip = MEM_calloc_arrayN<float[2]>(tot_diff_feather_points,
760 "diff_feather_points_flip");
761
762 float co_diff[2];
763 for (j = 0; j < tot_diff_point; j++) {
764 sub_v2_v2v2(co_diff, diff_points[j], diff_feather_points[j]);
765 add_v2_v2v2(diff_feather_points_flip[j], diff_points[j], co_diff);
766 }
767
769 spline, diff_feather_points, tot_diff_feather_points);
771 spline, diff_feather_points_flip, tot_diff_feather_points);
772 }
773 else {
774 diff_feather_points_flip = nullptr;
775 }
776
777 open_spline_ranges[open_spline_index].vertex_offset = sf_vert_tot;
778 open_spline_ranges[open_spline_index].vertex_total = tot_diff_point;
779
780 /* TODO: an alternate functions so we can avoid double vector copy! */
781 for (j = 0; j < tot_diff_point; j++) {
782
783 /* center vert */
784 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_points[j], 0.0f);
785 sf_vert->tmp.u = sf_vert_tot;
786 sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
787 sf_vert_tot++;
788
789 /* feather vert A */
790 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_feather_points[j], 1.0f);
791 sf_vert->tmp.u = sf_vert_tot;
792 sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
793 sf_vert_tot++;
794
795 /* feather vert B */
796 if (diff_feather_points_flip) {
798 &sf_ctx, diff_feather_points_flip[j], 1.0f);
799 }
800 else {
801 float co_diff[2];
802 sub_v2_v2v2(co_diff, diff_points[j], diff_feather_points[j]);
803 add_v2_v2v2(co_diff, diff_points[j], co_diff);
804 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, co_diff, 1.0f);
805 }
806
807 sf_vert->tmp.u = sf_vert_tot;
808 sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
809 sf_vert_tot++;
810
811 tot_feather_quads += 2;
812 }
813
814 if (!is_cyclic) {
815 tot_feather_quads -= 2;
816 }
817
818 if (diff_feather_points_flip) {
819 MEM_freeN(diff_feather_points_flip);
820 diff_feather_points_flip = nullptr;
821 }
822
823 /* cap ends */
824
825 /* dummy init value */
826 open_spline_ranges[open_spline_index].vertex_total_cap_head = 0;
827 open_spline_ranges[open_spline_index].vertex_total_cap_tail = 0;
828
829 if (!is_cyclic) {
830 const float *fp_cent;
831 const float *fp_turn;
832
833 uint k;
834
835 fp_cent = diff_points[0];
836 fp_turn = diff_feather_points[0];
837
838#define CALC_CAP_RESOL \
839 clampis_uint(uint(len_v2v2(fp_cent, fp_turn) / (pixel_size * SPLINE_RESOL_CAP_PER_PIXEL)), \
840 SPLINE_RESOL_CAP_MIN, \
841 SPLINE_RESOL_CAP_MAX)
842
843 {
844 const uint vertex_total_cap = CALC_CAP_RESOL;
845
846 for (k = 1; k < vertex_total_cap; k++) {
847 const float angle = float(k) * (1.0f / float(vertex_total_cap)) * float(M_PI);
848 float co_feather[2];
849 rotate_point_v2(co_feather, fp_turn, fp_cent, angle, asp_xy);
850
851 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, co_feather, 1.0f);
852 sf_vert->tmp.u = sf_vert_tot;
853 sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
854 sf_vert_tot++;
855 }
856 tot_feather_quads += vertex_total_cap;
857
858 open_spline_ranges[open_spline_index].vertex_total_cap_head = vertex_total_cap;
859 }
860
861 fp_cent = diff_points[tot_diff_point - 1];
862 fp_turn = diff_feather_points[tot_diff_point - 1];
863
864 {
865 const uint vertex_total_cap = CALC_CAP_RESOL;
866
867 for (k = 1; k < vertex_total_cap; k++) {
868 const float angle = float(k) * (1.0f / float(vertex_total_cap)) * float(M_PI);
869 float co_feather[2];
870 rotate_point_v2(co_feather, fp_turn, fp_cent, -angle, asp_xy);
871
872 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, co_feather, 1.0f);
873 sf_vert->tmp.u = sf_vert_tot;
874 sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
875 sf_vert_tot++;
876 }
877 tot_feather_quads += vertex_total_cap;
878
879 open_spline_ranges[open_spline_index].vertex_total_cap_tail = vertex_total_cap;
880 }
881 }
882
883 open_spline_ranges[open_spline_index].is_cyclic = is_cyclic;
884 open_spline_index++;
885
886#undef CALC_CAP_RESOL
887 /* end capping */
888 }
889 }
890 }
891
892 if (diff_points) {
893 MEM_freeN(diff_points);
894 }
895
896 if (diff_feather_points) {
897 MEM_freeN(diff_feather_points);
898 }
899 }
900
901 {
902 uint(*face_array)[4], *face; /* access coords */
903 float (*face_coords)[3], *cos; /* xy, z 0-1 (1.0 == filled) */
904 uint sf_tri_tot;
905 rctf bounds;
906 uint face_index;
907 int scanfill_flag = 0;
908
909 bool is_isect = false;
910 ListBase isect_remvertbase = {nullptr, nullptr};
911 ListBase isect_remedgebase = {nullptr, nullptr};
912
913 /* now we have all the splines */
914 face_coords = MEM_calloc_arrayN<float[3]>(sf_vert_tot, "maskrast_face_coords");
915
916 /* init bounds */
918
919 /* coords */
920 cos = (float *)face_coords;
921 for (sf_vert = static_cast<ScanFillVert *>(sf_ctx.fillvertbase.first); sf_vert;
922 sf_vert = sf_vert_next)
923 {
924 sf_vert_next = sf_vert->next;
925 copy_v3_v3(cos, sf_vert->co);
926
927 /* remove so as not to interfere with fill (called after) */
928 if (sf_vert->keyindex == SF_KEYINDEX_TEMP_ID) {
929 BLI_remlink(&sf_ctx.fillvertbase, sf_vert);
930 }
931
932 /* bounds */
934
935 cos += 3;
936 }
937
938 /* --- inefficient self-intersect case --- */
939 /* if self intersections are found, its too tricky to attempt to map vertices
940 * so just realloc and add entirely new vertices - the result of the self-intersect check.
941 */
942 if ((masklay->flag & MASK_LAYERFLAG_FILL_OVERLAP) &&
944 &sf_ctx, &isect_remvertbase, &isect_remedgebase)))
945 {
946 uint sf_vert_tot_isect = uint(BLI_listbase_count(&sf_ctx.fillvertbase));
947 uint i = sf_vert_tot;
948
949 face_coords = static_cast<float (*)[3]>(
950 MEM_reallocN(face_coords, sizeof(float[3]) * (sf_vert_tot + sf_vert_tot_isect)));
951
952 cos = (&face_coords[sf_vert_tot][0]);
953
954 for (sf_vert = static_cast<ScanFillVert *>(sf_ctx.fillvertbase.first); sf_vert;
955 sf_vert = sf_vert->next)
956 {
957 copy_v3_v3(cos, sf_vert->co);
958 sf_vert->tmp.u = i++;
959 cos += 3;
960 }
961
962 sf_vert_tot += sf_vert_tot_isect;
963
964 /* we need to calc polys after self intersect */
965 scanfill_flag |= BLI_SCANFILL_CALC_POLYS;
966 }
967 /* --- end inefficient code --- */
968
969 /* main scan-fill */
970 if ((masklay->flag & MASK_LAYERFLAG_FILL_DISCRETE) == 0) {
971 scanfill_flag |= BLI_SCANFILL_CALC_HOLES;
972 }
973
974 /* Store an array of edges from `sf_ctx.filledgebase`
975 * because filling may remove edges, see: #127692. */
976 ScanFillEdge **sf_edge_array = nullptr;
977 uint sf_edge_array_num = 0;
978 if (tot_feather_quads) {
979 const ListBase *lb_array[] = {&sf_ctx.filledgebase, &isect_remedgebase};
980 for (int pass = 0; pass < 2; pass++) {
981 LISTBASE_FOREACH (ScanFillEdge *, sf_edge, lb_array[pass]) {
982 if (sf_edge->tmp.c == SF_EDGE_IS_BOUNDARY) {
983 sf_edge_array_num += 1;
984 }
985 }
986 }
987
988 if (sf_edge_array_num > 0) {
989 sf_edge_array = MEM_malloc_arrayN<ScanFillEdge *>(size_t(sf_edge_array_num), __func__);
990 uint edge_index = 0;
991 for (int pass = 0; pass < 2; pass++) {
992 LISTBASE_FOREACH (ScanFillEdge *, sf_edge, lb_array[pass]) {
993 if (sf_edge->tmp.c == SF_EDGE_IS_BOUNDARY) {
994 sf_edge_array[edge_index++] = sf_edge;
995 }
996 }
997 }
998 BLI_assert(edge_index == sf_edge_array_num);
999 }
1000 }
1001
1002 sf_tri_tot = uint(BLI_scanfill_calc_ex(&sf_ctx, scanfill_flag, zvec));
1003
1004 if (is_isect) {
1005 /* add removed data back, we only need edges for feather,
1006 * but add verts back so they get freed along with others */
1007 BLI_movelisttolist(&sf_ctx.fillvertbase, &isect_remvertbase);
1008 BLI_movelisttolist(&sf_ctx.filledgebase, &isect_remedgebase);
1009 }
1010
1011 face_array = MEM_malloc_arrayN<uint[4]>(size_t(sf_tri_tot) + size_t(tot_feather_quads),
1012 "maskrast_face_index");
1013 face_index = 0;
1014
1015 /* faces */
1016 face = (uint *)face_array;
1017 for (sf_tri = static_cast<ScanFillFace *>(sf_ctx.fillfacebase.first); sf_tri;
1018 sf_tri = sf_tri->next)
1019 {
1020 *(face++) = sf_tri->v3->tmp.u;
1021 *(face++) = sf_tri->v2->tmp.u;
1022 *(face++) = sf_tri->v1->tmp.u;
1023 *(face++) = TRI_VERT;
1024 face_index++;
1025 FACE_ASSERT(face - 4, sf_vert_tot);
1026 }
1027
1028 /* start of feather faces... if we have this set,
1029 * 'face_index' is kept from loop above */
1030
1031 BLI_assert(face_index == sf_tri_tot);
1032 UNUSED_VARS_NDEBUG(face_index);
1033
1034 if (sf_edge_array) {
1035 BLI_assert(tot_feather_quads);
1036 for (uint i = 0; i < sf_edge_array_num; i++) {
1037 ScanFillEdge *sf_edge = sf_edge_array[i];
1038 BLI_assert(sf_edge->tmp.c == SF_EDGE_IS_BOUNDARY);
1039 *(face++) = sf_edge->v1->tmp.u;
1040 *(face++) = sf_edge->v2->tmp.u;
1041 *(face++) = sf_edge->v2->keyindex;
1042 *(face++) = sf_edge->v1->keyindex;
1043 face_index++;
1044 FACE_ASSERT(face - 4, sf_vert_tot);
1045
1046#ifdef USE_SCANFILL_EDGE_WORKAROUND
1047 tot_boundary_found++;
1048#endif
1049 }
1050 MEM_freeN(sf_edge_array);
1051 }
1052
1053#ifdef USE_SCANFILL_EDGE_WORKAROUND
1054 if (tot_boundary_found != tot_boundary_used) {
1055 BLI_assert(tot_boundary_found < tot_boundary_used);
1056 }
1057#endif
1058
1059 /* feather only splines */
1060 while (open_spline_index > 0) {
1061 const uint vertex_offset = open_spline_ranges[--open_spline_index].vertex_offset;
1062 uint vertex_total = open_spline_ranges[open_spline_index].vertex_total;
1063 uint vertex_total_cap_head = open_spline_ranges[open_spline_index].vertex_total_cap_head;
1064 uint vertex_total_cap_tail = open_spline_ranges[open_spline_index].vertex_total_cap_tail;
1065 uint k, j;
1066
1067 j = vertex_offset;
1068
1069 /* subtract one since we reference next vertex triple */
1070 for (k = 0; k < vertex_total - 1; k++, j += 3) {
1071
1072 BLI_assert(j == vertex_offset + (k * 3));
1073
1074 *(face++) = j + 3; /* next span */ /* z 1 */
1075 *(face++) = j + 0; /* z 1 */
1076 *(face++) = j + 1; /* z 0 */
1077 *(face++) = j + 4; /* next span */ /* z 0 */
1078 face_index++;
1079 FACE_ASSERT(face - 4, sf_vert_tot);
1080
1081 *(face++) = j + 0; /* z 1 */
1082 *(face++) = j + 3; /* next span */ /* z 1 */
1083 *(face++) = j + 5; /* next span */ /* z 0 */
1084 *(face++) = j + 2; /* z 0 */
1085 face_index++;
1086 FACE_ASSERT(face - 4, sf_vert_tot);
1087 }
1088
1089 if (open_spline_ranges[open_spline_index].is_cyclic) {
1090 *(face++) = vertex_offset + 0; /* next span */ /* z 1 */
1091 *(face++) = j + 0; /* z 1 */
1092 *(face++) = j + 1; /* z 0 */
1093 *(face++) = vertex_offset + 1; /* next span */ /* z 0 */
1094 face_index++;
1095 FACE_ASSERT(face - 4, sf_vert_tot);
1096
1097 *(face++) = j + 0; /* z 1 */
1098 *(face++) = vertex_offset + 0; /* next span */ /* z 1 */
1099 *(face++) = vertex_offset + 2; /* next span */ /* z 0 */
1100 *(face++) = j + 2; /* z 0 */
1101 face_index++;
1102 FACE_ASSERT(face - 4, sf_vert_tot);
1103 }
1104 else {
1105 uint midvidx = vertex_offset;
1106
1107 /***************
1108 * cap end 'a' */
1109 j = midvidx + (vertex_total * 3);
1110
1111 for (k = 0; k < vertex_total_cap_head - 2; k++, j++) {
1112 *(face++) = midvidx + 0; /* z 1 */
1113 *(face++) = midvidx + 0; /* z 1 */
1114 *(face++) = j + 0; /* z 0 */
1115 *(face++) = j + 1; /* z 0 */
1116 face_index++;
1117 FACE_ASSERT(face - 4, sf_vert_tot);
1118 }
1119
1120 j = vertex_offset + (vertex_total * 3);
1121
1122 /* 2 tris that join the original */
1123 *(face++) = midvidx + 0; /* z 1 */
1124 *(face++) = midvidx + 0; /* z 1 */
1125 *(face++) = midvidx + 1; /* z 0 */
1126 *(face++) = j + 0; /* z 0 */
1127 face_index++;
1128 FACE_ASSERT(face - 4, sf_vert_tot);
1129
1130 *(face++) = midvidx + 0; /* z 1 */
1131 *(face++) = midvidx + 0; /* z 1 */
1132 *(face++) = j + vertex_total_cap_head - 2; /* z 0 */
1133 *(face++) = midvidx + 2; /* z 0 */
1134 face_index++;
1135 FACE_ASSERT(face - 4, sf_vert_tot);
1136
1137 /***************
1138 * cap end 'b' */
1139 /* ... same as previous but v 2-3 flipped, and different initial offsets */
1140
1141 j = vertex_offset + (vertex_total * 3) + (vertex_total_cap_head - 1);
1142
1143 midvidx = vertex_offset + (vertex_total * 3) - 3;
1144
1145 for (k = 0; k < vertex_total_cap_tail - 2; k++, j++) {
1146 *(face++) = midvidx; /* z 1 */
1147 *(face++) = midvidx; /* z 1 */
1148 *(face++) = j + 1; /* z 0 */
1149 *(face++) = j + 0; /* z 0 */
1150 face_index++;
1151 FACE_ASSERT(face - 4, sf_vert_tot);
1152 }
1153
1154 j = vertex_offset + (vertex_total * 3) + (vertex_total_cap_head - 1);
1155
1156 /* 2 tris that join the original */
1157 *(face++) = midvidx + 0; /* z 1 */
1158 *(face++) = midvidx + 0; /* z 1 */
1159 *(face++) = j + 0; /* z 0 */
1160 *(face++) = midvidx + 1; /* z 0 */
1161 face_index++;
1162 FACE_ASSERT(face - 4, sf_vert_tot);
1163
1164 *(face++) = midvidx + 0; /* z 1 */
1165 *(face++) = midvidx + 0; /* z 1 */
1166 *(face++) = midvidx + 2; /* z 0 */
1167 *(face++) = j + vertex_total_cap_tail - 2; /* z 0 */
1168 face_index++;
1169 FACE_ASSERT(face - 4, sf_vert_tot);
1170 }
1171 }
1172
1173 MEM_freeN(open_spline_ranges);
1174
1175#if 0
1176 fprintf(stderr,
1177 "%u %u (%u %u), %u\n",
1178 face_index,
1179 sf_tri_tot + tot_feather_quads,
1180 sf_tri_tot,
1181 tot_feather_quads,
1182 tot_boundary_used - tot_boundary_found);
1183#endif
1184
1185#ifdef USE_SCANFILL_EDGE_WORKAROUND
1186 BLI_assert(face_index + (tot_boundary_used - tot_boundary_found) ==
1187 sf_tri_tot + tot_feather_quads);
1188#else
1189 BLI_assert(face_index == sf_tri_tot + tot_feather_quads);
1190#endif
1191 {
1192 MaskRasterLayer *layer = &mr_handle->layers[masklay_index];
1193
1194 if (BLI_rctf_isect(&default_bounds, &bounds, &bounds)) {
1195#ifdef USE_SCANFILL_EDGE_WORKAROUND
1196 layer->face_tot = (sf_tri_tot + tot_feather_quads) -
1197 (tot_boundary_used - tot_boundary_found);
1198#else
1199 layer->face_tot = (sf_tri_tot + tot_feather_quads);
1200#endif
1201 layer->face_coords = face_coords;
1202 layer->face_array = face_array;
1203 layer->bounds = bounds;
1204
1205 layer_bucket_init(layer, pixel_size);
1206
1207 BLI_rctf_union(&mr_handle->bounds, &bounds);
1208 }
1209 else {
1210 MEM_freeN(face_coords);
1211 MEM_freeN(face_array);
1212
1214 }
1215
1216 /* copy as-is */
1217 layer->alpha = masklay->alpha;
1218 layer->blend = masklay->blend;
1219 layer->blend_flag = masklay->blend_flag;
1220 layer->falloff = masklay->falloff;
1221 }
1222
1223 // printf("tris %d, feather tris %d\n", sf_tri_tot, tot_feather_quads);
1224 }
1225
1226 /* Add triangles. */
1227 BLI_scanfill_end_arena(&sf_ctx, sf_arena);
1228 }
1229
1230 BLI_memarena_free(sf_arena);
1231}
1232
1233/* --------------------------------------------------------------------- */
1234/* functions that run inside the sampling thread (keep fast!) */
1235/* --------------------------------------------------------------------- */
1236
1237/* 2D ray test */
1238#if 0
/**
 * Depth of `pt` inside triangle (v1, v2, v3): each vertex Z interpolated by its
 * 2D barycentric weight. (Currently compiled out by the surrounding `#if 0`.)
 */
static float maskrasterize_layer_z_depth_tri(const float pt[2],
                                             const float v1[3],
                                             const float v2[3],
                                             const float v3[3])
{
  float w[3];
  barycentric_weights_v2(v1, v2, v3, pt, w);
  return (v1[2] * w[0]) + (v2[2] * w[1]) + (v3[2] * w[2]);
}
1248#endif
1249
1251 const float pt[2], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
1252{
1253 float w[4];
1254 barycentric_weights_v2_quad(v1, v2, v3, v4, pt, w);
1255 // return (v1[2] * w[0]) + (v2[2] * w[1]) + (v3[2] * w[2]) + (v4[2] * w[3]);
1256 return w[2] + w[3]; /* we can make this assumption for small speedup */
1257}
1258
1259static float maskrasterize_layer_isect(const uint *face,
1260 float (*cos)[3],
1261 const float dist_orig,
1262 const float xy[2])
1263{
1264 /* we always cast from same place only need xy */
1265 if (face[3] == TRI_VERT) {
1266 /* --- tri --- */
1267
1268#if 0
1269 /* not essential but avoids unneeded extra lookups */
1270 if ((cos[0][2] < dist_orig) || (cos[1][2] < dist_orig) || (cos[2][2] < dist_orig)) {
1271 if (isect_point_tri_v2_cw(xy, cos[face[0]], cos[face[1]], cos[face[2]])) {
1272 /* we know all tris are close for now */
1273 return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[1]], cos[face[2]]);
1274 }
1275 }
1276#else
1277 /* we know all tris are close for now */
1278 if (isect_point_tri_v2_cw(xy, cos[face[0]], cos[face[1]], cos[face[2]])) {
1279 return 0.0f;
1280 }
1281#endif
1282 }
1283 else {
1284 /* --- quad --- */
1285
1286 /* not essential but avoids unneeded extra lookups */
1287 if ((cos[0][2] < dist_orig) || (cos[1][2] < dist_orig) || (cos[2][2] < dist_orig) ||
1288 (cos[3][2] < dist_orig))
1289 {
1290
1291 /* needs work */
1292#if 1
1293 /* quad check fails for bow-tie, so keep using 2 tri checks */
1294 // if (isect_point_quad_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]], cos[face[3]]))
1295 if (isect_point_tri_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]]) ||
1296 isect_point_tri_v2(xy, cos[face[0]], cos[face[2]], cos[face[3]]))
1297 {
1299 xy, cos[face[0]], cos[face[1]], cos[face[2]], cos[face[3]]);
1300 }
1301#elif 1
1302 /* don't use isect_point_tri_v2_cw because we could have bow-tie quads */
1303
1304 if (isect_point_tri_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]])) {
1305 return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[1]], cos[face[2]]);
1306 }
1307 else if (isect_point_tri_v2(xy, cos[face[0]], cos[face[2]], cos[face[3]])) {
1308 return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[2]], cos[face[3]]);
1309 }
1310#else
1311 /* cheat - we know first 2 verts are z0.0f and second 2 are z 1.0f */
1312 /* ... worth looking into */
1313#endif
1314 }
1315 }
1316
1317 return 1.0f;
1318}
1319
1321{
1323
1324 return uint((xy[0] - layer->bounds.xmin) * layer->buckets_xy_scalar[0]) +
1325 (uint((xy[1] - layer->bounds.ymin) * layer->buckets_xy_scalar[1]) * layer->buckets_x);
1326}
1327
1328static float layer_bucket_depth_from_xy(MaskRasterLayer *layer, const float xy[2])
1329{
1330 uint index = layer_bucket_index_from_xy(layer, xy);
1331 uint *face_index = layer->buckets_face[index];
1332
1333 if (face_index) {
1334 uint(*face_array)[4] = layer->face_array;
1335 float (*cos)[3] = layer->face_coords;
1336 float best_dist = 1.0f;
1337 while (*face_index != TRI_TERMINATOR_ID) {
1338 const float test_dist = maskrasterize_layer_isect(
1339 face_array[*face_index], cos, best_dist, xy);
1340 if (test_dist < best_dist) {
1341 best_dist = test_dist;
1342 /* comparing with 0.0f is OK here because triangles are always zero depth */
1343 if (best_dist == 0.0f) {
1344 /* bail early, we're as close as possible */
1345 return 0.0f;
1346 }
1347 }
1348 face_index++;
1349 }
1350 return best_dist;
1351 }
1352
1353 return 1.0f;
1354}
1355
1356float BKE_maskrasterize_handle_sample(MaskRasterHandle *mr_handle, const float xy[2])
1357{
1358 /* can't do this because some layers may invert */
1359 /* if (BLI_rctf_isect_pt_v(&mr_handle->bounds, xy)) */
1360
1361 const uint layers_tot = mr_handle->layers_tot;
1362 MaskRasterLayer *layer = mr_handle->layers;
1363
1364 /* return value */
1365 float value = 0.0f;
1366
1367 for (uint i = 0; i < layers_tot; i++, layer++) {
1368 float value_layer;
1369
1370 /* also used as signal for unused layer (when render is disabled) */
1371 if (layer->alpha != 0.0f && BLI_rctf_isect_pt_v(&layer->bounds, xy)) {
1372 value_layer = 1.0f - layer_bucket_depth_from_xy(layer, xy);
1373
1374 switch (layer->falloff) {
1375 case PROP_SMOOTH:
1376 /* ease - gives less hard lines for dilate/erode feather */
1377 value_layer = (3.0f * value_layer * value_layer -
1378 2.0f * value_layer * value_layer * value_layer);
1379 break;
1380 case PROP_SPHERE:
1381 value_layer = sqrtf(2.0f * value_layer - value_layer * value_layer);
1382 break;
1383 case PROP_ROOT:
1384 value_layer = sqrtf(value_layer);
1385 break;
1386 case PROP_SHARP:
1387 value_layer = value_layer * value_layer;
1388 break;
1389 case PROP_INVSQUARE:
1390 value_layer = value_layer * (2.0f - value_layer);
1391 break;
1392 case PROP_LIN:
1393 default:
1394 /* nothing */
1395 break;
1396 }
1397
1398 if (layer->blend != MASK_BLEND_REPLACE) {
1399 value_layer *= layer->alpha;
1400 }
1401 }
1402 else {
1403 value_layer = 0.0f;
1404 }
1405
1406 if (layer->blend_flag & MASK_BLENDFLAG_INVERT) {
1407 value_layer = 1.0f - value_layer;
1408 }
1409
1410 switch (layer->blend) {
1412 value += value_layer * (1.0f - value);
1413 break;
1415 value -= value_layer * value;
1416 break;
1417 case MASK_BLEND_ADD:
1418 value += value_layer;
1419 break;
1421 value -= value_layer;
1422 break;
1423 case MASK_BLEND_LIGHTEN:
1424 value = max_ff(value, value_layer);
1425 break;
1426 case MASK_BLEND_DARKEN:
1427 value = min_ff(value, value_layer);
1428 break;
1429 case MASK_BLEND_MUL:
1430 value *= value_layer;
1431 break;
1432 case MASK_BLEND_REPLACE:
1433 value = (value * (1.0f - layer->alpha)) + (value_layer * layer->alpha);
1434 break;
1436 value = fabsf(value - value_layer);
1437 break;
1438 default: /* same as add */
1439 CLOG_ERROR(&LOG, "unhandled blend type: %d", layer->blend);
1440 BLI_assert(0);
1441 value += value_layer;
1442 break;
1443 }
1444
1445 /* clamp after applying each layer so we don't get
1446 * issues subtracting after accumulating over 1.0f */
1447 CLAMP(value, 0.0f, 1.0f);
1448 }
1449
1450 return value;
1451}
1452
1461
1462static void maskrasterize_buffer_cb(void *__restrict userdata,
1463 const int y,
1464 const TaskParallelTLS *__restrict /*tls*/)
1465{
1466 MaskRasterizeBufferData *data = static_cast<MaskRasterizeBufferData *>(userdata);
1467
1468 MaskRasterHandle *mr_handle = data->mr_handle;
1469 float *buffer = data->buffer;
1470
1471 const uint width = data->width;
1472 const float x_inv = data->x_inv;
1473 const float x_px_ofs = data->x_px_ofs;
1474
1475 uint i = uint(y) * width;
1476 float xy[2];
1477 xy[1] = (float(y) * data->y_inv) + data->y_px_ofs;
1478 for (uint x = 0; x < width; x++, i++) {
1479 xy[0] = (float(x) * x_inv) + x_px_ofs;
1480
1481 buffer[i] = BKE_maskrasterize_handle_sample(mr_handle, xy);
1482 }
1483}
1484
1486 const uint width,
1487 const uint height,
1488 /* Cannot be const, because it is assigned to non-const variable.
1489 * NOLINTNEXTLINE: readability-non-const-parameter. */
1490 float *buffer)
1491{
1492 const float x_inv = 1.0f / float(width);
1493 const float y_inv = 1.0f / float(height);
1494
1496 data.mr_handle = mr_handle;
1497 data.x_inv = x_inv;
1498 data.y_inv = y_inv;
1499 data.x_px_ofs = x_inv * 0.5f;
1500 data.y_px_ofs = y_inv * 0.5f;
1501 data.width = width;
1502 data.buffer = buffer;
1503 TaskParallelSettings settings;
1505 settings.use_threading = (size_t(height) * width > 10000);
1506 BLI_task_parallel_range(0, int(height), &data, maskrasterize_buffer_cb, &settings);
1507}
int BKE_mask_spline_resolution(struct MaskSpline *spline, int width, int height)
float(* BKE_mask_spline_differentiate_with_resolution(struct MaskSpline *spline, unsigned int resol, unsigned int *r_tot_diff_point))[2]
unsigned int BKE_mask_spline_feather_resolution(struct MaskSpline *spline, int width, int height)
void BKE_mask_spline_feather_collapse_inner_loops(struct MaskSpline *spline, float(*feather_points)[2], unsigned int tot_feather_point)
float(* BKE_mask_spline_feather_differentiated_points_with_resolution(struct MaskSpline *spline, unsigned int resol, bool do_feather_isect, unsigned int *r_tot_feather_point))[2]
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_INLINE
void void void BLI_movelisttolist(ListBase *dst, ListBase *src) ATTR_NONNULL(1
#define LISTBASE_FOREACH(type, var, list)
void BLI_remlink(ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:131
int BLI_listbase_count(const ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:524
MINLINE float max_ff(float a, float b)
MINLINE int min_ii(int a, int b)
MINLINE float min_ff(float a, float b)
#define M_SQRT2
#define M_PI
void barycentric_weights_v2(const float v1[2], const float v2[2], const float v3[2], const float co[2], float w[3])
bool isect_point_tri_v2_cw(const float pt[2], const float v1[2], const float v2[2], const float v3[2])
void barycentric_weights_v2_quad(const float v1[2], const float v2[2], const float v3[2], const float v4[2], const float co[2], float w[4])
int isect_point_tri_v2(const float pt[2], const float v1[2], const float v2[2], const float v3[2])
float dist_squared_to_line_segment_v2(const float p[2], const float l1[2], const float l2[2])
Definition math_geom.cc:291
MINLINE float len_squared_v2v2(const float a[2], const float b[2]) ATTR_WARN_UNUSED_RESULT
MINLINE void copy_v2_v2(float r[2], const float a[2])
MINLINE void copy_v3_v3(float r[3], const float a[3])
MINLINE void add_v2_v2v2(float r[2], const float a[2], const float b[2])
MINLINE void sub_v2_v2v2(float r[2], const float a[2], const float b[2])
MINLINE float normalize_v2(float n[2])
MemArena * BLI_memarena_new(size_t bufsize, const char *name) ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL ATTR_NONNULL(2) ATTR_MALLOC
void BLI_memarena_free(MemArena *ma) ATTR_NONNULL(1)
void BLI_rctf_union(struct rctf *rct_a, const struct rctf *rct_b)
bool BLI_rctf_isect_pt_v(const struct rctf *rect, const float xy[2])
bool BLI_rctf_isect(const struct rctf *src1, const struct rctf *src2, struct rctf *dest)
void BLI_rctf_init(struct rctf *rect, float xmin, float xmax, float ymin, float ymax)
Definition rct.cc:404
BLI_INLINE float BLI_rctf_size_x(const struct rctf *rct)
Definition BLI_rect.h:202
void BLI_rctf_do_minmax_v(struct rctf *rect, const float xy[2])
Definition rct.cc:510
BLI_INLINE float BLI_rctf_size_y(const struct rctf *rct)
Definition BLI_rect.h:206
void BLI_rctf_init_minmax(struct rctf *rect)
Definition rct.cc:480
@ BLI_SCANFILL_CALC_POLYS
@ BLI_SCANFILL_CALC_HOLES
struct ScanFillVert * BLI_scanfill_vert_add(ScanFillContext *sf_ctx, const float vec[3])
Definition scanfill.cc:100
struct ScanFillEdge * BLI_scanfill_edge_add(ScanFillContext *sf_ctx, struct ScanFillVert *v1, struct ScanFillVert *v2)
Definition scanfill.cc:122
void BLI_scanfill_begin_arena(ScanFillContext *sf_ctx, struct MemArena *arena)
Definition scanfill.cc:782
unsigned int BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, int flag, const float nor_proj[3])
Definition scanfill.cc:809
#define BLI_SCANFILL_ARENA_SIZE
void BLI_scanfill_end_arena(ScanFillContext *sf_ctx, struct MemArena *arena)
Definition scanfill.cc:799
bool BLI_scanfill_calc_self_isect(ScanFillContext *sf_ctx, ListBase *remvertbase, ListBase *remedgebase)
unsigned int uint
void BLI_task_parallel_range(int start, int stop, void *userdata, TaskParallelRangeFunc func, const TaskParallelSettings *settings)
Definition task_range.cc:99
BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *settings)
Definition BLI_task.h:221
#define CLAMP(a, b, c)
#define POINTER_AS_UINT(i)
#define UNUSED_VARS_NDEBUG(...)
#define POINTER_FROM_UINT(i)
#define CLOG_ERROR(clg_ref,...)
Definition CLG_log.h:188
@ MASK_BLENDFLAG_INVERT
@ MASK_BLEND_ADD
@ MASK_BLEND_REPLACE
@ MASK_BLEND_DARKEN
@ MASK_BLEND_DIFFERENCE
@ MASK_BLEND_LIGHTEN
@ MASK_BLEND_MERGE_ADD
@ MASK_BLEND_SUBTRACT
@ MASK_BLEND_MUL
@ MASK_BLEND_MERGE_SUBTRACT
@ MASK_HIDE_RENDER
@ MASK_LAYERFLAG_FILL_OVERLAP
@ MASK_LAYERFLAG_FILL_DISCRETE
@ MASK_SPLINE_CYCLIC
@ MASK_SPLINE_NOINTERSECT
@ MASK_SPLINE_NOFILL
@ PROP_SMOOTH
@ PROP_ROOT
@ PROP_SHARP
@ PROP_LIN
@ PROP_INVSQUARE
@ PROP_SPHERE
static double angle(const Eigen::Vector3d &v1, const Eigen::Vector3d &v2)
Definition IK_Math.h:117
Read Guarded memory(de)allocation.
#define MEM_SIZE_OPTIMAL(size)
#define MEM_reallocN(vmemh, len)
BMesh const char void * data
ATTR_WARN_UNUSED_RESULT const BMVert * v2
ATTR_WARN_UNUSED_RESULT const BMVert * v
static btDbvtVolume bounds(btDbvtNode **leaves, int count)
Definition btDbvt.cpp:299
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition btQuadWord.h:119
nullptr float
static bool is_cyclic(const Nurb *nu)
#define cos
#define LOG(level)
Definition log.h:97
void * MEM_calloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:123
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void * MEM_malloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:133
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
#define SF_EDGE_IS_BOUNDARY
void BKE_maskrasterize_handle_free(MaskRasterHandle *mr_handle)
static void maskrasterize_buffer_cb(void *__restrict userdata, const int y, const TaskParallelTLS *__restrict)
void BKE_maskrasterize_handle_init(MaskRasterHandle *mr_handle, Mask *mask, const int width, const int height, const bool do_aspect_correct, const bool do_mask_aa, const bool do_feather)
static void layer_bucket_init(MaskRasterLayer *layer, const float pixel_size)
float BKE_maskrasterize_handle_sample(MaskRasterHandle *mr_handle, const float xy[2])
#define FACE_ASSERT(face, vert_max)
#define TRI_VERT
static bool layer_bucket_isect_test(const MaskRasterLayer *layer, uint face_index, const uint bucket_x, const uint bucket_y, const float bucket_size_x, const float bucket_size_y, const float bucket_max_rad_squared)
MaskRasterHandle * BKE_maskrasterize_handle_new()
#define SF_KEYINDEX_TEMP_ID
static void rotate_point_v2(float r_p[2], const float p[2], const float cent[2], const float angle, const float asp[2])
static float maskrasterize_layer_z_depth_quad(const float pt[2], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
static float layer_bucket_depth_from_xy(MaskRasterLayer *layer, const float xy[2])
#define TRI_TERMINATOR_ID
#define BUCKET_PIXELS_PER_CELL
static float maskrasterize_layer_isect(const uint *face, float(*cos)[3], const float dist_orig, const float xy[2])
static void layer_bucket_init_dummy(MaskRasterLayer *layer)
#define CALC_CAP_RESOL
static ScanFillVert * scanfill_vert_add_v2_with_depth(ScanFillContext *sf_ctx, const float co_xy[2], const float co_z)
void BKE_maskrasterize_buffer(MaskRasterHandle *mr_handle, const uint width, const uint height, float *buffer)
Rasterize a buffer from a single mask (threaded execution).
static void maskrasterize_spline_differentiate_point_outset(float(*diff_feather_points)[2], float(*diff_points)[2], const uint tot_diff_point, const float ofs, const bool do_test)
BLI_INLINE uint clampis_uint(const uint v, const uint min, const uint max)
BLI_INLINE uint layer_bucket_index_from_xy(MaskRasterLayer *layer, const float xy[2])
ccl_device_inline float2 mask(const MaskType mask, const float2 a)
#define fabsf
#define sqrtf
#define sinf
#define cosf
#define min(a, b)
Definition sort.cc:36
void * link
struct LinkNode * next
void * last
void * first
struct MaskLayer * next
char visibility_flag
ListBase splines
MaskRasterLayer * layers
uint(* face_array)[4]
float buckets_xy_scalar[2]
float(* face_coords)[3]
MaskRasterHandle * mr_handle
ListBase fillvertbase
ListBase filledgebase
unsigned short poly_nr
ListBase fillfacebase
struct ScanFillVert * v1
struct ScanFillVert * v2
unsigned char c
union ScanFillEdge::@026355123305312163137370333220125272167065274016 tmp
struct ScanFillFace * next
struct ScanFillVert * v2
struct ScanFillVert * v3
struct ScanFillVert * v1
struct ScanFillVert * next
float co[3]
union ScanFillVert::@316106002035155215355067045167356240017116022023 tmp
unsigned int u
unsigned int keyindex
float xmin
float ymin
i
Definition text_draw.cc:230
max
Definition text_draw.cc:251
int xy[2]
Definition wm_draw.cc:178