Blender V4.3
mask_rasterize.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2012 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
51#include <algorithm> /* For `min/max`. */
52
53#include "CLG_log.h"
54
55#include "MEM_guardedalloc.h"
56
57#include "DNA_mask_types.h"
58#include "DNA_scene_types.h"
59#include "DNA_vec_types.h"
60
61#include "BLI_math_geom.h"
62#include "BLI_math_vector.h"
63#include "BLI_memarena.h"
64#include "BLI_scanfill.h"
65#include "BLI_utildefines.h"
66
67#include "BLI_linklist.h"
68#include "BLI_listbase.h"
69#include "BLI_rect.h"
70#include "BLI_task.h"
71
72#include "BKE_mask.h"
73
74#include "BLI_strict_flags.h" /* Keep last. */
75
/* This is a rather annoying hack, use a define to isolate it.
 * The problem is caused by scanfill removing edges on us. */
78#define USE_SCANFILL_EDGE_WORKAROUND
79
80#define SPLINE_RESOL_CAP_PER_PIXEL 2
81#define SPLINE_RESOL_CAP_MIN 8
82#define SPLINE_RESOL_CAP_MAX 64
83
84/* found this gives best performance for high detail masks, values between 2 and 8 work best */
85#define BUCKET_PIXELS_PER_CELL 4
86
87#define SF_EDGE_IS_BOUNDARY 0xff
88#define SF_KEYINDEX_TEMP_ID uint(-1)
89
90#define TRI_TERMINATOR_ID uint(-1)
91#define TRI_VERT uint(-1)
92
93/* for debugging add... */
94#ifndef NDEBUG
95// printf("%u %u %u %u\n", _t[0], _t[1], _t[2], _t[3]);
96# define FACE_ASSERT(face, vert_max) \
97 { \
98 uint *_t = face; \
99 BLI_assert(_t[0] < vert_max); \
100 BLI_assert(_t[1] < vert_max); \
101 BLI_assert(_t[2] < vert_max); \
102 BLI_assert(_t[3] < vert_max || _t[3] == TRI_VERT); \
103 } \
104 (void)0
105#else
106/* do nothing */
107# define FACE_ASSERT(face, vert_max)
108#endif
109
110static CLG_LogRef LOG = {"bke.mask_rasterize"};
111
/**
 * Rotate `p` around `cent` by `angle` (radians), writing the result to `r_p`.
 * The rotation is performed in aspect-normalized space so circles stay
 * circular on non-square pixels: coordinates are divided by `asp` before
 * rotating and multiplied by it again afterwards.
 */
static void rotate_point_v2(
    float r_p[2], const float p[2], const float cent[2], const float angle, const float asp[2])
{
  const float sin_a = sinf(angle);
  const float cos_a = cosf(angle);

  /* Translate into origin-centered, aspect-corrected space. */
  const float x = (p[0] - cent[0]) / asp[0];
  const float y = (p[1] - cent[1]) / asp[1];

  /* Rotate, restore the aspect, then translate back to the center. */
  r_p[0] = ((x * cos_a) - (y * sin_a)) * asp[0] + cent[0];
  r_p[1] = ((x * sin_a) + (y * cos_a)) * asp[1] + cent[1];
}
131
132BLI_INLINE uint clampis_uint(const uint v, const uint min, const uint max)
133{
134 return v < min ? min : (v > max ? max : v);
135}
136
138 const float co_xy[2],
139 const float co_z)
140{
141 const float co[3] = {co_xy[0], co_xy[1], co_z};
142 return BLI_scanfill_vert_add(sf_ctx, co);
143}
144
145/* --------------------------------------------------------------------- */
146/* local structs for mask rasterizing */
147/* --------------------------------------------------------------------- */
148
158 /* geometry */
160 uint (*face_array)[4]; /* access coords tri/quad */
161 float (*face_coords)[3]; /* xy, z 0-1 (1.0 == filled) */
162
163 /* 2d bounds (to quickly skip bucket lookup) */
165
166 /* buckets */
168 /* cache divide and subtract */
169 float buckets_xy_scalar[2]; /* (1.0 / (buckets_width + FLT_EPSILON)) * buckets_x */
172
173 /* copied direct from #MaskLayer.--- */
174 /* blending options */
175 float alpha;
176 char blend;
179};
180
182 /* body of the spline */
185
186 /* capping for non-filled, non cyclic splines */
189
191};
192
199
200 /* 2d bounds (to quickly skip bucket lookup) */
202};
203
204/* --------------------------------------------------------------------- */
205/* alloc / free functions */
206/* --------------------------------------------------------------------- */
207
209{
210 MaskRasterHandle *mr_handle;
211
212 mr_handle = MEM_cnew<MaskRasterHandle>("MaskRasterHandle");
213
214 return mr_handle;
215}
216
218{
219 const uint layers_tot = mr_handle->layers_tot;
220 MaskRasterLayer *layer = mr_handle->layers;
221
222 for (uint i = 0; i < layers_tot; i++, layer++) {
223
224 if (layer->face_array) {
225 MEM_freeN(layer->face_array);
226 }
227
228 if (layer->face_coords) {
229 MEM_freeN(layer->face_coords);
230 }
231
232 if (layer->buckets_face) {
233 const uint bucket_tot = layer->buckets_x * layer->buckets_y;
234 uint bucket_index;
235 for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) {
236 uint *face_index = layer->buckets_face[bucket_index];
237 if (face_index) {
238 MEM_freeN(face_index);
239 }
240 }
241
242 MEM_freeN(layer->buckets_face);
243 }
244 }
245
246 MEM_freeN(mr_handle->layers);
247 MEM_freeN(mr_handle);
248}
249
/**
 * Offset each feather point outward from its matching spline point by `ofs`,
 * perpendicular to the local direction of the (closed) poly-line `diff_points`.
 *
 * \param diff_feather_points: Feather coordinates, written in place.
 * \param diff_points: The differentiated spline points (treated as a closed loop).
 * \param tot_diff_point: Number of points in both arrays.
 * \param ofs: Offset distance to apply.
 * \param do_test: When true, only overwrite feather points that currently lie
 * closer than `ofs` to their spline point (keeps larger user-set feather intact);
 * when false, all feather points are overwritten.
 */
static void maskrasterize_spline_differentiate_point_outset(float (*diff_feather_points)[2],
                                                            float (*diff_points)[2],
                                                            const uint tot_diff_point,
                                                            const float ofs,
                                                            const bool do_test)
{
  /* Rolling indices into the closed loop; they start on the last segment so
   * `d_prev` can be primed once before the loop instead of per iteration. */
  uint k_prev = tot_diff_point - 2;
  uint k_curr = tot_diff_point - 1;
  uint k_next = 0;

  uint k;

  /* Edge directions around the current vertex and their (normalized) average. */
  float d_prev[2];
  float d_next[2];
  float d[2];

  const float *co_prev;
  const float *co_curr;
  const float *co_next;

  /* Compare squared distances to avoid a square-root per point. */
  const float ofs_squared = ofs * ofs;

  co_prev = diff_points[k_prev];
  co_curr = diff_points[k_curr];
  co_next = diff_points[k_next];

  /* precalc */
  sub_v2_v2v2(d_prev, co_prev, co_curr);
  normalize_v2(d_prev);

  for (k = 0; k < tot_diff_point; k++) {

    /* NOTE(review): the edge directions are evaluated at `k_curr` which trails
     * `k` by one — presumably intentional given how the feather points are
     * ordered; confirm against the callers before changing. */
    // co_prev = diff_points[k_prev]; /* Precalculate. */
    co_curr = diff_points[k_curr];
    co_next = diff_points[k_next];

    // sub_v2_v2v2(d_prev, co_prev, co_curr); /* Precalculate. */
    sub_v2_v2v2(d_next, co_curr, co_next);

    // normalize_v2(d_prev); /* precalc */
    normalize_v2(d_next);

    if ((do_test == false) ||
        (len_squared_v2v2(diff_feather_points[k], diff_points[k]) < ofs_squared))
    {

      /* Average the two adjacent edge directions. */
      add_v2_v2v2(d, d_prev, d_next);

      normalize_v2(d);

      /* Offset along the perpendicular of `d` (rotated 90 degrees). */
      diff_feather_points[k][0] = diff_points[k][0] + (d[1] * ofs);
      diff_feather_points[k][1] = diff_points[k][1] + (-d[0] * ofs);
    }

    /* use next iter: carry this edge over as the previous edge. */
    copy_v2_v2(d_prev, d_next);

    // k_prev = k_curr; /* Precalculate. */
    k_curr = k_next;
    k_next++;
  }
}
312
313/* this function is not exact, sometimes it returns false positives,
314 * the main point of it is to clear out _almost_ all bucket/face non-intersections,
315 * returning true in corner cases is ok but missing an intersection is NOT.
316 *
317 * method used
318 * - check if the center of the buckets bounding box is intersecting the face
319 * - if not get the max radius to a corner of the bucket and see how close we
320 * are to any of the triangle edges.
321 */
323 uint face_index,
324 const uint bucket_x,
325 const uint bucket_y,
326 const float bucket_size_x,
327 const float bucket_size_y,
328 const float bucket_max_rad_squared)
329{
330 uint *face = layer->face_array[face_index];
331 float(*cos)[3] = layer->face_coords;
332
333 const float xmin = layer->bounds.xmin + (bucket_size_x * float(bucket_x));
334 const float ymin = layer->bounds.ymin + (bucket_size_y * float(bucket_y));
335 const float xmax = xmin + bucket_size_x;
336 const float ymax = ymin + bucket_size_y;
337
338 const float cent[2] = {(xmin + xmax) * 0.5f, (ymin + ymax) * 0.5f};
339
340 if (face[3] == TRI_VERT) {
341 const float *v1 = cos[face[0]];
342 const float *v2 = cos[face[1]];
343 const float *v3 = cos[face[2]];
344
345 if (isect_point_tri_v2(cent, v1, v2, v3)) {
346 return true;
347 }
348
349 if ((dist_squared_to_line_segment_v2(cent, v1, v2) < bucket_max_rad_squared) ||
350 (dist_squared_to_line_segment_v2(cent, v2, v3) < bucket_max_rad_squared) ||
351 (dist_squared_to_line_segment_v2(cent, v3, v1) < bucket_max_rad_squared))
352 {
353 return true;
354 }
355
356 // printf("skip tri\n");
357 return false;
358 }
359
360 const float *v1 = cos[face[0]];
361 const float *v2 = cos[face[1]];
362 const float *v3 = cos[face[2]];
363 const float *v4 = cos[face[3]];
364
365 if (isect_point_tri_v2(cent, v1, v2, v3)) {
366 return true;
367 }
368 if (isect_point_tri_v2(cent, v1, v3, v4)) {
369 return true;
370 }
371
372 if ((dist_squared_to_line_segment_v2(cent, v1, v2) < bucket_max_rad_squared) ||
373 (dist_squared_to_line_segment_v2(cent, v2, v3) < bucket_max_rad_squared) ||
374 (dist_squared_to_line_segment_v2(cent, v3, v4) < bucket_max_rad_squared) ||
375 (dist_squared_to_line_segment_v2(cent, v4, v1) < bucket_max_rad_squared))
376 {
377 return true;
378 }
379
380 // printf("skip quad\n");
381 return false;
382}
383
385{
386 layer->face_tot = 0;
387 layer->face_coords = nullptr;
388 layer->face_array = nullptr;
389
390 layer->buckets_x = 0;
391 layer->buckets_y = 0;
392
393 layer->buckets_xy_scalar[0] = 0.0f;
394 layer->buckets_xy_scalar[1] = 0.0f;
395
396 layer->buckets_face = nullptr;
397
398 BLI_rctf_init(&layer->bounds, -1.0f, -1.0f, -1.0f, -1.0f);
399}
400
/**
 * Build the layer's 2D spatial acceleration grid: divide the layer bounds into
 * buckets and store, per bucket, a #TRI_TERMINATOR_ID-terminated array of the
 * indices of faces that may intersect that bucket. Per-pixel sampling can then
 * test only the faces in one bucket instead of every face in the layer.
 */
static void layer_bucket_init(MaskRasterLayer *layer, const float pixel_size)
{
  /* Scratch arena for the temporary per-bucket linked lists built below. */
  MemArena *arena = BLI_memarena_new(MEM_SIZE_OPTIMAL(1 << 16), __func__);

  const float bucket_dim_x = BLI_rctf_size_x(&layer->bounds);
  const float bucket_dim_y = BLI_rctf_size_y(&layer->bounds);

  /* Aim for roughly #BUCKET_PIXELS_PER_CELL pixels per bucket cell. */
  layer->buckets_x = uint((bucket_dim_x / pixel_size) / float(BUCKET_PIXELS_PER_CELL));
  layer->buckets_y = uint((bucket_dim_y / pixel_size) / float(BUCKET_PIXELS_PER_CELL));

  // printf("bucket size %ux%u\n", layer->buckets_x, layer->buckets_y);

  CLAMP(layer->buckets_x, 8, 512);
  CLAMP(layer->buckets_y, 8, 512);

  /* Scale that maps a coordinate (relative to the bounds minimum) to a bucket
   * index; the epsilon keeps coordinates exactly on the maximum bound from
   * mapping one past the last bucket (a clamp below catches extreme cases). */
  layer->buckets_xy_scalar[0] = (1.0f / (bucket_dim_x + FLT_EPSILON)) * float(layer->buckets_x);
  layer->buckets_xy_scalar[1] = (1.0f / (bucket_dim_y + FLT_EPSILON)) * float(layer->buckets_y);

  {
    /* width and height of each bucket */
    const float bucket_size_x = (bucket_dim_x + FLT_EPSILON) / float(layer->buckets_x);
    const float bucket_size_y = (bucket_dim_y + FLT_EPSILON) / float(layer->buckets_y);
    /* Radius from a bucket's center to its corner (half-diagonal would be
     * `size * M_SQRT2 / 2`; this uses the full diagonal, erring on the
     * inclusive side, which is safe for an acceleration structure). */
    const float bucket_max_rad = (max_ff(bucket_size_x, bucket_size_y) * float(M_SQRT2)) +
                                 FLT_EPSILON;
    const float bucket_max_rad_squared = bucket_max_rad * bucket_max_rad;

    /* Walk the flat face-index array 4 entries at a time (tri or quad). */
    uint *face = &layer->face_array[0][0];
    float(*cos)[3] = layer->face_coords;

    const uint bucket_tot = layer->buckets_x * layer->buckets_y;
    /* Temporary per-bucket linked lists of face indices, and their counts. */
    LinkNode **bucketstore = MEM_cnew_array<LinkNode *>(bucket_tot, __func__);
    uint *bucketstore_tot = MEM_cnew_array<uint>(bucket_tot, __func__);

    uint face_index;

    for (face_index = 0; face_index < layer->face_tot; face_index++, face += 4) {
      float xmin;
      float xmax;
      float ymin;
      float ymax;

      /* Compute the face's 2D bounding box (triangle or quad). */
      if (face[3] == TRI_VERT) {
        const float *v1 = cos[face[0]];
        const float *v2 = cos[face[1]];
        const float *v3 = cos[face[2]];

        xmin = min_ff(v1[0], min_ff(v2[0], v3[0]));
        xmax = max_ff(v1[0], max_ff(v2[0], v3[0]));
        ymin = min_ff(v1[1], min_ff(v2[1], v3[1]));
        ymax = max_ff(v1[1], max_ff(v2[1], v3[1]));
      }
      else {
        const float *v1 = cos[face[0]];
        const float *v2 = cos[face[1]];
        const float *v3 = cos[face[2]];
        const float *v4 = cos[face[3]];

        xmin = min_ff(v1[0], min_ff(v2[0], min_ff(v3[0], v4[0])));
        xmax = max_ff(v1[0], max_ff(v2[0], max_ff(v3[0], v4[0])));
        ymin = min_ff(v1[1], min_ff(v2[1], min_ff(v3[1], v4[1])));
        ymax = max_ff(v1[1], max_ff(v2[1], max_ff(v3[1], v4[1])));
      }

      /* not essential but may as well skip any faces outside the view */
      if (!((xmax < 0.0f) || (ymax < 0.0f) || (xmin > 1.0f) || (ymin > 1.0f))) {

        CLAMP(xmin, 0.0f, 1.0f);
        CLAMP(ymin, 0.0f, 1.0f);
        CLAMP(xmax, 0.0f, 1.0f);
        CLAMP(ymax, 0.0f, 1.0f);

        {
          /* Bucket-index range covered by the face's bounding box. */
          uint xi_min = uint((xmin - layer->bounds.xmin) * layer->buckets_xy_scalar[0]);
          uint xi_max = uint((xmax - layer->bounds.xmin) * layer->buckets_xy_scalar[0]);
          uint yi_min = uint((ymin - layer->bounds.ymin) * layer->buckets_xy_scalar[1]);
          uint yi_max = uint((ymax - layer->bounds.ymin) * layer->buckets_xy_scalar[1]);
          void *face_index_void = POINTER_FROM_UINT(face_index);

          uint xi, yi;

          /* this should _almost_ never happen but since it can in extreme cases,
           * we have to clamp the values or we overrun the buffer and crash */
          if (xi_min >= layer->buckets_x) {
            xi_min = layer->buckets_x - 1;
          }
          if (xi_max >= layer->buckets_x) {
            xi_max = layer->buckets_x - 1;
          }
          if (yi_min >= layer->buckets_y) {
            yi_min = layer->buckets_y - 1;
          }
          if (yi_max >= layer->buckets_y) {
            yi_max = layer->buckets_y - 1;
          }

          for (yi = yi_min; yi <= yi_max; yi++) {
            uint bucket_index = (layer->buckets_x * yi) + xi_min;
            for (xi = xi_min; xi <= xi_max; xi++, bucket_index++) {
              /* correct but do in outer loop */
              // uint bucket_index = (layer->buckets_x * yi) + xi;

              BLI_assert(xi < layer->buckets_x);
              BLI_assert(yi < layer->buckets_y);
              BLI_assert(bucket_index < bucket_tot);

              /* Check if the bucket intersects with the face. */
              /* NOTE: there is a trade off here since checking box/tri intersections isn't as
               * optimal as it could be, but checking pixels against faces they will never
               * intersect with is likely the greater slowdown here -
               * so check if the cell intersects the face. */
              if (layer_bucket_isect_test(layer,
                                          face_index,
                                          xi,
                                          yi,
                                          bucket_size_x,
                                          bucket_size_y,
                                          bucket_max_rad_squared))
              {
                BLI_linklist_prepend_arena(&bucketstore[bucket_index], face_index_void, arena);
                bucketstore_tot[bucket_index]++;
              }
            }
          }
        }
      }
    }

    if (true) {
      /* Now convert link-nodes into arrays for faster per pixel access. */
      uint **buckets_face = MEM_cnew_array<uint *>(bucket_tot, __func__);
      uint bucket_index;

      for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) {
        if (bucketstore_tot[bucket_index]) {
          /* +1 for the #TRI_TERMINATOR_ID sentinel that ends the array. */
          uint *bucket = MEM_cnew_array<uint>((bucketstore_tot[bucket_index] + 1), __func__);
          LinkNode *bucket_node;

          buckets_face[bucket_index] = bucket;

          for (bucket_node = bucketstore[bucket_index]; bucket_node;
               bucket_node = bucket_node->next)
          {
            *bucket = POINTER_AS_UINT(bucket_node->link);
            bucket++;
          }
          *bucket = TRI_TERMINATOR_ID;
        }
        else {
          /* Empty bucket: no array at all, a null pointer marks it. */
          buckets_face[bucket_index] = nullptr;
        }
      }

      layer->buckets_face = buckets_face;
    }

    MEM_freeN(bucketstore);
    MEM_freeN(bucketstore_tot);
  }

  /* Frees all the temporary link-nodes at once. */
  BLI_memarena_free(arena);
}
562
564 Mask *mask,
565 const int width,
566 const int height,
567 const bool do_aspect_correct,
568 const bool do_mask_aa,
569 const bool do_feather)
570{
571 const rctf default_bounds = {0.0f, 1.0f, 0.0f, 1.0f};
572 const float pixel_size = 1.0f / float(min_ii(width, height));
573 const float asp_xy[2] = {
574 (do_aspect_correct && width > height) ? float(height) / float(width) : 1.0f,
575 (do_aspect_correct && width < height) ? float(width) / float(height) : 1.0f};
576
577 const float zvec[3] = {0.0f, 0.0f, -1.0f};
578 MaskLayer *masklay;
579 uint masklay_index;
580 MemArena *sf_arena;
581
582 mr_handle->layers_tot = uint(BLI_listbase_count(&mask->masklayers));
583 mr_handle->layers = MEM_cnew_array<MaskRasterLayer>(mr_handle->layers_tot, "MaskRasterLayer");
584 BLI_rctf_init_minmax(&mr_handle->bounds);
585
586 sf_arena = BLI_memarena_new(BLI_SCANFILL_ARENA_SIZE, __func__);
587
588 for (masklay = static_cast<MaskLayer *>(mask->masklayers.first), masklay_index = 0; masklay;
589 masklay = masklay->next, masklay_index++)
590 {
591 /* we need to store vertex ranges for open splines for filling */
592 uint tot_splines;
593 MaskRasterSplineInfo *open_spline_ranges;
594 uint open_spline_index = 0;
595
596 /* scanfill */
597 ScanFillContext sf_ctx;
598 ScanFillVert *sf_vert = nullptr;
599 ScanFillVert *sf_vert_next = nullptr;
600 ScanFillFace *sf_tri;
601
602 uint sf_vert_tot = 0;
603 uint tot_feather_quads = 0;
604
605#ifdef USE_SCANFILL_EDGE_WORKAROUND
606 uint tot_boundary_used = 0;
607 uint tot_boundary_found = 0;
608#endif
609
610 if (masklay->visibility_flag & MASK_HIDE_RENDER) {
611 /* skip the layer */
612 mr_handle->layers_tot--;
613 masklay_index--;
614 continue;
615 }
616
617 tot_splines = uint(BLI_listbase_count(&masklay->splines));
618 open_spline_ranges = MEM_cnew_array<MaskRasterSplineInfo>(tot_splines, __func__);
619
620 BLI_scanfill_begin_arena(&sf_ctx, sf_arena);
621
622 LISTBASE_FOREACH (MaskSpline *, spline, &masklay->splines) {
623 const bool is_cyclic = (spline->flag & MASK_SPLINE_CYCLIC) != 0;
624 const bool is_fill = (spline->flag & MASK_SPLINE_NOFILL) == 0;
625
626 float(*diff_points)[2];
627 uint tot_diff_point;
628
629 float(*diff_feather_points)[2];
630 float(*diff_feather_points_flip)[2];
631 uint tot_diff_feather_points;
632
633 const uint resol_a = uint(BKE_mask_spline_resolution(spline, width, height) / 4);
634 const uint resol_b = BKE_mask_spline_feather_resolution(spline, width, height) / 4;
635 const uint resol = std::clamp(std::max(resol_a, resol_b), 4u, 512u);
636
637 diff_points = BKE_mask_spline_differentiate_with_resolution(spline, resol, &tot_diff_point);
638
639 if (do_feather) {
641 spline, resol, false, &tot_diff_feather_points);
642 BLI_assert(diff_feather_points);
643 }
644 else {
645 tot_diff_feather_points = 0;
646 diff_feather_points = nullptr;
647 }
648
649 if (tot_diff_point > 3) {
650 ScanFillVert *sf_vert_prev;
651 uint j;
652
653 sf_ctx.poly_nr++;
654
655 if (do_aspect_correct) {
656 if (width != height) {
657 float *fp;
658 float *ffp;
659 float asp;
660
661 if (width < height) {
662 fp = &diff_points[0][0];
663 ffp = tot_diff_feather_points ? &diff_feather_points[0][0] : nullptr;
664 asp = float(width) / float(height);
665 }
666 else {
667 fp = &diff_points[0][1];
668 ffp = tot_diff_feather_points ? &diff_feather_points[0][1] : nullptr;
669 asp = float(height) / float(width);
670 }
671
672 for (uint i = 0; i < tot_diff_point; i++, fp += 2) {
673 (*fp) = (((*fp) - 0.5f) / asp) + 0.5f;
674 }
675
676 if (tot_diff_feather_points) {
677 for (uint i = 0; i < tot_diff_feather_points; i++, ffp += 2) {
678 (*ffp) = (((*ffp) - 0.5f) / asp) + 0.5f;
679 }
680 }
681 }
682 }
683
684 /* fake aa, using small feather */
685 if (do_mask_aa == true) {
686 if (do_feather == false) {
687 tot_diff_feather_points = tot_diff_point;
688 diff_feather_points = MEM_cnew_array<float[2]>(size_t(tot_diff_feather_points),
689 __func__);
690 /* add single pixel feather */
692 diff_feather_points, diff_points, tot_diff_point, pixel_size, false);
693 }
694 else {
695 /* ensure single pixel feather, on any zero feather areas */
697 diff_feather_points, diff_points, tot_diff_point, pixel_size, true);
698 }
699 }
700
701 if (is_fill) {
702 /* Apply intersections depending on fill settings. */
703 if (spline->flag & MASK_SPLINE_NOINTERSECT) {
705 spline, diff_feather_points, tot_diff_feather_points);
706 }
707
708 sf_vert_prev = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_points[0], 0.0f);
709 sf_vert_prev->tmp.u = sf_vert_tot;
710
711 /* Absolute index of feather vert. */
712 sf_vert_prev->keyindex = sf_vert_tot + tot_diff_point;
713
714 sf_vert_tot++;
715
716 for (j = 1; j < tot_diff_point; j++) {
717 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_points[j], 0.0f);
718 sf_vert->tmp.u = sf_vert_tot;
719 sf_vert->keyindex = sf_vert_tot + tot_diff_point; /* absolute index of feather vert */
720 sf_vert_tot++;
721 }
722
723 sf_vert = sf_vert_prev;
724 sf_vert_prev = static_cast<ScanFillVert *>(sf_ctx.fillvertbase.last);
725
726 for (j = 0; j < tot_diff_point; j++) {
727 ScanFillEdge *sf_edge = BLI_scanfill_edge_add(&sf_ctx, sf_vert_prev, sf_vert);
728
729#ifdef USE_SCANFILL_EDGE_WORKAROUND
730 if (diff_feather_points) {
731 sf_edge->tmp.c = SF_EDGE_IS_BOUNDARY;
732 tot_boundary_used++;
733 }
734#else
735 (void)sf_edge;
736#endif
737 sf_vert_prev = sf_vert;
738 sf_vert = sf_vert->next;
739 }
740
741 if (diff_feather_points) {
742 BLI_assert(tot_diff_feather_points == tot_diff_point);
743
744 /* NOTE: only added for convenience, we don't in fact use these to scan-fill,
745 * only to create feather faces after scan-fill. */
746 for (j = 0; j < tot_diff_feather_points; j++) {
747 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_feather_points[j], 1.0f);
748 sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
749 sf_vert_tot++;
750 }
751
752 tot_feather_quads += tot_diff_point;
753 }
754 }
755 else {
756 /* unfilled spline */
757 if (diff_feather_points) {
758
759 if (spline->flag & MASK_SPLINE_NOINTERSECT) {
760 diff_feather_points_flip = MEM_cnew_array<float[2]>(tot_diff_feather_points,
761 "diff_feather_points_flip");
762
763 float co_diff[2];
764 for (j = 0; j < tot_diff_point; j++) {
765 sub_v2_v2v2(co_diff, diff_points[j], diff_feather_points[j]);
766 add_v2_v2v2(diff_feather_points_flip[j], diff_points[j], co_diff);
767 }
768
770 spline, diff_feather_points, tot_diff_feather_points);
772 spline, diff_feather_points_flip, tot_diff_feather_points);
773 }
774 else {
775 diff_feather_points_flip = nullptr;
776 }
777
778 open_spline_ranges[open_spline_index].vertex_offset = sf_vert_tot;
779 open_spline_ranges[open_spline_index].vertex_total = tot_diff_point;
780
781 /* TODO: an alternate functions so we can avoid double vector copy! */
782 for (j = 0; j < tot_diff_point; j++) {
783
784 /* center vert */
785 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_points[j], 0.0f);
786 sf_vert->tmp.u = sf_vert_tot;
787 sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
788 sf_vert_tot++;
789
790 /* feather vert A */
791 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, diff_feather_points[j], 1.0f);
792 sf_vert->tmp.u = sf_vert_tot;
793 sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
794 sf_vert_tot++;
795
796 /* feather vert B */
797 if (diff_feather_points_flip) {
799 &sf_ctx, diff_feather_points_flip[j], 1.0f);
800 }
801 else {
802 float co_diff[2];
803 sub_v2_v2v2(co_diff, diff_points[j], diff_feather_points[j]);
804 add_v2_v2v2(co_diff, diff_points[j], co_diff);
805 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, co_diff, 1.0f);
806 }
807
808 sf_vert->tmp.u = sf_vert_tot;
809 sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
810 sf_vert_tot++;
811
812 tot_feather_quads += 2;
813 }
814
815 if (!is_cyclic) {
816 tot_feather_quads -= 2;
817 }
818
819 if (diff_feather_points_flip) {
820 MEM_freeN(diff_feather_points_flip);
821 diff_feather_points_flip = nullptr;
822 }
823
824 /* cap ends */
825
826 /* dummy init value */
827 open_spline_ranges[open_spline_index].vertex_total_cap_head = 0;
828 open_spline_ranges[open_spline_index].vertex_total_cap_tail = 0;
829
830 if (!is_cyclic) {
831 const float *fp_cent;
832 const float *fp_turn;
833
834 uint k;
835
836 fp_cent = diff_points[0];
837 fp_turn = diff_feather_points[0];
838
839#define CALC_CAP_RESOL \
840 clampis_uint(uint(len_v2v2(fp_cent, fp_turn) / (pixel_size * SPLINE_RESOL_CAP_PER_PIXEL)), \
841 SPLINE_RESOL_CAP_MIN, \
842 SPLINE_RESOL_CAP_MAX)
843
844 {
845 const uint vertex_total_cap = CALC_CAP_RESOL;
846
847 for (k = 1; k < vertex_total_cap; k++) {
848 const float angle = float(k) * (1.0f / float(vertex_total_cap)) * float(M_PI);
849 float co_feather[2];
850 rotate_point_v2(co_feather, fp_turn, fp_cent, angle, asp_xy);
851
852 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, co_feather, 1.0f);
853 sf_vert->tmp.u = sf_vert_tot;
854 sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
855 sf_vert_tot++;
856 }
857 tot_feather_quads += vertex_total_cap;
858
859 open_spline_ranges[open_spline_index].vertex_total_cap_head = vertex_total_cap;
860 }
861
862 fp_cent = diff_points[tot_diff_point - 1];
863 fp_turn = diff_feather_points[tot_diff_point - 1];
864
865 {
866 const uint vertex_total_cap = CALC_CAP_RESOL;
867
868 for (k = 1; k < vertex_total_cap; k++) {
869 const float angle = float(k) * (1.0f / float(vertex_total_cap)) * float(M_PI);
870 float co_feather[2];
871 rotate_point_v2(co_feather, fp_turn, fp_cent, -angle, asp_xy);
872
873 sf_vert = scanfill_vert_add_v2_with_depth(&sf_ctx, co_feather, 1.0f);
874 sf_vert->tmp.u = sf_vert_tot;
875 sf_vert->keyindex = SF_KEYINDEX_TEMP_ID;
876 sf_vert_tot++;
877 }
878 tot_feather_quads += vertex_total_cap;
879
880 open_spline_ranges[open_spline_index].vertex_total_cap_tail = vertex_total_cap;
881 }
882 }
883
884 open_spline_ranges[open_spline_index].is_cyclic = is_cyclic;
885 open_spline_index++;
886
887#undef CALC_CAP_RESOL
888 /* end capping */
889 }
890 }
891 }
892
893 if (diff_points) {
894 MEM_freeN(diff_points);
895 }
896
897 if (diff_feather_points) {
898 MEM_freeN(diff_feather_points);
899 }
900 }
901
902 {
903 uint(*face_array)[4], *face; /* access coords */
904 float(*face_coords)[3], *cos; /* xy, z 0-1 (1.0 == filled) */
905 uint sf_tri_tot;
906 rctf bounds;
907 uint face_index;
908 int scanfill_flag = 0;
909
910 bool is_isect = false;
911 ListBase isect_remvertbase = {nullptr, nullptr};
912 ListBase isect_remedgebase = {nullptr, nullptr};
913
914 /* now we have all the splines */
915 face_coords = MEM_cnew_array<float[3]>(sf_vert_tot, "maskrast_face_coords");
916
917 /* init bounds */
919
920 /* coords */
921 cos = (float *)face_coords;
922 for (sf_vert = static_cast<ScanFillVert *>(sf_ctx.fillvertbase.first); sf_vert;
923 sf_vert = sf_vert_next)
924 {
925 sf_vert_next = sf_vert->next;
926 copy_v3_v3(cos, sf_vert->co);
927
928 /* remove so as not to interfere with fill (called after) */
929 if (sf_vert->keyindex == SF_KEYINDEX_TEMP_ID) {
930 BLI_remlink(&sf_ctx.fillvertbase, sf_vert);
931 }
932
933 /* bounds */
935
936 cos += 3;
937 }
938
939 /* --- inefficient self-intersect case --- */
940 /* if self intersections are found, its too tricky to attempt to map vertices
941 * so just realloc and add entirely new vertices - the result of the self-intersect check.
942 */
943 if ((masklay->flag & MASK_LAYERFLAG_FILL_OVERLAP) &&
945 &sf_ctx, &isect_remvertbase, &isect_remedgebase)))
946 {
947 uint sf_vert_tot_isect = uint(BLI_listbase_count(&sf_ctx.fillvertbase));
948 uint i = sf_vert_tot;
949
950 face_coords = static_cast<float(*)[3]>(
951 MEM_reallocN(face_coords, sizeof(float[3]) * (sf_vert_tot + sf_vert_tot_isect)));
952
953 cos = (float *)&face_coords[sf_vert_tot][0];
954
955 for (sf_vert = static_cast<ScanFillVert *>(sf_ctx.fillvertbase.first); sf_vert;
956 sf_vert = sf_vert->next)
957 {
958 copy_v3_v3(cos, sf_vert->co);
959 sf_vert->tmp.u = i++;
960 cos += 3;
961 }
962
963 sf_vert_tot += sf_vert_tot_isect;
964
965 /* we need to calc polys after self intersect */
966 scanfill_flag |= BLI_SCANFILL_CALC_POLYS;
967 }
968 /* --- end inefficient code --- */
969
970 /* main scan-fill */
971 if ((masklay->flag & MASK_LAYERFLAG_FILL_DISCRETE) == 0) {
972 scanfill_flag |= BLI_SCANFILL_CALC_HOLES;
973 }
974
975 /* Store an array of edges from `sf_ctx.filledgebase`
976 * because filling may remove edges, see: #127692. */
977 ScanFillEdge **sf_edge_array = nullptr;
978 uint sf_edge_array_num = 0;
979 if (tot_feather_quads) {
980 ListBase *lb_array[] = {&sf_ctx.filledgebase, &isect_remedgebase};
981 for (int pass = 0; pass < 2; pass++) {
982 LISTBASE_FOREACH (ScanFillEdge *, sf_edge, lb_array[pass]) {
983 if (sf_edge->tmp.c == SF_EDGE_IS_BOUNDARY) {
984 sf_edge_array_num += 1;
985 }
986 }
987 }
988
989 if (sf_edge_array_num > 0) {
990 sf_edge_array = static_cast<ScanFillEdge **>(
991 MEM_mallocN(sizeof(ScanFillEdge **) * size_t(sf_edge_array_num), __func__));
992 uint edge_index = 0;
993 for (int pass = 0; pass < 2; pass++) {
994 LISTBASE_FOREACH (ScanFillEdge *, sf_edge, lb_array[pass]) {
995 if (sf_edge->tmp.c == SF_EDGE_IS_BOUNDARY) {
996 sf_edge_array[edge_index++] = sf_edge;
997 }
998 }
999 }
1000 BLI_assert(edge_index == sf_edge_array_num);
1001 }
1002 }
1003
1004 sf_tri_tot = uint(BLI_scanfill_calc_ex(&sf_ctx, scanfill_flag, zvec));
1005
1006 if (is_isect) {
1007 /* add removed data back, we only need edges for feather,
1008 * but add verts back so they get freed along with others */
1009 BLI_movelisttolist(&sf_ctx.fillvertbase, &isect_remvertbase);
1010 BLI_movelisttolist(&sf_ctx.filledgebase, &isect_remedgebase);
1011 }
1012
1013 face_array = static_cast<uint(*)[4]>(
1014 MEM_mallocN(sizeof(*face_array) * (size_t(sf_tri_tot) + size_t(tot_feather_quads)),
1015 "maskrast_face_index"));
1016 face_index = 0;
1017
1018 /* faces */
1019 face = (uint *)face_array;
1020 for (sf_tri = static_cast<ScanFillFace *>(sf_ctx.fillfacebase.first); sf_tri;
1021 sf_tri = sf_tri->next)
1022 {
1023 *(face++) = sf_tri->v3->tmp.u;
1024 *(face++) = sf_tri->v2->tmp.u;
1025 *(face++) = sf_tri->v1->tmp.u;
1026 *(face++) = TRI_VERT;
1027 face_index++;
1028 FACE_ASSERT(face - 4, sf_vert_tot);
1029 }
1030
1031 /* start of feather faces... if we have this set,
1032 * 'face_index' is kept from loop above */
1033
1034 BLI_assert(face_index == sf_tri_tot);
1035 UNUSED_VARS_NDEBUG(face_index);
1036
1037 if (sf_edge_array) {
1038 BLI_assert(tot_feather_quads);
1039 for (uint i = 0; i < sf_edge_array_num; i++) {
1040 ScanFillEdge *sf_edge = sf_edge_array[i];
1041 BLI_assert(sf_edge->tmp.c == SF_EDGE_IS_BOUNDARY);
1042 *(face++) = sf_edge->v1->tmp.u;
1043 *(face++) = sf_edge->v2->tmp.u;
1044 *(face++) = sf_edge->v2->keyindex;
1045 *(face++) = sf_edge->v1->keyindex;
1046 face_index++;
1047 FACE_ASSERT(face - 4, sf_vert_tot);
1048
1049#ifdef USE_SCANFILL_EDGE_WORKAROUND
1050 tot_boundary_found++;
1051#endif
1052 }
1053 MEM_freeN(sf_edge_array);
1054 }
1055
1056#ifdef USE_SCANFILL_EDGE_WORKAROUND
1057 if (tot_boundary_found != tot_boundary_used) {
1058 BLI_assert(tot_boundary_found < tot_boundary_used);
1059 }
1060#endif
1061
1062 /* feather only splines */
1063 while (open_spline_index > 0) {
1064 const uint vertex_offset = open_spline_ranges[--open_spline_index].vertex_offset;
1065 uint vertex_total = open_spline_ranges[open_spline_index].vertex_total;
1066 uint vertex_total_cap_head = open_spline_ranges[open_spline_index].vertex_total_cap_head;
1067 uint vertex_total_cap_tail = open_spline_ranges[open_spline_index].vertex_total_cap_tail;
1068 uint k, j;
1069
1070 j = vertex_offset;
1071
1072 /* subtract one since we reference next vertex triple */
1073 for (k = 0; k < vertex_total - 1; k++, j += 3) {
1074
1075 BLI_assert(j == vertex_offset + (k * 3));
1076
1077 *(face++) = j + 3; /* next span */ /* z 1 */
1078 *(face++) = j + 0; /* z 1 */
1079 *(face++) = j + 1; /* z 0 */
1080 *(face++) = j + 4; /* next span */ /* z 0 */
1081 face_index++;
1082 FACE_ASSERT(face - 4, sf_vert_tot);
1083
1084 *(face++) = j + 0; /* z 1 */
1085 *(face++) = j + 3; /* next span */ /* z 1 */
1086 *(face++) = j + 5; /* next span */ /* z 0 */
1087 *(face++) = j + 2; /* z 0 */
1088 face_index++;
1089 FACE_ASSERT(face - 4, sf_vert_tot);
1090 }
1091
1092 if (open_spline_ranges[open_spline_index].is_cyclic) {
1093 *(face++) = vertex_offset + 0; /* next span */ /* z 1 */
1094 *(face++) = j + 0; /* z 1 */
1095 *(face++) = j + 1; /* z 0 */
1096 *(face++) = vertex_offset + 1; /* next span */ /* z 0 */
1097 face_index++;
1098 FACE_ASSERT(face - 4, sf_vert_tot);
1099
1100 *(face++) = j + 0; /* z 1 */
1101 *(face++) = vertex_offset + 0; /* next span */ /* z 1 */
1102 *(face++) = vertex_offset + 2; /* next span */ /* z 0 */
1103 *(face++) = j + 2; /* z 0 */
1104 face_index++;
1105 FACE_ASSERT(face - 4, sf_vert_tot);
1106 }
1107 else {
1108 uint midvidx = vertex_offset;
1109
1110 /***************
1111 * cap end 'a' */
1112 j = midvidx + (vertex_total * 3);
1113
1114 for (k = 0; k < vertex_total_cap_head - 2; k++, j++) {
1115 *(face++) = midvidx + 0; /* z 1 */
1116 *(face++) = midvidx + 0; /* z 1 */
1117 *(face++) = j + 0; /* z 0 */
1118 *(face++) = j + 1; /* z 0 */
1119 face_index++;
1120 FACE_ASSERT(face - 4, sf_vert_tot);
1121 }
1122
1123 j = vertex_offset + (vertex_total * 3);
1124
1125 /* 2 tris that join the original */
1126 *(face++) = midvidx + 0; /* z 1 */
1127 *(face++) = midvidx + 0; /* z 1 */
1128 *(face++) = midvidx + 1; /* z 0 */
1129 *(face++) = j + 0; /* z 0 */
1130 face_index++;
1131 FACE_ASSERT(face - 4, sf_vert_tot);
1132
1133 *(face++) = midvidx + 0; /* z 1 */
1134 *(face++) = midvidx + 0; /* z 1 */
1135 *(face++) = j + vertex_total_cap_head - 2; /* z 0 */
1136 *(face++) = midvidx + 2; /* z 0 */
1137 face_index++;
1138 FACE_ASSERT(face - 4, sf_vert_tot);
1139
1140 /***************
1141 * cap end 'b' */
1142 /* ... same as previous but v 2-3 flipped, and different initial offsets */
1143
1144 j = vertex_offset + (vertex_total * 3) + (vertex_total_cap_head - 1);
1145
1146 midvidx = vertex_offset + (vertex_total * 3) - 3;
1147
1148 for (k = 0; k < vertex_total_cap_tail - 2; k++, j++) {
1149 *(face++) = midvidx; /* z 1 */
1150 *(face++) = midvidx; /* z 1 */
1151 *(face++) = j + 1; /* z 0 */
1152 *(face++) = j + 0; /* z 0 */
1153 face_index++;
1154 FACE_ASSERT(face - 4, sf_vert_tot);
1155 }
1156
1157 j = vertex_offset + (vertex_total * 3) + (vertex_total_cap_head - 1);
1158
1159 /* 2 tris that join the original */
1160 *(face++) = midvidx + 0; /* z 1 */
1161 *(face++) = midvidx + 0; /* z 1 */
1162 *(face++) = j + 0; /* z 0 */
1163 *(face++) = midvidx + 1; /* z 0 */
1164 face_index++;
1165 FACE_ASSERT(face - 4, sf_vert_tot);
1166
1167 *(face++) = midvidx + 0; /* z 1 */
1168 *(face++) = midvidx + 0; /* z 1 */
1169 *(face++) = midvidx + 2; /* z 0 */
1170 *(face++) = j + vertex_total_cap_tail - 2; /* z 0 */
1171 face_index++;
1172 FACE_ASSERT(face - 4, sf_vert_tot);
1173 }
1174 }
1175
1176 MEM_freeN(open_spline_ranges);
1177
1178#if 0
1179 fprintf(stderr,
1180 "%u %u (%u %u), %u\n",
1181 face_index,
1182 sf_tri_tot + tot_feather_quads,
1183 sf_tri_tot,
1184 tot_feather_quads,
1185 tot_boundary_used - tot_boundary_found);
1186#endif
1187
1188#ifdef USE_SCANFILL_EDGE_WORKAROUND
1189 BLI_assert(face_index + (tot_boundary_used - tot_boundary_found) ==
1190 sf_tri_tot + tot_feather_quads);
1191#else
1192 BLI_assert(face_index == sf_tri_tot + tot_feather_quads);
1193#endif
1194 {
1195 MaskRasterLayer *layer = &mr_handle->layers[masklay_index];
1196
1197 if (BLI_rctf_isect(&default_bounds, &bounds, &bounds)) {
1198#ifdef USE_SCANFILL_EDGE_WORKAROUND
1199 layer->face_tot = (sf_tri_tot + tot_feather_quads) -
1200 (tot_boundary_used - tot_boundary_found);
1201#else
1202 layer->face_tot = (sf_tri_tot + tot_feather_quads);
1203#endif
1204 layer->face_coords = face_coords;
1205 layer->face_array = face_array;
1206 layer->bounds = bounds;
1207
1208 layer_bucket_init(layer, pixel_size);
1209
1210 BLI_rctf_union(&mr_handle->bounds, &bounds);
1211 }
1212 else {
1213 MEM_freeN(face_coords);
1214 MEM_freeN(face_array);
1215
1217 }
1218
1219 /* copy as-is */
1220 layer->alpha = masklay->alpha;
1221 layer->blend = masklay->blend;
1222 layer->blend_flag = masklay->blend_flag;
1223 layer->falloff = masklay->falloff;
1224 }
1225
1226 // printf("tris %d, feather tris %d\n", sf_tri_tot, tot_feather_quads);
1227 }
1228
1229 /* Add triangles. */
1230 BLI_scanfill_end_arena(&sf_ctx, sf_arena);
1231 }
1232
1233 BLI_memarena_free(sf_arena);
1234}
1235
1236/* --------------------------------------------------------------------- */
1237/* functions that run inside the sampling thread (keep fast!) */
1238/* --------------------------------------------------------------------- */
1239
1240/* 2D ray test */
#if 0
/**
 * Interpolate the Z depth of `pt` inside triangle (v1, v2, v3) using
 * barycentric weights.
 *
 * Currently disabled: the active code path treats all triangles as zero
 * depth (see maskrasterize_layer_isect), so this exact-depth variant is
 * kept only for the disabled alternatives there.
 */
static float maskrasterize_layer_z_depth_tri(const float pt[2],
                                             const float v1[3],
                                             const float v2[3],
                                             const float v3[3])
{
  float w[3];
  barycentric_weights_v2(v1, v2, v3, pt, w);
  return (v1[2] * w[0]) + (v2[2] * w[1]) + (v3[2] * w[2]);
}
#endif
1252
1254 const float pt[2], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
1255{
1256 float w[4];
1257 barycentric_weights_v2_quad(v1, v2, v3, v4, pt, w);
1258 // return (v1[2] * w[0]) + (v2[2] * w[1]) + (v3[2] * w[2]) + (v4[2] * w[3]);
1259 return w[2] + w[3]; /* we can make this assumption for small speedup */
1260}
1261
1262static float maskrasterize_layer_isect(const uint *face,
1263 float (*cos)[3],
1264 const float dist_orig,
1265 const float xy[2])
1266{
1267 /* we always cast from same place only need xy */
1268 if (face[3] == TRI_VERT) {
1269 /* --- tri --- */
1270
1271#if 0
1272 /* not essential but avoids unneeded extra lookups */
1273 if ((cos[0][2] < dist_orig) || (cos[1][2] < dist_orig) || (cos[2][2] < dist_orig)) {
1274 if (isect_point_tri_v2_cw(xy, cos[face[0]], cos[face[1]], cos[face[2]])) {
1275 /* we know all tris are close for now */
1276 return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[1]], cos[face[2]]);
1277 }
1278 }
1279#else
1280 /* we know all tris are close for now */
1281 if (isect_point_tri_v2_cw(xy, cos[face[0]], cos[face[1]], cos[face[2]])) {
1282 return 0.0f;
1283 }
1284#endif
1285 }
1286 else {
1287 /* --- quad --- */
1288
1289 /* not essential but avoids unneeded extra lookups */
1290 if ((cos[0][2] < dist_orig) || (cos[1][2] < dist_orig) || (cos[2][2] < dist_orig) ||
1291 (cos[3][2] < dist_orig))
1292 {
1293
1294 /* needs work */
1295#if 1
1296 /* quad check fails for bow-tie, so keep using 2 tri checks */
1297 // if (isect_point_quad_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]], cos[face[3]]))
1298 if (isect_point_tri_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]]) ||
1299 isect_point_tri_v2(xy, cos[face[0]], cos[face[2]], cos[face[3]]))
1300 {
1302 xy, cos[face[0]], cos[face[1]], cos[face[2]], cos[face[3]]);
1303 }
1304#elif 1
1305 /* don't use isect_point_tri_v2_cw because we could have bow-tie quads */
1306
1307 if (isect_point_tri_v2(xy, cos[face[0]], cos[face[1]], cos[face[2]])) {
1308 return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[1]], cos[face[2]]);
1309 }
1310 else if (isect_point_tri_v2(xy, cos[face[0]], cos[face[2]], cos[face[3]])) {
1311 return maskrasterize_layer_z_depth_tri(xy, cos[face[0]], cos[face[2]], cos[face[3]]);
1312 }
1313#else
1314 /* cheat - we know first 2 verts are z0.0f and second 2 are z 1.0f */
1315 /* ... worth looking into */
1316#endif
1317 }
1318 }
1319
1320 return 1.0f;
1321}
1322
1324{
1325 BLI_assert(BLI_rctf_isect_pt_v(&layer->bounds, xy));
1326
1327 return uint((xy[0] - layer->bounds.xmin) * layer->buckets_xy_scalar[0]) +
1328 (uint((xy[1] - layer->bounds.ymin) * layer->buckets_xy_scalar[1]) * layer->buckets_x);
1329}
1330
1331static float layer_bucket_depth_from_xy(MaskRasterLayer *layer, const float xy[2])
1332{
1333 uint index = layer_bucket_index_from_xy(layer, xy);
1334 uint *face_index = layer->buckets_face[index];
1335
1336 if (face_index) {
1337 uint(*face_array)[4] = layer->face_array;
1338 float(*cos)[3] = layer->face_coords;
1339 float best_dist = 1.0f;
1340 while (*face_index != TRI_TERMINATOR_ID) {
1341 const float test_dist = maskrasterize_layer_isect(
1342 face_array[*face_index], cos, best_dist, xy);
1343 if (test_dist < best_dist) {
1344 best_dist = test_dist;
1345 /* comparing with 0.0f is OK here because triangles are always zero depth */
1346 if (best_dist == 0.0f) {
1347 /* bail early, we're as close as possible */
1348 return 0.0f;
1349 }
1350 }
1351 face_index++;
1352 }
1353 return best_dist;
1354 }
1355
1356 return 1.0f;
1357}
1358
1359float BKE_maskrasterize_handle_sample(MaskRasterHandle *mr_handle, const float xy[2])
1360{
1361 /* can't do this because some layers may invert */
1362 /* if (BLI_rctf_isect_pt_v(&mr_handle->bounds, xy)) */
1363
1364 const uint layers_tot = mr_handle->layers_tot;
1365 MaskRasterLayer *layer = mr_handle->layers;
1366
1367 /* return value */
1368 float value = 0.0f;
1369
1370 for (uint i = 0; i < layers_tot; i++, layer++) {
1371 float value_layer;
1372
1373 /* also used as signal for unused layer (when render is disabled) */
1374 if (layer->alpha != 0.0f && BLI_rctf_isect_pt_v(&layer->bounds, xy)) {
1375 value_layer = 1.0f - layer_bucket_depth_from_xy(layer, xy);
1376
1377 switch (layer->falloff) {
1378 case PROP_SMOOTH:
1379 /* ease - gives less hard lines for dilate/erode feather */
1380 value_layer = (3.0f * value_layer * value_layer -
1381 2.0f * value_layer * value_layer * value_layer);
1382 break;
1383 case PROP_SPHERE:
1384 value_layer = sqrtf(2.0f * value_layer - value_layer * value_layer);
1385 break;
1386 case PROP_ROOT:
1387 value_layer = sqrtf(value_layer);
1388 break;
1389 case PROP_SHARP:
1390 value_layer = value_layer * value_layer;
1391 break;
1392 case PROP_INVSQUARE:
1393 value_layer = value_layer * (2.0f - value_layer);
1394 break;
1395 case PROP_LIN:
1396 default:
1397 /* nothing */
1398 break;
1399 }
1400
1401 if (layer->blend != MASK_BLEND_REPLACE) {
1402 value_layer *= layer->alpha;
1403 }
1404 }
1405 else {
1406 value_layer = 0.0f;
1407 }
1408
1409 if (layer->blend_flag & MASK_BLENDFLAG_INVERT) {
1410 value_layer = 1.0f - value_layer;
1411 }
1412
1413 switch (layer->blend) {
1415 value += value_layer * (1.0f - value);
1416 break;
1418 value -= value_layer * value;
1419 break;
1420 case MASK_BLEND_ADD:
1421 value += value_layer;
1422 break;
1424 value -= value_layer;
1425 break;
1426 case MASK_BLEND_LIGHTEN:
1427 value = max_ff(value, value_layer);
1428 break;
1429 case MASK_BLEND_DARKEN:
1430 value = min_ff(value, value_layer);
1431 break;
1432 case MASK_BLEND_MUL:
1433 value *= value_layer;
1434 break;
1435 case MASK_BLEND_REPLACE:
1436 value = (value * (1.0f - layer->alpha)) + (value_layer * layer->alpha);
1437 break;
1439 value = fabsf(value - value_layer);
1440 break;
1441 default: /* same as add */
1442 CLOG_ERROR(&LOG, "unhandled blend type: %d", layer->blend);
1443 BLI_assert(0);
1444 value += value_layer;
1445 break;
1446 }
1447
1448 /* clamp after applying each layer so we don't get
1449 * issues subtracting after accumulating over 1.0f */
1450 CLAMP(value, 0.0f, 1.0f);
1451 }
1452
1453 return value;
1454}
1455
1464
1465static void maskrasterize_buffer_cb(void *__restrict userdata,
1466 const int y,
1467 const TaskParallelTLS *__restrict /*tls*/)
1468{
1469 MaskRasterizeBufferData *data = static_cast<MaskRasterizeBufferData *>(userdata);
1470
1471 MaskRasterHandle *mr_handle = data->mr_handle;
1472 float *buffer = data->buffer;
1473
1474 const uint width = data->width;
1475 const float x_inv = data->x_inv;
1476 const float x_px_ofs = data->x_px_ofs;
1477
1478 uint i = uint(y) * width;
1479 float xy[2];
1480 xy[1] = (float(y) * data->y_inv) + data->y_px_ofs;
1481 for (uint x = 0; x < width; x++, i++) {
1482 xy[0] = (float(x) * x_inv) + x_px_ofs;
1483
1484 buffer[i] = BKE_maskrasterize_handle_sample(mr_handle, xy);
1485 }
1486}
1487
1489 const uint width,
1490 const uint height,
1491 /* Cannot be const, because it is assigned to non-const variable.
1492 * NOLINTNEXTLINE: readability-non-const-parameter. */
1493 float *buffer)
1494{
1495 const float x_inv = 1.0f / float(width);
1496 const float y_inv = 1.0f / float(height);
1497
1499 data.mr_handle = mr_handle;
1500 data.x_inv = x_inv;
1501 data.y_inv = y_inv;
1502 data.x_px_ofs = x_inv * 0.5f;
1503 data.y_px_ofs = y_inv * 0.5f;
1504 data.width = width;
1505 data.buffer = buffer;
1506 TaskParallelSettings settings;
1508 settings.use_threading = (size_t(height) * width > 10000);
1509 BLI_task_parallel_range(0, int(height), &data, maskrasterize_buffer_cb, &settings);
1510}
int BKE_mask_spline_resolution(struct MaskSpline *spline, int width, int height)
float(* BKE_mask_spline_differentiate_with_resolution(struct MaskSpline *spline, unsigned int resol, unsigned int *r_tot_diff_point))[2]
unsigned int BKE_mask_spline_feather_resolution(struct MaskSpline *spline, int width, int height)
void BKE_mask_spline_feather_collapse_inner_loops(struct MaskSpline *spline, float(*feather_points)[2], unsigned int tot_feather_point)
float(* BKE_mask_spline_feather_differentiated_points_with_resolution(struct MaskSpline *spline, unsigned int resol, bool do_feather_isect, unsigned int *r_tot_feather_point))[2]
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_INLINE
#define LISTBASE_FOREACH(type, var, list)
void void void BLI_movelisttolist(struct ListBase *dst, struct ListBase *src) ATTR_NONNULL(1
void BLI_remlink(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:130
int BLI_listbase_count(const struct ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
MINLINE float max_ff(float a, float b)
MINLINE int min_ii(int a, int b)
#define M_SQRT2
MINLINE float min_ff(float a, float b)
#define M_PI
void barycentric_weights_v2(const float v1[2], const float v2[2], const float v3[2], const float co[2], float w[3])
bool isect_point_tri_v2_cw(const float pt[2], const float v1[2], const float v2[2], const float v3[2])
void barycentric_weights_v2_quad(const float v1[2], const float v2[2], const float v3[2], const float v4[2], const float co[2], float w[4])
int isect_point_tri_v2(const float pt[2], const float v1[2], const float v2[2], const float v3[2])
float dist_squared_to_line_segment_v2(const float p[2], const float l1[2], const float l2[2])
Definition math_geom.cc:289
MINLINE float len_squared_v2v2(const float a[2], const float b[2]) ATTR_WARN_UNUSED_RESULT
MINLINE void copy_v2_v2(float r[2], const float a[2])
MINLINE void copy_v3_v3(float r[3], const float a[3])
MINLINE void add_v2_v2v2(float r[2], const float a[2], const float b[2])
MINLINE void sub_v2_v2v2(float r[2], const float a[2], const float b[2])
MINLINE float normalize_v2(float n[2])
void BLI_memarena_free(struct MemArena *ma) ATTR_NONNULL(1)
struct MemArena * BLI_memarena_new(size_t bufsize, const char *name) ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL ATTR_NONNULL(2) ATTR_MALLOC
void BLI_rctf_union(struct rctf *rct_a, const struct rctf *rct_b)
bool BLI_rctf_isect_pt_v(const struct rctf *rect, const float xy[2])
bool BLI_rctf_isect(const struct rctf *src1, const struct rctf *src2, struct rctf *dest)
void BLI_rctf_init(struct rctf *rect, float xmin, float xmax, float ymin, float ymax)
Definition rct.c:408
BLI_INLINE float BLI_rctf_size_x(const struct rctf *rct)
Definition BLI_rect.h:197
void BLI_rctf_do_minmax_v(struct rctf *rect, const float xy[2])
Definition rct.c:514
BLI_INLINE float BLI_rctf_size_y(const struct rctf *rct)
Definition BLI_rect.h:201
void BLI_rctf_init_minmax(struct rctf *rect)
Definition rct.c:484
@ BLI_SCANFILL_CALC_POLYS
@ BLI_SCANFILL_CALC_HOLES
struct ScanFillVert * BLI_scanfill_vert_add(ScanFillContext *sf_ctx, const float vec[3])
Definition scanfill.c:95
struct ScanFillEdge * BLI_scanfill_edge_add(ScanFillContext *sf_ctx, struct ScanFillVert *v1, struct ScanFillVert *v2)
Definition scanfill.c:117
void BLI_scanfill_begin_arena(ScanFillContext *sf_ctx, struct MemArena *arena)
Definition scanfill.c:798
unsigned int BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, int flag, const float nor_proj[3])
Definition scanfill.c:825
#define BLI_SCANFILL_ARENA_SIZE
void BLI_scanfill_end_arena(ScanFillContext *sf_ctx, struct MemArena *arena)
Definition scanfill.c:815
bool BLI_scanfill_calc_self_isect(ScanFillContext *sf_ctx, ListBase *remvertbase, ListBase *remedgebase)
unsigned int uint
void BLI_task_parallel_range(int start, int stop, void *userdata, TaskParallelRangeFunc func, const TaskParallelSettings *settings)
Definition task_range.cc:99
BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *settings)
Definition BLI_task.h:230
#define CLAMP(a, b, c)
#define POINTER_AS_UINT(i)
#define UNUSED_VARS_NDEBUG(...)
#define POINTER_FROM_UINT(i)
#define CLOG_ERROR(clg_ref,...)
Definition CLG_log.h:182
@ MASK_BLEND_ADD
@ MASK_BLEND_REPLACE
@ MASK_BLEND_DARKEN
@ MASK_BLEND_DIFFERENCE
@ MASK_BLEND_LIGHTEN
@ MASK_BLEND_MERGE_ADD
@ MASK_BLEND_SUBTRACT
@ MASK_BLEND_MUL
@ MASK_BLEND_MERGE_SUBTRACT
@ MASK_SPLINE_CYCLIC
@ MASK_SPLINE_NOINTERSECT
@ MASK_SPLINE_NOFILL
@ MASK_HIDE_RENDER
@ MASK_BLENDFLAG_INVERT
@ MASK_LAYERFLAG_FILL_OVERLAP
@ MASK_LAYERFLAG_FILL_DISCRETE
@ PROP_SMOOTH
@ PROP_ROOT
@ PROP_SHARP
@ PROP_LIN
@ PROP_INVSQUARE
@ PROP_SPHERE
Read Guarded memory(de)allocation.
#define MEM_SIZE_OPTIMAL(size)
#define MEM_reallocN(vmemh, len)
ATTR_WARN_UNUSED_RESULT const BMVert * v2
ATTR_WARN_UNUSED_RESULT const BMVert * v
static btDbvtVolume bounds(btDbvtNode **leaves, int count)
Definition btDbvt.cpp:299
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition btQuadWord.h:119
#define sinf(x)
#define cosf(x)
#define fabsf(x)
#define sqrtf(x)
static bool is_cyclic(const Nurb *nu)
draw_view in_light_buf[] float
void *(* MEM_mallocN)(size_t len, const char *str)
Definition mallocn.cc:44
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
#define SF_EDGE_IS_BOUNDARY
void BKE_maskrasterize_handle_free(MaskRasterHandle *mr_handle)
static void maskrasterize_buffer_cb(void *__restrict userdata, const int y, const TaskParallelTLS *__restrict)
void BKE_maskrasterize_handle_init(MaskRasterHandle *mr_handle, Mask *mask, const int width, const int height, const bool do_aspect_correct, const bool do_mask_aa, const bool do_feather)
static void layer_bucket_init(MaskRasterLayer *layer, const float pixel_size)
float BKE_maskrasterize_handle_sample(MaskRasterHandle *mr_handle, const float xy[2])
#define FACE_ASSERT(face, vert_max)
#define TRI_VERT
static bool layer_bucket_isect_test(const MaskRasterLayer *layer, uint face_index, const uint bucket_x, const uint bucket_y, const float bucket_size_x, const float bucket_size_y, const float bucket_max_rad_squared)
MaskRasterHandle * BKE_maskrasterize_handle_new()
#define SF_KEYINDEX_TEMP_ID
static void rotate_point_v2(float r_p[2], const float p[2], const float cent[2], const float angle, const float asp[2])
static float maskrasterize_layer_z_depth_quad(const float pt[2], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
static float layer_bucket_depth_from_xy(MaskRasterLayer *layer, const float xy[2])
#define TRI_TERMINATOR_ID
#define BUCKET_PIXELS_PER_CELL
static float maskrasterize_layer_isect(const uint *face, float(*cos)[3], const float dist_orig, const float xy[2])
static CLG_LogRef LOG
static void layer_bucket_init_dummy(MaskRasterLayer *layer)
#define CALC_CAP_RESOL
static ScanFillVert * scanfill_vert_add_v2_with_depth(ScanFillContext *sf_ctx, const float co_xy[2], const float co_z)
void BKE_maskrasterize_buffer(MaskRasterHandle *mr_handle, const uint width, const uint height, float *buffer)
Rasterize a buffer from a single mask (threaded execution).
static void maskrasterize_spline_differentiate_point_outset(float(*diff_feather_points)[2], float(*diff_points)[2], const uint tot_diff_point, const float ofs, const bool do_test)
BLI_INLINE uint clampis_uint(const uint v, const uint min, const uint max)
BLI_INLINE uint layer_bucket_index_from_xy(MaskRasterLayer *layer, const float xy[2])
ccl_device_inline float3 cos(float3 v)
#define min(a, b)
Definition sort.c:32
void * link
struct LinkNode * next
void * last
void * first
struct MaskLayer * next
char visibility_flag
ListBase splines
MaskRasterLayer * layers
uint(* face_array)[4]
float buckets_xy_scalar[2]
float(* face_coords)[3]
MaskRasterHandle * mr_handle
ListBase fillvertbase
ListBase filledgebase
unsigned short poly_nr
ListBase fillfacebase
union ScanFillEdge::@115 tmp
struct ScanFillVert * v1
struct ScanFillVert * v2
unsigned char c
struct ScanFillFace * next
struct ScanFillVert * v2
struct ScanFillVert * v3
struct ScanFillVert * v1
struct ScanFillVert * next
float co[3]
unsigned int u
union ScanFillVert::@114 tmp
unsigned int keyindex
float max
int xy[2]
Definition wm_draw.cc:170