Blender v5.0 — generated source listing of `node_geo_scale_elements.cc` (Geometry Nodes: Scale Elements node).
This page is an extraction of the file; consult the Blender repository for the authoritative source.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
5#include "atomic_ops.h"
6
7#include "BLI_array.hh"
8#include "BLI_array_utils.hh"
10#include "BLI_math_matrix.hh"
11#include "BLI_sort.hh"
12#include "BLI_task.hh"
13#include "BLI_virtual_array.hh"
14
15#include "DNA_mesh_types.h"
16
18#include "UI_resources.hh"
19
21#include "GEO_mesh_selection.hh"
22
23#include "NOD_rna_define.hh"
24
25#include "node_geometry_util.hh"
26
/* Menu items for the "Scale Mode" socket. NOTE(review): the array declaration line
 * (presumably `static const EnumPropertyItem scale_mode_items[] = {`) and the leading
 * enum-value field of each item were dropped by this extraction — confirm against the
 * original file. */
28
31 "UNIFORM",
32 ICON_NONE,
33 N_("Uniform"),
34 N_("Scale elements by the same factor in every direction")},
36 "SINGLE_AXIS",
37 ICON_NONE,
38 N_("Single Axis"),
39 N_("Scale elements in a single direction")},
40 {0, nullptr, 0, nullptr, nullptr},
41};
42
/* Socket declaration for the Scale Elements node. NOTE(review): the signature line
 * (`static void node_declare(NodeDeclarationBuilder &b)` per the symbol index) was
 * dropped by this extraction. */
44{
45 b.use_custom_socket_order();
46 b.allow_any_socket_order();
47 b.add_default_layout();
 /* Only mesh geometry is supported by this node. */
48 b.add_input<decl::Geometry>("Geometry")
49 .supported_type(GeometryComponent::Type::Mesh)
50 .description("Geometry to scale elements of");
51 b.add_output<decl::Geometry>("Geometry").propagate_all().align_with_previous();
52 b.add_input<decl::Bool>("Selection").default_value(true).hide_value().field_on_all();
53
54 b.add_input<decl::Float>("Scale", "Scale").default_value(1.0f).min(0.0f).field_on_all();
 /* Center defaults to the implicit position field. */
55 b.add_input<decl::Vector>("Center")
56 .subtype(PROP_TRANSLATION)
57 .implicit_field_on_all(NODE_DEFAULT_INPUT_POSITION_FIELD)
 /* NOTE(review): the `.description(` opening line (line 58) is missing from this extraction. */
59 "Origin of the scaling for each element. If multiple elements are connected, their "
60 "center is averaged");
61 b.add_input<decl::Menu>("Scale Mode")
62 .static_items(scale_mode_items)
 /* NOTE(review): one chained call (line 63, likely a default value) is missing here. */
64 .optional_label();
 /* The axis input is only shown/used in single-axis mode. */
65 b.add_input<decl::Vector>("Axis")
66 .default_value({1.0f, 0.0f, 0.0f})
67 .field_on_all()
68 .description("Direction in which to scale the element")
69 .usage_by_single_menu(GEO_NODE_SCALE_ELEMENTS_SINGLE_AXIS);
70};
71
/* Draw the node's buttons: a single selector for the element domain (Face/Edge). */
72static void node_layout(uiLayout *layout, bContext * /*C*/, PointerRNA *ptr)
73{
74 layout->prop(ptr, "domain", UI_ITEM_NONE, "", ICON_NONE);
75}
76
/* Initialize a freshly added node: default to the face domain.
 * `custom1` stores the #AttrDomain value. */
77static void node_init(bNodeTree * /*tree*/, bNode *node)
78{
79 node->custom1 = int16_t(AttrDomain::Face);
80}
81
/* Build per-group offsets (size `items_num + 1`) from the group index of every element.
 * NOTE(review): the line that actually accumulates counts into `offsets` (line 85,
 * presumably a call to `offset_indices::build_reverse_offsets(indices, offsets)` per the
 * symbol index) is missing from this extraction — confirm against the original file. */
82static Array<int> create_reverse_offsets(const Span<int> indices, const int items_num)
83{
84 Array<int> offsets(items_num + 1, 0);
86 return offsets;
87}
88
/* Return the leading run of `indices` that all map to the same value in `values`.
 * NOTE(review): the signature line is missing from this extraction; the symbol index
 * gives `static Span<int> front_indices_to_same_value(Span<int> indices, Span<int> values)`. */
90{
91 const int value = values[indices.first()];
 /* NOTE(review): if every index maps to `value`, `std::find_if` returns `end()` and
  * dereferencing it here is technically undefined behavior, although the subsequent
  * pointer arithmetic yields the full span in practice — worth confirming upstream. */
92 const int &first_other = *std::find_if(
93 indices.begin(), indices.end(), [&](const int index) { return values[index] != value; });
94 return indices.take_front(&first_other - indices.begin());
95}
96
/* Fill `r_indices` with all element indices grouped by their group id.
 * `r_counts_to_offset` is used first as an atomic counter array and then converted to
 * the group offsets. The input is processed in fixed-size segments that are sorted
 * independently in parallel, so runs of equal group ids can be copied in bulk. */
97static void from_indices_large_groups(const Span<int> group_indices,
98 MutableSpan<int> r_counts_to_offset,
99 MutableSpan<int> r_indices)
100{
101 constexpr const int segment_size = 1024;
102 constexpr const IndexRange segment(segment_size);
 /* TODO(review): variable name typo — should read `last_small_segment`. */
103 const bool last_small_segmet = bool(group_indices.size() % segment_size);
104 const int total_segments = group_indices.size() / segment_size + int(last_small_segmet);
105
 /* First pass: sort each segment by group id and atomically count group sizes. */
106 Array<int> src_indices(group_indices.size());
107 threading::parallel_for_each(IndexRange(total_segments), [&](const int segment_index) {
108 const IndexRange range = segment.shift(segment_size * segment_index);
109 MutableSpan<int> segment_indices = src_indices.as_mutable_span().slice_safe(range);
110 std::iota(segment_indices.begin(), segment_indices.end(), segment_size * segment_index);
111 parallel_sort(segment_indices.begin(), segment_indices.end(), [&](const int a, const int b) {
112 return group_indices[a] < group_indices[b];
113 });
114
 /* Walk the sorted segment run-by-run, adding each run's length to its group's count. */
115 for (Span<int> indices = segment_indices; !indices.is_empty();) {
116 const int group = group_indices[indices.first()];
117 const int step_size = front_indices_to_same_value(indices, group_indices).size();
118 atomic_add_and_fetch_int32(&r_counts_to_offset[group], step_size);
119 indices = indices.drop_front(step_size);
120 }
121 });
122
 /* NOTE(review): the statement-opening line (line 123, presumably
  * `const OffsetIndices<int> offset = offsets::accumulate_counts_to_offsets(`) is
  * missing from this extraction — confirm against the original file. */
124 r_counts_to_offset);
 /* Second pass: copy every sorted run into its group's slot of `r_indices`, using
  * `counts` to atomically reserve a destination range within each group. */
125 Array<int> counts(offset.size(), 0);
126 threading::parallel_for_each(IndexRange(total_segments), [&](const int segment_index) {
127 const IndexRange range = segment.shift(segment_size * segment_index);
128 const Span<int> segment_indices = src_indices.as_span().slice_safe(range);
129 for (Span<int> indices = segment_indices; !indices.is_empty();) {
130 const Span<int> indices_of_current_group = front_indices_to_same_value(indices,
131 group_indices);
132 const int step_size = indices_of_current_group.size();
133 const int group = group_indices[indices.first()];
134 const int start = atomic_add_and_fetch_int32(&counts[group], step_size) - step_size;
135 const IndexRange dst_range = offset[group].slice(start, step_size);
136 array_utils::copy(indices_of_current_group, r_indices.slice(dst_range));
137 indices = indices.drop_front(step_size);
138 }
139 });
140}
141
/* For every group described by `offsets`, gather the indices of all elements that
 * belong to it (the reverse mapping of `group_indices`).
 * NOTE(review): the signature line is missing from this extraction; the symbol index
 * gives `static Array<int> reverse_indices_in_groups(Span<int> group_indices,
 * OffsetIndices<int> offsets)`. */
143 const OffsetIndices<int> offsets)
144{
145 if (group_indices.is_empty()) {
146 return {};
147 }
 /* All group indices must be valid positions in `offsets`. */
148 BLI_assert(*std::max_element(group_indices.begin(), group_indices.end()) < offsets.size());
149 BLI_assert(*std::min_element(group_indices.begin(), group_indices.end()) >= 0);
150
151 /* `counts` keeps track of how many elements have been added to each group, and is incremented
152 * atomically by many threads in parallel. `calloc` can be measurably faster than a parallel fill
153 * of zero. Alternatively the offsets could be copied and incremented directly, but the cost of
154 * the copy is slightly higher than the cost of `calloc`. */
155 int *counts = MEM_calloc_arrayN<int>(offsets.size(), __func__);
156 BLI_SCOPED_DEFER([&]() { MEM_freeN(counts); })
157 Array<int> results(group_indices.size());
158 threading::parallel_for(group_indices.index_range(), 1024, [&](const IndexRange range) {
159 for (const int64_t i : range) {
160 const int group_index = group_indices[i];
 /* Reserve the next free slot in this group's output range. */
161 const int index_in_group = atomic_fetch_and_add_int32(&counts[group_index], 1);
162 results[offsets[group_index][index_in_group]] = int(i);
163 }
164 });
165 return results;
166}
167
168static GroupedSpan<int> gather_groups(const Span<int> group_indices,
169 const int groups_num,
170 Array<int> &r_offsets,
171 Array<int> &r_indices)
172{
173 if (group_indices.size() / groups_num > 1000) {
174 r_offsets.reinitialize(groups_num + 1);
175 r_offsets.as_mutable_span().fill(0);
176 r_indices.reinitialize(group_indices.size());
177 from_indices_large_groups(group_indices, r_offsets, r_indices);
178 }
179 else {
180 r_offsets = create_reverse_offsets(group_indices, groups_num);
181 r_indices = reverse_indices_in_groups(group_indices, r_offsets.as_span());
182 }
183 return {OffsetIndices<int>(r_offsets), r_indices};
184}
185
/* Compute the arithmetic mean of `values` at the given `indices`. */
186template<typename T> static T gather_mean(const VArray<T> &values, const Span<int> indices)
187{
188 BLI_assert(!indices.is_empty());
 /* A single-value virtual array has the same mean everywhere. */
189 if (const std::optional<T> value = values.get_if_single()) {
190 return *value;
191 }
192
 /* Accumulator pairs a partial value with the element count it represents. */
193 using MeanAccumulator = std::pair<T, int>;
194 const auto join_accumulators = [](const MeanAccumulator a,
195 const MeanAccumulator b) -> MeanAccumulator {
 /* NOTE(review): joining divides eagerly and resets the count to 1; correctness
  * depends on how the deterministic reduction combines partial results — confirm. */
196 return {(a.first + b.first) / (a.second + b.second), 1};
197 };
198
199 T value;
200 devirtualize_varray(values, [&](const auto values) {
 /* NOTE(review): the reduction's opening line (line 201, presumably
  * `const auto accumulator = threading::parallel_deterministic_reduce(`) is missing
  * from this extraction — confirm against the original file. */
202 indices.index_range(),
203 2048,
204 MeanAccumulator(T(), 0),
205 [&](const IndexRange range, MeanAccumulator other) -> MeanAccumulator {
206 T value(0);
207 for (const int i : indices.slice(range)) {
208 value += values[i];
209 }
210 return join_accumulators({value, int(range.size())}, other);
211 },
212 join_accumulators);
213 value = accumulator.first / accumulator.second;
214 });
215 return value;
216}
217
/* Scale `position` about `center` by a uniform factor.
 * NOTE(review): the signature line is missing from this extraction; the symbol index
 * gives `static float3 transform_with_uniform_scale(const float3 &position,
 * const float3 &center, const float scale)`. */
219 const float3 &center,
220 const float scale)
221{
222 const float3 diff = position - center;
223 const float3 scaled_diff = scale * diff;
224 const float3 new_position = center + scaled_diff;
225 return new_position;
226}
227
/* Uniformly scale every island: average the scale and center fields over the island's
 * elements, then scale all of the island's vertices about that averaged center. */
228static void scale_uniformly(const GroupedSpan<int> elem_islands,
229 const GroupedSpan<int> vert_islands,
230 const VArray<float> &scale_varray,
231 const VArray<float3> &center_varray,
232 Mesh &mesh)
233{
234 MutableSpan<float3> positions = mesh.vert_positions_for_write();
 /* NOTE(review): the loop's opening line (line 235, presumably
  * `threading::parallel_for(`) is missing from this extraction. */
236 elem_islands.index_range(),
237 512,
238 [&](const IndexRange range) {
239 for (const int island_index : range) {
240 const Span<int> vert_island = vert_islands[island_index];
241 const Span<int> elem_island = elem_islands[island_index];
242
243 const float scale = gather_mean<float>(scale_varray, elem_island);
244 const float3 center = gather_mean<float3>(center_varray, elem_island);
245
 /* Large islands are themselves processed in parallel. */
246 threading::parallel_for(vert_island.index_range(), 2048, [&](const IndexRange range) {
247 for (const int vert_i : vert_island.slice(range)) {
248 positions[vert_i] = transform_with_uniform_scale(positions[vert_i], center, scale);
249 }
250 });
251 }
252 },
 /* Task-size hint weighting islands by their element and vertex counts.
  * NOTE(review): the hint's opening line (line 253) is missing from this extraction. */
254 return elem_islands.offsets[range].size() + vert_islands.offsets[range].size();
255 }));
256}
257
259 const float3 &axis,
260 const float scale)
261{
262 /* Scale along x axis. The other axis need to be orthogonal, but their specific value does not
263 * matter. */
264 const float3 x_axis = math::normalize(axis);
265 float3 y_axis = math::cross(x_axis, float3(0.0f, 0.0f, 1.0f));
266 if (math::is_zero(y_axis)) {
267 y_axis = math::cross(x_axis, float3(0.0f, 1.0f, 0.0f));
268 }
269 y_axis = math::normalize(y_axis);
270 const float3 z_axis = math::cross(x_axis, y_axis);
271
273
274 /* Move scaling center to the origin. */
275 transform.location() -= center;
276
277 /* `base_change` and `base_change_inv` are used to rotate space so that scaling along the
278 * provided axis is the same as scaling along the x axis. */
279 float4x4 base_change = float4x4::identity();
280 base_change.x_axis() = x_axis;
281 base_change.y_axis() = y_axis;
282 base_change.z_axis() = z_axis;
283
284 /* Can invert by transposing, because the matrix is orthonormal. */
285 float4x4 base_change_inv = math::transpose(base_change);
286
287 float4x4 scale_transform = float4x4::identity();
288 scale_transform[0][0] = scale;
289
290 transform = base_change * scale_transform * base_change_inv * transform;
291
292 /* Move scaling center back to where it was. */
293 transform.location() += center;
294
295 return transform;
296}
297
/* Scale every island along a single axis: average the scale, center, and axis fields
 * over the island's elements, build one transform per island, and apply it to all of
 * the island's vertices. */
298static void scale_on_axis(const GroupedSpan<int> elem_islands,
299 const GroupedSpan<int> vert_islands,
300 const VArray<float> &scale_varray,
301 const VArray<float3> &center_varray,
302 const VArray<float3> &axis_varray,
303 Mesh &mesh)
304{
305 MutableSpan<float3> positions = mesh.vert_positions_for_write();
 /* NOTE(review): the loop's opening line (line 306, presumably
  * `threading::parallel_for(`) is missing from this extraction. */
307 elem_islands.index_range(),
308 512,
309 [&](const IndexRange range) {
310 for (const int island_index : range) {
311 const Span<int> vert_island = vert_islands[island_index];
312 const Span<int> elem_island = elem_islands[island_index];
313
314 const float scale = gather_mean<float>(scale_varray, elem_island);
315 const float3 center = gather_mean<float3>(center_varray, elem_island);
316 const float3 axis = gather_mean<float3>(axis_varray, elem_island);
 /* A zero axis would make the transform degenerate; substitute the X axis. */
317 const float3 fixed_axis = math::is_zero(axis) ? float3(1.0f, 0.0f, 0.0f) : axis;
318
319 const float4x4 transform = create_single_axis_transform(center, fixed_axis, scale);
320 threading::parallel_for(vert_island.index_range(), 2048, [&](const IndexRange range) {
321 for (const int vert_i : vert_island.slice(range)) {
322 positions[vert_i] = math::transform_point(transform, positions[vert_i]);
323 }
324 });
325 }
326 },
 /* Task-size hint weighting islands by their vertex and element counts.
  * NOTE(review): the hint's opening line (line 327) is missing from this extraction. */
328 return vert_islands.offsets[range].size() + elem_islands.offsets[range].size();
329 }));
330}
331
332static int face_to_vert_islands(const Mesh &mesh,
333 const IndexMask &face_mask,
334 const IndexMask &vert_mask,
335 MutableSpan<int> face_island_indices,
336 MutableSpan<int> vert_island_indices)
337{
338 Array<int> verts_pos(vert_mask.min_array_size());
339 index_mask::build_reverse_map<int>(vert_mask, verts_pos);
340
341 AtomicDisjointSet disjoint_set(vert_mask.size());
342 const GroupedSpan<int> face_verts(mesh.faces(), mesh.corner_verts());
343
344 face_mask.foreach_index_optimized<int>(GrainSize(4096), [&](const int face_i) {
345 const Span<int> verts = face_verts[face_i];
346 const int v1 = verts_pos[verts.first()];
347 for (const int vert_i : verts.drop_front(1)) {
348 const int v2 = verts_pos[vert_i];
349 disjoint_set.join(v1, v2);
350 }
351 });
352
353 disjoint_set.calc_reduced_ids(vert_island_indices);
354
355 face_mask.foreach_index(GrainSize(4096), [&](const int face_i, const int face_pos) {
356 const int face_vert_i = face_verts[face_i].first();
357 const int vert_pos = verts_pos[face_vert_i];
358 const int vert_island = vert_island_indices[vert_pos];
359 face_island_indices[face_pos] = vert_island;
360 });
361
362 return disjoint_set.count_sets();
363}
364
/* Compute face islands for the selected faces and output the per-island face and
 * vertex index lists in global mesh indices. */
365static void gather_face_islands(const Mesh &mesh,
366 const IndexMask &face_mask,
367 Array<int> &r_item_offsets,
368 Array<int> &r_item_indices,
369 Array<int> &r_vert_offsets,
370 Array<int> &r_vert_indices)
371{
372 IndexMaskMemory memory;
 /* NOTE(review): the call's opening line (line 373, presumably
  * `const IndexMask vert_mask = geometry::vert_selection_from_face(`) is missing. */
374 mesh.face_offsets(), face_mask, mesh.corner_verts(), mesh.verts_num, memory);
375
376 Array<int> face_island_indices(face_mask.size());
377 Array<int> vert_island_indices(vert_mask.size());
378 const int total_islands = face_to_vert_islands(
379 mesh, face_mask, vert_mask, face_island_indices, vert_island_indices);
380
381 /* Group gathered vertices and faces. */
382 gather_groups(vert_island_indices, total_islands, r_vert_offsets, r_vert_indices);
383 gather_groups(face_island_indices, total_islands, r_item_offsets, r_item_indices);
384
385 /* If the result indices are relative to the gathered (masked) arrays, map them back
 * to global mesh indices. */
386 if (face_mask.size() != mesh.faces_num) {
387 Array<int> face_mask_map(face_mask.size());
388 face_mask.to_indices<int>(face_mask_map);
 /* NOTE(review): an `array_utils::gather(` opening line (line 389) is missing here. */
390 face_mask_map.as_span(), r_item_indices.as_span(), r_item_indices.as_mutable_span());
391 }
392 if (vert_mask.size() != mesh.verts_num) {
393 Array<int> vert_mask_map(vert_mask.size());
394 vert_mask.to_indices<int>(vert_mask_map);
 /* NOTE(review): an `array_utils::gather(` opening line (line 395) is missing here. */
396 vert_mask_map.as_span(), r_vert_indices.as_span(), r_vert_indices.as_mutable_span());
397 }
398}
399
400static int edge_to_vert_islands(const Mesh &mesh,
401 const IndexMask &edge_mask,
402 const IndexMask &vert_mask,
403 MutableSpan<int> edge_island_indices,
404 MutableSpan<int> vert_island_indices)
405{
406 Array<int> verts_pos(vert_mask.min_array_size());
407 index_mask::build_reverse_map<int>(vert_mask, verts_pos);
408
409 AtomicDisjointSet disjoint_set(vert_mask.size());
410 const Span<int2> edges = mesh.edges();
411
412 edge_mask.foreach_index_optimized<int>(GrainSize(4096), [&](const int edge_i) {
413 const int2 edge = edges[edge_i];
414 const int v1 = verts_pos[edge[0]];
415 const int v2 = verts_pos[edge[1]];
416 disjoint_set.join(v1, v2);
417 });
418
419 disjoint_set.calc_reduced_ids(vert_island_indices);
420
421 edge_mask.foreach_index(GrainSize(4096), [&](const int edge_i, const int edge_pos) {
422 const int2 edge = edges[edge_i];
423 const int edge_vert_i = edge[0];
424 const int vert_pos = verts_pos[edge_vert_i];
425 const int vert_island = vert_island_indices[vert_pos];
426 edge_island_indices[edge_pos] = vert_island;
427 });
428
429 return disjoint_set.count_sets();
430}
431
/* Compute edge islands for the selected edges and output the per-island edge and
 * vertex index lists in global mesh indices. */
432static void gather_edge_islands(const Mesh &mesh,
433 const IndexMask &edge_mask,
434 Array<int> &r_item_offsets,
435 Array<int> &r_item_indices,
436 Array<int> &r_vert_offsets,
437 Array<int> &r_vert_indices)
438{
439 IndexMaskMemory memory;
 /* NOTE(review): the call's opening line (line 440, presumably
  * `const IndexMask vert_mask = geometry::vert_selection_from_edge(`) is missing. */
441 mesh.edges(), edge_mask, mesh.verts_num, memory);
442
443 Array<int> edge_island_indices(edge_mask.size());
444 Array<int> vert_island_indices(vert_mask.size());
445 const int total_islands = edge_to_vert_islands(
446 mesh, edge_mask, vert_mask, edge_island_indices, vert_island_indices);
447
448 /* Group gathered vertices and edges. */
449 gather_groups(vert_island_indices, total_islands, r_vert_offsets, r_vert_indices);
450 gather_groups(edge_island_indices, total_islands, r_item_offsets, r_item_indices);
451
452 /* If the result indices are relative to the gathered (masked) arrays, map them back
 * to global mesh indices. */
453 if (edge_mask.size() != mesh.edges_num) {
454 Array<int> edge_mask_map(edge_mask.size());
455 edge_mask.to_indices<int>(edge_mask_map);
 /* NOTE(review): an `array_utils::gather(` opening line (line 456) is missing here. */
457 edge_mask_map.as_span(), r_item_indices.as_span(), r_item_indices.as_mutable_span());
458 }
459 if (vert_mask.size() != mesh.verts_num) {
460 Array<int> vert_mask_map(vert_mask.size());
461 vert_mask.to_indices<int>(vert_mask_map);
 /* NOTE(review): an `array_utils::gather(` opening line (line 462) is missing here. */
463 vert_mask_map.as_span(), r_vert_indices.as_span(), r_vert_indices.as_mutable_span());
464 }
465}
466
/* Node execution: evaluate the selection/scale/center (and optionally axis) fields on
 * the chosen domain, group the selected elements into islands, and scale each island.
 * NOTE(review): several statement-opening lines were dropped by this extraction
 * (467: the `node_geo_exec(GeoNodeExecParams params)` signature; 479: presumably
 * `foreach_real_geometry(...)`; 490: presumably the evaluated-selection mask; 509:
 * presumably `BLI_assert_unreachable();`; 519/522/524: the `case` labels and the
 * `scale_on_axis(` call) — confirm against the original file. */
468{
469 const bNode &node = params.node();
 /* The element domain (Face/Edge) is stored in `custom1`. */
470 const AttrDomain domain = AttrDomain(node.custom1);
471 const auto scale_mode = params.get_input<GeometryNodeScaleElementsMode>("Scale Mode");
472
473 GeometrySet geometry = params.extract_input<GeometrySet>("Geometry");
474
475 const Field<bool> selection_field = params.extract_input<Field<bool>>("Selection");
476 const Field<float> scale_field = params.extract_input<Field<float>>("Scale");
477 const Field<float3> center_field = params.extract_input<Field<float3>>("Center");
478
480 if (Mesh *mesh = geometry.get_mesh_for_write()) {
 /* Evaluate all fields on the selected domain of this mesh. */
481 const bke::MeshFieldContext context{*mesh, domain};
482 FieldEvaluator evaluator{context, mesh->attributes().domain_size(domain)};
483 evaluator.set_selection(selection_field);
484 evaluator.add(scale_field);
485 evaluator.add(center_field);
 /* The axis field is only needed in single-axis mode. */
486 if (scale_mode == GEO_NODE_SCALE_ELEMENTS_SINGLE_AXIS) {
487 evaluator.add(params.get_input<Field<float3>>("Axis"));
488 }
489 evaluator.evaluate();
 /* Nothing selected: leave the mesh untouched. */
491 if (mask.is_empty()) {
492 return;
493 }
494
495 Array<int> item_offsets;
496 Array<int> item_indices;
497
498 Array<int> vert_offsets;
499 Array<int> vert_indices;
500
 /* Build islands of connected selected elements plus their vertices. */
501 switch (domain) {
502 case AttrDomain::Face:
503 gather_face_islands(*mesh, mask, item_offsets, item_indices, vert_offsets, vert_indices);
504 break;
505 case AttrDomain::Edge:
506 gather_edge_islands(*mesh, mask, item_offsets, item_indices, vert_offsets, vert_indices);
507 break;
508 default:
510 }
511
512 const GroupedSpan<int> item_islands(item_offsets.as_span(), item_indices);
513 const GroupedSpan<int> vert_islands(vert_offsets.as_span(), vert_indices);
514
 /* Evaluated field order matches the `evaluator.add` calls above. */
515 const VArray<float> scale_varray = evaluator.get_evaluated<float>(0);
516 const VArray<float3> center_varray = evaluator.get_evaluated<float3>(1);
517
518 switch (scale_mode) {
520 scale_uniformly(item_islands, vert_islands, scale_varray, center_varray, *mesh);
521 break;
523 const VArray<float3> axis_varray = evaluator.get_evaluated<float3>(2);
525 item_islands, vert_islands, scale_varray, center_varray, axis_varray, *mesh);
526 break;
527 }
528 }
529 mesh->tag_positions_changed();
530 }
531 });
532
533 params.set_output("Geometry", std::move(geometry));
534}
535
/* Register the node's RNA properties: the "domain" enum (Face/Edge).
 * NOTE(review): the `RNA_def_node_enum(srna,` opening line (552) and one argument line
 * (557, presumably the `NOD_inline_enum_accessors(custom1)` accessor) were dropped by
 * this extraction — confirm against the original file. */
536static void node_rna(StructRNA *srna)
537{
538 static const EnumPropertyItem domain_items[] = {
539 {int(AttrDomain::Face),
540 "FACE",
541 ICON_NONE,
542 "Face",
543 "Scale individual faces or neighboring face islands"},
544 {int(AttrDomain::Edge),
545 "EDGE",
546 ICON_NONE,
547 "Edge",
548 "Scale individual edges or neighboring edge islands"},
549 {0, nullptr, 0, nullptr, nullptr},
550 };
551
553 "domain",
554 "Domain",
555 "Element type to transform",
556 domain_items,
558 int(AttrDomain::Face));
559}
560
/* Register the node type with Blender.
 * NOTE(review): several assignment lines were dropped by this extraction (569/570/572/574,
 * plausibly `ntype.nclass`, `ntype.geometry_node_execute`, `ntype.draw_buttons`, and the
 * final `blender::bke::node_register_type(ntype);`) — confirm against the original file. */
561static void node_register()
562{
563 static blender::bke::bNodeType ntype;
564
565 geo_node_type_base(&ntype, "GeometryNodeScaleElements", GEO_NODE_SCALE_ELEMENTS);
566 ntype.ui_name = "Scale Elements";
567 ntype.ui_description = "Scale groups of connected edges and faces";
568 ntype.enum_name_legacy = "SCALE_ELEMENTS";
571 ntype.declare = node_declare;
573 ntype.initfunc = node_init;
575
576 node_rna(ntype.rna_ext.srna);
577}
578NOD_REGISTER_NODE(node_register)
579
580} // namespace blender::nodes::node_geo_scale_elements_cc
#define NODE_CLASS_GEOMETRY
Definition BKE_node.hh:461
#define GEO_NODE_SCALE_ELEMENTS
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_SCOPED_DEFER(function_to_defer)
@ NODE_DEFAULT_INPUT_POSITION_FIELD
GeometryNodeScaleElementsMode
@ GEO_NODE_SCALE_ELEMENTS_SINGLE_AXIS
@ GEO_NODE_SCALE_ELEMENTS_UNIFORM
#define NOD_REGISTER_NODE(REGISTER_FUNC)
#define NOD_inline_enum_accessors(member)
@ PROP_TRANSLATION
Definition RNA_types.hh:261
#define UI_ITEM_NONE
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE int32_t atomic_add_and_fetch_int32(int32_t *p, int32_t x)
AttrDomain
ATTR_WARN_UNUSED_RESULT const BMVert * v2
SIMD_FORCE_INLINE btVector3 transform(const btVector3 &point) const
constexpr int64_t size() const
Definition BLI_span.hh:252
Span< T > as_span() const
Definition BLI_array.hh:243
MutableSpan< T > as_mutable_span()
Definition BLI_array.hh:248
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:419
int calc_reduced_ids(MutableSpan< int > result) const
constexpr int64_t size() const
constexpr IndexRange slice(int64_t start, int64_t size) const
constexpr MutableSpan slice(const int64_t start, const int64_t size) const
Definition BLI_span.hh:573
constexpr T * end() const
Definition BLI_span.hh:548
constexpr T * begin() const
Definition BLI_span.hh:544
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr const T * end() const
Definition BLI_span.hh:224
constexpr IndexRange index_range() const
Definition BLI_span.hh:401
constexpr const T * begin() const
Definition BLI_span.hh:220
constexpr bool is_empty() const
Definition BLI_span.hh:260
std::optional< T > get_if_single() const
void set_selection(Field< bool > selection)
Definition FN_field.hh:383
int add(GField field, GVArray *varray_ptr)
Definition field.cc:751
IndexMask get_evaluated_selection_as_mask() const
Definition field.cc:817
const GVArray & get_evaluated(const int field_index) const
Definition FN_field.hh:448
void to_indices(MutableSpan< T > r_indices) const
void foreach_index_optimized(Fn &&fn) const
void foreach_index(Fn &&fn) const
IMETHOD Vector diff(const Vector &a, const Vector &b, double dt)
Definition frames.inl:1166
static ushort indices[]
static float verts[][3]
VecBase< float, 3 > float3
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
void * MEM_calloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:123
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
ccl_device_inline float2 mask(const MaskType mask, const float2 a)
#define T
void copy(const GVArray &src, GMutableSpan dst, int64_t grain_size=4096)
void gather(const GVArray &src, const IndexMask &indices, GMutableSpan dst, int64_t grain_size=4096)
void node_register_type(bNodeType &ntype)
Definition node.cc:2416
void foreach_real_geometry(bke::GeometrySet &geometry, FunctionRef< void(bke::GeometrySet &geometry_set)> fn)
IndexMask vert_selection_from_face(OffsetIndices< int > faces, const IndexMask &face_mask, Span< int > corner_verts, int verts_num, IndexMaskMemory &memory)
IndexMask vert_selection_from_edge(Span< int2 > edges, const IndexMask &edge_mask, int verts_num, IndexMaskMemory &memory)
void build_reverse_map(const IndexMask &mask, MutableSpan< T > r_map)
Definition index_mask.cc:34
MatBase< T, NumCol, NumRow > transpose(const MatBase< T, NumRow, NumCol > &mat)
bool is_zero(const T &a)
AxisSigned cross(const AxisSigned a, const AxisSigned b)
MatBase< T, NumCol, NumRow > normalize(const MatBase< T, NumCol, NumRow > &a)
static Array< int > reverse_indices_in_groups(const Span< int > group_indices, const OffsetIndices< int > offsets)
static int face_to_vert_islands(const Mesh &mesh, const IndexMask &face_mask, const IndexMask &vert_mask, MutableSpan< int > face_island_indices, MutableSpan< int > vert_island_indices)
static float3 transform_with_uniform_scale(const float3 &position, const float3 &center, const float scale)
static T gather_mean(const VArray< T > &values, const Span< int > indices)
static int edge_to_vert_islands(const Mesh &mesh, const IndexMask &edge_mask, const IndexMask &vert_mask, MutableSpan< int > edge_island_indices, MutableSpan< int > vert_island_indices)
static Span< int > front_indices_to_same_value(const Span< int > indices, const Span< int > values)
static void node_declare(NodeDeclarationBuilder &b)
static void node_init(bNodeTree *, bNode *node)
static void scale_on_axis(const GroupedSpan< int > elem_islands, const GroupedSpan< int > vert_islands, const VArray< float > &scale_varray, const VArray< float3 > &center_varray, const VArray< float3 > &axis_varray, Mesh &mesh)
static Array< int > create_reverse_offsets(const Span< int > indices, const int items_num)
static GroupedSpan< int > gather_groups(const Span< int > group_indices, const int groups_num, Array< int > &r_offsets, Array< int > &r_indices)
static void node_layout(uiLayout *layout, bContext *, PointerRNA *ptr)
static void gather_edge_islands(const Mesh &mesh, const IndexMask &edge_mask, Array< int > &r_item_offsets, Array< int > &r_item_indices, Array< int > &r_vert_offsets, Array< int > &r_vert_indices)
static void gather_face_islands(const Mesh &mesh, const IndexMask &face_mask, Array< int > &r_item_offsets, Array< int > &r_item_indices, Array< int > &r_vert_offsets, Array< int > &r_vert_indices)
static float4x4 create_single_axis_transform(const float3 &center, const float3 &axis, const float scale)
static void from_indices_large_groups(const Span< int > group_indices, MutableSpan< int > r_counts_to_offset, MutableSpan< int > r_indices)
static void node_geo_exec(GeoNodeExecParams params)
static void scale_uniformly(const GroupedSpan< int > elem_islands, const GroupedSpan< int > vert_islands, const VArray< float > &scale_varray, const VArray< float3 > &center_varray, Mesh &mesh)
PropertyRNA * RNA_def_node_enum(StructRNA *srna, const char *identifier, const char *ui_name, const char *ui_description, const EnumPropertyItem *static_items, const EnumRNAAccessors accessors, std::optional< int > default_value, const EnumPropertyItemFunc item_func, const bool allow_animation)
OffsetIndices< int > accumulate_counts_to_offsets(MutableSpan< int > counts_to_offsets, int start_offset=0)
void build_reverse_offsets(Span< int > indices, MutableSpan< int > offsets)
void parallel_for_each(Range &&range, const Function &function)
Definition BLI_task.hh:56
Value parallel_deterministic_reduce(IndexRange range, int64_t grain_size, const Value &identity, const Function &function, const Reduction &reduction)
Definition BLI_task.hh:194
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
auto accumulated_task_sizes(Fn &&fn)
void devirtualize_varray(const VArray< T > &varray, const Func &func, bool enable=true)
MatBase< float, 4, 4 > float4x4
void parallel_sort(RandomAccessIterator begin, RandomAccessIterator end)
Definition BLI_sort.hh:23
VecBase< int32_t, 2 > int2
VecBase< float, 3 > float3
void geo_node_type_base(blender::bke::bNodeType *ntype, std::string idname, const std::optional< int16_t > legacy_type)
StructRNA * srna
int edges_num
int faces_num
int verts_num
int16_t custom1
Defines a node type.
Definition BKE_node.hh:238
std::string ui_description
Definition BKE_node.hh:244
void(* initfunc)(bNodeTree *ntree, bNode *node)
Definition BKE_node.hh:289
NodeGeometryExecFunction geometry_node_execute
Definition BKE_node.hh:354
const char * enum_name_legacy
Definition BKE_node.hh:247
void(* draw_buttons)(uiLayout *, bContext *C, PointerRNA *ptr)
Definition BKE_node.hh:259
NodeDeclareFunction declare
Definition BKE_node.hh:362
void prop(PointerRNA *ptr, PropertyRNA *prop, int index, int value, eUI_Item_Flag flag, std::optional< blender::StringRef > name_opt, int icon, std::optional< blender::StringRef > placeholder=std::nullopt)
i
Definition text_draw.cc:230
#define N_(msgid)
PointerRNA * ptr
Definition wm_files.cc:4238