Blender V4.5
node_geo_scale_elements.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
5#include "atomic_ops.h"
6
7#include "BLI_array.hh"
8#include "BLI_array_utils.hh"
10#include "BLI_math_matrix.hh"
11#include "BLI_sort.hh"
12#include "BLI_task.hh"
13#include "BLI_virtual_array.hh"
14
15#include "DNA_mesh_types.h"
16
17#include "UI_interface.hh"
18#include "UI_resources.hh"
19
20#include "GEO_mesh_selection.hh"
21
22#include "NOD_rna_define.hh"
23
24#include "node_geometry_util.hh"
25
27
29{
30 b.use_custom_socket_order();
31 b.allow_any_socket_order();
32 b.add_default_layout();
33 b.add_input<decl::Geometry>("Geometry").supported_type(GeometryComponent::Type::Mesh);
34 b.add_output<decl::Geometry>("Geometry").propagate_all().align_with_previous();
35 b.add_input<decl::Bool>("Selection").default_value(true).hide_value().field_on_all();
36 b.add_input<decl::Float>("Scale", "Scale").default_value(1.0f).min(0.0f).field_on_all();
37 b.add_input<decl::Vector>("Center")
38 .subtype(PROP_TRANSLATION)
39 .implicit_field_on_all(NODE_DEFAULT_INPUT_POSITION_FIELD)
41 "Origin of the scaling for each element. If multiple elements are connected, their "
42 "center is averaged");
43 auto &axis = b.add_input<decl::Vector>("Axis")
44 .default_value({1.0f, 0.0f, 0.0f})
45 .field_on_all()
46 .description("Direction in which to scale the element")
47 .make_available(
49
50 const bNode *node = b.node_or_null();
51 if (node != nullptr) {
53 axis.available(mode == GEO_NODE_SCALE_ELEMENTS_SINGLE_AXIS);
54 }
55};
56
/* Draw the node's buttons: the element domain and the scale mode. */
static void node_layout(uiLayout *layout, bContext * /*C*/, PointerRNA *ptr)
{
  layout->prop(ptr, "domain", UI_ITEM_NONE, "", ICON_NONE);
  layout->prop(ptr, "scale_mode", UI_ITEM_NONE, "", ICON_NONE);
}
62
/* Initialize a freshly added node: scale faces, uniformly, by default. */
static void node_init(bNodeTree * /*tree*/, bNode *node)
{
  node->custom1 = int16_t(AttrDomain::Face);
  node->custom2 = GEO_NODE_SCALE_ELEMENTS_UNIFORM;
}
68
/* Count how many of `indices` point at each of the `items_num` items and convert the counts into
 * offsets, so the result can be used as `OffsetIndices` describing one range per item. */
static Array<int> create_reverse_offsets(const Span<int> indices, const int items_num)
{
  Array<int> offsets(items_num + 1, 0);
  offset_indices::build_reverse_offsets(indices, offsets);
  return offsets;
}
75
77{
78 const int value = values[indices.first()];
79 const int &first_other = *std::find_if(
80 indices.begin(), indices.end(), [&](const int index) { return values[index] != value; });
81 return indices.take_front(&first_other - indices.begin());
82}
83
/* Bucket every element of `group_indices` into its group, optimized for large groups.
 * Two passes over fixed-size segments that are processed in parallel: pass 1 sorts each segment
 * by group and counts each group's elements atomically; the counts are converted to offsets;
 * pass 2 walks the sorted segments again and copies each run of equal group indices into its
 * reserved slot range in `r_indices`. `r_counts_to_offset` must be zero-initialized and becomes
 * the group offsets. */
static void from_indices_large_groups(const Span<int> group_indices,
                                      MutableSpan<int> r_counts_to_offset,
                                      MutableSpan<int> r_indices)
{
  constexpr const int segment_size = 1024;
  constexpr const IndexRange segment(segment_size);
  /* True when the input size is not a multiple of the segment size: the last segment is short. */
  const bool last_small_segmet = bool(group_indices.size() % segment_size);
  const int total_segments = group_indices.size() / segment_size + int(last_small_segmet);

  Array<int> src_indices(group_indices.size());
  threading::parallel_for_each(IndexRange(total_segments), [&](const int segment_index) {
    const IndexRange range = segment.shift(segment_size * segment_index);
    /* `slice_safe` clamps the range for the possibly-short last segment. */
    MutableSpan<int> segment_indices = src_indices.as_mutable_span().slice_safe(range);
    std::iota(segment_indices.begin(), segment_indices.end(), segment_size * segment_index);
    /* Sort the segment's indices by group so equal groups form contiguous runs. */
    parallel_sort(segment_indices.begin(), segment_indices.end(), [&](const int a, const int b) {
      return group_indices[a] < group_indices[b];
    });

    /* Count each run; different segments may update the same group concurrently. */
    for (Span<int> indices = segment_indices; !indices.is_empty();) {
      const int group = group_indices[indices.first()];
      const int step_size = front_indices_to_same_value(indices, group_indices).size();
      atomic_add_and_fetch_int32(&r_counts_to_offset[group], step_size);
      indices = indices.drop_front(step_size);
    }
  });

  const OffsetIndices<int> offset = offset_indices::accumulate_counts_to_offsets(
      r_counts_to_offset);
  Array<int> counts(offset.size(), 0);
  threading::parallel_for_each(IndexRange(total_segments), [&](const int segment_index) {
    const IndexRange range = segment.shift(segment_size * segment_index);
    const Span<int> segment_indices = src_indices.as_span().slice_safe(range);
    for (Span<int> indices = segment_indices; !indices.is_empty();) {
      const Span<int> indices_of_current_group = front_indices_to_same_value(indices,
                                                                             group_indices);
      const int step_size = indices_of_current_group.size();
      const int group = group_indices[indices.first()];
      /* Atomically reserve a contiguous destination range inside this group's bucket. */
      const int start = atomic_add_and_fetch_int32(&counts[group], step_size) - step_size;
      const IndexRange dst_range = offset[group].slice(start, step_size);
      array_utils::copy(indices_of_current_group, r_indices.slice(dst_range));
      indices = indices.drop_front(step_size);
    }
  });
}
128
/* For every group described by `offsets`, gather the positions in `group_indices` that map to it.
 * The returned array is laid out so that `result[offsets[g]]` is the list of indices belonging to
 * group `g`. The order within a group is non-deterministic (parallel slot claiming). */
static Array<int> reverse_indices_in_groups(const Span<int> group_indices,
                                            const OffsetIndices<int> offsets)
{
  if (group_indices.is_empty()) {
    return {};
  }
  BLI_assert(*std::max_element(group_indices.begin(), group_indices.end()) < offsets.size());
  BLI_assert(*std::min_element(group_indices.begin(), group_indices.end()) >= 0);

  /* `counts` keeps track of how many elements have been added to each group, and is incremented
   * atomically by many threads in parallel. `calloc` can be measurably faster than a parallel fill
   * of zero. Alternatively the offsets could be copied and incremented directly, but the cost of
   * the copy is slightly higher than the cost of `calloc`. */
  int *counts = MEM_calloc_arrayN<int>(offsets.size(), __func__);
  BLI_SCOPED_DEFER([&]() { MEM_freeN(counts); })
  Array<int> results(group_indices.size());
  threading::parallel_for(group_indices.index_range(), 1024, [&](const IndexRange range) {
    for (const int64_t i : range) {
      const int group_index = group_indices[i];
      /* Claim the next free slot in this group's destination range. */
      const int index_in_group = atomic_fetch_and_add_int32(&counts[group_index], 1);
      results[offsets[group_index][index_in_group]] = int(i);
    }
  });
  return results;
}
154
155static GroupedSpan<int> gather_groups(const Span<int> group_indices,
156 const int groups_num,
157 Array<int> &r_offsets,
158 Array<int> &r_indices)
159{
160 if (group_indices.size() / groups_num > 1000) {
161 r_offsets.reinitialize(groups_num + 1);
162 r_offsets.as_mutable_span().fill(0);
163 r_indices.reinitialize(group_indices.size());
164 from_indices_large_groups(group_indices, r_offsets, r_indices);
165 }
166 else {
167 r_offsets = create_reverse_offsets(group_indices, groups_num);
168 r_indices = reverse_indices_in_groups(group_indices, r_offsets.as_span());
169 }
170 return {OffsetIndices<int>(r_offsets), r_indices};
171}
172
/* Compute the average of `values` at the given `indices`. `indices` must not be empty.
 * When the virtual array is a single constant, that value is returned directly. */
template<typename T> static T gather_mean(const VArray<T> &values, const Span<int> indices)
{
  BLI_assert(!indices.is_empty());
  if (const std::optional<T> value = values.get_if_single()) {
    return *value;
  }

  /* Pair of (partial value, element count). After two accumulators are joined, the first member
   * holds their combined average and the count is reset to 1. */
  using MeanAccumulator = std::pair<T, int>;
  const auto join_accumulators = [](const MeanAccumulator a,
                                    const MeanAccumulator b) -> MeanAccumulator {
    /* NOTE(review): once both inputs carry count 1, this averages two partial means with equal
     * weight, which is exact only for equally sized partials. The deterministic reduction keeps
     * the partitioning stable, so results are at least reproducible — confirm the approximation
     * is intended. */
    return {(a.first + b.first) / (a.second + b.second), 1};
  };

  T value;
  devirtualize_varray(values, [&](const auto values) {
    const MeanAccumulator accumulator = threading::parallel_deterministic_reduce(
        indices.index_range(),
        2048,
        MeanAccumulator(T(), 0),
        [&](const IndexRange range, MeanAccumulator other) -> MeanAccumulator {
          /* Sum this range, then fold the (sum, count) pair into the received accumulator. */
          T value(0);
          for (const int i : indices.slice(range)) {
            value += values[i];
          }
          return join_accumulators({value, int(range.size())}, other);
        },
        join_accumulators);
    value = accumulator.first / accumulator.second;
  });
  return value;
}
204
206 const float3 &center,
207 const float scale)
208{
209 const float3 diff = position - center;
210 const float3 scaled_diff = scale * diff;
211 const float3 new_position = center + scaled_diff;
212 return new_position;
213}
214
/* Scale each island's vertices uniformly about the island's averaged center, by the island's
 * averaged scale factor. Islands are independent, so they are processed in parallel; the task
 * size hint accounts for both element and vertex counts per island. */
static void scale_uniformly(const GroupedSpan<int> elem_islands,
                            const GroupedSpan<int> vert_islands,
                            const VArray<float> &scale_varray,
                            const VArray<float3> &center_varray,
                            Mesh &mesh)
{
  MutableSpan<float3> positions = mesh.vert_positions_for_write();
  threading::parallel_for(
      elem_islands.index_range(),
      512,
      [&](const IndexRange range) {
        for (const int island_index : range) {
          const Span<int> vert_island = vert_islands[island_index];
          const Span<int> elem_island = elem_islands[island_index];

          /* Average the field inputs over the island's elements. */
          const float scale = gather_mean<float>(scale_varray, elem_island);
          const float3 center = gather_mean<float3>(center_varray, elem_island);

          threading::parallel_for(vert_island.index_range(), 2048, [&](const IndexRange range) {
            for (const int vert_i : vert_island.slice(range)) {
              positions[vert_i] = transform_with_uniform_scale(positions[vert_i], center, scale);
            }
          });
        }
      },
      threading::accumulated_task_sizes([&](const IndexRange range) {
        return elem_islands.offsets[range].size() + vert_islands.offsets[range].size();
      }));
}
244
/* Build a matrix that scales by `scale` along `axis` (must be non-zero) while keeping `center`
 * fixed: translate `center` to the origin, rotate `axis` onto x, scale x, rotate and translate
 * back. */
static float4x4 create_single_axis_transform(const float3 &center,
                                             const float3 &axis,
                                             const float scale)
{
  /* Scale along x axis. The other axis need to be orthogonal, but their specific value does not
   * matter. */
  const float3 x_axis = math::normalize(axis);
  float3 y_axis = math::cross(x_axis, float3(0.0f, 0.0f, 1.0f));
  if (math::is_zero(y_axis)) {
    /* `axis` is parallel to the z axis; use a different helper axis for the cross product. */
    y_axis = math::cross(x_axis, float3(0.0f, 1.0f, 0.0f));
  }
  y_axis = math::normalize(y_axis);
  const float3 z_axis = math::cross(x_axis, y_axis);

  float4x4 transform = float4x4::identity();

  /* Move scaling center to the origin. */
  transform.location() -= center;

  /* `base_change` and `base_change_inv` are used to rotate space so that scaling along the
   * provided axis is the same as scaling along the x axis. */
  float4x4 base_change = float4x4::identity();
  base_change.x_axis() = x_axis;
  base_change.y_axis() = y_axis;
  base_change.z_axis() = z_axis;

  /* Can invert by transposing, because the matrix is orthonormal. */
  float4x4 base_change_inv = math::transpose(base_change);

  float4x4 scale_transform = float4x4::identity();
  scale_transform[0][0] = scale;

  transform = base_change * scale_transform * base_change_inv * transform;

  /* Move scaling center back to where it was. */
  transform.location() += center;

  return transform;
}
284
/* Scale each island's vertices along the island's averaged axis, by the averaged scale factor,
 * about the averaged center. Islands are processed in parallel with a per-island size hint. */
static void scale_on_axis(const GroupedSpan<int> elem_islands,
                          const GroupedSpan<int> vert_islands,
                          const VArray<float> &scale_varray,
                          const VArray<float3> &center_varray,
                          const VArray<float3> &axis_varray,
                          Mesh &mesh)
{
  MutableSpan<float3> positions = mesh.vert_positions_for_write();
  threading::parallel_for(
      elem_islands.index_range(),
      512,
      [&](const IndexRange range) {
        for (const int island_index : range) {
          const Span<int> vert_island = vert_islands[island_index];
          const Span<int> elem_island = elem_islands[island_index];

          /* Average the field inputs over the island's elements. */
          const float scale = gather_mean<float>(scale_varray, elem_island);
          const float3 center = gather_mean<float3>(center_varray, elem_island);
          const float3 axis = gather_mean<float3>(axis_varray, elem_island);
          /* Averaged axes can cancel out to zero; fall back to the x axis in that case. */
          const float3 fixed_axis = math::is_zero(axis) ? float3(1.0f, 0.0f, 0.0f) : axis;

          const float4x4 transform = create_single_axis_transform(center, fixed_axis, scale);
          threading::parallel_for(vert_island.index_range(), 2048, [&](const IndexRange range) {
            for (const int vert_i : vert_island.slice(range)) {
              positions[vert_i] = math::transform_point(transform, positions[vert_i]);
            }
          });
        }
      },
      threading::accumulated_task_sizes([&](const IndexRange range) {
        return vert_islands.offsets[range].size() + elem_islands.offsets[range].size();
      }));
}
318
/* Compute connected components ("islands") of the masked vertices, where all vertices of each
 * selected face are joined together. Writes the island id of every masked vertex and face (in
 * mask order) and returns the number of islands. */
static int face_to_vert_islands(const Mesh &mesh,
                                const IndexMask &face_mask,
                                const IndexMask &vert_mask,
                                MutableSpan<int> face_island_indices,
                                MutableSpan<int> vert_island_indices)
{
  /* Map global vertex indices to their position inside `vert_mask`. */
  Array<int> verts_pos(vert_mask.min_array_size());
  index_mask::build_reverse_map<int>(vert_mask, verts_pos);

  AtomicDisjointSet disjoint_set(vert_mask.size());
  const GroupedSpan<int> face_verts(mesh.faces(), mesh.corner_verts());

  /* Join every vertex of a selected face with the face's first vertex. */
  face_mask.foreach_index_optimized<int>(GrainSize(4096), [&](const int face_i) {
    const Span<int> verts = face_verts[face_i];
    const int v1 = verts_pos[verts.first()];
    for (const int vert_i : verts.drop_front(1)) {
      const int v2 = verts_pos[vert_i];
      disjoint_set.join(v1, v2);
    }
  });

  disjoint_set.calc_reduced_ids(vert_island_indices);

  /* A face belongs to the island of its first vertex. */
  face_mask.foreach_index(GrainSize(4096), [&](const int face_i, const int face_pos) {
    const int face_vert_i = face_verts[face_i].first();
    const int vert_pos = verts_pos[face_vert_i];
    const int vert_island = vert_island_indices[vert_pos];
    face_island_indices[face_pos] = vert_island;
  });

  return disjoint_set.count_sets();
}
351
/* Compute the face islands for the selected faces and gather, per island, the global indices of
 * its faces (`r_item_*`) and vertices (`r_vert_*`). */
static void gather_face_islands(const Mesh &mesh,
                                const IndexMask &face_mask,
                                Array<int> &r_item_offsets,
                                Array<int> &r_item_indices,
                                Array<int> &r_vert_offsets,
                                Array<int> &r_vert_indices)
{
  IndexMaskMemory memory;
  const IndexMask vert_mask = geometry::vert_selection_from_face(
      mesh.face_offsets(), face_mask, mesh.corner_verts(), mesh.verts_num, memory);

  Array<int> face_island_indices(face_mask.size());
  Array<int> vert_island_indices(vert_mask.size());
  const int total_islands = face_to_vert_islands(
      mesh, face_mask, vert_mask, face_island_indices, vert_island_indices);

  /* Group gathered vertices and faces. */
  gather_groups(vert_island_indices, total_islands, r_vert_offsets, r_vert_indices);
  gather_groups(face_island_indices, total_islands, r_item_offsets, r_item_indices);

  /* If result indices are relative to the gathered (masked) arrays, map them back into global
   * mesh indices. */
  if (face_mask.size() != mesh.faces_num) {
    Array<int> face_mask_map(face_mask.size());
    face_mask.to_indices<int>(face_mask_map);
    array_utils::gather(
        face_mask_map.as_span(), r_item_indices.as_span(), r_item_indices.as_mutable_span());
  }
  if (vert_mask.size() != mesh.verts_num) {
    Array<int> vert_mask_map(vert_mask.size());
    vert_mask.to_indices<int>(vert_mask_map);
    array_utils::gather(
        vert_mask_map.as_span(), r_vert_indices.as_span(), r_vert_indices.as_mutable_span());
  }
}
386
387static int edge_to_vert_islands(const Mesh &mesh,
388 const IndexMask &edge_mask,
389 const IndexMask &vert_mask,
390 MutableSpan<int> edge_island_indices,
391 MutableSpan<int> vert_island_indices)
392{
393 Array<int> verts_pos(vert_mask.min_array_size());
394 index_mask::build_reverse_map<int>(vert_mask, verts_pos);
395
396 AtomicDisjointSet disjoint_set(vert_mask.size());
397 const Span<int2> edges = mesh.edges();
398
399 edge_mask.foreach_index_optimized<int>(GrainSize(4096), [&](const int edge_i) {
400 const int2 edge = edges[edge_i];
401 const int v1 = verts_pos[edge[0]];
402 const int v2 = verts_pos[edge[1]];
403 disjoint_set.join(v1, v2);
404 });
405
406 disjoint_set.calc_reduced_ids(vert_island_indices);
407
408 edge_mask.foreach_index(GrainSize(4096), [&](const int edge_i, const int edge_pos) {
409 const int2 edge = edges[edge_i];
410 const int edge_vert_i = edge[0];
411 const int vert_pos = verts_pos[edge_vert_i];
412 const int vert_island = vert_island_indices[vert_pos];
413 edge_island_indices[edge_pos] = vert_island;
414 });
415
416 return disjoint_set.count_sets();
417}
418
/* Compute the edge islands for the selected edges and gather, per island, the global indices of
 * its edges (`r_item_*`) and vertices (`r_vert_*`). */
static void gather_edge_islands(const Mesh &mesh,
                                const IndexMask &edge_mask,
                                Array<int> &r_item_offsets,
                                Array<int> &r_item_indices,
                                Array<int> &r_vert_offsets,
                                Array<int> &r_vert_indices)
{
  IndexMaskMemory memory;
  const IndexMask vert_mask = geometry::vert_selection_from_edge(
      mesh.edges(), edge_mask, mesh.verts_num, memory);

  Array<int> edge_island_indices(edge_mask.size());
  Array<int> vert_island_indices(vert_mask.size());
  const int total_islands = edge_to_vert_islands(
      mesh, edge_mask, vert_mask, edge_island_indices, vert_island_indices);

  /* Group gathered vertices and edges. */
  gather_groups(vert_island_indices, total_islands, r_vert_offsets, r_vert_indices);
  gather_groups(edge_island_indices, total_islands, r_item_offsets, r_item_indices);

  /* If result indices are relative to the gathered (masked) arrays, map them back into global
   * mesh indices. */
  if (edge_mask.size() != mesh.edges_num) {
    Array<int> edge_mask_map(edge_mask.size());
    edge_mask.to_indices<int>(edge_mask_map);
    array_utils::gather(
        edge_mask_map.as_span(), r_item_indices.as_span(), r_item_indices.as_mutable_span());
  }
  if (vert_mask.size() != mesh.verts_num) {
    Array<int> vert_mask_map(vert_mask.size());
    vert_mask.to_indices<int>(vert_mask_map);
    array_utils::gather(
        vert_mask_map.as_span(), r_vert_indices.as_span(), r_vert_indices.as_mutable_span());
  }
}
453
/* Execute the node: for every mesh in the input geometry, evaluate the selection and field
 * inputs on the chosen domain, build the element islands, and scale each island either
 * uniformly or along a single axis. */
static void node_geo_exec(GeoNodeExecParams params)
{
  const bNode &node = params.node();
  const AttrDomain domain = AttrDomain(node.custom1);
  const GeometryNodeScaleElementsMode scale_mode = GeometryNodeScaleElementsMode(node.custom2);

  GeometrySet geometry = params.extract_input<GeometrySet>("Geometry");

  const Field<bool> selection_field = params.extract_input<Field<bool>>("Selection");
  const Field<float> scale_field = params.extract_input<Field<float>>("Scale");
  const Field<float3> center_field = params.extract_input<Field<float3>>("Center");

  geometry.modify_geometry_sets([&](GeometrySet &geometry) {
    if (Mesh *mesh = geometry.get_mesh_for_write()) {
      const bke::MeshFieldContext context{*mesh, domain};
      FieldEvaluator evaluator{context, mesh->attributes().domain_size(domain)};
      evaluator.set_selection(selection_field);
      evaluator.add(scale_field);
      evaluator.add(center_field);
      /* The axis field is only needed (and only declared available) in single-axis mode. */
      if (scale_mode == GEO_NODE_SCALE_ELEMENTS_SINGLE_AXIS) {
        evaluator.add(params.get_input<Field<float3>>("Axis"));
      }
      evaluator.evaluate();
      const IndexMask mask = evaluator.get_evaluated_selection_as_mask();
      if (mask.is_empty()) {
        return;
      }

      Array<int> item_offsets;
      Array<int> item_indices;

      Array<int> vert_offsets;
      Array<int> vert_indices;

      switch (domain) {
        case AttrDomain::Face:
          gather_face_islands(*mesh, mask, item_offsets, item_indices, vert_offsets, vert_indices);
          break;
        case AttrDomain::Edge:
          gather_edge_islands(*mesh, mask, item_offsets, item_indices, vert_offsets, vert_indices);
          break;
        default:
          BLI_assert_unreachable();
      }

      const GroupedSpan<int> item_islands(item_offsets.as_span(), item_indices);
      const GroupedSpan<int> vert_islands(vert_offsets.as_span(), vert_indices);

      /* Evaluation order above fixes the field indices: 0 = scale, 1 = center, 2 = axis. */
      const VArray<float> scale_varray = evaluator.get_evaluated<float>(0);
      const VArray<float3> center_varray = evaluator.get_evaluated<float3>(1);

      switch (scale_mode) {
        case GEO_NODE_SCALE_ELEMENTS_UNIFORM:
          scale_uniformly(item_islands, vert_islands, scale_varray, center_varray, *mesh);
          break;
        case GEO_NODE_SCALE_ELEMENTS_SINGLE_AXIS: {
          const VArray<float3> axis_varray = evaluator.get_evaluated<float3>(2);
          scale_on_axis(
              item_islands, vert_islands, scale_varray, center_varray, axis_varray, *mesh);
          break;
        }
      }
      mesh->tag_positions_changed();
    }
  });

  params.set_output("Geometry", std::move(geometry));
}
522
/* Register the node's RNA properties: the element domain and the scale mode enums, stored in
 * `custom1` and `custom2` respectively. */
static void node_rna(StructRNA *srna)
{
  static const EnumPropertyItem domain_items[] = {
      {int(AttrDomain::Face),
       "FACE",
       ICON_NONE,
       "Face",
       "Scale individual faces or neighboring face islands"},
      {int(AttrDomain::Edge),
       "EDGE",
       ICON_NONE,
       "Edge",
       "Scale individual edges or neighboring edge islands"},
      {0, nullptr, 0, nullptr, nullptr},
  };

  static const EnumPropertyItem scale_mode_items[] = {
      {GEO_NODE_SCALE_ELEMENTS_UNIFORM,
       "UNIFORM",
       ICON_NONE,
       "Uniform",
       "Scale elements by the same factor in every direction"},
      {GEO_NODE_SCALE_ELEMENTS_SINGLE_AXIS,
       "SINGLE_AXIS",
       ICON_NONE,
       "Single Axis",
       "Scale elements in a single direction"},
      {0, nullptr, 0, nullptr, nullptr},
  };

  RNA_def_node_enum(srna,
                    "domain",
                    "Domain",
                    "Element type to transform",
                    domain_items,
                    NOD_inline_enum_accessors(custom1),
                    int(AttrDomain::Face));

  RNA_def_node_enum(
      srna, "scale_mode", "Scale Mode", "", scale_mode_items, NOD_inline_enum_accessors(custom2));
}
564
/* Register the node type with the node system and attach its RNA properties.
 * NOTE(review): the exact order of the `ntype` member assignments restored here follows the
 * usual geometry-node pattern — confirm against upstream. */
static void node_register()
{
  static blender::bke::bNodeType ntype;

  geo_node_type_base(&ntype, "GeometryNodeScaleElements", GEO_NODE_SCALE_ELEMENTS);
  ntype.ui_name = "Scale Elements";
  ntype.ui_description = "Scale groups of connected edges and faces";
  ntype.enum_name_legacy = "SCALE_ELEMENTS";
  ntype.nclass = NODE_CLASS_GEOMETRY;
  ntype.geometry_node_execute = node_geo_exec;
  ntype.declare = node_declare;
  ntype.draw_buttons = node_layout;
  ntype.initfunc = node_init;
  blender::bke::node_register_type(ntype);

  node_rna(ntype.rna_ext.srna);
}
582NOD_REGISTER_NODE(node_register)
583
584} // namespace blender::nodes::node_geo_scale_elements_cc
#define NODE_CLASS_GEOMETRY
Definition BKE_node.hh:447
#define GEO_NODE_SCALE_ELEMENTS
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_SCOPED_DEFER(function_to_defer)
@ NODE_DEFAULT_INPUT_POSITION_FIELD
GeometryNodeScaleElementsMode
@ GEO_NODE_SCALE_ELEMENTS_SINGLE_AXIS
@ GEO_NODE_SCALE_ELEMENTS_UNIFORM
#define NOD_REGISTER_NODE(REGISTER_FUNC)
#define NOD_inline_enum_accessors(member)
@ PROP_TRANSLATION
Definition RNA_types.hh:249
#define UI_ITEM_NONE
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE int32_t atomic_add_and_fetch_int32(int32_t *p, int32_t x)
AttrDomain
ATTR_WARN_UNUSED_RESULT const BMVert * v2
SIMD_FORCE_INLINE btVector3 transform(const btVector3 &point) const
constexpr int64_t size() const
Definition BLI_span.hh:252
Span< T > as_span() const
Definition BLI_array.hh:232
MutableSpan< T > as_mutable_span()
Definition BLI_array.hh:237
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:398
int calc_reduced_ids(MutableSpan< int > result) const
constexpr int64_t size() const
constexpr IndexRange slice(int64_t start, int64_t size) const
constexpr MutableSpan slice(const int64_t start, const int64_t size) const
Definition BLI_span.hh:573
constexpr T * end() const
Definition BLI_span.hh:548
constexpr T * begin() const
Definition BLI_span.hh:544
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr const T * end() const
Definition BLI_span.hh:224
constexpr IndexRange index_range() const
Definition BLI_span.hh:401
constexpr const T * begin() const
Definition BLI_span.hh:220
constexpr bool is_empty() const
Definition BLI_span.hh:260
std::optional< T > get_if_single() const
void set_selection(Field< bool > selection)
Definition FN_field.hh:383
int add(GField field, GVArray *varray_ptr)
Definition field.cc:751
IndexMask get_evaluated_selection_as_mask() const
Definition field.cc:817
const GVArray & get_evaluated(const int field_index) const
Definition FN_field.hh:448
void to_indices(MutableSpan< T > r_indices) const
void foreach_index_optimized(Fn &&fn) const
void foreach_index(Fn &&fn) const
IMETHOD Vector diff(const Vector &a, const Vector &b, double dt)
Definition frames.inl:1166
static ushort indices[]
static float verts[][3]
VecBase< float, 3 > float3
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
void * MEM_calloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:123
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
ccl_device_inline float2 mask(const MaskType mask, const float2 a)
#define T
void copy(const GVArray &src, GMutableSpan dst, int64_t grain_size=4096)
void gather(const GVArray &src, const IndexMask &indices, GMutableSpan dst, int64_t grain_size=4096)
void node_register_type(bNodeType &ntype)
Definition node.cc:2748
IndexMask vert_selection_from_face(OffsetIndices< int > faces, const IndexMask &face_mask, Span< int > corner_verts, int verts_num, IndexMaskMemory &memory)
IndexMask vert_selection_from_edge(Span< int2 > edges, const IndexMask &edge_mask, int verts_num, IndexMaskMemory &memory)
void build_reverse_map(const IndexMask &mask, MutableSpan< T > r_map)
Definition index_mask.cc:34
MatBase< T, NumCol, NumRow > transpose(const MatBase< T, NumRow, NumCol > &mat)
bool is_zero(const T &a)
AxisSigned cross(const AxisSigned a, const AxisSigned b)
MatBase< T, NumCol, NumRow > normalize(const MatBase< T, NumCol, NumRow > &a)
static Array< int > reverse_indices_in_groups(const Span< int > group_indices, const OffsetIndices< int > offsets)
static int face_to_vert_islands(const Mesh &mesh, const IndexMask &face_mask, const IndexMask &vert_mask, MutableSpan< int > face_island_indices, MutableSpan< int > vert_island_indices)
static float3 transform_with_uniform_scale(const float3 &position, const float3 &center, const float scale)
static T gather_mean(const VArray< T > &values, const Span< int > indices)
static int edge_to_vert_islands(const Mesh &mesh, const IndexMask &edge_mask, const IndexMask &vert_mask, MutableSpan< int > edge_island_indices, MutableSpan< int > vert_island_indices)
static Span< int > front_indices_to_same_value(const Span< int > indices, const Span< int > values)
static void node_declare(NodeDeclarationBuilder &b)
static void node_init(bNodeTree *, bNode *node)
static void scale_on_axis(const GroupedSpan< int > elem_islands, const GroupedSpan< int > vert_islands, const VArray< float > &scale_varray, const VArray< float3 > &center_varray, const VArray< float3 > &axis_varray, Mesh &mesh)
static Array< int > create_reverse_offsets(const Span< int > indices, const int items_num)
static GroupedSpan< int > gather_groups(const Span< int > group_indices, const int groups_num, Array< int > &r_offsets, Array< int > &r_indices)
static void node_layout(uiLayout *layout, bContext *, PointerRNA *ptr)
static void gather_edge_islands(const Mesh &mesh, const IndexMask &edge_mask, Array< int > &r_item_offsets, Array< int > &r_item_indices, Array< int > &r_vert_offsets, Array< int > &r_vert_indices)
static void gather_face_islands(const Mesh &mesh, const IndexMask &face_mask, Array< int > &r_item_offsets, Array< int > &r_item_indices, Array< int > &r_vert_offsets, Array< int > &r_vert_indices)
static float4x4 create_single_axis_transform(const float3 &center, const float3 &axis, const float scale)
static void from_indices_large_groups(const Span< int > group_indices, MutableSpan< int > r_counts_to_offset, MutableSpan< int > r_indices)
static void node_geo_exec(GeoNodeExecParams params)
static void scale_uniformly(const GroupedSpan< int > elem_islands, const GroupedSpan< int > vert_islands, const VArray< float > &scale_varray, const VArray< float3 > &center_varray, Mesh &mesh)
PropertyRNA * RNA_def_node_enum(StructRNA *srna, const char *identifier, const char *ui_name, const char *ui_description, const EnumPropertyItem *static_items, const EnumRNAAccessors accessors, std::optional< int > default_value, const EnumPropertyItemFunc item_func, const bool allow_animation)
OffsetIndices< int > accumulate_counts_to_offsets(MutableSpan< int > counts_to_offsets, int start_offset=0)
void build_reverse_offsets(Span< int > indices, MutableSpan< int > offsets)
void parallel_for_each(Range &&range, const Function &function)
Definition BLI_task.hh:56
Value parallel_deterministic_reduce(IndexRange range, int64_t grain_size, const Value &identity, const Function &function, const Reduction &reduction)
Definition BLI_task.hh:194
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
auto accumulated_task_sizes(Fn &&fn)
void devirtualize_varray(const VArray< T > &varray, const Func &func, bool enable=true)
MatBase< float, 4, 4 > float4x4
void parallel_sort(RandomAccessIterator begin, RandomAccessIterator end)
Definition BLI_sort.hh:23
VecBase< int32_t, 2 > int2
VecBase< float, 3 > float3
void geo_node_type_base(blender::bke::bNodeType *ntype, std::string idname, const std::optional< int16_t > legacy_type)
StructRNA * srna
Definition RNA_types.hh:909
int edges_num
int faces_num
int verts_num
int16_t custom1
int16_t custom2
Defines a node type.
Definition BKE_node.hh:226
std::string ui_description
Definition BKE_node.hh:232
void(* initfunc)(bNodeTree *ntree, bNode *node)
Definition BKE_node.hh:277
NodeGeometryExecFunction geometry_node_execute
Definition BKE_node.hh:347
const char * enum_name_legacy
Definition BKE_node.hh:235
void(* draw_buttons)(uiLayout *, bContext *C, PointerRNA *ptr)
Definition BKE_node.hh:247
NodeDeclareFunction declare
Definition BKE_node.hh:355
void prop(PointerRNA *ptr, PropertyRNA *prop, int index, int value, eUI_Item_Flag flag, std::optional< blender::StringRef > name_opt, int icon, std::optional< blender::StringRef > placeholder=std::nullopt)
i
Definition text_draw.cc:230
PointerRNA * ptr
Definition wm_files.cc:4227