Blender V4.3
node_geo_scale_elements.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
#include <algorithm>
#include <numeric>

#include "atomic_ops.h"

#include "BLI_array.hh"
#include "BLI_array_utils.hh"
#include "BLI_atomic_disjoint_set.hh"
#include "BLI_math_matrix.hh"
#include "BLI_sort.hh"
#include "BLI_task.hh"
#include "BLI_virtual_array.hh"

#include "DNA_mesh_types.h"

#include "UI_interface.hh"
#include "UI_resources.hh"

#include "BKE_mesh.hh"

#include "GEO_mesh_selection.hh"

#include "NOD_rna_define.hh"

#include "node_geometry_util.hh"
27
29
31{
32 b.add_input<decl::Geometry>("Geometry").supported_type(GeometryComponent::Type::Mesh);
33 b.add_input<decl::Bool>("Selection").default_value(true).hide_value().field_on_all();
34 b.add_input<decl::Float>("Scale", "Scale").default_value(1.0f).min(0.0f).field_on_all();
35 b.add_input<decl::Vector>("Center")
36 .subtype(PROP_TRANSLATION)
37 .implicit_field_on_all(implicit_field_inputs::position)
39 "Origin of the scaling for each element. If multiple elements are connected, their "
40 "center is averaged");
41 auto &axis = b.add_input<decl::Vector>("Axis")
42 .default_value({1.0f, 0.0f, 0.0f})
43 .field_on_all()
44 .description("Direction in which to scale the element")
45 .make_available(
46 [](bNode &node) { node.custom2 = GEO_NODE_SCALE_ELEMENTS_SINGLE_AXIS; });
47 b.add_output<decl::Geometry>("Geometry").propagate_all();
48
49 const bNode *node = b.node_or_null();
50 if (node != nullptr) {
52 axis.available(mode == GEO_NODE_SCALE_ELEMENTS_SINGLE_AXIS);
53 }
54};
55
56static void node_layout(uiLayout *layout, bContext * /*C*/, PointerRNA *ptr)
57{
58 uiItemR(layout, ptr, "domain", UI_ITEM_NONE, "", ICON_NONE);
59 uiItemR(layout, ptr, "scale_mode", UI_ITEM_NONE, "", ICON_NONE);
60}
61
62static void node_init(bNodeTree * /*tree*/, bNode *node)
63{
64 node->custom1 = int16_t(AttrDomain::Face);
65 node->custom2 = GEO_NODE_SCALE_ELEMENTS_UNIFORM;
66}
67
68static Array<int> create_reverse_offsets(const Span<int> indices, const int items_num)
69{
70 Array<int> offsets(items_num + 1, 0);
72 return offsets;
73}
74
75static Span<int> front_indices_to_same_value(const Span<int> indices, const Span<int> values)
76{
77 const int value = values[indices.first()];
78 const int &first_other = *std::find_if(
79 indices.begin(), indices.end(), [&](const int index) { return values[index] != value; });
80 return indices.take_front(&first_other - indices.begin());
81}
82
83static void from_indices_large_groups(const Span<int> group_indices,
84 MutableSpan<int> r_counts_to_offset,
85 MutableSpan<int> r_indices)
86{
87 constexpr const int segment_size = 1024;
88 constexpr const IndexRange segment(segment_size);
89 const bool last_small_segmet = bool(group_indices.size() % segment_size);
90 const int total_segments = group_indices.size() / segment_size + int(last_small_segmet);
91
92 Array<int> src_indices(group_indices.size());
93 threading::parallel_for_each(IndexRange(total_segments), [&](const int segment_index) {
94 const IndexRange range = segment.shift(segment_size * segment_index);
95 MutableSpan<int> segment_indices = src_indices.as_mutable_span().slice_safe(range);
96 std::iota(segment_indices.begin(), segment_indices.end(), segment_size * segment_index);
97 parallel_sort(segment_indices.begin(), segment_indices.end(), [&](const int a, const int b) {
98 return group_indices[a] < group_indices[b];
99 });
100
101 for (Span<int> indices = segment_indices; !indices.is_empty();) {
102 const int group = group_indices[indices.first()];
103 const int step_size = front_indices_to_same_value(indices, group_indices).size();
104 atomic_add_and_fetch_int32(&r_counts_to_offset[group], step_size);
105 indices = indices.drop_front(step_size);
106 }
107 });
108
110 r_counts_to_offset);
111 Array<int> counts(offset.size(), 0);
112 threading::parallel_for_each(IndexRange(total_segments), [&](const int segment_index) {
113 const IndexRange range = segment.shift(segment_size * segment_index);
114 const Span<int> segment_indices = src_indices.as_span().slice_safe(range);
115 for (Span<int> indices = segment_indices; !indices.is_empty();) {
116 const Span<int> indices_of_current_group = front_indices_to_same_value(indices,
117 group_indices);
118 const int step_size = indices_of_current_group.size();
119 const int group = group_indices[indices.first()];
120 const int start = atomic_add_and_fetch_int32(&counts[group], step_size) - step_size;
121 const IndexRange dst_range = offset[group].slice(start, step_size);
122 array_utils::copy(indices_of_current_group, r_indices.slice(dst_range));
123 indices = indices.drop_front(step_size);
124 }
125 });
126}
127
129 const OffsetIndices<int> offsets)
130{
131 if (group_indices.is_empty()) {
132 return {};
133 }
134 BLI_assert(*std::max_element(group_indices.begin(), group_indices.end()) < offsets.size());
135 BLI_assert(*std::min_element(group_indices.begin(), group_indices.end()) >= 0);
136
137 /* `counts` keeps track of how many elements have been added to each group, and is incremented
138 * atomically by many threads in parallel. `calloc` can be measurably faster than a parallel fill
139 * of zero. Alternatively the offsets could be copied and incremented directly, but the cost of
140 * the copy is slightly higher than the cost of `calloc`. */
141 int *counts = MEM_cnew_array<int>(size_t(offsets.size()), __func__);
142 BLI_SCOPED_DEFER([&]() { MEM_freeN(counts); })
143 Array<int> results(group_indices.size());
144 threading::parallel_for(group_indices.index_range(), 1024, [&](const IndexRange range) {
145 for (const int64_t i : range) {
146 const int group_index = group_indices[i];
147 const int index_in_group = atomic_fetch_and_add_int32(&counts[group_index], 1);
148 results[offsets[group_index][index_in_group]] = int(i);
149 }
150 });
151 return results;
152}
153
154static GroupedSpan<int> gather_groups(const Span<int> group_indices,
155 const int groups_num,
156 Array<int> &r_offsets,
157 Array<int> &r_indices)
158{
159 if (group_indices.size() / groups_num > 1000) {
160 r_offsets.reinitialize(groups_num + 1);
161 r_offsets.as_mutable_span().fill(0);
162 r_indices.reinitialize(group_indices.size());
163 from_indices_large_groups(group_indices, r_offsets, r_indices);
164 }
165 else {
166 r_offsets = create_reverse_offsets(group_indices, groups_num);
167 r_indices = reverse_indices_in_groups(group_indices, r_offsets.as_span());
168 }
169 return {OffsetIndices<int>(r_offsets), r_indices};
170}
171
172template<typename T> static T gather_mean(const VArray<T> &values, const Span<int> indices)
173{
174 BLI_assert(!indices.is_empty());
175 if (const std::optional<T> value = values.get_if_single()) {
176 return *value;
177 }
178
179 using MeanAccumulator = std::pair<T, int>;
180 const auto join_accumulators = [](const MeanAccumulator a,
181 const MeanAccumulator b) -> MeanAccumulator {
182 return {(a.first + b.first) / (a.second + b.second), 1};
183 };
184
185 T value;
186 devirtualize_varray(values, [&](const auto values) {
187 const auto accumulator = threading::parallel_reduce<MeanAccumulator>(
188 indices.index_range(),
189 2048,
190 MeanAccumulator(T(), 0),
191 [&](const IndexRange range, MeanAccumulator other) -> MeanAccumulator {
192 T value(0);
193 for (const int i : indices.slice(range)) {
194 value += values[i];
195 }
196 return join_accumulators({value, int(range.size())}, other);
197 },
198 join_accumulators);
199 value = accumulator.first / accumulator.second;
200 });
201 return value;
202}
203
205 const float3 &center,
206 const float scale)
207{
208 const float3 diff = position - center;
209 const float3 scaled_diff = scale * diff;
210 const float3 new_position = center + scaled_diff;
211 return new_position;
212}
213
214static void scale_uniformly(const GroupedSpan<int> elem_islands,
215 const GroupedSpan<int> vert_islands,
216 const VArray<float> &scale_varray,
217 const VArray<float3> &center_varray,
218 Mesh &mesh)
219{
220 MutableSpan<float3> positions = mesh.vert_positions_for_write();
221 threading::parallel_for(
222 elem_islands.index_range(),
223 512,
224 [&](const IndexRange range) {
225 for (const int island_index : range) {
226 const Span<int> vert_island = vert_islands[island_index];
227 const Span<int> elem_island = elem_islands[island_index];
228
229 const float scale = gather_mean<float>(scale_varray, elem_island);
230 const float3 center = gather_mean<float3>(center_varray, elem_island);
231
232 threading::parallel_for(vert_island.index_range(), 2048, [&](const IndexRange range) {
233 for (const int vert_i : vert_island.slice(range)) {
234 positions[vert_i] = transform_with_uniform_scale(positions[vert_i], center, scale);
235 }
236 });
237 }
238 },
239 threading::accumulated_task_sizes([&](const IndexRange range) {
240 return elem_islands.offsets[range].size() + vert_islands.offsets[range].size();
241 }));
242}
243
245 const float3 &axis,
246 const float scale)
247{
248 /* Scale along x axis. The other axis need to be orthogonal, but their specific value does not
249 * matter. */
250 const float3 x_axis = math::normalize(axis);
251 float3 y_axis = math::cross(x_axis, float3(0.0f, 0.0f, 1.0f));
252 if (math::is_zero(y_axis)) {
253 y_axis = math::cross(x_axis, float3(0.0f, 1.0f, 0.0f));
254 }
255 y_axis = math::normalize(y_axis);
256 const float3 z_axis = math::cross(x_axis, y_axis);
257
258 float4x4 transform = float4x4::identity();
259
260 /* Move scaling center to the origin. */
261 transform.location() -= center;
262
263 /* `base_change` and `base_change_inv` are used to rotate space so that scaling along the
264 * provided axis is the same as scaling along the x axis. */
265 float4x4 base_change = float4x4::identity();
266 base_change.x_axis() = x_axis;
267 base_change.y_axis() = y_axis;
268 base_change.z_axis() = z_axis;
269
270 /* Can invert by transposing, because the matrix is orthonormal. */
271 float4x4 base_change_inv = math::transpose(base_change);
272
273 float4x4 scale_transform = float4x4::identity();
274 scale_transform[0][0] = scale;
275
276 transform = base_change * scale_transform * base_change_inv * transform;
277
278 /* Move scaling center back to where it was. */
279 transform.location() += center;
280
281 return transform;
282}
283
284static void scale_on_axis(const GroupedSpan<int> elem_islands,
285 const GroupedSpan<int> vert_islands,
286 const VArray<float> &scale_varray,
287 const VArray<float3> &center_varray,
288 const VArray<float3> &axis_varray,
289 Mesh &mesh)
290{
291 MutableSpan<float3> positions = mesh.vert_positions_for_write();
292 threading::parallel_for(
293 elem_islands.index_range(),
294 512,
295 [&](const IndexRange range) {
296 for (const int island_index : range) {
297 const Span<int> vert_island = vert_islands[island_index];
298 const Span<int> elem_island = elem_islands[island_index];
299
300 const float scale = gather_mean<float>(scale_varray, elem_island);
301 const float3 center = gather_mean<float3>(center_varray, elem_island);
302 const float3 axis = gather_mean<float3>(axis_varray, elem_island);
303 const float3 fixed_axis = math::is_zero(axis) ? float3(1.0f, 0.0f, 0.0f) : axis;
304
305 const float4x4 transform = create_single_axis_transform(center, fixed_axis, scale);
306 threading::parallel_for(vert_island.index_range(), 2048, [&](const IndexRange range) {
307 for (const int vert_i : vert_island.slice(range)) {
308 positions[vert_i] = math::transform_point(transform, positions[vert_i]);
309 }
310 });
311 }
312 },
313 threading::accumulated_task_sizes([&](const IndexRange range) {
314 return vert_islands.offsets[range].size() + elem_islands.offsets[range].size();
315 }));
316}
317
318static int face_to_vert_islands(const Mesh &mesh,
319 const IndexMask &face_mask,
320 const IndexMask &vert_mask,
321 MutableSpan<int> face_island_indices,
322 MutableSpan<int> vert_island_indices)
323{
324 Array<int> verts_pos(vert_mask.min_array_size());
325 index_mask::build_reverse_map<int>(vert_mask, verts_pos);
326
327 AtomicDisjointSet disjoint_set(vert_mask.size());
328 const GroupedSpan<int> face_verts(mesh.faces(), mesh.corner_verts());
329
330 face_mask.foreach_index_optimized<int>(GrainSize(4096), [&](const int face_i) {
331 const Span<int> verts = face_verts[face_i];
332 const int v1 = verts_pos[verts.first()];
333 for (const int vert_i : verts.drop_front(1)) {
334 const int v2 = verts_pos[vert_i];
335 disjoint_set.join(v1, v2);
336 }
337 });
338
339 disjoint_set.calc_reduced_ids(vert_island_indices);
340
341 face_mask.foreach_index(GrainSize(4096), [&](const int face_i, const int face_pos) {
342 const int face_vert_i = face_verts[face_i].first();
343 const int vert_pos = verts_pos[face_vert_i];
344 const int vert_island = vert_island_indices[vert_pos];
345 face_island_indices[face_pos] = vert_island;
346 });
347
348 return disjoint_set.count_sets();
349}
350
351static void gather_face_islands(const Mesh &mesh,
352 const IndexMask &face_mask,
353 Array<int> &r_item_offsets,
354 Array<int> &r_item_indices,
355 Array<int> &r_vert_offsets,
356 Array<int> &r_vert_indices)
357{
358 IndexMaskMemory memory;
359 const IndexMask vert_mask = geometry::vert_selection_from_face(
360 mesh.face_offsets(), face_mask, mesh.corner_verts(), mesh.verts_num, memory);
361
362 Array<int> face_island_indices(face_mask.size());
363 Array<int> vert_island_indices(vert_mask.size());
364 const int total_islands = face_to_vert_islands(
365 mesh, face_mask, vert_mask, face_island_indices, vert_island_indices);
366
367 /* Group gathered vertices and faces. */
368 gather_groups(vert_island_indices, total_islands, r_vert_offsets, r_vert_indices);
369 gather_groups(face_island_indices, total_islands, r_item_offsets, r_item_indices);
370
371 /* If result indices is for gathered array, map than back into global indices. */
372 if (face_mask.size() != mesh.faces_num) {
373 Array<int> face_mask_map(face_mask.size());
374 face_mask.to_indices<int>(face_mask_map);
375 array_utils::gather<int>(
376 face_mask_map.as_span(), r_item_indices.as_span(), r_item_indices.as_mutable_span());
377 }
378 if (vert_mask.size() != mesh.verts_num) {
379 Array<int> vert_mask_map(vert_mask.size());
380 vert_mask.to_indices<int>(vert_mask_map);
381 array_utils::gather<int>(
382 vert_mask_map.as_span(), r_vert_indices.as_span(), r_vert_indices.as_mutable_span());
383 }
384}
385
386static int edge_to_vert_islands(const Mesh &mesh,
387 const IndexMask &edge_mask,
388 const IndexMask &vert_mask,
389 MutableSpan<int> edge_island_indices,
390 MutableSpan<int> vert_island_indices)
391{
392 Array<int> verts_pos(vert_mask.min_array_size());
393 index_mask::build_reverse_map<int>(vert_mask, verts_pos);
394
395 AtomicDisjointSet disjoint_set(vert_mask.size());
396 const Span<int2> edges = mesh.edges();
397
398 edge_mask.foreach_index_optimized<int>(GrainSize(4096), [&](const int edge_i) {
399 const int2 edge = edges[edge_i];
400 const int v1 = verts_pos[edge[0]];
401 const int v2 = verts_pos[edge[1]];
402 disjoint_set.join(v1, v2);
403 });
404
405 disjoint_set.calc_reduced_ids(vert_island_indices);
406
407 edge_mask.foreach_index(GrainSize(4096), [&](const int edge_i, const int edge_pos) {
408 const int2 edge = edges[edge_i];
409 const int edge_vert_i = edge[0];
410 const int vert_pos = verts_pos[edge_vert_i];
411 const int vert_island = vert_island_indices[vert_pos];
412 edge_island_indices[edge_pos] = vert_island;
413 });
414
415 return disjoint_set.count_sets();
416}
417
418static void gather_edge_islands(const Mesh &mesh,
419 const IndexMask &edge_mask,
420 Array<int> &r_item_offsets,
421 Array<int> &r_item_indices,
422 Array<int> &r_vert_offsets,
423 Array<int> &r_vert_indices)
424{
425 IndexMaskMemory memory;
426 const IndexMask vert_mask = geometry::vert_selection_from_edge(
427 mesh.edges(), edge_mask, mesh.verts_num, memory);
428
429 Array<int> edge_island_indices(edge_mask.size());
430 Array<int> vert_island_indices(vert_mask.size());
431 const int total_islands = edge_to_vert_islands(
432 mesh, edge_mask, vert_mask, edge_island_indices, vert_island_indices);
433
434 /* Group gathered vertices and edges. */
435 gather_groups(vert_island_indices, total_islands, r_vert_offsets, r_vert_indices);
436 gather_groups(edge_island_indices, total_islands, r_item_offsets, r_item_indices);
437
438 /* If result indices is for gathered array, map than back into global indices. */
439 if (edge_mask.size() != mesh.edges_num) {
440 Array<int> edge_mask_map(edge_mask.size());
441 edge_mask.to_indices<int>(edge_mask_map);
442 array_utils::gather<int>(
443 edge_mask_map.as_span(), r_item_indices.as_span(), r_item_indices.as_mutable_span());
444 }
445 if (vert_mask.size() != mesh.verts_num) {
446 Array<int> vert_mask_map(vert_mask.size());
447 vert_mask.to_indices<int>(vert_mask_map);
448 array_utils::gather<int>(
449 vert_mask_map.as_span(), r_vert_indices.as_span(), r_vert_indices.as_mutable_span());
450 }
451}
452
454{
455 const bNode &node = params.node();
456 const AttrDomain domain = AttrDomain(node.custom1);
457 const GeometryNodeScaleElementsMode scale_mode = GeometryNodeScaleElementsMode(node.custom2);
458
459 GeometrySet geometry = params.extract_input<GeometrySet>("Geometry");
460
461 const Field<bool> selection_field = params.extract_input<Field<bool>>("Selection");
462 const Field<float> scale_field = params.extract_input<Field<float>>("Scale");
463 const Field<float3> center_field = params.extract_input<Field<float3>>("Center");
464
465 geometry.modify_geometry_sets([&](GeometrySet &geometry) {
466 if (Mesh *mesh = geometry.get_mesh_for_write()) {
467 const bke::MeshFieldContext context{*mesh, domain};
468 FieldEvaluator evaluator{context, mesh->attributes().domain_size(domain)};
469 evaluator.set_selection(selection_field);
470 evaluator.add(scale_field);
471 evaluator.add(center_field);
472 if (scale_mode == GEO_NODE_SCALE_ELEMENTS_SINGLE_AXIS) {
473 evaluator.add(params.get_input<Field<float3>>("Axis"));
474 }
475 evaluator.evaluate();
476 const IndexMask &mask = evaluator.get_evaluated_selection_as_mask();
477 if (mask.is_empty()) {
478 return;
479 }
480
481 Array<int> item_offsets;
482 Array<int> item_indices;
483
484 Array<int> vert_offsets;
485 Array<int> vert_indices;
486
487 switch (domain) {
488 case AttrDomain::Face:
489 gather_face_islands(*mesh, mask, item_offsets, item_indices, vert_offsets, vert_indices);
490 break;
491 case AttrDomain::Edge:
492 gather_edge_islands(*mesh, mask, item_offsets, item_indices, vert_offsets, vert_indices);
493 break;
494 default:
496 }
497
498 const GroupedSpan<int> item_islands(item_offsets.as_span(), item_indices);
499 const GroupedSpan<int> vert_islands(vert_offsets.as_span(), vert_indices);
500
501 const VArray<float> scale_varray = evaluator.get_evaluated<float>(0);
502 const VArray<float3> center_varray = evaluator.get_evaluated<float3>(1);
503
504 switch (scale_mode) {
506 scale_uniformly(item_islands, vert_islands, scale_varray, center_varray, *mesh);
507 break;
509 const VArray<float3> axis_varray = evaluator.get_evaluated<float3>(2);
511 item_islands, vert_islands, scale_varray, center_varray, axis_varray, *mesh);
512 break;
513 }
514 }
515 mesh->tag_positions_changed();
516 }
517 });
518
519 params.set_output("Geometry", std::move(geometry));
520}
521
522static void node_rna(StructRNA *srna)
523{
524 static const EnumPropertyItem domain_items[] = {
525 {int(AttrDomain::Face),
526 "FACE",
527 ICON_NONE,
528 "Face",
529 "Scale individual faces or neighboring face islands"},
530 {int(AttrDomain::Edge),
531 "EDGE",
532 ICON_NONE,
533 "Edge",
534 "Scale individual edges or neighboring edge islands"},
535 {0, nullptr, 0, nullptr, nullptr},
536 };
537
538 static const EnumPropertyItem scale_mode_items[] = {
540 "UNIFORM",
541 ICON_NONE,
542 "Uniform",
543 "Scale elements by the same factor in every direction"},
545 "SINGLE_AXIS",
546 ICON_NONE,
547 "Single Axis",
548 "Scale elements in a single direction"},
549 {0, nullptr, 0, nullptr, nullptr},
550 };
551
553 "domain",
554 "Domain",
555 "Element type to transform",
556 domain_items,
558 int(AttrDomain::Face));
559
561 srna, "scale_mode", "Scale Mode", "", scale_mode_items, NOD_inline_enum_accessors(custom2));
562}
563
564static void node_register()
565{
566 static blender::bke::bNodeType ntype;
567
568 geo_node_type_base(&ntype, GEO_NODE_SCALE_ELEMENTS, "Scale Elements", NODE_CLASS_GEOMETRY);
570 ntype.declare = node_declare;
572 ntype.initfunc = node_init;
574
575 node_rna(ntype.rna_ext.srna);
576}
577NOD_REGISTER_NODE(node_register)
578
579} // namespace blender::nodes::node_geo_scale_elements_cc
#define NODE_CLASS_GEOMETRY
Definition BKE_node.hh:418
#define BLI_assert_unreachable()
Definition BLI_assert.h:97
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_SCOPED_DEFER(function_to_defer)
GeometryNodeScaleElementsMode
@ GEO_NODE_SCALE_ELEMENTS_SINGLE_AXIS
@ GEO_NODE_SCALE_ELEMENTS_UNIFORM
#define NOD_REGISTER_NODE(REGISTER_FUNC)
#define NOD_inline_enum_accessors(member)
@ PROP_TRANSLATION
Definition RNA_types.hh:164
#define UI_ITEM_NONE
void uiItemR(uiLayout *layout, PointerRNA *ptr, const char *propname, eUI_Item_Flag flag, const char *name, int icon)
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE int32_t atomic_add_and_fetch_int32(int32_t *p, int32_t x)
ATTR_WARN_UNUSED_RESULT const BMVert * v2
SIMD_FORCE_INLINE btVector3 transform(const btVector3 &point) const
Span< T > as_span() const
Definition BLI_array.hh:232
MutableSpan< T > as_mutable_span()
Definition BLI_array.hh:237
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:388
int calc_reduced_ids(MutableSpan< int > result) const
constexpr IndexRange shift(int64_t n) const
constexpr IndexRange slice(int64_t start, int64_t size) const
constexpr MutableSpan slice(const int64_t start, const int64_t size) const
Definition BLI_span.hh:574
constexpr bool is_empty() const
Definition BLI_span.hh:510
constexpr T * end() const
Definition BLI_span.hh:549
constexpr T * begin() const
Definition BLI_span.hh:545
constexpr int64_t size() const
Definition BLI_span.hh:253
constexpr const T * end() const
Definition BLI_span.hh:225
constexpr IndexRange index_range() const
Definition BLI_span.hh:402
constexpr const T * begin() const
Definition BLI_span.hh:221
constexpr bool is_empty() const
Definition BLI_span.hh:261
void set_selection(Field< bool > selection)
Definition FN_field.hh:385
void to_indices(MutableSpan< T > r_indices) const
void foreach_index_optimized(Fn &&fn) const
void foreach_index(Fn &&fn) const
local_group_size(16, 16) .push_constant(Type b
draw_view push_constant(Type::INT, "radiance_src") .push_constant(Type capture_info_buf storage_buf(1, Qualifier::READ, "ObjectBounds", "bounds_buf[]") .push_constant(Type draw_view int
IMETHOD Vector diff(const Vector &a, const Vector &b, double dt)
Definition frames.inl:1166
static float verts[][3]
IndexRange range
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
#define T
void copy(const GVArray &src, GMutableSpan dst, int64_t grain_size=4096)
void node_register_type(bNodeType *ntype)
Definition node.cc:1708
MatBase< T, NumCol, NumRow > transpose(const MatBase< T, NumRow, NumCol > &mat)
bool is_zero(const T &a)
AxisSigned cross(const AxisSigned a, const AxisSigned b)
MatBase< T, NumCol, NumRow > normalize(const MatBase< T, NumCol, NumRow > &a)
void position(const bNode &, void *r_value)
static Array< int > reverse_indices_in_groups(const Span< int > group_indices, const OffsetIndices< int > offsets)
static int face_to_vert_islands(const Mesh &mesh, const IndexMask &face_mask, const IndexMask &vert_mask, MutableSpan< int > face_island_indices, MutableSpan< int > vert_island_indices)
static float3 transform_with_uniform_scale(const float3 &position, const float3 &center, const float scale)
static T gather_mean(const VArray< T > &values, const Span< int > indices)
static int edge_to_vert_islands(const Mesh &mesh, const IndexMask &edge_mask, const IndexMask &vert_mask, MutableSpan< int > edge_island_indices, MutableSpan< int > vert_island_indices)
static Span< int > front_indices_to_same_value(const Span< int > indices, const Span< int > values)
static void node_declare(NodeDeclarationBuilder &b)
static void node_init(bNodeTree *, bNode *node)
static void scale_on_axis(const GroupedSpan< int > elem_islands, const GroupedSpan< int > vert_islands, const VArray< float > &scale_varray, const VArray< float3 > &center_varray, const VArray< float3 > &axis_varray, Mesh &mesh)
static Array< int > create_reverse_offsets(const Span< int > indices, const int items_num)
static GroupedSpan< int > gather_groups(const Span< int > group_indices, const int groups_num, Array< int > &r_offsets, Array< int > &r_indices)
static void node_layout(uiLayout *layout, bContext *, PointerRNA *ptr)
static void gather_edge_islands(const Mesh &mesh, const IndexMask &edge_mask, Array< int > &r_item_offsets, Array< int > &r_item_indices, Array< int > &r_vert_offsets, Array< int > &r_vert_indices)
static void gather_face_islands(const Mesh &mesh, const IndexMask &face_mask, Array< int > &r_item_offsets, Array< int > &r_item_indices, Array< int > &r_vert_offsets, Array< int > &r_vert_indices)
static float4x4 create_single_axis_transform(const float3 &center, const float3 &axis, const float scale)
static void from_indices_large_groups(const Span< int > group_indices, MutableSpan< int > r_counts_to_offset, MutableSpan< int > r_indices)
static void node_geo_exec(GeoNodeExecParams params)
static void scale_uniformly(const GroupedSpan< int > elem_islands, const GroupedSpan< int > vert_islands, const VArray< float > &scale_varray, const VArray< float3 > &center_varray, Mesh &mesh)
PropertyRNA * RNA_def_node_enum(StructRNA *srna, const char *identifier, const char *ui_name, const char *ui_description, const EnumPropertyItem *static_items, const EnumRNAAccessors accessors, std::optional< int > default_value, const EnumPropertyItemFunc item_func, const bool allow_animation)
OffsetIndices< int > accumulate_counts_to_offsets(MutableSpan< int > counts_to_offsets, int start_offset=0)
void build_reverse_offsets(Span< int > indices, MutableSpan< int > offsets)
void parallel_for_each(Range &&range, const Function &function)
Definition BLI_task.hh:58
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:95
void devirtualize_varray(const VArray< T > &varray, const Func &func, bool enable=true)
void parallel_sort(RandomAccessIterator begin, RandomAccessIterator end)
Definition BLI_sort.hh:23
void geo_node_type_base(blender::bke::bNodeType *ntype, int type, const char *name, short nclass)
signed short int16_t
Definition stdint.h:76
StructRNA * srna
Definition RNA_types.hh:780
Defines a node type.
Definition BKE_node.hh:218
void(* initfunc)(bNodeTree *ntree, bNode *node)
Definition BKE_node.hh:267
NodeGeometryExecFunction geometry_node_execute
Definition BKE_node.hh:339
void(* draw_buttons)(uiLayout *, bContext *C, PointerRNA *ptr)
Definition BKE_node.hh:238
NodeDeclareFunction declare
Definition BKE_node.hh:347
PointerRNA * ptr
Definition wm_files.cc:4126