Blender V5.0
node_composite_dilate.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2006 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
8
9#include <limits>
10
11#include "BLI_assert.h"
12#include "BLI_index_range.hh"
13#include "BLI_math_base.hh"
15#include "BLI_task.hh"
16
17#include "RNA_enum_types.hh"
18#include "RNA_types.hh"
19
20#include "GPU_shader.hh"
21
24#include "COM_algorithm_smaa.hh"
25#include "COM_node_operation.hh"
26#include "COM_utilities.hh"
27
29
31
/* Menu items for the node's "Type" input: selects which morphological operator the node
 * applies (step, distance-threshold, distance, or distance-feather). The trailing
 * all-null entry is the conventional RNA enum terminator. */
32static const EnumPropertyItem type_items[] = {
 33 {CMP_NODE_DILATE_ERODE_STEP, "STEP", 0, N_("Steps"), ""},
 34 {CMP_NODE_DILATE_ERODE_DISTANCE_THRESHOLD, "THRESHOLD", 0, N_("Threshold"), ""},
 35 {CMP_NODE_DILATE_ERODE_DISTANCE, "DISTANCE", 0, N_("Distance"), ""},
 36 {CMP_NODE_DILATE_ERODE_DISTANCE_FEATHER, "FEATHER", 0, N_("Feather"), ""},
 37 {0, nullptr, 0, nullptr, nullptr},
 38};
39
/* Declares the node's sockets: a dynamic float "Mask" input/output pair, an integer "Size",
 * a "Type" menu built from type_items, plus "Falloff Size" and "Falloff" inputs whose
 * visibility depends on the selected type.
 * NOTE(review): the signature line (original line 40) and a few builder-chain lines
 * (original lines 50, 55, 60-61 — presumably .description()/.static_items() fragments)
 * were lost in extraction; confirm against the upstream source. */
41{
 42 b.add_input<decl::Float>("Mask").default_value(0.0f).min(0.0f).max(1.0f).structure_type(
 43 StructureType::Dynamic);
 44 b.add_input<decl::Int>("Size").default_value(0).description(
 45 "The size of dilation/erosion in pixels. Positive values dilates and negative values "
 46 "erodes");
 47 b.add_input<decl::Menu>("Type")
 48 .default_value(CMP_NODE_DILATE_ERODE_STEP)
 49 .static_items(type_items)
 51 b.add_input<decl::Float>("Falloff Size")
 52 .default_value(0.0f)
 53 .min(0.0f)
 /* Only shown for the distance-threshold type. */
 54 .usage_by_menu("Type", CMP_NODE_DILATE_ERODE_DISTANCE_THRESHOLD)
 56 "The size of the falloff from the edges in pixels. If less than two pixels, the edges "
 57 "will be anti-aliased");
 58 b.add_input<decl::Menu>("Falloff")
 59 .default_value(PROP_SMOOTH)
 /* Only shown for the distance-feather type. */
 62 .usage_by_menu("Type", CMP_NODE_DILATE_ERODE_DISTANCE_FEATHER)
 63 .translation_context(BLT_I18NCONTEXT_ID_CURVE_LEGACY);
 64
 65 b.add_output<decl::Float>("Mask").structure_type(StructureType::Dynamic);
 66}
67
/* Node init callback: allocates the node's storage struct even though its contents are no
 * longer used, so older files / code paths that expect storage keep working.
 * NOTE(review): the line declaring `data` (original line 71, presumably a MEM_callocN of
 * the NodeDilateErode storage) was lost in extraction — confirm against upstream. */
68static void node_composit_init_dilateerode(bNodeTree * /*ntree*/, bNode *node)
 69{
 70 /* Unused but kept for forward compatibility. */
 72 node->storage = data;
 73}
74
75using namespace blender::compositor;
76
78 public:
80
 /* Entry point of the operation: short-circuits no-op configurations, then dispatches to
  * one of the morphological operator implementations based on the "Type" menu input. */
 81 void execute() override
 82 {
 83 const Result &input = this->get_input("Mask");
 84 Result &output = this->get_result("Mask");
 85
 /* Identity configurations just pass the input through by sharing its data. */
 86 if (this->is_identity()) {
 87 output.share_data(input);
 88 return;
 89 }
 90
 /* NOTE(review): the case labels and the calls they guard (original lines 92-103) were
  * lost in extraction; each `return` presumably follows a call to the corresponding
  * execute_* method for a CMP_NODE_DILATE_ERODE_* value — confirm against upstream. */
 91 switch (this->get_type()) {
 94 return;
 97 return;
 100 return;
 103 return;
 104 }
 105
 /* Fallback: pass the input through unchanged. */
 106 output.share_data(input);
 107 }
108
 109 /* ----------------------------
 110 * Step Morphological Operator.
 111 * ---------------------------- */
 112
 /* Runs the separable step operator: a horizontal pass into a transposed intermediate
  * result, then a vertical pass that consumes and transposes it back, after which the
  * intermediate is released.
  * NOTE(review): the signature line (original line 113, presumably `void execute_step()`)
  * was lost in extraction. */
 114 {
 115 Result horizontal_pass_result = execute_step_horizontal_pass();
 116 execute_step_vertical_pass(horizontal_pass_result);
 117 horizontal_pass_result.release();
 118 }
119
 /* Dispatches the horizontal step pass to the GPU or CPU implementation depending on the
  * evaluation context, returning the transposed intermediate result.
  * NOTE(review): the signature (original line 120) and the two dispatch-call lines
  * (original lines 123 and 125) were lost in extraction — confirm against upstream. */
 121 {
 122 if (this->context().use_gpu()) {
 124 }
 126 }
127
 /* GPU horizontal step pass: binds the step shader with the structuring element radius,
  * reads the input mask, and writes into a transposed intermediate texture that it
  * returns still allocated (caller releases it).
  * NOTE(review): the signature (original line 128), the shader-fetch line (original
  * line 130, presumably get_shader of the name from get_morphological_step_shader_name),
  * the dispatch line (153) and shader unbind (155) were lost in extraction. */
 129 {
 131 GPU_shader_bind(shader);
 132
 133 GPU_shader_uniform_1i(shader, "radius", this->get_structuring_element_size() / 2);
 134
 135 const Result &input_mask = get_input("Mask");
 136 input_mask.bind_as_texture(shader, "input_tx");
 137
 138 /* We allocate an output image of a transposed size, that is, with a height equivalent to the
 139 * width of the input and vice versa. This is done as a performance optimization. The shader
 140 * will process the image horizontally and write it to the intermediate output transposed. Then
 141 * the vertical pass will execute the same horizontal pass shader, but since its input is
 142 * transposed, it will effectively do a vertical pass and write to the output transposed,
 143 * effectively undoing the transposition in the horizontal pass. This is done to improve
 144 * spatial cache locality in the shader and to avoid having two separate shaders for each of
 145 * the passes. */
 146 const Domain domain = compute_domain();
 147 const int2 transposed_domain = int2(domain.size.y, domain.size.x);
 148
 149 Result horizontal_pass_result = context().create_result(ResultType::Float);
 150 horizontal_pass_result.allocate_texture(transposed_domain);
 151 horizontal_pass_result.bind_as_image(shader, "output_img");
 152
 154
 156 input_mask.unbind_as_texture();
 157 horizontal_pass_result.unbind_as_image();
 158
 159 return horizontal_pass_result;
 160 }
161
 /* CPU horizontal step pass: allocates a transposed intermediate result and runs the
  * templated van Herk/Gil-Werman pass on the input mask, dilating or eroding according to
  * the sign of the size input.
  * NOTE(review): the signature line (original line 162, presumably
  * `Result execute_step_horizontal_pass_cpu()`) was lost in extraction. */
 163 {
 164 const Result &input = get_input("Mask");
 165
 166 /* We allocate an output image of a transposed size, that is, with a height equivalent to the
 167 * width of the input and vice versa. This is done as a performance optimization. The shader
 168 * will process the image horizontally and write it to the intermediate output transposed. Then
 169 * the vertical pass will execute the same horizontal pass shader, but since its input is
 170 * transposed, it will effectively do a vertical pass and write to the output transposed,
 171 * effectively undoing the transposition in the horizontal pass. This is done to improve
 172 * spatial cache locality in the shader and to avoid having two separate shaders for each of
 173 * the passes. */
 174 const Domain domain = compute_domain();
 175 const int2 transposed_domain = int2(domain.size.y, domain.size.x);
 176
 177 Result horizontal_pass_result = context().create_result(ResultType::Float);
 178 horizontal_pass_result.allocate_texture(transposed_domain);
 179
 180 if (this->is_dilation()) {
 181 this->execute_step_pass_cpu<true>(input, horizontal_pass_result);
 182 }
 183 else {
 184 this->execute_step_pass_cpu<false>(input, horizontal_pass_result);
 185 }
 186
 187 return horizontal_pass_result;
 188 }
189
190 void execute_step_vertical_pass(Result &horizontal_pass_result)
191 {
192 if (this->context().use_gpu()) {
193 this->execute_step_vertical_pass_gpu(horizontal_pass_result);
194 }
195 else {
196 this->execute_step_vertical_pass_cpu(horizontal_pass_result);
197 }
198 }
199
 /* GPU vertical step pass: reuses the horizontal-pass shader on the transposed
  * intermediate so the pass is effectively vertical, writing the final output mask.
  * NOTE(review): the shader-fetch line (original line 202) and shader unbind (original
  * line 218) were lost in extraction — confirm against upstream. */
 200 void execute_step_vertical_pass_gpu(Result &horizontal_pass_result)
 201 {
 203 GPU_shader_bind(shader);
 204
 205 GPU_shader_uniform_1i(shader, "radius", this->get_structuring_element_size() / 2);
 206
 207 horizontal_pass_result.bind_as_texture(shader, "input_tx");
 208
 209 const Domain domain = compute_domain();
 210 Result &output_mask = get_result("Mask");
 211 output_mask.allocate_texture(domain);
 212 output_mask.bind_as_image(shader, "output_img");
 213
 214 /* Notice that the domain is transposed, see the note on the horizontal pass method for more
 215 * information on the reasoning behind this. */
 216 compute_dispatch_threads_at_least(shader, int2(domain.size.y, domain.size.x));
 217
 219 horizontal_pass_result.unbind_as_texture();
 220 output_mask.unbind_as_image();
 221 }
222
 /* Returns the name of the step-operator shader matching the current operation: the
  * dilate variant for positive sizes, the erode variant otherwise.
  * NOTE(review): the signature line (original line 223, presumably returning the shader
  * name as a string/const char *) was lost in extraction. */
 224 {
 225 if (this->is_dilation()) {
 226 return "compositor_morphological_step_dilate";
 227 }
 228 return "compositor_morphological_step_erode";
 229 }
230
231 void execute_step_vertical_pass_cpu(Result &horizontal_pass_result)
232 {
233 const Domain domain = compute_domain();
234 Result &output_mask = get_result("Mask");
235 output_mask.allocate_texture(domain);
236
237 if (this->is_dilation()) {
238 this->execute_step_pass_cpu<true>(horizontal_pass_result, output_mask);
239 }
240 else {
241 this->execute_step_pass_cpu<false>(horizontal_pass_result, output_mask);
242 }
243 }
244
245 /* Apply a van Herk/Gil-Werman algorithm on the input based on:
246 *
247 * Domanski, Luke, Pascal Vallotton, and Dadong Wang. "Parallel van Herk/Gil-Werman image
248 * morphology on GPUs using CUDA." GTC 2009 Conference posters. 2009.
249 *
250 * The output is written transposed for more efficient execution, see the horizontal pass method
251 * for more information. The template argument IsDilate decides if dilation or erosion will be
252 * performed. */
253 template<bool IsDilate> void execute_step_pass_cpu(const Result &input, Result &output)
254 {
255 const float limit = IsDilate ? std::numeric_limits<float>::lowest() :
256 std::numeric_limits<float>::max();
257 const auto morphology_operator = [](const float a, const float b) {
258 if constexpr (IsDilate) {
259 return math::max(a, b);
260 }
261 else {
262 return math::min(a, b);
263 }
264 };
265
266 /* Notice that the domain is transposed, see the note on the horizontal pass method for more
267 * information on the reasoning behind this. */
268 const int2 image_size = int2(output.domain().size.y, output.domain().size.x);
269
270 /* We process rows in tiles whose size is the same as the structuring element size. So we
271 * compute the number of tiles using ceiling division, noting that the last tile might not be
272 * complete. */
273 const int size = this->get_structuring_element_size();
274 const int tiles_count = int(math::ceil(float(image_size.x) / size));
275
276 /* Process along rows in parallel. */
277 threading::parallel_for(IndexRange(image_size.y), 1, [&](const IndexRange sub_y_range) {
278 Array<float> prefix_table(size);
279 Array<float> suffix_table(size);
280 for (const int64_t y : sub_y_range) {
281 for (const int64_t tile_index : IndexRange(tiles_count)) {
282 const int64_t tile_start = tile_index * size;
283 /* Compute the x texel location of the pixel at the center of the tile. Noting that the
284 * size of the structuring element is guaranteed to be odd. */
285 const int64_t tile_center = tile_start + size / 2;
286
287 float prefix_value = limit;
288 float suffix_value = limit;
289 /* Starting from the pixel at the center of the tile, recursively compute the prefix
290 * table to the right and the suffix table to the left by applying the morphology
291 * operator. */
292 for (const int64_t i : IndexRange(size)) {
293 const float right_value = input.load_pixel_fallback(int2(tile_center + i, y), limit);
294 prefix_value = morphology_operator(prefix_value, right_value);
295 prefix_table[i] = prefix_value;
296
297 /* Note that we access pixels increasingly to the left, so invert the suffix table when
298 * writing to it. */
299 const float left_value = input.load_pixel_fallback(int2(tile_center - i, y), limit);
300 suffix_value = morphology_operator(suffix_value, left_value);
301 suffix_table[size - 1 - i] = suffix_value;
302 }
303
304 const IndexRange tile_range = IndexRange(tile_start, size);
305 const IndexRange safe_tile_range = tile_range.intersect(IndexRange(image_size.x));
306 /* For each pixel in the tile, write the result of applying the morphology operator on
307 * the prefix and suffix values. */
308 for (const int64_t x : safe_tile_range) {
309 /* Compute the local table index, since the prefix and suffix tables are local to each
310 * tile. */
311 const int64_t table_index = x - tile_start;
312 const float prefix_value = prefix_table[table_index];
313 const float suffix_value = suffix_table[table_index];
314
315 const float value = morphology_operator(prefix_value, suffix_value);
316
317 /* Write the value using the transposed texel. See the horizontal pass method for more
318 * information on the rational behind this. */
319 output.store_pixel(int2(y, x), value);
320 }
321 }
322 }
323 });
324 }
325
 326 /* --------------------------------
 327 * Distance Morphological Operator.
 328 * -------------------------------- */
 329
 /* Delegates to the shared morphological_distance algorithm, passing the signed size
  * directly (its sign selects dilation vs. erosion).
  * NOTE(review): the signature line (original line 330, presumably
  * `void execute_distance()`) was lost in extraction. */
 331 {
 332 morphological_distance(context(), get_input("Mask"), get_result("Mask"), this->get_size());
 333 }
334
 335 /* ------------------------------------------
 336 * Distance Threshold Morphological Operator.
 337 * ------------------------------------------ */
 338
 /* Computes the distance-threshold operator into a temporary float result (GPU or CPU),
  * then either anti-aliases it with SMAA into the output (small falloff) or moves it into
  * the output directly.
  * NOTE(review): the signature line (original line 339, presumably
  * `void execute_distance_threshold()`) was lost in extraction. */
 340 {
 341 Result output_mask = context().create_result(ResultType::Float);
 342
 343 if (this->context().use_gpu()) {
 344 this->execute_distance_threshold_gpu(output_mask);
 345 }
 346 else {
 347 this->execute_distance_threshold_cpu(output_mask);
 348 }
 349
 350 /* For configurations where there is little user-specified falloff size, anti-alias the result
 351 * for smoother edges. */
 352 Result &output = this->get_result("Mask");
 353 if (this->get_falloff_size() < 2.0f) {
 354 smaa(this->context(), output_mask, output);
 355 output_mask.release();
 356 }
 357 else {
 358 output.steal_data(output_mask);
 359 }
 360 }
361
 /* GPU distance-threshold pass: binds the dedicated shader with the inset (clamped away
  * from zero to avoid division by zero), the search radius, and the signed distance, then
  * writes into the caller-provided output result.
  * NOTE(review): the signature (original line 362, presumably
  * `void execute_distance_threshold_gpu(Result &output)`), a radius uniform line (368),
  * the dispatch line (378), and the shader unbind (380) were lost in extraction. */
 363 {
 364 gpu::Shader *shader = context().get_shader("compositor_morphological_distance_threshold");
 365 GPU_shader_bind(shader);
 366
 367 GPU_shader_uniform_1f(shader, "inset", math::max(this->get_falloff_size(), 10e-6f));
 369 GPU_shader_uniform_1i(shader, "distance", this->get_size());
 370
 371 const Result &input_mask = get_input("Mask");
 372 input_mask.bind_as_texture(shader, "input_tx");
 373
 374 const Domain domain = compute_domain();
 375 output.allocate_texture(domain);
 376 output.bind_as_image(shader, "output_img");
 377
 379
 381 output.unbind_as_image();
 382 input_mask.unbind_as_texture();
 383 }
384
 /* CPU distance-threshold pass; see the large comment below for the algorithm.
  * NOTE(review): the signature line (original line 385, presumably
  * `void execute_distance_threshold_cpu(Result &output)`) was lost in extraction. */
 386 {
 387 const Result &input = get_input("Mask");
 388
 389 const Domain domain = compute_domain();
 390 output.allocate_texture(domain);
 391
 392 const int2 image_size = input.domain().size;
 393
 /* Clamp the inset away from zero since it is used as a divisor below. */
 394 const float inset = math::max(this->get_falloff_size(), 10e-6f);
 395 const int radius = this->get_morphological_distance_threshold_radius();
 396 const int distance = this->get_size();
 397
 398 /* The Morphological Distance Threshold operation is effectively three consecutive operations
 399 * implemented as a single operation. The three operations are as follows:
 400 *
 401 * .-----------. .--------------. .----------------.
 402 * | Threshold |-->| Dilate/Erode |-->| Distance Inset |
 403 * '-----------' '--------------' '----------------'
 404 *
 405 * The threshold operation just converts the input into a binary image, where the pixel is 1 if
 406 * it is larger than 0.5 and 0 otherwise. Pixels that are 1 in the output of the threshold
 407 * operation are said to be masked. The dilate/erode operation is a dilate or erode
 408 * morphological operation with a circular structuring element depending on the sign of the
 409 * distance, where it is a dilate operation if the distance is positive and an erode operation
 410 * otherwise. This is equivalent to the Morphological Distance operation, see its
 411 * implementation for more information. Finally, the distance inset is an operation that
 412 * converts the binary image into a narrow band distance field. That is, pixels that are
 413 * unmasked will remain 0, while pixels that are masked will start from zero at the boundary of
 414 * the masked region and linearly increase until reaching 1 in the span of a number pixels
 415 * given by the inset value.
 416 *
 417 * As a performance optimization, the dilate/erode operation is omitted and its effective
 418 * result is achieved by slightly adjusting the distance inset operation. The base distance
 419 * inset operation works by computing the signed distance from the current center pixel to the
 420 * nearest pixel with a different value. Since our image is a binary image, that means that if
 421 * the pixel is masked, we compute the signed distance to the nearest unmasked pixel, and if
 422 * the pixel is unmasked, we compute the signed distance to the nearest masked pixel. The distance
 423 * is positive if the pixel is masked and negative otherwise. The distance is then normalized
 424 * by dividing by the given inset value and clamped to the [0, 1] range. Since distances larger
 425 * than the inset value are eventually clamped, the distance search window is limited to a
 426 * radius equivalent to the inset value.
 427 *
 428 * To achieve the effective result of the omitted dilate/erode operation, we adjust the
 429 * distance inset operation as follows. First, we increase the radius of the distance search
 430 * window by the radius of the dilate/erode operation. Then we adjust the resulting narrow band
 431 * signed distance field as follows.
 432 *
 433 * For the erode case, we merely subtract the erode distance, which makes the outermost erode
 434 * distance number of pixels zero due to clamping, consequently achieving the result of the
 435 * erode, while retaining the needed inset because we increased the distance search window by
 436 * the same amount we subtracted.
 437 *
 438 * Similarly, for the dilate case, we add the dilate distance, which makes the dilate distance
 439 * number of pixels just outside of the masked region positive and part of the narrow band
 440 * distance field, consequently achieving the result of the dilate, while at the same time, the
 441 * innermost dilate distance number of pixels become 1 due to clamping, retaining the needed
 442 * inset because we increased the distance search window by the same amount we added.
 443 *
 444 * Since the erode/dilate distance is already signed appropriately as described before, we just
 445 * add it in both cases. */
 446 parallel_for(domain.size, [&](const int2 texel) {
 447 /* Apply a threshold operation on the center pixel, where the threshold is currently
 448 * hard-coded at 0.5. The pixels with values larger than the threshold are said to be
 449 * masked. */
 450 bool is_center_masked = input.load_pixel<float>(texel) > 0.5f;
 451
 452 /* Since the distance search window is limited to the given radius, the maximum possible
 453 * squared distance to the center is double the squared radius. */
 454 int minimum_squared_distance = radius * radius * 2;
 455
 456 /* Compute the start and end bounds of the window such that no out-of-bounds processing
 457 * happen in the loops. */
 458 const int2 start = math::max(texel - radius, int2(0)) - texel;
 459 const int2 end = math::min(texel + radius + 1, image_size) - texel;
 460
 461 /* Find the squared distance to the nearest different pixel in the search window of the given
 462 * radius. */
 463 for (int y = start.y; y < end.y; y++) {
 464 const int yy = y * y;
 465 for (int x = start.x; x < end.x; x++) {
 466 bool is_sample_masked = input.load_pixel<float>(texel + int2(x, y)) > 0.5f;
 467 if (is_center_masked != is_sample_masked) {
 468 minimum_squared_distance = math::min(minimum_squared_distance, x * x + yy);
 469 }
 470 }
 471 }
 472
 473 /* Compute the actual distance from the squared distance and assign it an appropriate sign
 474 * depending on whether it lies in a masked region or not. */
 475 float signed_minimum_distance = math::sqrt(float(minimum_squared_distance)) *
 476 (is_center_masked ? 1.0f : -1.0f);
 477
 478 /* Add the erode/dilate distance and divide by the inset amount as described in the
 479 * discussion, then clamp to the [0, 1] range. */
 480 float value = math::clamp((signed_minimum_distance + distance) / inset, 0.0f, 1.0f);
 481
 482 output.store_pixel(texel, value);
 483 });
 484 }
485
 486 /* See the discussion in the implementation for more information. */
 /* Search radius = falloff inset (rounded up) plus the absolute dilate/erode distance.
  * NOTE(review): the signature line (original line 487, presumably
  * `int get_morphological_distance_threshold_radius()`) was lost in extraction. */
 488 {
 489 return int(math::ceil(this->get_falloff_size())) + math::abs(this->get_size());
 490 }
491
 492 /* ----------------------------------------
 493 * Distance Feather Morphological Operator.
 494 * ---------------------------------------- */
 495
 /* Delegates to the shared morphological_distance_feather algorithm with the signed size
  * and the selected falloff curve.
  * NOTE(review): the signature (original line 496) and the call's opening line (original
  * line 498, presumably `morphological_distance_feather(`) were lost in extraction. */
 497 {
 499 context(), get_input("Mask"), get_result("Mask"), this->get_size(), this->get_falloff());
 500 }
501
 502 /* ---------------
 503 * Common Methods.
 504 * --------------- */
 505
 /* Returns true when the operation would not change the input: single-value inputs are
  * always identity; otherwise a zero size is identity (subject to the partially-lost
  * condition below).
  * NOTE(review): the signature (original line 506, presumably `bool is_identity()`) and
  * the start of the compound condition (original line 513, whose continuation checks a
  * non-zero falloff size — presumably also testing the type) were lost in extraction. */
 507 {
 508 const Result &input = get_input("Mask");
 509 if (input.is_single_value()) {
 510 return true;
 511 }
 512
 514 this->get_falloff_size() != 0.0f)
 515 {
 516 return false;
 517 }
 518
 519 if (this->get_size() == 0) {
 520 return true;
 521 }
 522
 523 return false;
 524 }
525
 526 /* Gets the size of the structuring element. See the get_size method for more information. */
 /* Always odd: twice the absolute signed radius plus one for the center pixel.
  * NOTE(review): the signature line (original line 527, presumably
  * `int get_structuring_element_size()`) was lost in extraction. */
 528 {
 529 return math::abs(this->get_size()) * 2 + 1;
 530 }
531
 532 /* Returns true if dilation should be performed, as opposed to erosion. See the get_size()
 533 * method for more information. */
 /* NOTE(review): the signature line (original line 534, presumably `bool is_dilation()`)
  * was lost in extraction. Note that a zero size counts as erosion here, though execute()
  * treats it as identity first. */
 535 {
 536 return this->get_size() > 0;
 537 }
538
 539 /* The signed radius of the structuring element, that is, half the structuring element size. The
 540 * sign indicates either dilation or erosion, where negative values means erosion. */
 /* Reads the "Size" socket, defaulting to 0 when it carries no single value.
  * NOTE(review): the signature line (original line 541, presumably `int get_size()`) was
  * lost in extraction. */
 542 {
 543 return this->get_input("Size").get_single_value_default(0);
 544 }
545
 /* Reads the "Falloff Size" socket, clamped to be non-negative, defaulting to 0.
  * NOTE(review): the signature line (original line 546, presumably
  * `float get_falloff_size()`) was lost in extraction. */
 547 {
 548 return math::max(0.0f, this->get_input("Falloff Size").get_single_value_default(0.0f));
 549 }
550
 /* Reads the "Type" menu socket and converts it to the CMPNodeDilateErodeMethod enum,
  * defaulting to the step operator.
  * NOTE(review): the signature line (original line 551, presumably
  * `CMPNodeDilateErodeMethod get_type()`) was lost in extraction. */
 552 {
 553 const Result &input = this->get_input("Type");
 554 const MenuValue default_menu_value = MenuValue(CMP_NODE_DILATE_ERODE_STEP);
 555 const MenuValue menu_value = input.get_single_value_default(default_menu_value);
 556 return static_cast<CMPNodeDilateErodeMethod>(menu_value.value);
 557 }
558
 /* Reads the "Falloff" menu socket as a raw proportional-falloff value, defaulting to
  * PROP_SMOOTH.
  * NOTE(review): the signature line (original line 559, presumably `int get_falloff()`)
  * was lost in extraction. */
 560 {
 561 const Result &input = this->get_input("Falloff");
 562 const MenuValue default_menu_value = MenuValue(PROP_SMOOTH);
 563 const MenuValue menu_value = input.get_single_value_default(default_menu_value);
 564 return menu_value.value;
 565 }
566};
567
/* Factory registered on the node type: constructs the operation for the compositor
 * evaluator. Ownership of the returned operation passes to the caller.
 * NOTE(review): the signature line (original line 568, presumably
 * `static NodeOperation *get_compositor_operation(Context &context, DNode node)`) was
 * lost in extraction. */
 569{
 570 return new DilateErodeOperation(context, node);
 571}
572
573} // namespace blender::nodes::node_composite_dilate_cc
574
/* Registers the Dilate/Erode compositor node type with its UI metadata, declaration,
 * init, storage, and compositor-operation callbacks.
 * NOTE(review): several lines were lost in extraction — the signature (original line 575),
 * the file_ns namespace alias (577), the nclass assignment (585), the opening of the
 * node_type_storage() call (588), and the final registration call (592). Confirm against
 * upstream. */
 576{
 578
 579 static blender::bke::bNodeType ntype;
 580
 581 cmp_node_type_base(&ntype, "CompositorNodeDilateErode", CMP_NODE_DILATEERODE);
 582 ntype.ui_name = "Dilate/Erode";
 583 ntype.ui_description = "Expand and shrink masks";
 584 ntype.enum_name_legacy = "DILATEERODE";
 586 ntype.declare = file_ns::cmp_node_dilate_declare;
 587 ntype.initfunc = file_ns::node_composit_init_dilateerode;
 589 ntype, "NodeDilateErode", node_free_standard_storage, node_copy_standard_storage);
 590 ntype.get_compositor_operation = file_ns::get_compositor_operation;
 591
 593}
#define NODE_CLASS_OP_FILTER
Definition BKE_node.hh:451
#define CMP_NODE_DILATEERODE
#define BLT_I18NCONTEXT_ID_CURVE_LEGACY
CMPNodeDilateErodeMethod
@ CMP_NODE_DILATE_ERODE_STEP
@ CMP_NODE_DILATE_ERODE_DISTANCE_FEATHER
@ CMP_NODE_DILATE_ERODE_DISTANCE_THRESHOLD
@ CMP_NODE_DILATE_ERODE_DISTANCE
@ PROP_SMOOTH
void GPU_shader_uniform_1f(blender::gpu::Shader *sh, const char *name, float value)
void GPU_shader_bind(blender::gpu::Shader *shader, const blender::gpu::shader::SpecializationConstants *constants_state=nullptr)
void GPU_shader_uniform_1i(blender::gpu::Shader *sh, const char *name, int value)
void GPU_shader_unbind()
#define NOD_REGISTER_NODE(REGISTER_FUNC)
BMesh const char void * data
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
Result create_result(ResultType type, ResultPrecision precision)
gpu::Shader * get_shader(const char *info_name, ResultPrecision precision)
NodeOperation(Context &context, DNode node)
Result & get_result(StringRef identifier)
Definition operation.cc:39
Result & get_input(StringRef identifier) const
Definition operation.cc:138
virtual Domain compute_domain()
Definition operation.cc:56
void share_data(const Result &source)
Definition result.cc:523
void allocate_texture(const Domain domain, const bool from_pool=true, const std::optional< ResultStorageType > storage_type=std::nullopt)
Definition result.cc:389
void unbind_as_texture() const
Definition result.cc:511
void bind_as_texture(gpu::Shader *shader, const char *texture_name) const
Definition result.cc:487
void unbind_as_image() const
Definition result.cc:517
void bind_as_image(gpu::Shader *shader, const char *image_name, bool read=false) const
Definition result.cc:498
#define input
#define output
float distance(VecOp< float, D >, VecOp< float, D >) RET
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void node_register_type(bNodeType &ntype)
Definition node.cc:2416
void node_type_storage(bNodeType &ntype, std::optional< StringRefNull > storagename, void(*freefunc)(bNode *node), void(*copyfunc)(bNodeTree *dest_ntree, bNode *dest_node, const bNode *src_node))
Definition node.cc:5414
void compute_dispatch_threads_at_least(gpu::Shader *shader, int2 threads_range, int2 local_size=int2(16))
Definition utilities.cc:196
void morphological_distance(Context &context, const Result &input, Result &output, const int distance)
void morphological_distance_feather(Context &context, const Result &input, Result &output, const int distance, const int falloff_type=PROP_SMOOTH)
void smaa(Context &context, const Result &input, Result &output, const float threshold=0.1f, const float local_contrast_adaptation_factor=2.0f, const int corner_rounding=25)
Definition smaa.cc:1646
void parallel_for(const int2 range, const Function &function)
T clamp(const T &a, const T &min, const T &max)
T sqrt(const T &a)
T min(const T &a, const T &b)
T ceil(const T &a)
T max(const T &a, const T &b)
T abs(const T &a)
static void cmp_node_dilate_declare(NodeDeclarationBuilder &b)
static void node_composit_init_dilateerode(bNodeTree *, bNode *node)
static const EnumPropertyItem type_items[]
static NodeOperation * get_compositor_operation(Context &context, DNode node)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
VecBase< int32_t, 2 > int2
static void register_node_type_cmp_dilateerode()
void cmp_node_type_base(blender::bke::bNodeType *ntype, std::string idname, const std::optional< int16_t > legacy_type)
void node_free_standard_storage(bNode *node)
Definition node_util.cc:42
void node_copy_standard_storage(bNodeTree *, bNode *dest_node, const bNode *src_node)
Definition node_util.cc:54
const EnumPropertyItem rna_enum_proportional_falloff_curve_only_items[]
Definition rna_scene.cc:112
void * storage
Defines a node type.
Definition BKE_node.hh:238
std::string ui_description
Definition BKE_node.hh:244
NodeGetCompositorOperationFunction get_compositor_operation
Definition BKE_node.hh:348
void(* initfunc)(bNodeTree *ntree, bNode *node)
Definition BKE_node.hh:289
const char * enum_name_legacy
Definition BKE_node.hh:247
NodeDeclareFunction declare
Definition BKE_node.hh:362
static pxr::UsdShadeInput get_input(const pxr::UsdShadeShader &usd_shader, const pxr::TfToken &input_name)
#define N_(msgid)