/* node_runtime.cc — Blender 4.3: node-tree runtime data (topology cache). */
/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */
#include "BKE_node.hh"
#include "BKE_node_runtime.hh"

#include "DNA_node_types.h"

#include "BLI_function_ref.hh"
#include "BLI_stack.hh"
#include "BLI_task.hh"

#include "NOD_geometry_nodes_lazy_function.hh"
13
15
17
19{
20 BLI_assert(tree_cow.type == NTREE_GEOMETRY);
21 /* Rebuild geometry nodes lazy function graph. */
22 tree_cow.runtime->geometry_nodes_lazy_function_graph_info.reset();
24}
25
26static void update_node_vector(const bNodeTree &ntree)
27{
28 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
29 const Span<bNode *> nodes = tree_runtime.nodes_by_id;
30 tree_runtime.group_nodes.clear();
31 tree_runtime.has_undefined_nodes_or_sockets = false;
32 for (const int i : nodes.index_range()) {
33 bNode &node = *nodes[i];
34 node.runtime->index_in_tree = i;
35 node.runtime->owner_tree = const_cast<bNodeTree *>(&ntree);
36 tree_runtime.has_undefined_nodes_or_sockets |= node.typeinfo == &bke::NodeTypeUndefined;
37 if (node.is_group()) {
38 tree_runtime.group_nodes.append(&node);
39 }
40 }
41}
42
43static void update_link_vector(const bNodeTree &ntree)
44{
45 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
46 tree_runtime.links.clear();
47 LISTBASE_FOREACH (bNodeLink *, link, &ntree.links) {
48 /* Check that the link connects nodes within this tree. */
49 BLI_assert(tree_runtime.nodes_by_id.contains(link->fromnode));
50 BLI_assert(tree_runtime.nodes_by_id.contains(link->tonode));
51
52 tree_runtime.links.append(link);
53 }
54}
55
57{
58 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
59 tree_runtime.sockets.clear();
60 tree_runtime.input_sockets.clear();
61 tree_runtime.output_sockets.clear();
62 for (bNode *node : tree_runtime.nodes_by_id) {
63 bNodeRuntime &node_runtime = *node->runtime;
64 node_runtime.inputs.clear();
65 node_runtime.outputs.clear();
66 LISTBASE_FOREACH (bNodeSocket *, socket, &node->inputs) {
67 socket->runtime->index_in_node = node_runtime.inputs.append_and_get_index(socket);
68 socket->runtime->index_in_all_sockets = tree_runtime.sockets.append_and_get_index(socket);
69 socket->runtime->index_in_inout_sockets = tree_runtime.input_sockets.append_and_get_index(
70 socket);
71 socket->runtime->owner_node = node;
72 tree_runtime.has_undefined_nodes_or_sockets |= socket->typeinfo ==
74 }
75 LISTBASE_FOREACH (bNodeSocket *, socket, &node->outputs) {
76 socket->runtime->index_in_node = node_runtime.outputs.append_and_get_index(socket);
77 socket->runtime->index_in_all_sockets = tree_runtime.sockets.append_and_get_index(socket);
78 socket->runtime->index_in_inout_sockets = tree_runtime.output_sockets.append_and_get_index(
79 socket);
80 socket->runtime->owner_node = node;
81 tree_runtime.has_undefined_nodes_or_sockets |= socket->typeinfo ==
83 }
84 }
85}
86
87static void update_panels(const bNodeTree &ntree)
88{
89 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
90 for (bNode *node : tree_runtime.nodes_by_id) {
91 bNodeRuntime &node_runtime = *node->runtime;
92 node_runtime.panels.reinitialize(node->num_panel_states);
93 }
94}
95
96static void update_internal_link_inputs(const bNodeTree &ntree)
97{
98 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
99 for (bNode *node : tree_runtime.nodes_by_id) {
100 for (bNodeSocket *socket : node->runtime->outputs) {
101 socket->runtime->internal_link_input = nullptr;
102 }
103 for (bNodeLink &link : node->runtime->internal_links) {
104 link.tosock->runtime->internal_link_input = link.fromsock;
105 }
106 }
107}
108
110{
111 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
112 for (bNode *node : tree_runtime.nodes_by_id) {
113 for (bNodeSocket *socket : node->runtime->inputs) {
114 socket->runtime->directly_linked_links.clear();
115 socket->runtime->directly_linked_sockets.clear();
116 }
117 for (bNodeSocket *socket : node->runtime->outputs) {
118 socket->runtime->directly_linked_links.clear();
119 socket->runtime->directly_linked_sockets.clear();
120 }
121 node->runtime->has_available_linked_inputs = false;
122 node->runtime->has_available_linked_outputs = false;
123 }
124 for (bNodeLink *link : tree_runtime.links) {
125 link->fromsock->runtime->directly_linked_links.append(link);
126 link->fromsock->runtime->directly_linked_sockets.append(link->tosock);
127 link->tosock->runtime->directly_linked_links.append(link);
128 if (link->is_available()) {
129 link->fromnode->runtime->has_available_linked_outputs = true;
130 link->tonode->runtime->has_available_linked_inputs = true;
131 }
132 }
133 for (bNodeSocket *socket : tree_runtime.input_sockets) {
134 if (socket->flag & SOCK_MULTI_INPUT) {
135 std::sort(socket->runtime->directly_linked_links.begin(),
136 socket->runtime->directly_linked_links.end(),
137 [&](const bNodeLink *a, const bNodeLink *b) {
138 return a->multi_input_sort_id > b->multi_input_sort_id;
139 });
140 }
141 }
142 for (bNodeSocket *socket : tree_runtime.input_sockets) {
143 for (bNodeLink *link : socket->runtime->directly_linked_links) {
144 /* Do this after sorting the input links. */
145 socket->runtime->directly_linked_sockets.append(link->fromsock);
146 }
147 }
148}
149
151 bNodeSocket &input_socket,
152 bool only_follow_first_input_link,
153 Vector<bNodeSocket *, 16> &sockets_in_current_chain,
154 Vector<bNodeSocket *> &r_logical_origins,
155 Vector<bNodeSocket *> &r_skipped_origins)
156{
157 if (sockets_in_current_chain.contains(&input_socket)) {
158 /* Protect against reroute recursions. */
159 return;
160 }
161 sockets_in_current_chain.append(&input_socket);
162
163 Span<bNodeLink *> links_to_check = input_socket.runtime->directly_linked_links;
164 if (only_follow_first_input_link) {
165 links_to_check = links_to_check.take_front(1);
166 }
167 for (bNodeLink *link : links_to_check) {
168 if (link->is_muted()) {
169 continue;
170 }
171 if (!link->is_available()) {
172 continue;
173 }
174 bNodeSocket &origin_socket = *link->fromsock;
175 bNode &origin_node = *link->fromnode;
176 if (!origin_socket.is_available()) {
177 /* Non available sockets are ignored. */
178 continue;
179 }
180 if (origin_node.type == NODE_REROUTE) {
181 bNodeSocket &reroute_input = *origin_node.runtime->inputs[0];
182 bNodeSocket &reroute_output = *origin_node.runtime->outputs[0];
183 r_skipped_origins.append(&reroute_input);
184 r_skipped_origins.append(&reroute_output);
186 reroute_input, false, sockets_in_current_chain, r_logical_origins, r_skipped_origins);
187 continue;
188 }
189 if (origin_node.is_muted()) {
190 if (bNodeSocket *mute_input = origin_socket.runtime->internal_link_input) {
191 r_skipped_origins.append(&origin_socket);
192 r_skipped_origins.append(mute_input);
194 *mute_input, true, sockets_in_current_chain, r_logical_origins, r_skipped_origins);
195 }
196 continue;
197 }
198 r_logical_origins.append(&origin_socket);
199 }
200
201 sockets_in_current_chain.pop_last();
202}
203
205{
206 /* Compute logically linked sockets to inputs. */
207 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
208 Span<bNode *> nodes = tree_runtime.nodes_by_id;
209 threading::parallel_for(nodes.index_range(), 128, [&](const IndexRange range) {
210 for (const int i : range) {
211 bNode &node = *nodes[i];
212 for (bNodeSocket *socket : node.runtime->inputs) {
213 Vector<bNodeSocket *, 16> sockets_in_current_chain;
214 socket->runtime->logically_linked_sockets.clear();
215 socket->runtime->logically_linked_skipped_sockets.clear();
216 find_logical_origins_for_socket_recursive(
217 *socket,
218 false,
219 sockets_in_current_chain,
220 socket->runtime->logically_linked_sockets,
221 socket->runtime->logically_linked_skipped_sockets);
222 }
223 }
224 });
225
226 /* Clear logically linked sockets to outputs. */
227 threading::parallel_for(nodes.index_range(), 128, [&](const IndexRange range) {
228 for (const int i : range) {
229 bNode &node = *nodes[i];
230 for (bNodeSocket *socket : node.runtime->outputs) {
231 socket->runtime->logically_linked_sockets.clear();
232 }
233 }
234 });
235
236 /* Compute logically linked sockets to outputs using the previously computed logically linked
237 * sockets to inputs. */
238 for (const bNode *node : nodes) {
239 for (bNodeSocket *input_socket : node->runtime->inputs) {
240 for (bNodeSocket *output_socket : input_socket->runtime->logically_linked_sockets) {
241 output_socket->runtime->logically_linked_sockets.append(input_socket);
242 }
243 }
244 }
245}
246
247static void update_nodes_by_type(const bNodeTree &ntree)
248{
249 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
250 tree_runtime.nodes_by_type.clear();
251 for (bNode *node : tree_runtime.nodes_by_id) {
252 tree_runtime.nodes_by_type.add(node->typeinfo, node);
253 }
254}
255
257{
258 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
259 Span<bNode *> nodes = tree_runtime.nodes_by_id;
260 threading::parallel_for(nodes.index_range(), 128, [&](const IndexRange range) {
261 for (bNode *node : nodes.slice(range)) {
262 node->runtime->inputs_by_identifier.clear();
263 node->runtime->outputs_by_identifier.clear();
264 for (bNodeSocket *socket : node->runtime->inputs) {
265 node->runtime->inputs_by_identifier.add_new(socket->identifier, socket);
266 }
267 for (bNodeSocket *socket : node->runtime->outputs) {
268 node->runtime->outputs_by_identifier.add_new(socket->identifier, socket);
269 }
270 }
271 });
272}
273
277};
278
280 bool is_done = false;
281 bool is_in_stack = false;
282};
283
285{
286 Vector<const bNode *> origin_nodes;
287 if (all_zone_output_node_types().contains(node.type)) {
288 const bNodeZoneType &zone_type = *zone_type_by_node_type(node.type);
289 /* Can't use #zone_type.get_corresponding_input because that expects the topology cache to be
290 * build already, but we are still building it here. */
291 for (const bNode *input_node :
292 ntree.runtime->nodes_by_type.lookup(bke::node_type_find(zone_type.input_idname.c_str())))
293 {
294 if (zone_type.get_corresponding_output_id(*input_node) == node.identifier) {
295 origin_nodes.append(input_node);
296 }
297 }
298 }
299 return origin_nodes;
300}
301
303{
304 Vector<const bNode *> target_nodes;
305 if (all_zone_input_node_types().contains(node.type)) {
306 const bNodeZoneType &zone_type = *zone_type_by_node_type(node.type);
307 if (const bNode *output_node = zone_type.get_corresponding_output(ntree, node)) {
308 target_nodes.append(output_node);
309 }
310 }
311 return target_nodes;
312}
313
314static void toposort_from_start_node(const bNodeTree &ntree,
315 const ToposortDirection direction,
316 bNode &start_node,
318 Vector<bNode *> &r_sorted_nodes,
319 bool &r_cycle_detected)
320{
321 struct Item {
322 bNode *node;
323 int socket_index = 0;
324 int link_index = 0;
325 int implicit_link_index = 0;
326 };
327
328 Stack<Item, 64> nodes_to_check;
329 nodes_to_check.push({&start_node});
330 node_states[start_node.index()].is_in_stack = true;
331 while (!nodes_to_check.is_empty()) {
332 Item &item = nodes_to_check.peek();
333 bNode &node = *item.node;
334 bool pushed_node = false;
335
336 auto handle_linked_node = [&](bNode &linked_node) {
337 ToposortNodeState &linked_node_state = node_states[linked_node.index()];
338 if (linked_node_state.is_done) {
339 /* The linked node has already been visited. */
340 return true;
341 }
342 if (linked_node_state.is_in_stack) {
343 r_cycle_detected = true;
344 }
345 else {
346 nodes_to_check.push({&linked_node});
347 linked_node_state.is_in_stack = true;
348 pushed_node = true;
349 }
350 return false;
351 };
352
353 const Span<bNodeSocket *> sockets = (direction == ToposortDirection::LeftToRight) ?
354 node.runtime->inputs :
355 node.runtime->outputs;
356 while (true) {
357 if (item.socket_index == sockets.size()) {
358 /* All sockets have already been visited. */
359 break;
360 }
361 bNodeSocket &socket = *sockets[item.socket_index];
362 const Span<bNodeLink *> linked_links = socket.runtime->directly_linked_links;
363 if (item.link_index == linked_links.size()) {
364 /* All links connected to this socket have already been visited. */
365 item.socket_index++;
366 item.link_index = 0;
367 continue;
368 }
369 bNodeLink &link = *linked_links[item.link_index];
370 if (!link.is_available()) {
371 /* Ignore unavailable links. */
372 item.link_index++;
373 continue;
374 }
375 bNodeSocket &linked_socket = *socket.runtime->directly_linked_sockets[item.link_index];
376 bNode &linked_node = *linked_socket.runtime->owner_node;
377 if (handle_linked_node(linked_node)) {
378 /* The linked node has already been visited. */
379 item.link_index++;
380 continue;
381 }
382 break;
383 }
384
385 if (!pushed_node) {
386 /* Some nodes are internally linked without an explicit `bNodeLink`. The toposort should
387 * still order them correctly and find cycles. */
388 const Vector<const bNode *> implicitly_linked_nodes =
389 (direction == ToposortDirection::LeftToRight) ? get_implicit_origin_nodes(ntree, node) :
390 get_implicit_target_nodes(ntree, node);
391 while (true) {
392 if (item.implicit_link_index == implicitly_linked_nodes.size()) {
393 /* All implicitly linked nodes have already been visited. */
394 break;
395 }
396 const bNode &linked_node = *implicitly_linked_nodes[item.implicit_link_index];
397 if (handle_linked_node(const_cast<bNode &>(linked_node))) {
398 /* The implicitly linked node has already been visited. */
399 item.implicit_link_index++;
400 continue;
401 }
402 break;
403 }
404 }
405
406 /* If no other element has been pushed, the current node can be pushed to the sorted list.
407 */
408 if (!pushed_node) {
409 ToposortNodeState &node_state = node_states[node.index()];
410 node_state.is_done = true;
411 node_state.is_in_stack = false;
412 r_sorted_nodes.append(&node);
413 nodes_to_check.pop();
414 }
415 }
416}
417
418static void update_toposort(const bNodeTree &ntree,
419 const ToposortDirection direction,
420 Vector<bNode *> &r_sorted_nodes,
421 bool &r_cycle_detected)
422{
423 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
424 r_sorted_nodes.clear();
425 r_sorted_nodes.reserve(tree_runtime.nodes_by_id.size());
426 r_cycle_detected = false;
427
428 Array<ToposortNodeState> node_states(tree_runtime.nodes_by_id.size());
429 for (bNode *node : tree_runtime.nodes_by_id) {
430 if (node_states[node->index()].is_done) {
431 /* Ignore nodes that are done already. */
432 continue;
433 }
434 if ((direction == ToposortDirection::LeftToRight) ?
435 node->runtime->has_available_linked_outputs :
436 node->runtime->has_available_linked_inputs)
437 {
438 /* Ignore non-start nodes. */
439 continue;
440 }
442 ntree, direction, *node, node_states, r_sorted_nodes, r_cycle_detected);
443 }
444
445 if (r_sorted_nodes.size() < tree_runtime.nodes_by_id.size()) {
446 r_cycle_detected = true;
447 for (bNode *node : tree_runtime.nodes_by_id) {
448 if (node_states[node->index()].is_done) {
449 /* Ignore nodes that are done already. */
450 continue;
451 }
452 /* Start toposort at this node which is somewhere in the middle of a loop. */
454 ntree, direction, *node, node_states, r_sorted_nodes, r_cycle_detected);
455 }
456 }
457
458 BLI_assert(tree_runtime.nodes_by_id.size() == r_sorted_nodes.size());
459}
460
461static void update_root_frames(const bNodeTree &ntree)
462{
463 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
464 Span<bNode *> nodes = tree_runtime.nodes_by_id;
465
466 tree_runtime.root_frames.clear();
467
468 for (bNode *node : nodes) {
469 if (!node->parent && node->is_frame()) {
470 tree_runtime.root_frames.append(node);
471 }
472 }
473}
474
476{
477 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
478 Span<bNode *> nodes = tree_runtime.nodes_by_id;
479
480 for (bNode *node : nodes) {
481 node->runtime->direct_children_in_frame.clear();
482 }
483
484 for (bNode *node : nodes) {
485 if (const bNode *frame = node->parent) {
486 frame->runtime->direct_children_in_frame.append(node);
487 }
488 }
489}
490
491static void update_group_output_node(const bNodeTree &ntree)
492{
493 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
494 const bke::bNodeType *node_type = bke::node_type_find("NodeGroupOutput");
495 const Span<bNode *> group_output_nodes = tree_runtime.nodes_by_type.lookup(node_type);
496 if (group_output_nodes.is_empty()) {
497 tree_runtime.group_output_node = nullptr;
498 }
499 else if (group_output_nodes.size() == 1) {
500 tree_runtime.group_output_node = group_output_nodes[0];
501 }
502 else {
503 for (bNode *group_output : group_output_nodes) {
504 if (group_output->flag & NODE_DO_OUTPUT) {
505 tree_runtime.group_output_node = group_output;
506 break;
507 }
508 }
509 }
510}
511
513{
514 for (const bNode *node : ntree.runtime->toposort_left_to_right) {
515 bNodeRuntime &node_runtime = *node->runtime;
516 if (!node->is_reroute()) {
517 node_runtime.is_dangling_reroute = false;
518 continue;
519 }
520 const Span<const bNodeLink *> links = node_runtime.inputs[0]->runtime->directly_linked_links;
521 if (links.is_empty()) {
522 node_runtime.is_dangling_reroute = true;
523 continue;
524 }
525 BLI_assert(links.size() == 1);
526 const bNode &source_node = *links.first()->fromnode;
527 node_runtime.is_dangling_reroute = source_node.runtime->is_dangling_reroute;
528 }
529}
530
531static void ensure_topology_cache(const bNodeTree &ntree)
532{
533 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
534 tree_runtime.topology_cache_mutex.ensure([&]() {
535 update_node_vector(ntree);
536 update_link_vector(ntree);
538 update_panels(ntree);
542 threading::parallel_invoke(
543 tree_runtime.nodes_by_id.size() > 32,
544 [&]() { update_logically_linked_sockets(ntree); },
545 [&]() { update_sockets_by_identifier(ntree); },
546 [&]() {
547 update_toposort(ntree,
548 ToposortDirection::LeftToRight,
549 tree_runtime.toposort_left_to_right,
550 tree_runtime.has_available_link_cycle);
551 for (const int i : tree_runtime.toposort_left_to_right.index_range()) {
552 const bNode &node = *tree_runtime.toposort_left_to_right[i];
553 node.runtime->toposort_left_to_right_index = i;
554 }
555 },
556 [&]() {
557 bool dummy;
559 ntree, ToposortDirection::RightToLeft, tree_runtime.toposort_right_to_left, dummy);
560 for (const int i : tree_runtime.toposort_right_to_left.index_range()) {
561 const bNode &node = *tree_runtime.toposort_right_to_left[i];
562 node.runtime->toposort_right_to_left_index = i;
563 }
564 },
565 [&]() { update_root_frames(ntree); },
566 [&]() { update_direct_frames_childrens(ntree); });
569 tree_runtime.topology_cache_exists = true;
570 });
571}
572
573} // namespace blender::bke::node_tree_runtime
574
575void bNodeTree::ensure_topology_cache() const
576{
578}
579
580const bNestedNodeRef *bNodeTree::find_nested_node_ref(const int32_t nested_node_id) const
581{
582 for (const bNestedNodeRef &ref : this->nested_node_refs_span()) {
583 if (ref.id == nested_node_id) {
584 return &ref;
585 }
586 }
587 return nullptr;
588}
589
590const bNestedNodeRef *bNodeTree::nested_node_ref_from_node_id_path(
591 const blender::Span<int32_t> node_ids) const
592{
593 if (node_ids.is_empty()) {
594 return nullptr;
595 }
596 for (const bNestedNodeRef &ref : this->nested_node_refs_span()) {
597 blender::Vector<int> current_node_ids;
598 if (this->node_id_path_from_nested_node_ref(ref.id, current_node_ids)) {
599 if (current_node_ids.as_span() == node_ids) {
600 return &ref;
601 }
602 }
603 }
604 return nullptr;
605}
606
607bool bNodeTree::node_id_path_from_nested_node_ref(const int32_t nested_node_id,
608 blender::Vector<int> &r_node_ids) const
609{
610 const bNestedNodeRef *ref = this->find_nested_node_ref(nested_node_id);
611 if (ref == nullptr) {
612 return false;
613 }
614 const int32_t node_id = ref->path.node_id;
615 const bNode *node = this->node_by_id(node_id);
616 if (node == nullptr) {
617 return false;
618 }
619 r_node_ids.append(node_id);
620 if (!node->is_group()) {
621 return true;
622 }
623 const bNodeTree *group = reinterpret_cast<const bNodeTree *>(node->id);
624 if (group == nullptr) {
625 return false;
626 }
627 return group->node_id_path_from_nested_node_ref(ref->path.id_in_node, r_node_ids);
628}
629
630const bNode *bNodeTree::find_nested_node(const int32_t nested_node_id,
631 const bNodeTree **r_tree) const
632{
633 const bNestedNodeRef *ref = this->find_nested_node_ref(nested_node_id);
634 if (ref == nullptr) {
635 return nullptr;
636 }
637 const int32_t node_id = ref->path.node_id;
638 const bNode *node = this->node_by_id(node_id);
639 if (node == nullptr) {
640 return nullptr;
641 }
642 if (!node->is_group()) {
643 if (r_tree) {
644 *r_tree = this;
645 }
646 return node;
647 }
648 const bNodeTree *group = reinterpret_cast<const bNodeTree *>(node->id);
649 if (group == nullptr) {
650 return nullptr;
651 }
652 return group->find_nested_node(ref->path.id_in_node, r_tree);
653}
#define NODE_REROUTE
Definition BKE_node.hh:804
#define BLI_assert(a)
Definition BLI_assert.h:50
#define LISTBASE_FOREACH(type, var, list)
@ NTREE_GEOMETRY
@ NODE_DO_OUTPUT
@ SOCK_MULTI_INPUT
void ensure(FunctionRef< void()> compute_cache)
constexpr const T & first() const
Definition BLI_span.hh:316
constexpr int64_t size() const
Definition BLI_span.hh:253
constexpr Span take_front(int64_t n) const
Definition BLI_span.hh:194
constexpr bool is_empty() const
Definition BLI_span.hh:261
bool is_empty() const
Definition BLI_stack.hh:308
void push(const T &value)
Definition BLI_stack.hh:213
int64_t size() const
bool contains(const Key &key) const
int64_t size() const
int64_t append_and_get_index(const T &value)
bool contains(const T &value) const
void append(const T &value)
IndexRange index_range() const
void reserve(const int64_t min_capacity)
Span< T > as_span() const
Array< bNodePanelRuntime > panels
Vector< bNodeSocket * > outputs
Vector< bNodeSocket * > inputs
Vector< bNodeSocket * > output_sockets
Vector< bNodeSocket * > sockets
std::atomic< bool > topology_cache_exists
MultiValueMap< const bNodeType *, bNode * > nodes_by_type
Vector< bNodeSocket * > input_sockets
virtual const int & get_corresponding_output_id(const bNode &input_bnode) const =0
const bNode * get_corresponding_output(const bNodeTree &tree, const bNode &input_bnode) const
local_group_size(16, 16) .push_constant(Type b
OperationNode * node
static void toposort_from_start_node(const bNodeTree &ntree, const ToposortDirection direction, bNode &start_node, MutableSpan< ToposortNodeState > node_states, Vector< bNode * > &r_sorted_nodes, bool &r_cycle_detected)
static void update_dangling_reroute_nodes(const bNodeTree &ntree)
static void update_nodes_by_type(const bNodeTree &ntree)
static void update_direct_frames_childrens(const bNodeTree &ntree)
static Vector< const bNode * > get_implicit_origin_nodes(const bNodeTree &ntree, bNode &node)
static void update_group_output_node(const bNodeTree &ntree)
static void update_link_vector(const bNodeTree &ntree)
static void update_sockets_by_identifier(const bNodeTree &ntree)
static void update_logically_linked_sockets(const bNodeTree &ntree)
static void ensure_topology_cache(const bNodeTree &ntree)
static void find_logical_origins_for_socket_recursive(bNodeSocket &input_socket, bool only_follow_first_input_link, Vector< bNodeSocket *, 16 > &sockets_in_current_chain, Vector< bNodeSocket * > &r_logical_origins, Vector< bNodeSocket * > &r_skipped_origins)
static void update_internal_link_inputs(const bNodeTree &ntree)
static void update_panels(const bNodeTree &ntree)
static void update_node_vector(const bNodeTree &ntree)
static void update_directly_linked_links_and_sockets(const bNodeTree &ntree)
static void update_root_frames(const bNodeTree &ntree)
static void update_toposort(const bNodeTree &ntree, const ToposortDirection direction, Vector< bNode * > &r_sorted_nodes, bool &r_cycle_detected)
void preprocess_geometry_node_tree_for_evaluation(bNodeTree &tree_cow)
static Vector< const bNode * > get_implicit_target_nodes(const bNodeTree &ntree, bNode &node)
static void update_socket_vectors_and_owner_node(const bNodeTree &ntree)
const bNodeZoneType * zone_type_by_node_type(const int node_type)
bNodeSocketType NodeSocketTypeUndefined
Definition node.cc:136
Span< int > all_zone_output_node_types()
bNodeType NodeTypeUndefined
Definition node.cc:135
Span< int > all_zone_input_node_types()
const GeometryNodesLazyFunctionGraphInfo * ensure_geometry_nodes_lazy_function_graph(const bNodeTree &btree)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:95
signed int int32_t
Definition stdint.h:77
bNestedNodePath path
bNodeSocketRuntimeHandle * runtime
bNodeTreeRuntimeHandle * runtime
ListBase links
struct bNode * parent
bNodeRuntimeHandle * runtime
int16_t type
Defines a node type.
Definition BKE_node.hh:218