Blender V5.0
node_runtime.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
5#include "BKE_node.hh"
6#include "BKE_node_runtime.hh"
7
8#include "DNA_node_types.h"
9
10#include "BLI_function_ref.hh"
11#include "BLI_listbase.h"
12#include "BLI_stack.hh"
13#include "BLI_task.hh"
14
18
20
22{
23 BLI_assert(tree_cow.type == NTREE_GEOMETRY);
24 /* Rebuild geometry nodes lazy function graph. */
25 tree_cow.runtime->geometry_nodes_lazy_function_graph_info_mutex.tag_dirty();
27}
28
29static void update_node_vector(const bNodeTree &ntree)
30{
31 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
32 const Span<bNode *> nodes = tree_runtime.nodes_by_id;
33 tree_runtime.group_nodes.clear();
34 tree_runtime.has_undefined_nodes_or_sockets = false;
35 for (const int i : nodes.index_range()) {
36 bNode &node = *nodes[i];
37 node.runtime->index_in_tree = i;
38 node.runtime->owner_tree = const_cast<bNodeTree *>(&ntree);
39 tree_runtime.has_undefined_nodes_or_sockets |= node.is_undefined();
40 if (node.is_group()) {
41 tree_runtime.group_nodes.append(&node);
42 }
43 }
44}
45
46static void update_link_vector(const bNodeTree &ntree)
47{
48 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
49 tree_runtime.links.clear();
50 LISTBASE_FOREACH (bNodeLink *, link, &ntree.links) {
51 /* Check that the link connects nodes within this tree. */
52 BLI_assert(tree_runtime.nodes_by_id.contains(link->fromnode));
53 BLI_assert(tree_runtime.nodes_by_id.contains(link->tonode));
54
55 tree_runtime.links.append(link);
56 }
57}
58
60{
61 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
62 tree_runtime.sockets.clear();
63 tree_runtime.input_sockets.clear();
64 tree_runtime.output_sockets.clear();
65 for (bNode *node : tree_runtime.nodes_by_id) {
66 bNodeRuntime &node_runtime = *node->runtime;
67 node_runtime.inputs.clear();
68 node_runtime.outputs.clear();
69 LISTBASE_FOREACH (bNodeSocket *, socket, &node->inputs) {
70 socket->runtime->index_in_node = node_runtime.inputs.append_and_get_index(socket);
71 socket->runtime->index_in_all_sockets = tree_runtime.sockets.append_and_get_index(socket);
72 socket->runtime->index_in_inout_sockets = tree_runtime.input_sockets.append_and_get_index(
73 socket);
74 socket->runtime->owner_node = node;
75 tree_runtime.has_undefined_nodes_or_sockets |= socket->typeinfo ==
77 }
78 LISTBASE_FOREACH (bNodeSocket *, socket, &node->outputs) {
79 socket->runtime->index_in_node = node_runtime.outputs.append_and_get_index(socket);
80 socket->runtime->index_in_all_sockets = tree_runtime.sockets.append_and_get_index(socket);
81 socket->runtime->index_in_inout_sockets = tree_runtime.output_sockets.append_and_get_index(
82 socket);
83 socket->runtime->owner_node = node;
84 tree_runtime.has_undefined_nodes_or_sockets |= socket->typeinfo ==
86 }
87 }
88}
89
90static void update_panels(const bNodeTree &ntree)
91{
92 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
93 for (bNode *node : tree_runtime.nodes_by_id) {
94 bNodeRuntime &node_runtime = *node->runtime;
95 node_runtime.panels.reinitialize(node->num_panel_states);
96 }
97}
98
99static void update_internal_link_inputs(const bNodeTree &ntree)
100{
101 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
102 for (bNode *node : tree_runtime.nodes_by_id) {
103 for (bNodeSocket *socket : node->runtime->outputs) {
104 socket->runtime->internal_link_input = nullptr;
105 }
106 for (bNodeLink &link : node->runtime->internal_links) {
107 link.tosock->runtime->internal_link_input = link.fromsock;
108 }
109 }
110}
111
113{
114 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
115 for (bNode *node : tree_runtime.nodes_by_id) {
116 for (bNodeSocket *socket : node->runtime->inputs) {
117 socket->runtime->directly_linked_links.clear();
118 socket->runtime->directly_linked_sockets.clear();
119 }
120 for (bNodeSocket *socket : node->runtime->outputs) {
121 socket->runtime->directly_linked_links.clear();
122 socket->runtime->directly_linked_sockets.clear();
123 }
124 node->runtime->has_available_linked_inputs = false;
125 node->runtime->has_available_linked_outputs = false;
126 }
127 for (bNodeLink *link : tree_runtime.links) {
128 link->fromsock->runtime->directly_linked_links.append(link);
129 link->fromsock->runtime->directly_linked_sockets.append(link->tosock);
130 link->tosock->runtime->directly_linked_links.append(link);
131 if (link->is_available()) {
132 link->fromnode->runtime->has_available_linked_outputs = true;
133 link->tonode->runtime->has_available_linked_inputs = true;
134 }
135 BLI_assert(link->fromsock->runtime->owner_node == link->fromnode);
136 BLI_assert(link->tosock->runtime->owner_node == link->tonode);
137 }
138 for (bNodeSocket *socket : tree_runtime.input_sockets) {
139 if (socket->flag & SOCK_MULTI_INPUT) {
140 std::sort(socket->runtime->directly_linked_links.begin(),
141 socket->runtime->directly_linked_links.end(),
142 [&](const bNodeLink *a, const bNodeLink *b) {
143 return a->multi_input_sort_id > b->multi_input_sort_id;
144 });
145 }
146 }
147 for (bNodeSocket *socket : tree_runtime.input_sockets) {
148 for (bNodeLink *link : socket->runtime->directly_linked_links) {
149 /* Do this after sorting the input links. */
150 socket->runtime->directly_linked_sockets.append(link->fromsock);
151 }
152 }
153}
154
156 bNodeSocket &input_socket,
157 bool only_follow_first_input_link,
158 Vector<bNodeSocket *, 16> &sockets_in_current_chain,
159 Vector<bNodeSocket *> &r_logical_origins,
160 Vector<bNodeSocket *> &r_skipped_origins)
161{
162 if (sockets_in_current_chain.contains(&input_socket)) {
163 /* Protect against reroute recursions. */
164 return;
165 }
166 sockets_in_current_chain.append(&input_socket);
167
168 Span<bNodeLink *> links_to_check = input_socket.runtime->directly_linked_links;
169 if (only_follow_first_input_link) {
170 links_to_check = links_to_check.take_front(1);
171 }
172 for (bNodeLink *link : links_to_check) {
173 if (link->is_muted()) {
174 continue;
175 }
176 if (!link->is_available()) {
177 continue;
178 }
179 bNodeSocket &origin_socket = *link->fromsock;
180 bNode &origin_node = *link->fromnode;
181 if (!origin_socket.is_available()) {
182 /* Non available sockets are ignored. */
183 continue;
184 }
185 if (origin_node.is_reroute()) {
186 bNodeSocket &reroute_input = *origin_node.runtime->inputs[0];
187 bNodeSocket &reroute_output = *origin_node.runtime->outputs[0];
188 r_skipped_origins.append(&reroute_input);
189 r_skipped_origins.append(&reroute_output);
191 reroute_input, false, sockets_in_current_chain, r_logical_origins, r_skipped_origins);
192 continue;
193 }
194 if (origin_node.is_muted()) {
195 if (bNodeSocket *mute_input = origin_socket.runtime->internal_link_input) {
196 r_skipped_origins.append(&origin_socket);
197 r_skipped_origins.append(mute_input);
199 *mute_input, true, sockets_in_current_chain, r_logical_origins, r_skipped_origins);
200 }
201 continue;
202 }
203 r_logical_origins.append(&origin_socket);
204 }
205
206 sockets_in_current_chain.pop_last();
207}
208
210{
211 /* Compute logically linked sockets to inputs. */
212 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
213 Span<bNode *> nodes = tree_runtime.nodes_by_id;
214 threading::parallel_for(nodes.index_range(), 128, [&](const IndexRange range) {
215 for (const int i : range) {
216 bNode &node = *nodes[i];
217 for (bNodeSocket *socket : node.runtime->inputs) {
218 Vector<bNodeSocket *, 16> sockets_in_current_chain;
219 socket->runtime->logically_linked_sockets.clear();
220 socket->runtime->logically_linked_skipped_sockets.clear();
221 find_logical_origins_for_socket_recursive(
222 *socket,
223 false,
224 sockets_in_current_chain,
225 socket->runtime->logically_linked_sockets,
226 socket->runtime->logically_linked_skipped_sockets);
227 }
228 }
229 });
230
231 /* Clear logically linked sockets to outputs. */
232 threading::parallel_for(nodes.index_range(), 128, [&](const IndexRange range) {
233 for (const int i : range) {
234 bNode &node = *nodes[i];
235 for (bNodeSocket *socket : node.runtime->outputs) {
236 socket->runtime->logically_linked_sockets.clear();
237 }
238 }
239 });
240
241 /* Compute logically linked sockets to outputs using the previously computed logically linked
242 * sockets to inputs. */
243 for (const bNode *node : nodes) {
244 for (bNodeSocket *input_socket : node->runtime->inputs) {
245 for (bNodeSocket *output_socket : input_socket->runtime->logically_linked_sockets) {
246 output_socket->runtime->logically_linked_sockets.append(input_socket);
247 }
248 }
249 }
250}
251
252static void update_nodes_by_type(const bNodeTree &ntree)
253{
254 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
255 tree_runtime.nodes_by_type.clear();
256 for (bNode *node : tree_runtime.nodes_by_id) {
257 tree_runtime.nodes_by_type.add(node->typeinfo, node);
258 }
259}
260
262{
263 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
264 Span<bNode *> nodes = tree_runtime.nodes_by_id;
265 threading::parallel_for(nodes.index_range(), 128, [&](const IndexRange range) {
266 for (bNode *node : nodes.slice(range)) {
267 node->runtime->inputs_by_identifier.clear();
268 node->runtime->outputs_by_identifier.clear();
269 for (bNodeSocket *socket : node->runtime->inputs) {
270 node->runtime->inputs_by_identifier.add_new(socket->identifier, socket);
271 }
272 for (bNodeSocket *socket : node->runtime->outputs) {
273 node->runtime->outputs_by_identifier.add_new(socket->identifier, socket);
274 }
275 }
276 });
277}
278
/* Direction in which the toposort below traverses links. */
enum class ToposortDirection {
  LeftToRight,
  RightToLeft,
};

/* Per-node traversal state used by the iterative depth-first toposort. */
struct ToposortNodeState {
  bool is_done = false;
  bool is_in_stack = false;
};
288
290{
291 Vector<const bNode *> origin_nodes;
292 if (all_zone_output_node_types().contains(node.type_legacy)) {
293 const bNodeZoneType &zone_type = *zone_type_by_node_type(node.type_legacy);
294 /* Can't use #zone_type.get_corresponding_input because that expects the topology cache to be
295 * build already, but we are still building it here. */
296 for (const bNode *input_node :
297 ntree.runtime->nodes_by_type.lookup(bke::node_type_find(zone_type.input_idname.c_str())))
298 {
299 if (zone_type.get_corresponding_output_id(*input_node) == node.identifier) {
300 origin_nodes.append(input_node);
301 }
302 }
303 }
304 return origin_nodes;
305}
306
308{
309 Vector<const bNode *> target_nodes;
310 if (all_zone_input_node_types().contains(node.type_legacy)) {
311 const bNodeZoneType &zone_type = *zone_type_by_node_type(node.type_legacy);
312 if (const bNode *output_node = zone_type.get_corresponding_output(ntree, node)) {
313 target_nodes.append(output_node);
314 }
315 }
316 return target_nodes;
317}
318
319static void toposort_from_start_node(const bNodeTree &ntree,
320 const ToposortDirection direction,
321 bNode &start_node,
323 Vector<bNode *> &r_sorted_nodes,
324 bool &r_cycle_detected)
325{
326 struct Item {
327 bNode *node;
328 int socket_index = 0;
329 int link_index = 0;
330 int implicit_link_index = 0;
331 };
332
333 Stack<Item, 64> nodes_to_check;
334 nodes_to_check.push({&start_node});
335 node_states[start_node.index()].is_in_stack = true;
336 while (!nodes_to_check.is_empty()) {
337 Item &item = nodes_to_check.peek();
338 bNode &node = *item.node;
339 bool pushed_node = false;
340
341 auto handle_linked_node = [&](bNode &linked_node) {
342 ToposortNodeState &linked_node_state = node_states[linked_node.index()];
343 if (linked_node_state.is_done) {
344 /* The linked node has already been visited. */
345 return true;
346 }
347 if (linked_node_state.is_in_stack) {
348 r_cycle_detected = true;
349 }
350 else {
351 nodes_to_check.push({&linked_node});
352 linked_node_state.is_in_stack = true;
353 pushed_node = true;
354 }
355 return false;
356 };
357
358 const Span<bNodeSocket *> sockets = (direction == ToposortDirection::LeftToRight) ?
359 node.runtime->inputs :
360 node.runtime->outputs;
361 while (true) {
362 if (item.socket_index == sockets.size()) {
363 /* All sockets have already been visited. */
364 break;
365 }
366 bNodeSocket &socket = *sockets[item.socket_index];
367 const Span<bNodeLink *> linked_links = socket.runtime->directly_linked_links;
368 if (item.link_index == linked_links.size()) {
369 /* All links connected to this socket have already been visited. */
370 item.socket_index++;
371 item.link_index = 0;
372 continue;
373 }
374 bNodeLink &link = *linked_links[item.link_index];
375 if (!link.is_available()) {
376 /* Ignore unavailable links. */
377 item.link_index++;
378 continue;
379 }
380 bNodeSocket &linked_socket = *socket.runtime->directly_linked_sockets[item.link_index];
381 bNode &linked_node = *linked_socket.runtime->owner_node;
382 if (handle_linked_node(linked_node)) {
383 /* The linked node has already been visited. */
384 item.link_index++;
385 continue;
386 }
387 break;
388 }
389
390 if (!pushed_node) {
391 /* Some nodes are internally linked without an explicit `bNodeLink`. The toposort should
392 * still order them correctly and find cycles. */
393 const Vector<const bNode *> implicitly_linked_nodes =
394 (direction == ToposortDirection::LeftToRight) ? get_implicit_origin_nodes(ntree, node) :
395 get_implicit_target_nodes(ntree, node);
396 while (true) {
397 if (item.implicit_link_index == implicitly_linked_nodes.size()) {
398 /* All implicitly linked nodes have already been visited. */
399 break;
400 }
401 const bNode &linked_node = *implicitly_linked_nodes[item.implicit_link_index];
402 if (handle_linked_node(const_cast<bNode &>(linked_node))) {
403 /* The implicitly linked node has already been visited. */
404 item.implicit_link_index++;
405 continue;
406 }
407 break;
408 }
409 }
410
411 /* If no other element has been pushed, the current node can be pushed to the sorted list.
412 */
413 if (!pushed_node) {
414 ToposortNodeState &node_state = node_states[node.index()];
415 node_state.is_done = true;
416 node_state.is_in_stack = false;
417 r_sorted_nodes.append(&node);
418 nodes_to_check.pop();
419 }
420 }
421}
422
423static void update_toposort(const bNodeTree &ntree,
424 const ToposortDirection direction,
425 Vector<bNode *> &r_sorted_nodes,
426 bool &r_cycle_detected)
427{
428 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
429 r_sorted_nodes.clear();
430 r_sorted_nodes.reserve(tree_runtime.nodes_by_id.size());
431 r_cycle_detected = false;
432
433 Array<ToposortNodeState> node_states(tree_runtime.nodes_by_id.size());
434 for (bNode *node : tree_runtime.nodes_by_id) {
435 if (node_states[node->index()].is_done) {
436 /* Ignore nodes that are done already. */
437 continue;
438 }
439 if ((direction == ToposortDirection::LeftToRight) ?
440 node->runtime->has_available_linked_outputs :
441 node->runtime->has_available_linked_inputs)
442 {
443 /* Ignore non-start nodes. */
444 continue;
445 }
447 ntree, direction, *node, node_states, r_sorted_nodes, r_cycle_detected);
448 }
449
450 if (r_sorted_nodes.size() < tree_runtime.nodes_by_id.size()) {
451 r_cycle_detected = true;
452 for (bNode *node : tree_runtime.nodes_by_id) {
453 if (node_states[node->index()].is_done) {
454 /* Ignore nodes that are done already. */
455 continue;
456 }
457 /* Start toposort at this node which is somewhere in the middle of a loop. */
459 ntree, direction, *node, node_states, r_sorted_nodes, r_cycle_detected);
460 }
461 }
462
463 BLI_assert(tree_runtime.nodes_by_id.size() == r_sorted_nodes.size());
464}
465
466static void update_root_frames(const bNodeTree &ntree)
467{
468 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
469 Span<bNode *> nodes = tree_runtime.nodes_by_id;
470
471 tree_runtime.root_frames.clear();
472
473 for (bNode *node : nodes) {
474 if (!node->parent && node->is_frame()) {
475 tree_runtime.root_frames.append(node);
476 }
477 }
478}
479
481{
482 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
483 Span<bNode *> nodes = tree_runtime.nodes_by_id;
484
485 for (bNode *node : nodes) {
486 node->runtime->direct_children_in_frame.clear();
487 }
488
489 for (bNode *node : nodes) {
490 if (const bNode *frame = node->parent) {
491 frame->runtime->direct_children_in_frame.append(node);
492 }
493 }
494}
495
496static void update_group_output_node(const bNodeTree &ntree)
497{
498 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
499 const bke::bNodeType *node_type = bke::node_type_find("NodeGroupOutput");
500 const Span<bNode *> group_output_nodes = tree_runtime.nodes_by_type.lookup(node_type);
501 if (group_output_nodes.is_empty()) {
502 tree_runtime.group_output_node = nullptr;
503 }
504 else if (group_output_nodes.size() == 1) {
505 tree_runtime.group_output_node = group_output_nodes[0];
506 }
507 else {
508 tree_runtime.group_output_node = nullptr;
509 for (bNode *group_output : group_output_nodes) {
510 if (group_output->flag & NODE_DO_OUTPUT) {
511 tree_runtime.group_output_node = group_output;
512 break;
513 }
514 }
515 }
516}
517
519{
520 for (const bNode *node : ntree.runtime->toposort_left_to_right) {
521 bNodeRuntime &node_runtime = *node->runtime;
522 if (!node->is_reroute()) {
523 node_runtime.is_dangling_reroute = false;
524 continue;
525 }
526 const Span<const bNodeLink *> links = node_runtime.inputs[0]->runtime->directly_linked_links;
527 if (links.is_empty()) {
528 node_runtime.is_dangling_reroute = true;
529 continue;
530 }
531 BLI_assert(links.size() == 1);
532 const bNode &source_node = *links.first()->fromnode;
533 node_runtime.is_dangling_reroute = source_node.runtime->is_dangling_reroute;
534 }
535}
536
537static void ensure_topology_cache(const bNodeTree &ntree)
538{
539 bNodeTreeRuntime &tree_runtime = *ntree.runtime;
540 tree_runtime.topology_cache_mutex.ensure([&]() {
541 update_node_vector(ntree);
542 update_link_vector(ntree);
544 update_panels(ntree);
549 tree_runtime.nodes_by_id.size() > 32,
550 [&]() { update_logically_linked_sockets(ntree); },
551 [&]() { update_sockets_by_identifier(ntree); },
552 [&]() {
553 update_toposort(ntree,
554 ToposortDirection::LeftToRight,
555 tree_runtime.toposort_left_to_right,
556 tree_runtime.has_available_link_cycle);
557 for (const int i : tree_runtime.toposort_left_to_right.index_range()) {
558 const bNode &node = *tree_runtime.toposort_left_to_right[i];
559 node.runtime->toposort_left_to_right_index = i;
560 }
561 },
562 [&]() {
563 bool dummy;
565 ntree, ToposortDirection::RightToLeft, tree_runtime.toposort_right_to_left, dummy);
566 for (const int i : tree_runtime.toposort_right_to_left.index_range()) {
567 const bNode &node = *tree_runtime.toposort_right_to_left[i];
568 node.runtime->toposort_right_to_left_index = i;
569 }
570 },
571 [&]() { update_root_frames(ntree); },
572 [&]() { update_direct_frames_childrens(ntree); });
575 tree_runtime.topology_cache_exists = true;
576 });
577}
578
579} // namespace blender::bke::node_tree_runtime
580
581namespace blender::bke {
582
584{
585 to_node_id_ = link.tonode->identifier;
586 input_socket_index_ = link.tosock->index();
587 input_link_index_ =
588 const_cast<const bNodeSocket *>(link.tosock)->directly_linked_links().first_index(&link);
589}
590
592{
593 return const_cast<bNodeLink *>(this->try_find(const_cast<const bNodeTree &>(ntree)));
594}
595
596const bNodeLink *NodeLinkKey::try_find(const bNodeTree &ntree) const
597{
598 const bNode *to_node = ntree.node_by_id(to_node_id_);
599 if (!to_node) {
600 return nullptr;
601 }
602 if (input_socket_index_ >= to_node->input_sockets().size()) {
603 return nullptr;
604 }
605 const bNodeSocket &input_socket = to_node->input_socket(input_socket_index_);
606 if (input_link_index_ >= input_socket.directly_linked_links().size()) {
607 return nullptr;
608 }
609 return input_socket.directly_linked_links()[input_link_index_];
610}
611
612} // namespace blender::bke
613
614void bNodeTree::ensure_topology_cache() const
615{
617}
618
619const bNestedNodeRef *bNodeTree::find_nested_node_ref(const int32_t nested_node_id) const
620{
621 for (const bNestedNodeRef &ref : this->nested_node_refs_span()) {
622 if (ref.id == nested_node_id) {
623 return &ref;
624 }
625 }
626 return nullptr;
627}
628
629const bNestedNodeRef *bNodeTree::nested_node_ref_from_node_id_path(
630 const blender::Span<int32_t> node_ids) const
631{
632 if (node_ids.is_empty()) {
633 return nullptr;
634 }
635 for (const bNestedNodeRef &ref : this->nested_node_refs_span()) {
636 blender::Vector<int> current_node_ids;
637 if (this->node_id_path_from_nested_node_ref(ref.id, current_node_ids)) {
638 if (current_node_ids.as_span() == node_ids) {
639 return &ref;
640 }
641 }
642 }
643 return nullptr;
644}
645
646bool bNodeTree::node_id_path_from_nested_node_ref(const int32_t nested_node_id,
647 blender::Vector<int> &r_node_ids) const
648{
649 const bNestedNodeRef *ref = this->find_nested_node_ref(nested_node_id);
650 if (ref == nullptr) {
651 return false;
652 }
653 const int32_t node_id = ref->path.node_id;
654 const bNode *node = this->node_by_id(node_id);
655 if (node == nullptr) {
656 return false;
657 }
658 r_node_ids.append(node_id);
659 if (!node->is_group()) {
660 return true;
661 }
662 const bNodeTree *group = reinterpret_cast<const bNodeTree *>(node->id);
663 if (group == nullptr) {
664 return false;
665 }
666 return group->node_id_path_from_nested_node_ref(ref->path.id_in_node, r_node_ids);
667}
668
669const bNode *bNodeTree::find_nested_node(const int32_t nested_node_id,
670 const bNodeTree **r_tree) const
671{
672 const bNestedNodeRef *ref = this->find_nested_node_ref(nested_node_id);
673 if (ref == nullptr) {
674 return nullptr;
675 }
676 const int32_t node_id = ref->path.node_id;
677 const bNode *node = this->node_by_id(node_id);
678 if (node == nullptr) {
679 return nullptr;
680 }
681 if (!node->is_group()) {
682 if (r_tree) {
683 *r_tree = this;
684 }
685 return node;
686 }
687 const bNodeTree *group = reinterpret_cast<const bNodeTree *>(node->id);
688 if (group == nullptr) {
689 return nullptr;
690 }
691 return group->find_nested_node(ref->path.id_in_node, r_tree);
692}
693
694const bNodeSocket &bNode::socket_by_decl(const blender::nodes::SocketDeclaration &decl) const
695{
696 return decl.in_out == SOCK_IN ? this->input_socket(decl.index) : this->output_socket(decl.index);
697}
698
699bNodeSocket &bNode::socket_by_decl(const blender::nodes::SocketDeclaration &decl)
700{
701 return decl.in_out == SOCK_IN ? this->input_socket(decl.index) : this->output_socket(decl.index);
702}
703
705{
706 tree.runtime->inferenced_input_socket_usage_mutex.ensure([&]() {
707 tree.runtime->inferenced_socket_usage =
709 });
710}
711
712bool bNodeSocket::affects_node_output() const
713{
714 BLI_assert(this->is_input());
716 const bNodeTree &tree = this->owner_tree();
718 return tree.runtime->inferenced_socket_usage[this->index_in_tree()].is_used;
719}
720
721bool bNodeSocket::inferred_socket_visibility() const
722{
724 const bNode &node = this->owner_node();
725 if (node.typeinfo->ignore_inferred_input_socket_visibility) {
726 return true;
727 }
728 const bNodeTree &tree = this->owner_tree();
729
731 return tree.runtime->inferenced_socket_usage[this->index_in_tree()].is_visible;
732}
#define BLI_assert(a)
Definition BLI_assert.h:46
#define LISTBASE_FOREACH(type, var, list)
struct bNestedNodeRef bNestedNodeRef
@ NODE_DO_OUTPUT
@ NTREE_GEOMETRY
@ SOCK_IN
@ SOCK_MULTI_INPUT
struct bNode bNode
struct bNodeTree bNodeTree
struct bNodeSocket bNodeSocket
int64_t append_and_get_index(const T &value)
void append(const T &value)
IndexRange index_range() const
void clear()
void ensure(const FunctionRef< void()> compute_cache)
constexpr const T & first() const
Definition BLI_span.hh:315
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr Span take_front(int64_t n) const
Definition BLI_span.hh:193
constexpr bool is_empty() const
Definition BLI_span.hh:260
bool is_empty() const
Definition BLI_stack.hh:308
void push(const T &value)
Definition BLI_stack.hh:213
int64_t size() const
bool contains(const Key &key) const
int64_t size() const
bool contains(const T &value) const
void append(const T &value)
void reserve(const int64_t min_capacity)
Span< T > as_span() const
Array< bNodePanelRuntime > panels
Vector< bNodeSocket * > outputs
Vector< bNodeSocket * > inputs
Vector< bNodeSocket * > output_sockets
Vector< bNodeSocket * > sockets
std::atomic< bool > topology_cache_exists
MultiValueMap< const bNodeType *, bNode * > nodes_by_type
Vector< bNodeSocket * > input_sockets
virtual const int & get_corresponding_output_id(const bNode &input_bnode) const =0
const bNode * get_corresponding_output(const bNodeTree &tree, const bNode &input_bnode) const
KDTree_3d * tree
static void toposort_from_start_node(const bNodeTree &ntree, const ToposortDirection direction, bNode &start_node, MutableSpan< ToposortNodeState > node_states, Vector< bNode * > &r_sorted_nodes, bool &r_cycle_detected)
static void update_dangling_reroute_nodes(const bNodeTree &ntree)
static void update_nodes_by_type(const bNodeTree &ntree)
static void update_direct_frames_childrens(const bNodeTree &ntree)
static Vector< const bNode * > get_implicit_origin_nodes(const bNodeTree &ntree, bNode &node)
static void update_group_output_node(const bNodeTree &ntree)
static void update_link_vector(const bNodeTree &ntree)
static void update_sockets_by_identifier(const bNodeTree &ntree)
static void update_logically_linked_sockets(const bNodeTree &ntree)
static void ensure_topology_cache(const bNodeTree &ntree)
static void find_logical_origins_for_socket_recursive(bNodeSocket &input_socket, bool only_follow_first_input_link, Vector< bNodeSocket *, 16 > &sockets_in_current_chain, Vector< bNodeSocket * > &r_logical_origins, Vector< bNodeSocket * > &r_skipped_origins)
static void update_internal_link_inputs(const bNodeTree &ntree)
bool topology_cache_is_available(const bNodeTree &tree)
static void update_panels(const bNodeTree &ntree)
static void update_node_vector(const bNodeTree &ntree)
static void update_directly_linked_links_and_sockets(const bNodeTree &ntree)
static void update_root_frames(const bNodeTree &ntree)
static void update_toposort(const bNodeTree &ntree, const ToposortDirection direction, Vector< bNode * > &r_sorted_nodes, bool &r_cycle_detected)
void preprocess_geometry_node_tree_for_evaluation(bNodeTree &tree_cow)
static Vector< const bNode * > get_implicit_target_nodes(const bNodeTree &ntree, bNode &node)
static void update_socket_vectors_and_owner_node(const bNodeTree &ntree)
const bNodeZoneType * zone_type_by_node_type(const int node_type)
bNodeSocketType NodeSocketTypeUndefined
Definition node.cc:127
Span< int > all_zone_output_node_types()
Span< int > all_zone_input_node_types()
bNodeType * node_type_find(StringRef idname)
Definition node.cc:2379
Array< SocketUsage > infer_all_sockets_usage(const bNodeTree &tree)
const GeometryNodesLazyFunctionGraphInfo * ensure_geometry_nodes_lazy_function_graph(const bNodeTree &btree)
void parallel_invoke(Functions &&...functions)
Definition BLI_task.hh:221
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
static void ensure_inference_usage_cache(const bNodeTree &tree)
bNestedNodePath path
bNodeSocketRuntimeHandle * runtime
bNodeTreeRuntimeHandle * runtime
ListBase links
bNodeTypeHandle * typeinfo
ListBase inputs
int num_panel_states
struct ID * id
struct bNode * parent
int16_t type_legacy
bNodeRuntimeHandle * runtime
ListBase outputs
int32_t identifier
bNodeLink * try_find(bNodeTree &ntree) const
NodeLinkKey(const bNodeLink &link)
Defines a node type.
Definition BKE_node.hh:238
i
Definition text_draw.cc:230