// NOTE(review): fragmentary extraction — many source lines are missing, and the
// file's own line numbers (e.g. "116", "120") have been fused into the text of
// each line. Recover the full original file before making code changes here.
//
// Visible structure: the forward half of a node-graph value-propagation pass
// over a bNodeTree-style API (presumably Blender). Values are pushed
// downstream from output sockets across links, descending into node groups via
// their group-input nodes; nodes to evaluate are collected through a
// de-duplicating BFS worklist (scheduled_nodes_set gates scheduled_nodes_queue).
116 scheduled_nodes_queue;
// Enqueue a node exactly once: the set's add() returns true only on first
// insertion, so the queue never holds duplicates.
120 const auto schedule_node = [&](
const NodeInContext &ctx_node) {
121 if (scheduled_nodes_set.
add(ctx_node)) {
122 scheduled_nodes_queue.push(ctx_node);
// Carry a value sitting on a group node's input socket into the group's
// interior: forward it to the matching output of every group-input node inside
// the group, then schedule those nodes. Bails out (on lines not fully visible
// here) when the group tree has an available link cycle.
126 const auto forward_group_node_input_into_group =
128 const bNode &node = ctx_group_node_input.socket->owner_node();
134 group_tree->ensure_topology_cache();
135 if (group_tree->has_available_link_cycle()) {
139 ctx_group_node_input.context,
node, node.owner_tree());
// Socket correspondence is positional: group-node input index i maps to
// output index i of each group-input node inside the group.
140 const int socket_index = ctx_group_node_input.socket->index();
142 for (
const bNode *group_input_node : group_tree->group_input_nodes()) {
143 if (propagate_value_fn(ctx_group_node_input,
144 {&group_context, &group_input_node->output_socket(socket_index)}))
146 schedule_node({&group_context, group_input_node});
// Forward a value from an output socket across every used outgoing link;
// schedule each target node, and when the target is a group node also carry
// the value into the group's interior.
151 const auto forward_output = [&](
const SocketInContext &ctx_output_socket) {
153 for (
const bNodeLink *link : ctx_output_socket.socket->directly_linked_links()) {
154 if (!link->is_used()) {
157 const bNode &target_node = *link->tonode;
159 if (!propagate_value_fn(ctx_output_socket, {context, &target_socket})) {
162 schedule_node({context, &target_node});
163 if (target_node.is_group()) {
164 forward_group_node_input_into_group({context, &target_socket});
// Seeding (presumably iterating caller-provided start sockets — the loop
// header is not visible in this extraction): an input socket schedules its
// owning node (descending into groups first); an output socket forwards
// directly.
171 if (ctx_socket.socket->is_input()) {
172 const bNode &node = ctx_socket.socket->owner_node();
173 if (node.is_group()) {
174 forward_group_node_input_into_group(ctx_socket);
176 schedule_node({ctx_socket.context, &node});
179 forward_output(ctx_socket);
// Main worklist loop: pop scheduled nodes and dispatch on node kind.
188 while (!scheduled_nodes_queue.empty()) {
190 scheduled_nodes_queue.pop();
// Reroute: the value passes straight from the single input to the single
// output socket.
195 if (node.is_reroute()) {
196 if (propagate_value_fn({context, &node.input_socket(0)}, {context, &node.output_socket(0)}))
198 forward_output({context, &node.output_socket(0)});
// Muted node: values bypass the node along its internal links
// (fromsock -> tosock).
201 else if (node.is_muted()) {
202 for (
const bNodeLink &link : node.internal_links()) {
203 if (propagate_value_fn({context, link.fromsock}, {context, link.tosock})) {
204 forward_output({context, link.tosock});
// Group node: lift values from the group-output node's inputs (evaluated in
// the group's own context) onto the group node's outputs, again positionally
// by interface index. Skipped (lines partly missing) on link cycles.
208 else if (node.is_group()) {
213 group->ensure_topology_cache();
214 if (group->has_available_link_cycle()) {
217 const bNode *group_output = group->group_output_node();
222 context,
node, node.owner_tree());
225 for (
const int index : group->interface_outputs().index_range()) {
226 if (propagate_value_fn({&group_context, &group_output->input_socket(index)},
227 {context, &node.output_socket(index)}))
229 forward_output({context, &node.output_socket(index)});
// Group-input node: fan each of its outputs out to downstream consumers.
233 else if (node.is_group_input()) {
234 for (
const bNodeSocket *output_socket : node.output_sockets()) {
235 forward_output({context, output_socket});
// Ordinary node: evaluate it, then forward the produced sockets
// (sockets_vec is populated on lines not visible in this extraction).
240 evaluate_node_fn(ctx_node, sockets_vec);
242 forward_output({context, socket});
// NOTE(review): fragmentary extraction — many source lines are missing, and the
// file's own line numbers (e.g. "256", "260") have been fused into the text of
// each line. Recover the full original file before making code changes here.
//
// Visible structure: the backward (upstream) half of a node-graph
// value-propagation pass, mirroring the forward pass above it in the original
// file: values are pulled from input sockets back to the origin sockets that
// feed them, ascending out of groups to the caller and descending into group
// nodes via their group-output node. Same de-duplicating BFS worklist scheme.
256 get_inputs_to_propagate_fn)
260 scheduled_nodes_queue;
// Enqueue a node exactly once (set add() gates the queue push).
266 const auto schedule_node = [&](
const NodeInContext &ctx_node) {
267 if (scheduled_nodes_set.
add(ctx_node)) {
268 scheduled_nodes_queue.push(ctx_node);
// Pull a value requested on a group node's output socket from inside the
// group: forward it to the positionally matching input of the group's
// group-output node, then schedule that node. Skipped on link cycles.
272 const auto forward_group_node_output_into_group = [&](
const SocketInContext &ctx_output_socket) {
274 const bNode &group_node = ctx_output_socket.socket->owner_node();
279 group->ensure_topology_cache();
280 if (group->has_available_link_cycle()) {
283 const bNode *group_output = group->group_output_node();
288 context, group_node, group_node.owner_tree());
291 {&group_context, &group_output->input_socket(ctx_output_socket.socket->index())});
292 schedule_node({&group_context, group_output});
// Pull a value requested on a group-input node's output from the parent
// (calling) tree: map it to the caller group node's matching input socket in
// the parent context and schedule the caller node. Does nothing when there is
// no calling context (i.e. this tree is the top level).
295 const auto forward_group_input_to_parent = [&](
const SocketInContext &ctx_output_socket) {
297 ctx_output_socket.context);
298 if (!group_context) {
302 const bNodeTree &caller_tree = *group_context->caller_tree();
303 caller_tree.ensure_topology_cache();
304 if (caller_tree.has_available_link_cycle()) {
307 const bNode &caller_node = *group_context->caller_group_node();
308 const bNodeSocket &caller_input_socket = caller_node.input_socket(
309 ctx_output_socket.socket->index());
313 propagate_value_fn(ctx_output_socket, {parent_context, &caller_input_socket});
314 schedule_node({parent_context, &caller_node});
// Pull a value backward from an input socket: an unlinked input is a final
// evaluation target; otherwise walk each used incoming link to its origin,
// schedule the origin node, and recurse into groups / out to the parent as
// the origin node's kind requires.
317 const auto forward_input = [&](
const SocketInContext &ctx_input_socket) {
319 if (!ctx_input_socket.socket->is_logically_linked()) {
320 eval_targets.
sockets.add(ctx_input_socket);
323 for (
const bNodeLink *link : ctx_input_socket.socket->directly_linked_links()) {
324 if (!link->is_used()) {
327 const bNode &origin_node = *link->fromnode;
328 const bNodeSocket &origin_socket = *link->fromsock;
329 if (!propagate_value_fn(ctx_input_socket, {context, &origin_socket})) {
332 schedule_node({context, &origin_node});
333 if (origin_node.is_group()) {
334 forward_group_node_output_into_group({context, &origin_socket});
337 if (origin_node.is_group_input()) {
338 forward_group_input_to_parent({context, &origin_socket});
// Seeding (loop header not visible): input sockets are pulled directly;
// output sockets route through the group/group-input special cases before
// scheduling their owning node.
346 if (ctx_socket.socket->is_input()) {
347 forward_input(ctx_socket);
350 const bNode &node = ctx_socket.socket->owner_node();
351 if (node.is_group()) {
352 forward_group_node_output_into_group(ctx_socket);
354 else if (node.is_group_input()) {
355 forward_group_input_to_parent(ctx_socket);
358 schedule_node({ctx_socket.context, &node});
// Main worklist loop: pop scheduled nodes and dispatch on node kind.
368 while (!scheduled_nodes_queue.empty()) {
370 scheduled_nodes_queue.pop();
// Reroute: propagation runs output -> input here (backward direction),
// then the input is pulled further upstream. Note the unconditional
// propagate call, unlike the forward pass.
379 else if (node.is_reroute()) {
380 propagate_value_fn({context, &node.output_socket(0)}, {context, &node.input_socket(0)});
381 forward_input({context, &node.input_socket(0)});
// Muted node: internal links traversed in reverse (tosock -> fromsock).
383 else if (node.is_muted()) {
384 for (
const bNodeLink &link : node.internal_links()) {
385 if (propagate_value_fn({context, link.tosock}, {context, link.fromsock})) {
386 forward_input({context, link.fromsock});
// Group and group-output nodes: the caller-supplied callback decides which
// inputs still need their values pulled; each is forwarded upstream.
390 else if (node.is_group()) {
394 get_inputs_to_propagate_fn(ctx_node, sockets_vec);
396 forward_input({context, socket});
399 else if (node.is_group_output()) {
401 get_inputs_to_propagate_fn(ctx_node, sockets_vec);
403 forward_input({context, socket});
// Ordinary node: evaluate it, then pull every input socket it reported
// (sockets_vec is filled by evaluate_node_fn here, per the visible call).
408 evaluate_node_fn(ctx_node, sockets_vec);
409 for (
const bNodeSocket *input_socket : sockets_vec) {
410 forward_input({context, input_socket});