Blender V4.3
partial_eval.cc
Go to the documentation of this file.
/* SPDX-FileCopyrightText: 2024 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#include <queue>

#include "NOD_partial_eval.hh"

#include "BKE_node.hh"
#include "BKE_node_runtime.hh"

namespace blender::nodes::partial_eval {

16{
17 return ELEM(node.type,
19 FN_NODE_INPUT_VECTOR,
20 FN_NODE_INPUT_BOOL,
21 FN_NODE_INPUT_INT,
22 FN_NODE_INPUT_ROTATION);
23}
24
30 const bNode &initial_node)
31{
32 Vector<int> vec;
33 vec.append(initial_node.runtime->toposort_right_to_left_index);
34 for (const ComputeContext *context = initial_context; context; context = context->parent()) {
35 if (const auto *group_context = dynamic_cast<const bke::GroupNodeComputeContext *>(context)) {
36 const bNode *caller_group_node = group_context->caller_group_node();
37 BLI_assert(caller_group_node != nullptr);
38 vec.append(caller_group_node->runtime->toposort_right_to_left_index);
39 }
40 }
41 std::reverse(vec.begin(), vec.end());
42 return vec;
43}
44
47 const bNode &initial_node)
48{
49 Vector<int> vec;
50 vec.append(initial_node.runtime->toposort_left_to_right_index);
51 for (const ComputeContext *context = initial_context; context; context = context->parent()) {
52 if (const auto *group_context = dynamic_cast<const bke::GroupNodeComputeContext *>(context)) {
53 const bNode *caller_group_node = group_context->caller_group_node();
54 BLI_assert(caller_group_node != nullptr);
55 vec.append(caller_group_node->runtime->toposort_left_to_right_index);
56 }
57 }
58 std::reverse(vec.begin(), vec.end());
59 return vec;
60}
61
69 bool operator()(const NodeInContext &a, const NodeInContext &b) const
70 {
71 const Vector<int> a_sort_vec = get_global_node_sort_vector_right_to_left(a.context, *a.node);
72 const Vector<int> b_sort_vec = get_global_node_sort_vector_right_to_left(b.context, *b.node);
73 const int common_length = std::min(a_sort_vec.size(), b_sort_vec.size());
74 const Span<int> a_common = Span<int>(a_sort_vec).take_front(common_length);
75 const Span<int> b_common = Span<int>(b_sort_vec).take_front(common_length);
76 if (a_common == b_common) {
77 return a_sort_vec.size() < b_sort_vec.size();
78 }
79 return std::lexicographical_compare(
80 b_common.begin(), b_common.end(), a_common.begin(), a_common.end());
81 }
82};
83
91 bool operator()(const NodeInContext &a, const NodeInContext &b) const
92 {
93 const Vector<int> a_sort_vec = get_global_node_sort_vector_left_to_right(a.context, *a.node);
94 const Vector<int> b_sort_vec = get_global_node_sort_vector_left_to_right(b.context, *b.node);
95 const int common_length = std::min(a_sort_vec.size(), b_sort_vec.size());
96 const Span<int> a_common = Span<int>(a_sort_vec).take_front(common_length);
97 const Span<int> b_common = Span<int>(b_sort_vec).take_front(common_length);
98 if (a_common == b_common) {
99 return a_sort_vec.size() < b_sort_vec.size();
100 }
101 return std::lexicographical_compare(
102 b_common.begin(), b_common.end(), a_common.begin(), a_common.end());
103 }
104};
105
107 const Span<SocketInContext> initial_sockets,
108 ResourceScope &scope,
109 FunctionRef<void(const NodeInContext &ctx_node,
110 Vector<const bNodeSocket *> &r_outputs_to_propagate)> evaluate_node_fn,
111 FunctionRef<bool(const SocketInContext &ctx_from, const SocketInContext &ctx_to)>
112 propagate_value_fn)
113{
114 /* Priority queue that makes sure that nodes are evaluated in the right order. */
115 std::priority_queue<NodeInContext, std::vector<NodeInContext>, NodeInContextDownstreamComparator>
116 scheduled_nodes_queue;
117 /* Used to make sure that the same node is not scheduled more than once. */
118 Set<NodeInContext> scheduled_nodes_set;
119
120 const auto schedule_node = [&](const NodeInContext &ctx_node) {
121 if (scheduled_nodes_set.add(ctx_node)) {
122 scheduled_nodes_queue.push(ctx_node);
123 }
124 };
125
126 const auto forward_group_node_input_into_group =
127 [&](const SocketInContext &ctx_group_node_input) {
128 const bNode &node = ctx_group_node_input.socket->owner_node();
129 BLI_assert(node.is_group());
130 const bNodeTree *group_tree = reinterpret_cast<const bNodeTree *>(node.id);
131 if (!group_tree) {
132 return;
133 }
134 group_tree->ensure_topology_cache();
135 if (group_tree->has_available_link_cycle()) {
136 return;
137 }
138 const auto &group_context = scope.construct<bke::GroupNodeComputeContext>(
139 ctx_group_node_input.context, node, node.owner_tree());
140 const int socket_index = ctx_group_node_input.socket->index();
141 /* Forward the value to every group input node. */
142 for (const bNode *group_input_node : group_tree->group_input_nodes()) {
143 if (propagate_value_fn(ctx_group_node_input,
144 {&group_context, &group_input_node->output_socket(socket_index)}))
145 {
146 schedule_node({&group_context, group_input_node});
147 }
148 }
149 };
150
151 const auto forward_output = [&](const SocketInContext &ctx_output_socket) {
152 const ComputeContext *context = ctx_output_socket.context;
153 for (const bNodeLink *link : ctx_output_socket.socket->directly_linked_links()) {
154 if (!link->is_used()) {
155 continue;
156 }
157 const bNode &target_node = *link->tonode;
158 const bNodeSocket &target_socket = *link->tosock;
159 if (!propagate_value_fn(ctx_output_socket, {context, &target_socket})) {
160 continue;
161 }
162 schedule_node({context, &target_node});
163 if (target_node.is_group()) {
164 forward_group_node_input_into_group({context, &target_socket});
165 }
166 }
167 };
168
169 /* Do initial scheduling based on initial sockets. */
170 for (const SocketInContext &ctx_socket : initial_sockets) {
171 if (ctx_socket.socket->is_input()) {
172 const bNode &node = ctx_socket.socket->owner_node();
173 if (node.is_group()) {
174 forward_group_node_input_into_group(ctx_socket);
175 }
176 schedule_node({ctx_socket.context, &node});
177 }
178 else {
179 forward_output(ctx_socket);
180 }
181 }
182
183 /* Reused in multiple places to avoid allocating it multiple times. Should be cleared before
184 * using it. */
185 Vector<const bNodeSocket *> sockets_vec;
186
187 /* Handle all scheduled nodes in the right order until no more nodes are scheduled. */
188 while (!scheduled_nodes_queue.empty()) {
189 const NodeInContext ctx_node = scheduled_nodes_queue.top();
190 scheduled_nodes_queue.pop();
191
192 const bNode &node = *ctx_node.node;
193 const ComputeContext *context = ctx_node.context;
194
195 if (node.is_reroute()) {
196 if (propagate_value_fn({context, &node.input_socket(0)}, {context, &node.output_socket(0)}))
197 {
198 forward_output({context, &node.output_socket(0)});
199 }
200 }
201 else if (node.is_muted()) {
202 for (const bNodeLink &link : node.internal_links()) {
203 if (propagate_value_fn({context, link.fromsock}, {context, link.tosock})) {
204 forward_output({context, link.tosock});
205 }
206 }
207 }
208 else if (node.is_group()) {
209 const bNodeTree *group = reinterpret_cast<const bNodeTree *>(node.id);
210 if (!group) {
211 continue;
212 }
213 group->ensure_topology_cache();
214 if (group->has_available_link_cycle()) {
215 continue;
216 }
217 const bNode *group_output = group->group_output_node();
218 if (!group_output) {
219 continue;
220 }
221 const ComputeContext &group_context = scope.construct<bke::GroupNodeComputeContext>(
222 context, node, node.owner_tree());
223 /* Propagate the values from the group output node to the outputs of the group node and
224 * continue forwarding them from there. */
225 for (const int index : group->interface_outputs().index_range()) {
226 if (propagate_value_fn({&group_context, &group_output->input_socket(index)},
227 {context, &node.output_socket(index)}))
228 {
229 forward_output({context, &node.output_socket(index)});
230 }
231 }
232 }
233 else if (node.is_group_input()) {
234 for (const bNodeSocket *output_socket : node.output_sockets()) {
235 forward_output({context, output_socket});
236 }
237 }
238 else {
239 sockets_vec.clear();
240 evaluate_node_fn(ctx_node, sockets_vec);
241 for (const bNodeSocket *socket : sockets_vec) {
242 forward_output({context, socket});
243 }
244 }
245 }
246}
247
249 const Span<SocketInContext> initial_sockets,
250 ResourceScope &scope,
251 FunctionRef<void(const NodeInContext &ctx_node,
252 Vector<const bNodeSocket *> &r_modified_inputs)> evaluate_node_fn,
253 FunctionRef<bool(const SocketInContext &ctx_from, const SocketInContext &ctx_to)>
254 propagate_value_fn,
255 FunctionRef<void(const NodeInContext &ctx_node, Vector<const bNodeSocket *> &r_sockets)>
256 get_inputs_to_propagate_fn)
257{
258 /* Priority queue that makes sure that nodes are evaluated in the right order. */
259 std::priority_queue<NodeInContext, std::vector<NodeInContext>, NodeInContextUpstreamComparator>
260 scheduled_nodes_queue;
261 /* Used to make sure that the same node is not scheduled more than once. */
262 Set<NodeInContext> scheduled_nodes_set;
263
264 UpstreamEvalTargets eval_targets;
265
266 const auto schedule_node = [&](const NodeInContext &ctx_node) {
267 if (scheduled_nodes_set.add(ctx_node)) {
268 scheduled_nodes_queue.push(ctx_node);
269 }
270 };
271
272 const auto forward_group_node_output_into_group = [&](const SocketInContext &ctx_output_socket) {
273 const ComputeContext *context = ctx_output_socket.context;
274 const bNode &group_node = ctx_output_socket.socket->owner_node();
275 const bNodeTree *group = reinterpret_cast<const bNodeTree *>(group_node.id);
276 if (!group) {
277 return;
278 }
279 group->ensure_topology_cache();
280 if (group->has_available_link_cycle()) {
281 return;
282 }
283 const bNode *group_output = group->group_output_node();
284 if (!group_output) {
285 return;
286 }
287 const ComputeContext &group_context = scope.construct<bke::GroupNodeComputeContext>(
288 context, group_node, group_node.owner_tree());
289 propagate_value_fn(
290 ctx_output_socket,
291 {&group_context, &group_output->input_socket(ctx_output_socket.socket->index())});
292 schedule_node({&group_context, group_output});
293 };
294
295 const auto forward_group_input_to_parent = [&](const SocketInContext &ctx_output_socket) {
296 const auto *group_context = dynamic_cast<const bke::GroupNodeComputeContext *>(
297 ctx_output_socket.context);
298 if (!group_context) {
299 eval_targets.group_inputs.add(ctx_output_socket);
300 return;
301 }
302 const bNodeTree &caller_tree = *group_context->caller_tree();
303 caller_tree.ensure_topology_cache();
304 if (caller_tree.has_available_link_cycle()) {
305 return;
306 }
307 const bNode &caller_node = *group_context->caller_group_node();
308 const bNodeSocket &caller_input_socket = caller_node.input_socket(
309 ctx_output_socket.socket->index());
310 const ComputeContext *parent_context = ctx_output_socket.context->parent();
311 /* Note that we might propagate multiple values to the same input of the group node. The
312 * callback has to handle that case gracefully. */
313 propagate_value_fn(ctx_output_socket, {parent_context, &caller_input_socket});
314 schedule_node({parent_context, &caller_node});
315 };
316
317 const auto forward_input = [&](const SocketInContext &ctx_input_socket) {
318 const ComputeContext *context = ctx_input_socket.context;
319 if (!ctx_input_socket.socket->is_logically_linked()) {
320 eval_targets.sockets.add(ctx_input_socket);
321 return;
322 }
323 for (const bNodeLink *link : ctx_input_socket.socket->directly_linked_links()) {
324 if (!link->is_used()) {
325 continue;
326 }
327 const bNode &origin_node = *link->fromnode;
328 const bNodeSocket &origin_socket = *link->fromsock;
329 if (!propagate_value_fn(ctx_input_socket, {context, &origin_socket})) {
330 continue;
331 }
332 schedule_node({context, &origin_node});
333 if (origin_node.is_group()) {
334 forward_group_node_output_into_group({context, &origin_socket});
335 continue;
336 }
337 if (origin_node.is_group_input()) {
338 forward_group_input_to_parent({context, &origin_socket});
339 continue;
340 }
341 }
342 };
343
344 /* Do initial scheduling based on initial sockets. */
345 for (const SocketInContext &ctx_socket : initial_sockets) {
346 if (ctx_socket.socket->is_input()) {
347 forward_input(ctx_socket);
348 }
349 else {
350 const bNode &node = ctx_socket.socket->owner_node();
351 if (node.is_group()) {
352 forward_group_node_output_into_group(ctx_socket);
353 }
354 else if (node.is_group_input()) {
355 forward_group_input_to_parent(ctx_socket);
356 }
357 else {
358 schedule_node({ctx_socket.context, &node});
359 }
360 }
361 }
362
363 /* Reused in multiple places to avoid allocating it multiple times. Should be cleared before
364 * using it. */
365 Vector<const bNodeSocket *> sockets_vec;
366
367 /* Handle all nodes in the right order until there are no more nodes to evaluate. */
368 while (!scheduled_nodes_queue.empty()) {
369 const NodeInContext ctx_node = scheduled_nodes_queue.top();
370 scheduled_nodes_queue.pop();
371
372 const bNode &node = *ctx_node.node;
373 const ComputeContext *context = ctx_node.context;
374
375 if (is_supported_value_node(node)) {
376 /* Can't go back further from here, but remember that we reached a value node. */
377 eval_targets.value_nodes.add(ctx_node);
378 }
379 else if (node.is_reroute()) {
380 propagate_value_fn({context, &node.output_socket(0)}, {context, &node.input_socket(0)});
381 forward_input({context, &node.input_socket(0)});
382 }
383 else if (node.is_muted()) {
384 for (const bNodeLink &link : node.internal_links()) {
385 if (propagate_value_fn({context, link.tosock}, {context, link.fromsock})) {
386 forward_input({context, link.fromsock});
387 }
388 }
389 }
390 else if (node.is_group()) {
391 /* Once we get here, the nodes within the group have all been evaluated already and the
392 * inputs of the group node are already set properly by #forward_group_input_to_parent. */
393 sockets_vec.clear();
394 get_inputs_to_propagate_fn(ctx_node, sockets_vec);
395 for (const bNodeSocket *socket : sockets_vec) {
396 forward_input({context, socket});
397 }
398 }
399 else if (node.is_group_output()) {
400 sockets_vec.clear();
401 get_inputs_to_propagate_fn(ctx_node, sockets_vec);
402 for (const bNodeSocket *socket : sockets_vec) {
403 forward_input({context, socket});
404 }
405 }
406 else {
407 sockets_vec.clear();
408 evaluate_node_fn(ctx_node, sockets_vec);
409 for (const bNodeSocket *input_socket : sockets_vec) {
410 forward_input({context, input_socket});
411 }
412 }
413 }
414
415 return eval_targets;
416}
417
418} // namespace blender::nodes::partial_eval
#define SH_NODE_VALUE
Definition BKE_node.hh:892
#define BLI_assert(a)
Definition BLI_assert.h:50
#define ELEM(...)
const ComputeContext * parent() const
T & construct(Args &&...args)
bool add(const Key &key)
Definition BLI_set.hh:248
constexpr const T * end() const
Definition BLI_span.hh:225
constexpr const T * begin() const
Definition BLI_span.hh:221
constexpr Span take_front(int64_t n) const
Definition BLI_span.hh:194
int64_t size() const
void append(const T &value)
local_group_size(16, 16) .push_constant(Type b
OperationNode * node
static Vector< int > get_global_node_sort_vector_left_to_right(const ComputeContext *initial_context, const bNode &initial_node)
void eval_downstream(const Span< SocketInContext > initial_sockets, ResourceScope &scope, FunctionRef< void(const NodeInContext &ctx_node, Vector< const bNodeSocket * > &r_outputs_to_propagate)> evaluate_node_fn, FunctionRef< bool(const SocketInContext &ctx_from, const SocketInContext &ctx_to)> propagate_value_fn)
static Vector< int > get_global_node_sort_vector_right_to_left(const ComputeContext *initial_context, const bNode &initial_node)
UpstreamEvalTargets eval_upstream(const Span< SocketInContext > initial_sockets, ResourceScope &scope, FunctionRef< void(const NodeInContext &ctx_node, Vector< const bNodeSocket * > &r_modified_inputs)> evaluate_node_fn, FunctionRef< bool(const SocketInContext &ctx_from, const SocketInContext &ctx_to)> propagate_value_fn, FunctionRef< void(const NodeInContext &ctx_node, Vector< const bNodeSocket * > &r_sockets)> get_inputs_to_propagate_fn)
bool is_supported_value_node(const bNode &node)
struct ID * id
bNodeRuntimeHandle * runtime
bool operator()(const NodeInContext &a, const NodeInContext &b) const
bool operator()(const NodeInContext &a, const NodeInContext &b) const