Blender V5.0
scheduler.cc
Go to the documentation of this file.
/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#include <algorithm>

#include "BLI_map.hh"
#include "BLI_set.hh"
#include "BLI_stack.hh"
#include "BLI_vector.hh"

#include "NOD_derived_node_tree.hh"

#include "COM_context.hh"
#include "COM_scheduler.hh"
#include "COM_utilities.hh"

namespace blender::compositor {

using namespace nodes::derived_node_tree_types;
22/* Returns true if any of the node group nodes that make up this tree context are muted. */
23static bool is_tree_context_muted(const DTreeContext &tree_context)
24{
25 /* Root contexts are never muted. */
26 if (tree_context.is_root()) {
27 return false;
28 }
29
30 /* The node group that represents this context is muted. */
31 if (tree_context.parent_node()->is_muted()) {
32 return true;
33 }
34
35 /* Recursively check parent contexts up until the root context. */
36 return is_tree_context_muted(*tree_context.parent_context());
37}
38
39/* Find the active viewer node in the given tree context. Returns a null node if no active node was
40 * found. */
42{
43 /* Do not return viewer nodes that are inside muted contexts. */
44 if (is_tree_context_muted(tree_context)) {
45 return DNode();
46 }
47
48 for (const bNode *node : tree_context.btree().nodes_by_type("CompositorNodeViewer")) {
49 if (node->flag & NODE_DO_OUTPUT && !node->is_muted()) {
50 return DNode(&tree_context, node);
51 }
52 }
53
54 return DNode();
55}
56
57/* Add all File Output nodes inside the given tree_context recursively to the node stack. */
58static void add_file_output_nodes(const DTreeContext &tree_context, Stack<DNode> &node_stack)
59{
60 for (const bNode *node : tree_context.btree().nodes_by_type("CompositorNodeOutputFile")) {
61 if (node->is_muted()) {
62 continue;
63 }
64
65 node_stack.push(DNode(&tree_context, node));
66 }
67
68 for (const bNode *node : tree_context.btree().group_nodes()) {
69 if (node->is_muted()) {
70 continue;
71 }
72
73 const bNodeTree *group_tree = reinterpret_cast<const bNodeTree *>(node->id);
74 if (!group_tree) {
75 continue;
76 }
77
78 add_file_output_nodes(*tree_context.child_context(*node), node_stack);
79 }
80}
81
82/* Add the output nodes whose result should be computed to the given stack. This includes File
83 * Output, Group Output, and Viewer nodes. */
84static void add_output_nodes(const Context &context,
85 const DerivedNodeTree &tree,
86 Stack<DNode> &node_stack)
87{
88 const DTreeContext &root_context = tree.root_context();
89
90 if (bool(context.needed_outputs() & OutputTypes::FileOutput)) {
91 add_file_output_nodes(root_context, node_stack);
92 }
93
94 if (bool(context.needed_outputs() & OutputTypes::Composite)) {
95 for (const bNode *node : root_context.btree().nodes_by_type("NodeGroupOutput")) {
96 if (node->flag & NODE_DO_OUTPUT && !node->is_muted()) {
97 node_stack.push(DNode(&root_context, node));
98 break;
99 }
100 }
101 }
102
103 if (bool(context.needed_outputs() & OutputTypes::Viewer)) {
104 /* Check if the active context has a viewer node, if not, check the root context. */
105 DNode viewer_node = find_viewer_node_in_context(tree.active_context());
106 if (!viewer_node) {
107 viewer_node = find_viewer_node_in_context(tree.root_context());
108 }
109
110 /* No viewer node in either contexts. */
111 if (!viewer_node) {
112 return;
113 }
114
115 /* If the viewer is treated as a compositor output and takes precedence over it, we need to
116 * remove it since the viewer will act in its place. */
117 if (context.treat_viewer_as_compositor_output() && !node_stack.is_empty() &&
118 node_stack.peek()->is_type("NodeGroupOutput"))
119 {
120 node_stack.pop();
121 }
122
123 node_stack.push(viewer_node);
124 }
125}
126
127/* A type representing a mapping that associates each node with a heuristic estimation of the
128 * number of intermediate buffers needed to compute it and all of its dependencies. See the
129 * compute_number_of_needed_buffers function for more information. */
131
/* Compute a heuristic estimation of the number of intermediate buffers needed to compute each node
 * and all of its dependencies for all nodes that the given node depends on. The output is a map
 * that maps each node with the number of intermediate buffers needed to compute it and all of its
 * dependencies.
 *
 * Consider a node that takes n number of buffers as an input from a number of node dependencies,
 * which we shall call the input nodes. The node also computes and outputs m number of buffers.
 * In order for the node to compute its output, a number of intermediate buffers will be needed.
 * Since the node takes n buffers and outputs m buffers, then the number of buffers directly
 * needed by the node is (n + m). But each of the input buffers are computed by a node that, in
 * turn, needs a number of buffers to compute its output. So the total number of buffers needed
 * to compute the output of the node is max(n + m, d) where d is the number of buffers needed by
 * the input node that needs the largest number of buffers. We only consider the input node that
 * needs the largest number of buffers, because those buffers can be reused by any input node
 * that needs a lesser number of buffers.
 *
 * Pixel nodes, however, are a special case because links between two pixel nodes inside the same
 * pixel operation don't pass a buffer, but a single value in the pixel processor. So for pixel
 * nodes, only inputs and outputs linked to nodes that are not pixel nodes should be considered.
 * Note that this might not actually be true, because the compiler may decide to split a pixel
 * operation into multiples ones that will pass buffers, but this is not something that can be
 * known at scheduling-time. See the discussion in COM_compile_state.hh, COM_evaluator.hh, and
 * COM_shader_operation.hh for more information. In the node tree shown below, node 4 will have
 * exactly the same number of needed buffers as node 3, because its inputs and outputs are all
 * internally linked in the pixel operation.
 *
 *                                            Pixel Operation
 *                   +------------------------------------------------------+
 * .------------.    |  .------------.  .------------.      .------------.  |  .------------.
 * |   Node 1   |    |  |   Node 3   |  |   Node 4   |      |   Node 5   |  |  |   Node 6   |
 * |            |----|--|            |--|            |------|            |--|--|            |
 * |            |  .-|--|            |  |            |  .---|            |  |  |            |
 * '------------'  | |  '------------'  '------------'  |   '------------'  |  '------------'
 *                 | +-----------------------------------|------------------+
 * .------------.  |                                     |
 * |   Node 2   |  |                                     |
 * |            |--'-------------------------------------'
 * |            |
 * '------------'
 *
 * Note that the computed output is not guaranteed to be accurate, and will not be in most cases.
 * The computation is merely a heuristic estimation that works well in most cases. This is due to a
 * number of reasons:
 * - The node tree is actually a graph that allows output sharing, which is not something that was
 *   taken into consideration in this implementation because it is difficult to correctly consider.
 * - Each node may allocate any number of internal buffers, which is not taken into account in this
 *   implementation because it rarely affects the output and is done by very few nodes.
 * - The compiler may decide to compile the schedule differently depending on runtime information
 *   which we can merely speculate at scheduling-time as described above. */
182{
183 NeededBuffers needed_buffers;
184
185 /* A stack of nodes used to traverse the node tree starting from the output nodes. */
186 Stack<DNode> node_stack = output_nodes;
187
188 /* Traverse the node tree in a post order depth first manner and compute the number of needed
189 * buffers for each node. Post order traversal guarantee that all the node dependencies of each
190 * node are computed before it. This is done by pushing all the uncomputed node dependencies to
191 * the node stack first and only popping and computing the node when all its node dependencies
192 * were computed. */
193 while (!node_stack.is_empty()) {
194 /* Do not pop the node immediately, as it may turn out that we can't compute its number of
195 * needed buffers just yet because its dependencies weren't computed, it will be popped later
196 * when needed. */
197 DNode &node = node_stack.peek();
198
199 /* Go over the node dependencies connected to the inputs of the node and push them to the node
200 * stack if they were not computed already. */
201 Set<DNode> pushed_nodes;
202 for (const bNodeSocket *input : node->input_sockets()) {
203 const DInputSocket dinput{node.context(), input};
204
206 continue;
207 }
208
209 /* Get the output linked to the input. If it is null, that means the input is unlinked and
210 * has no dependency node. */
211 const DOutputSocket doutput = get_output_linked_to_input(dinput);
212 if (!doutput) {
213 continue;
214 }
215
216 /* The node dependency was already computed or pushed before, so skip it. */
217 if (needed_buffers.contains(doutput.node()) || pushed_nodes.contains(doutput.node())) {
218 continue;
219 }
220
221 /* The output node needs to be computed, push the node dependency to the node stack and
222 * indicate that it was pushed. */
223 node_stack.push(doutput.node());
224 pushed_nodes.add_new(doutput.node());
225 }
226
227 /* If any of the node dependencies were pushed, that means that not all of them were computed
228 * and consequently we can't compute the number of needed buffers for this node just yet. */
229 if (!pushed_nodes.is_empty()) {
230 continue;
231 }
232
233 /* We don't need to store the result of the pop because we already peeked at it before. */
234 node_stack.pop();
235
236 /* Compute the number of buffers that the node takes as an input as well as the number of
237 * buffers needed to compute the most demanding of the node dependencies. */
238 int number_of_input_buffers = 0;
239 int buffers_needed_by_dependencies = 0;
240 for (const bNodeSocket *input : node->input_sockets()) {
241 const DInputSocket dinput{node.context(), input};
242
244 continue;
245 }
246
247 /* Get the output linked to the input. If it is null, that means the input is unlinked.
248 * Unlinked inputs do not take a buffer, so skip those inputs. */
249 const DOutputSocket doutput = get_output_linked_to_input(dinput);
250 if (!doutput) {
251 continue;
252 }
253
254 /* Since this input is linked, if the link is not between two pixel nodes, it means that the
255 * node takes a buffer through this input and so we increment the number of input buffers. */
256 if (!is_pixel_node(node) || !is_pixel_node(doutput.node())) {
257 number_of_input_buffers++;
258 }
259
260 /* If the number of buffers needed by the node dependency is more than the total number of
261 * buffers needed by the dependencies, then update the latter to be the former. This is
262 * computing the "d" in the aforementioned equation "max(n + m, d)". */
263 const int buffers_needed_by_dependency = needed_buffers.lookup(doutput.node());
264 buffers_needed_by_dependencies = std::max(buffers_needed_by_dependency,
265 buffers_needed_by_dependencies);
266 }
267
268 /* Compute the number of buffers that will be computed/output by this node. */
269 int number_of_output_buffers = 0;
270 for (const bNodeSocket *output : node->output_sockets()) {
271 const DOutputSocket doutput{node.context(), output};
272
274 continue;
275 }
276
277 /* The output is not linked, it outputs no buffer. */
278 if (!output->is_logically_linked()) {
279 continue;
280 }
281
282 /* If any of the links is not between two pixel nodes, it means that the node outputs
283 * a buffer through this output and so we increment the number of output buffers. */
285 number_of_output_buffers++;
286 }
287 }
288
289 /* Compute the heuristic estimation of the number of needed intermediate buffers to compute
290 * this node and all of its dependencies. This is computing the aforementioned equation
291 * "max(n + m, d)". */
292 const int total_buffers = std::max(number_of_input_buffers + number_of_output_buffers,
293 buffers_needed_by_dependencies);
294 needed_buffers.add(node, total_buffers);
295 }
296
297 return needed_buffers;
298}
299
/* There are multiple different possible orders of evaluating a node graph, each of which needs
 * to allocate a number of intermediate buffers to store its intermediate results. It follows
 * that we need to find the evaluation order which uses the least amount of intermediate buffers.
 * For instance, consider a node that takes two input buffers A and B. Each of those buffers is
 * computed through a number of nodes constituting a sub-graph whose root is the node that
 * outputs that buffer. Suppose the number of intermediate buffers needed to compute A and B are
 * N(A) and N(B) respectively and N(A) > N(B). Then evaluating the sub-graph computing A would be
 * a better option than that of B, because had B been computed first, its outputs would need to be
 * stored in extra buffers in addition to the buffers needed by A. The number of buffers needed by
 * each node is estimated as described in the compute_number_of_needed_buffers function.
 *
 * This is a heuristic generalization of the Sethi-Ullman algorithm, a generalization that
 * doesn't always guarantee an optimal evaluation order, as the optimal evaluation order is very
 * difficult to compute, however, this method works well in most cases. Moreover it assumes that
 * all buffers will have roughly the same size, which may not always be the case. */
316{
317 Schedule schedule;
318
319 /* A stack of nodes used to traverse the node tree starting from the output nodes. */
320 Stack<DNode> node_stack;
321
322 /* Add the output nodes whose result should be computed to the stack. */
323 add_output_nodes(context, tree, node_stack);
324
325 /* No output nodes, the node tree has no effect, return an empty schedule. */
326 if (node_stack.is_empty()) {
327 return schedule;
328 }
329
330 /* Compute the number of buffers needed by each node connected to the outputs. */
331 const NeededBuffers needed_buffers = compute_number_of_needed_buffers(node_stack);
332
333 /* Traverse the node tree in a post order depth first manner, scheduling the nodes in an order
334 * informed by the number of buffers needed by each node. Post order traversal guarantee that all
335 * the node dependencies of each node are scheduled before it. This is done by pushing all the
336 * unscheduled node dependencies to the node stack first and only popping and scheduling the node
337 * when all its node dependencies were scheduled. */
338 while (!node_stack.is_empty()) {
339 /* Do not pop the node immediately, as it may turn out that we can't schedule it just yet
340 * because its dependencies weren't scheduled, it will be popped later when needed. */
341 DNode &node = node_stack.peek();
342
343 /* Compute the nodes directly connected to the node inputs sorted by their needed buffers such
344 * that the node with the lowest number of needed buffers comes first. Note that we actually
345 * want the node with the highest number of needed buffers to be schedule first, but since
346 * those are pushed to the traversal stack, we need to push them in reverse order. */
347 Vector<DNode> sorted_dependency_nodes;
348 for (const bNodeSocket *input : node->input_sockets()) {
349 const DInputSocket dinput{node.context(), input};
350
352 continue;
353 }
354
355 /* Get the output linked to the input. If it is null, that means the input is unlinked and
356 * has no dependency node, so skip it. */
357 const DOutputSocket doutput = get_output_linked_to_input(dinput);
358 if (!doutput) {
359 continue;
360 }
361
362 /* The dependency node was added before, so skip it. The number of dependency nodes is very
363 * small, typically less than 3, so a linear search is okay. */
364 if (sorted_dependency_nodes.contains(doutput.node())) {
365 continue;
366 }
367
368 /* The dependency node was already schedule, so skip it. */
369 if (schedule.contains(doutput.node())) {
370 continue;
371 }
372
373 /* Sort in ascending order on insertion, the number of dependency nodes is very small,
374 * typically less than 3, so insertion sort is okay. */
375 int insertion_position = 0;
376 for (int i = 0; i < sorted_dependency_nodes.size(); i++) {
377 if (needed_buffers.lookup(doutput.node()) >
378 needed_buffers.lookup(sorted_dependency_nodes[i]))
379 {
380 insertion_position++;
381 }
382 else {
383 break;
384 }
385 }
386 sorted_dependency_nodes.insert(insertion_position, doutput.node());
387 }
388
389 /* Push the sorted dependency nodes to the node stack in order. */
390 for (const DNode &dependency_node : sorted_dependency_nodes) {
391 node_stack.push(dependency_node);
392 }
393
394 /* If there are no sorted dependency nodes, that means they were all already scheduled or that
395 * none exists in the first place, so we can pop and schedule the node now. */
396 if (sorted_dependency_nodes.is_empty()) {
397 /* The node might have already been scheduled, so we don't use add_new here and simply don't
398 * add it if it was already scheduled. */
399 schedule.add(node_stack.pop());
400 }
401 }
402
403 return schedule;
404}
405
406} // namespace blender::compositor
/* NOTE(review): The block below is a Doxygen cross-reference index that was fused into this file
 * during HTML extraction. It is not part of scheduler.cc proper and is preserved here only as a
 * comment for reference.
 *
 * @ NODE_DO_OUTPUT
 * bool add(const Key &key, const Value &value) — Definition BLI_map.hh:295
 * const Value & lookup(const Key &key) const — Definition BLI_map.hh:545
 * bool contains(const Key &key) const — Definition BLI_map.hh:353
 * bool contains(const Key &key) const — Definition BLI_set.hh:310
 * bool is_empty() const — Definition BLI_set.hh:595
 * void add_new(const Key &key) — Definition BLI_set.hh:233
 * bool is_empty() const — Definition BLI_stack.hh:308
 * void push(const T &value) — Definition BLI_stack.hh:213
 * bool add(const Key &key)
 * bool contains(const Key &key) const
 * int64_t size() const
 * bool contains(const T &value) const
 * void insert(const int64_t insert_index, const T &value)
 * bool is_empty() const
 * const DTreeContext * child_context(const bNode &node) const
 * const DTreeContext * parent_context() const
 * const bNodeTree & btree() const
 * KDTree_3d * tree
 * #define input
 * #define output
 * bool is_output_linked_to_node_conditioned(DOutputSocket output, FunctionRef< bool(DNode)> condition) — Definition utilities.cc:113
 * VectorSet< DNode > Schedule
 * static void add_output_nodes(const Context &context, const DerivedNodeTree &tree, Stack< DNode > &node_stack) — Definition scheduler.cc:84
 * static bool is_tree_context_muted(const DTreeContext &tree_context) — Definition scheduler.cc:23
 * static DNode find_viewer_node_in_context(const DTreeContext &tree_context) — Definition scheduler.cc:41
 * Schedule compute_schedule(const Context &context, const DerivedNodeTree &tree) — Definition scheduler.cc:315
 * DOutputSocket get_output_linked_to_input(DInputSocket input) — Definition utilities.cc:52
 * static NeededBuffers compute_number_of_needed_buffers(Stack< DNode > &output_nodes) — Definition scheduler.cc:181
 * Map< DNode, int > NeededBuffers — Definition scheduler.cc:130
 * bool is_pixel_node(DNode node) — Definition utilities.cc:143
 * bool is_socket_available(const bNodeSocket *socket) — Definition utilities.cc:27
 * static void add_file_output_nodes(const DTreeContext &tree_context, Stack< DNode > &node_stack) — Definition scheduler.cc:58
 * i — Definition text_draw.cc:230
 */