Blender V5.0
deg_eval.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2013 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
10
11#include <atomic>
12#include <cstdint>
13
15
16#include "BLI_function_ref.hh"
17#include "BLI_gsqueue.h"
18#include "BLI_task.h"
19#include "BLI_time.h"
20
21#include "BKE_global.hh"
22
23#include "DNA_object_types.h"
24#include "DNA_scene_types.h"
25
26#include "DEG_depsgraph.hh"
28
29#ifdef WITH_PYTHON
30# include "BPY_extern.hh"
31#endif
32
33#include "atomic_ops.h"
34
35#include "intern/depsgraph.hh"
46
47namespace blender::deg {
48
49namespace {
50
51struct DepsgraphEvalState;
52
53void deg_task_run_func(TaskPool *pool, void *taskdata);
54
55void schedule_children(DepsgraphEvalState *state,
56 OperationNode *node,
57 FunctionRef<void(OperationNode *node)> schedule_fn);
58
/* Denotes which part of dependency graph is being evaluated. */
enum class EvaluationStage {
  /* Stage 1: Only Copy-on-Write operations are to be evaluated, prior to anything else.
   * This allows other operations to access its dependencies when there is a dependency cycle
   * involved. */
  COPY_ON_EVAL,

  /* Evaluate actual ID nodes visibility based on the current state of animation and drivers. */
  DYNAMIC_VISIBILITY,

  /* Threaded evaluation of all possible operations. */
  THREADED_EVALUATION,

  /* Workaround for areas which can not be evaluated in threads.
   *
   * For example, meta-balls, which are iterating over all bases and are requesting dupli-lists
   * to see whether there are meta-balls inside. */
  SINGLE_THREADED_WORKAROUND,
};
78
/* Shared state of a single dependency graph evaluation run. */
struct DepsgraphEvalState {
  /* The dependency graph being evaluated. */
  Depsgraph *graph;
  /* When true, per-operation wall-clock timing statistics are gathered. */
  bool do_stats;
  /* Stage of the multi-stage evaluation which is currently running (see EvaluationStage). */
  EvaluationStage stage;
  /* Request to recalculate the nodes' num_links_pending counters before scheduling
   * the next stage. */
  bool need_update_pending_parents = true;
  /* Raised when a non-thread-safe operation was encountered during the threaded stage,
   * so a single-threaded pass is required afterwards. */
  bool need_single_thread_pass = false;
};
86
87void evaluate_node(const DepsgraphEvalState *state, OperationNode *operation_node)
88{
89 ::Depsgraph *depsgraph = reinterpret_cast<::Depsgraph *>(state->graph);
90
91 /* Sanity checks. */
92 BLI_assert_msg(!operation_node->is_noop(), "NOOP nodes should not actually be scheduled");
93 /* Perform operation. */
94 if (state->do_stats) {
95 const double start_time = BLI_time_now_seconds();
96 operation_node->evaluate(depsgraph);
97 operation_node->stats.current_time += BLI_time_now_seconds() - start_time;
98 }
99 else {
100 operation_node->evaluate(depsgraph);
101 }
102
103 /* Clear the flag early on, allowing partial updates without re-evaluating the same node multiple
104 * times.
105 * This is a thread-safe modification as the node's flags are only read for a non-scheduled nodes
106 * and this node has been scheduled. */
107 operation_node->flag &= ~DEPSOP_FLAG_CLEAR_ON_EVAL;
108}
109
110void deg_task_run_func(TaskPool *pool, void *taskdata)
111{
112 void *userdata_v = BLI_task_pool_user_data(pool);
113 DepsgraphEvalState *state = (DepsgraphEvalState *)userdata_v;
114
115 /* Evaluate node. */
116 OperationNode *operation_node = reinterpret_cast<OperationNode *>(taskdata);
117 evaluate_node(state, operation_node);
118
119 /* Schedule children. */
120 schedule_children(state, operation_node, [&](OperationNode *node) {
121 BLI_task_pool_push(pool, deg_task_run_func, node, false, nullptr);
122 });
123}
124
125bool check_operation_node_visible(const DepsgraphEvalState *state, OperationNode *op_node)
126{
127 const ComponentNode *comp_node = op_node->owner;
128 /* Special case for copy-on-eval component: it is to be always evaluated, to keep copied
129 * "database" in a consistent state. */
130 if (comp_node->type == NodeType::COPY_ON_EVAL) {
131 return true;
132 }
133
134 /* Special case for dynamic visibility pass: the actual visibility is not yet known, so limit to
135 * only operations which affects visibility. */
136 if (state->stage == EvaluationStage::DYNAMIC_VISIBILITY) {
138 }
139
140 return comp_node->affects_visible_id;
141}
142
143void calculate_pending_parents_for_node(const DepsgraphEvalState *state, OperationNode *node)
144{
145 /* Update counters, applies for both visible and invisible IDs. */
146 node->num_links_pending = 0;
147 node->scheduled = false;
148 /* Invisible IDs requires no pending operations. */
149 if (!check_operation_node_visible(state, node)) {
150 return;
151 }
152 /* No need to bother with anything if node is not tagged for update. */
153 if ((node->flag & DEPSOP_FLAG_NEEDS_UPDATE) == 0) {
154 return;
155 }
156 for (Relation *rel : node->inlinks) {
157 if (rel->from->type == NodeType::OPERATION && (rel->flag & RELATION_FLAG_CYCLIC) == 0) {
158 OperationNode *from = (OperationNode *)rel->from;
159 /* TODO(sergey): This is how old layer system was checking for the
160 * calculation, but how is it possible that visible object depends
161 * on an invisible? This is something what is prohibited after
162 * deg_graph_build_flush_layers(). */
163 if (!check_operation_node_visible(state, from)) {
164 continue;
165 }
166 /* No need to wait for operation which is up to date. */
167 if ((from->flag & DEPSOP_FLAG_NEEDS_UPDATE) == 0) {
168 continue;
169 }
170 ++node->num_links_pending;
171 }
172 }
173}
174
175void calculate_pending_parents_if_needed(DepsgraphEvalState *state)
176{
177 if (!state->need_update_pending_parents) {
178 return;
179 }
180
181 for (OperationNode *node : state->graph->operations) {
182 calculate_pending_parents_for_node(state, node);
183 }
184
185 state->need_update_pending_parents = false;
186}
187
188void initialize_execution(DepsgraphEvalState *state, Depsgraph *graph)
189{
190 /* Clear tags and other things which needs to be clear. */
191 if (state->do_stats) {
192 for (OperationNode *node : graph->operations) {
193 node->stats.reset_current();
194 }
195 }
196}
197
198bool is_metaball_object_operation(const OperationNode *operation_node)
199{
200 const ComponentNode *component_node = operation_node->owner;
201 const IDNode *id_node = component_node->owner;
202 if (GS(id_node->id_cow->name) != ID_OB) {
203 return false;
204 }
205 const Object *object = reinterpret_cast<const Object *>(id_node->id_cow);
206 return object->type == OB_MBALL;
207}
208
209bool need_evaluate_operation_at_stage(DepsgraphEvalState *state,
210 const OperationNode *operation_node)
211{
212 const ComponentNode *component_node = operation_node->owner;
213 switch (state->stage) {
214 case EvaluationStage::COPY_ON_EVAL:
215 return (component_node->type == NodeType::COPY_ON_EVAL);
216
217 case EvaluationStage::DYNAMIC_VISIBILITY:
218 return operation_node->flag & OperationFlag::DEPSOP_FLAG_AFFECTS_VISIBILITY;
219
220 case EvaluationStage::THREADED_EVALUATION:
221 if (is_metaball_object_operation(operation_node)) {
222 state->need_single_thread_pass = true;
223 return false;
224 }
225 return true;
226
227 case EvaluationStage::SINGLE_THREADED_WORKAROUND:
228 return true;
229 }
230 BLI_assert_msg(0, "Unhandled evaluation stage, should never happen.");
231 return false;
232}
233
/* Schedule a node if it needs evaluation.
 *
 * dec_parents: Decrement pending parents count, true when child nodes are
 * scheduled after a task has been completed. */
void schedule_node(DepsgraphEvalState *state,
                   OperationNode *node,
                   bool dec_parents,
                   const FunctionRef<void(OperationNode *node)> schedule_fn)
{
  /* No need to schedule nodes of invisible ID. */
  if (!check_operation_node_visible(state, node)) {
    return;
  }
  /* No need to schedule operations which are not tagged for update, they are
   * considered to be up to date. */
  if ((node->flag & DEPSOP_FLAG_NEEDS_UPDATE) == 0) {
    return;
  }
  /* TODO(sergey): This is not strictly speaking safe to read
   * num_links_pending. */
  if (dec_parents) {
    BLI_assert(node->num_links_pending > 0);
    atomic_sub_and_fetch_uint32(&node->num_links_pending, 1);
  }
  /* Can not schedule operation while its dependencies are not yet
   * evaluated. */
  if (node->num_links_pending != 0) {
    return;
  }
  /* During the copy-on-eval stage only schedule copy-on-eval nodes. */
  if (!need_evaluate_operation_at_stage(state, node)) {
    return;
  }
  /* Actually schedule the node: atomically flip the `scheduled` flag, so that of all
   * threads racing to schedule the node only a single one proceeds. */
  bool is_scheduled = atomic_fetch_and_or_uint8((uint8_t *)&node->scheduled, uint8_t(true));
  if (!is_scheduled) {
    if (node->is_noop()) {
      /* Clear flags to avoid affecting subsequent update propagation.
       * For normal nodes these are cleared when it is evaluated. */
      node->flag &= ~DEPSOP_FLAG_CLEAR_ON_EVAL;

      /* Skip NOOP node, schedule children right away. */
      schedule_children(state, node, schedule_fn);
    }
    else {
      /* Children are scheduled once this task is completed. */
      schedule_fn(node);
    }
  }
}
284
285void schedule_graph(DepsgraphEvalState *state,
286 const FunctionRef<void(OperationNode *node)> schedule_fn)
287{
288 for (OperationNode *node : state->graph->operations) {
289 schedule_node(state, node, false, schedule_fn);
290 }
291}
292
293void schedule_children(DepsgraphEvalState *state,
294 OperationNode *node,
295 const FunctionRef<void(OperationNode *node)> schedule_fn)
296{
297 for (Relation *rel : node->outlinks) {
298 OperationNode *child = (OperationNode *)rel->to;
299 BLI_assert(child->type == NodeType::OPERATION);
300 if (child->scheduled) {
301 /* Happens when having cyclic dependencies. */
302 continue;
303 }
304 schedule_node(state, child, (rel->flag & RELATION_FLAG_CYCLIC) == 0, schedule_fn);
305 }
306}
307
308/* Evaluate given stage of the dependency graph evaluation using multiple threads.
309 *
310 * NOTE: Will assign the `state->stage` to the given stage. */
311void evaluate_graph_threaded_stage(DepsgraphEvalState *state,
313 const EvaluationStage stage)
314{
315 state->stage = stage;
316
317 calculate_pending_parents_if_needed(state);
318
319 schedule_graph(state, [&](OperationNode *node) {
320 BLI_task_pool_push(task_pool, deg_task_run_func, node, false, nullptr);
321 });
323}
324
325/* Evaluate remaining operations of the dependency graph in a single threaded manner. */
326void evaluate_graph_single_threaded_if_needed(DepsgraphEvalState *state)
327{
328 if (!state->need_single_thread_pass) {
329 return;
330 }
331
332 BLI_assert(!state->need_update_pending_parents);
333
334 state->stage = EvaluationStage::SINGLE_THREADED_WORKAROUND;
335
336 GSQueue *evaluation_queue = BLI_gsqueue_new(sizeof(OperationNode *));
337 auto schedule_node_to_queue = [&](OperationNode *node) {
338 BLI_gsqueue_push(evaluation_queue, &node);
339 };
340 schedule_graph(state, schedule_node_to_queue);
341
342 while (!BLI_gsqueue_is_empty(evaluation_queue)) {
343 OperationNode *operation_node;
344 BLI_gsqueue_pop(evaluation_queue, &operation_node);
345
346 evaluate_node(state, operation_node);
347 schedule_children(state, operation_node, schedule_node_to_queue);
348 }
349
350 BLI_gsqueue_free(evaluation_queue);
351}
352
353void depsgraph_ensure_view_layer(Depsgraph *graph)
354{
355 /* We update evaluated scene in the following cases:
356 * - It was not expanded yet.
357 * - It was tagged for update of evaluated component.
358 * This allows us to have proper view layer pointer. */
359 Scene *scene_cow = graph->scene_cow;
360 if (deg_eval_copy_is_expanded(&scene_cow->id) &&
361 (scene_cow->id.recalc & ID_RECALC_SYNC_TO_EVAL) == 0)
362 {
363 return;
364 }
365
366 const IDNode *scene_id_node = graph->find_id_node(&graph->scene->id);
367 deg_update_eval_copy_datablock(graph, scene_id_node);
368}
369
370TaskPool *deg_evaluate_task_pool_create(DepsgraphEvalState *state)
371{
372 if (G.debug & G_DEBUG_DEPSGRAPH_NO_THREADS) {
374 }
375
377}
378
379} // namespace
380
382{
383 /* Nothing to update, early out. */
384 if (graph->entry_tags.is_empty()) {
385 return;
386 }
387
388 /* The update counts can be used to check if the Depsgraph was changed since the last time it was
389 * cached by comparing its current update count with the one stored at the moment the Depsgraph
390 * data were cached.
391 *
392 * A global atomic is used as opposed to incrementing the update count per Depsgraph to protect
393 * against the case where the Depsgraph is being recreated for each update and used to feed the
394 * same running engine instances. This can happen when using a brute force update pattern (see
395 * #135635). */
396 static std::atomic<uint64_t> global_update_count = 0;
397 graph->update_count = global_update_count.fetch_add(1) + 1;
398
400
401#ifdef WITH_PYTHON
402 /* Release the GIL so that Python drivers can be evaluated. See #91046. */
404#endif
405
406 graph->is_evaluating = true;
407 depsgraph_ensure_view_layer(graph);
408
409 /* Set up evaluation state. */
410 DepsgraphEvalState state;
411 state.graph = graph;
412 state.do_stats = graph->debug.do_time_debug();
413
414 /* Prepare all nodes for evaluation. */
415 initialize_execution(&state, graph);
416
417 /* Evaluation happens in several incremental steps:
418 *
419 * - Start with the copy-on-evaluation operations which never form dependency cycles. This will
420 * ensure that if a dependency graph has a cycle evaluation functions will always "see" valid
421 * expanded datablock. It might not be evaluated yet, but at least the datablock will be valid.
422 *
423 * - If there is potentially dynamically changing visibility in the graph update the actual
424 * nodes visibilities, so that actual heavy data evaluation can benefit from knowledge that
425 * something heavy is not currently visible.
426 *
427 * - Multi-threaded evaluation of all possible nodes.
428 * Certain operations (and their subtrees) could be ignored. For example, meta-balls are not
429 * safe from threading point of view, so the threaded evaluation will stop at the metaball
430 * operation node.
431 *
432 * - Single-threaded pass of all remaining operations. */
433
434 TaskPool *task_pool = deg_evaluate_task_pool_create(&state);
435
436 evaluate_graph_threaded_stage(&state, task_pool, EvaluationStage::COPY_ON_EVAL);
437
439 /* Update pending parents including only the ones which are affecting operations which are
440 * affecting visibility. */
441 state.need_update_pending_parents = true;
442
443 evaluate_graph_threaded_stage(&state, task_pool, EvaluationStage::DYNAMIC_VISIBILITY);
444
446
447 /* Update parents to an updated visibility and evaluation stage.
448 *
449 * Need to do it regardless of whether visibility is actually changed or not: current state of
450 * the pending parents are all zeroes because it was previously calculated for only visibility
451 * related nodes and those are fully evaluated by now. */
452 state.need_update_pending_parents = true;
453 }
454
455 evaluate_graph_threaded_stage(&state, task_pool, EvaluationStage::THREADED_EVALUATION);
456
458
459 evaluate_graph_single_threaded_if_needed(&state);
460
461 /* Finalize statistics gathering. This is because we only gather single
462 * operation timing here, without aggregating anything to avoid any extra
463 * synchronization. */
464 if (state.do_stats) {
466 }
467
468 /* Clear any uncleared tags. */
470 graph->is_evaluating = false;
471
472#ifdef WITH_PYTHON
474#endif
475
477}
478
479} // namespace blender::deg
@ G_DEBUG_DEPSGRAPH_NO_THREADS
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:53
void BLI_gsqueue_free(GSQueue *queue)
Definition gsqueue.cc:93
void BLI_gsqueue_push(GSQueue *queue, const void *item)
Definition gsqueue.cc:100
void BLI_gsqueue_pop(GSQueue *queue, void *r_item)
Definition gsqueue.cc:135
GSQueue * BLI_gsqueue_new(size_t elem_size)
Definition gsqueue.cc:72
struct _GSQueue GSQueue
Definition BLI_gsqueue.h:13
bool BLI_gsqueue_is_empty(const GSQueue *queue)
Definition gsqueue.cc:163
@ TASK_PRIORITY_HIGH
Definition BLI_task.h:53
TaskPool * BLI_task_pool_create_suspended(void *userdata, eTaskPriority priority)
Definition task_pool.cc:503
void * BLI_task_pool_user_data(TaskPool *pool)
Definition task_pool.cc:550
struct TaskPool TaskPool
Definition BLI_task.h:56
TaskPool * BLI_task_pool_create_no_threads(void *userdata)
Definition task_pool.cc:511
void BLI_task_pool_work_and_wait(TaskPool *pool)
Definition task_pool.cc:535
void BLI_task_pool_free(TaskPool *pool)
Definition task_pool.cc:521
void BLI_task_pool_push(TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskFreeFunction freedata)
Definition task_pool.cc:526
Platform independent time functions.
double BLI_time_now_seconds(void)
Definition time.cc:113
#define BPy_BEGIN_ALLOW_THREADS
Definition BPY_extern.hh:52
#define BPy_END_ALLOW_THREADS
Definition BPY_extern.hh:56
@ ID_RECALC_SYNC_TO_EVAL
Definition DNA_ID.h:1118
@ ID_OB
Object is a sort of wrapper for general info.
@ OB_MBALL
struct Object Object
struct Scene Scene
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b)
ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
BPy_StructRNA * depsgraph
TaskPool * task_pool
#define GS(x)
static ulong state[N]
#define G(x, y, z)
void deg_eval_stats_aggregate(Depsgraph *graph)
bool deg_eval_copy_is_expanded(const ID *id_cow)
void deg_graph_clear_tags(Depsgraph *graph)
ID * deg_update_eval_copy_datablock(const Depsgraph *depsgraph, const IDNode *id_node)
void deg_evaluate_on_refresh(Depsgraph *graph)
Definition deg_eval.cc:381
void deg_graph_flush_visibility_flags_if_needed(Depsgraph *graph)
unsigned int recalc
Definition DNA_ID.h:445
DepsgraphDebug debug
Definition depsgraph.hh:166
Set< OperationNode * > entry_tags
Definition depsgraph.hh:127