/* Work queue for graph traversal; deque so nodes can be pushed at either end
 * (see push_back/push_front uses below). */
35using TraversalQueue = deque<OperationNode *>;
/* Per-operation visitor callback; second argument is opaque user data. */
37using DEGForeachOperation = void (*)(OperationNode *,
void *);
/* Whether the traversal should visit this operation node under the given flags.
 * NOTE(review): extraction fragment — the body (original lines 40-48) is not in
 * this view; only call sites below show it acts as a (node, flags) filter. */
39bool deg_foreach_needs_visit(
const OperationNode *op_node,
const int flags)
/* Walk all operations that depend on the target ID node's operations.
 * NOTE(review): extraction fragment — the embedded original line numbers jump
 * (49 -> 63 -> 76 ...), so parameters, braces and several statements are
 * elided; the code below is not compilable as shown. */
49void deg_foreach_dependent_operation(
const Depsgraph * ,
/* Nodes already queued — ensures each operation is scheduled at most once. */
63 Set<OperationNode *> scheduled;
/* Seed the queue with the target ID's own operations that pass the flag filter. */
64 for (ComponentNode *comp_node :
target_id_node->components.values()) {
76 for (OperationNode *op_node : comp_node->operations) {
77 if (!deg_foreach_needs_visit(op_node, flags)) {
80 queue.push_back(op_node);
81 scheduled.add(op_node);
/* Drain the queue, following outgoing relations to dependent operations. */
85 while (!queue.empty()) {
87 OperationNode *op_node = queue.front();
/* Single outgoing link: handle the sole target without a generic loop. */
92 if (op_node->outlinks.size() == 1) {
93 OperationNode *to_node = (OperationNode *)op_node->outlinks[0]->to;
94 if (!scheduled.contains(to_node) && deg_foreach_needs_visit(to_node, flags)) {
95 scheduled.add_new(to_node);
/* General case: enqueue every unvisited, visit-worthy target at the front. */
103 for (Relation *rel : op_node->outlinks) {
104 OperationNode *to_node = (OperationNode *)rel->to;
105 if (!scheduled.contains(to_node) && deg_foreach_needs_visit(to_node, flags)) {
106 queue.push_front(to_node);
107 scheduled.add_new(to_node);
/* User data bundle for deg_foreach_dependent_component_callback.
 * NOTE(review): member list (original lines 117+) is elided in this view;
 * usage below shows at least `target_id_node` and a `visited` component set. */
116struct ForeachIDComponentData {
/* DEGForeachOperation adapter: reports each dependent component once,
 * skipping components owned by the traversal's target ID itself.
 * NOTE(review): extraction fragment — closing braces / the actual user
 * callback invocation (after original line 129) are elided here. */
122void deg_foreach_dependent_component_callback(OperationNode *op_node,
void *user_data_v)
124 ForeachIDComponentData *user_data =
reinterpret_cast<ForeachIDComponentData *
>(user_data_v);
/* Map operation -> owning component -> owning ID. */
125 ComponentNode *comp_node = op_node->owner;
126 IDNode *
id_node = comp_node->owner;
/* Only report components of *other* IDs, and each component only once. */
127 if (
id_node != user_data->target_id_node && !user_data->visited.contains(comp_node)) {
129 user_data->visited.add_new(comp_node);
/* Driver: visit every component that depends on the given ID, by running the
 * operation traversal with the component-dedup callback above.
 * NOTE(review): extraction fragment — several arguments and statements between
 * the embedded original line numbers (133..146) are elided. */
133void deg_foreach_dependent_ID_component(
const Depsgraph *graph,
139 ForeachIDComponentData
data;
/* Resolve the target ID to its depsgraph node before traversing. */
141 data.target_id_node = graph->find_id_node(
id);
142 deg_foreach_dependent_operation(graph,
144 source_component_type,
146 deg_foreach_dependent_component_callback,
/* User data bundle for deg_foreach_dependent_ID_callback.
 * NOTE(review): member list (original lines 151+) is elided; usage below shows
 * at least `target_id_node` and a `visited` ID-node set. */
150struct ForeachIDData {
/* DEGForeachOperation adapter: reports each dependent ID once, skipping the
 * traversal's target ID itself. Mirrors the component callback above but
 * dedups at ID granularity instead of component granularity.
 * NOTE(review): extraction fragment — the user callback invocation after
 * original line 163 is elided. */
156void deg_foreach_dependent_ID_callback(OperationNode *op_node,
void *user_data_v)
158 ForeachIDData *user_data =
reinterpret_cast<ForeachIDData *
>(user_data_v);
/* Map operation -> owning component -> owning ID. */
159 ComponentNode *comp_node = op_node->owner;
160 IDNode *
id_node = comp_node->owner;
/* Only report *other* IDs, and each ID only once. */
161 if (
id_node != user_data->target_id_node && !user_data->visited.contains(
id_node)) {
163 user_data->visited.add_new(
id_node);
/* NOTE(review): extraction fragment — the enclosing function's signature is
 * not in this view; presumably the body of deg_foreach_dependent_ID (TODO
 * confirm against the full file). Runs the operation traversal over ANY
 * component type with no flags, dedup'ing per ID via the callback above. */
171 data.target_id_node = graph->find_id_node(
id);
172 deg_foreach_dependent_operation(
173 graph, data.target_id_node,
DEG_OB_COMP_ANY, 0, deg_foreach_dependent_ID_callback, &data);
/* NOTE(review): extraction fragment — the enclosing function's signature is
 * not in this view; judging by the inlinks walk this is the ancestor (upstream
 * dependency) traversal, the mirror of the dependent walk above. Embedded
 * original line numbers jump (191 -> 197, 204 -> 211 ...), so braces and
 * statements are elided. */
186 TraversalQueue queue;
187 Set<OperationNode *> scheduled;
/* Seed with every operation of the target ID — no flag filtering here,
 * unlike the dependent-operation walk. */
188 for (ComponentNode *comp_node :
target_id_node->components.values()) {
189 for (OperationNode *op_node : comp_node->operations) {
190 queue.push_back(op_node);
191 scheduled.add(op_node);
/* Drain the queue, following incoming relations toward ancestors. */
197 while (!queue.empty()) {
199 OperationNode *op_node = queue.front();
203 ComponentNode *comp_node = op_node->owner;
204 IDNode *
id_node = comp_node->owner;
/* Single incoming link: handle the sole source without a generic loop. */
211 if (op_node->inlinks.size() == 1) {
212 Node *from = op_node->inlinks[0]->from;
214 OperationNode *from_node = (OperationNode *)from;
/* Set::add returns true only on first insertion — doubles as visited check. */
215 if (scheduled.add(from_node)) {
/* General case: enqueue every not-yet-scheduled source at the front. */
224 for (Relation *rel : op_node->inlinks) {
225 Node *from = rel->from;
227 OperationNode *from_node = (OperationNode *)from;
228 if (scheduled.add(from_node)) {
229 queue.push_front(from_node);
/* NOTE(review): extraction fragment — a call from the public C API wrapper
 * into the internal deg:: implementation; arguments and enclosing function
 * are elided. The `@ DEG_FOREACH_...` line below is doxygen enum-value
 * residue, not source code. */
262 deg::deg_foreach_dependent_ID_component(
@ DEG_FOREACH_COMPONENT_IGNORE_TRANSFORM_SOLVERS
Object is a sort of wrapper for general info.
Read Guarded memory (de)allocation.
const Depsgraph * depsgraph
void DEG_foreach_ID(const Depsgraph *depsgraph, DEGForeachIDCallback callback)
void DEG_foreach_dependent_ID_component(const Depsgraph *depsgraph, const ID *id, eDepsObjectComponentType source_component_type, int flags, DEGForeachIDComponentCallback callback)
void DEG_foreach_dependent_ID(const Depsgraph *depsgraph, const ID *id, DEGForeachIDCallback callback)
Set< ComponentNode * > visited
DEGForeachIDComponentCallback callback
void DEG_foreach_ancestor_ID(const Depsgraph *depsgraph, const ID *id, DEGForeachIDCallback callback)
eDepsObjectComponentType nodeTypeToObjectComponent(NodeType type)