using namespace nodes::derived_node_tree_types;
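/* The next four return statements are likely the bodies of the ShaderNode accessors declared
 * further below: get_inputs_array(), get_outputs_array(), get_input(identifier) and
 * get_output(identifier). */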
return inputs_.data();
return outputs_.data();
return inputs_[node_.input_by_identifier(identifier)->index()];
return outputs_[node_.output_by_identifier(identifier)->index()];
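/* Presumably inside gpu_stack_vector_from_socket() (declared below): initialize the stack's
 * default vector value from the socket's default value, dispatching on the socket type. */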
switch (socket->type) {
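/* Likely the vector-socket case: reduce the 3-component default value to a single float by
 * averaging its components, for when the stack expects a float. */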
stack.vec[0] = (value[0] + value[1] + value[2]) / 3.0f;
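/* Likely the color-socket case: the RGBA default value is reduced to a float in the same way,
 * ignoring alpha. */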
stack.vec[0] = (value[0] + value[1] + value[2]) / 3.0f;
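/* Presumably inside populate_gpu_node_stack() (declared below): the link starts out null and
 * is only filled in later, when the GPU material is compiled. */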
stack.link = nullptr;
if (socket->is_input()) {
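/* An input counts as linked when its origin socket is an output; presumably, if the origin is
 * itself an input, the socket is an unlinked input reached through a group input node. */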
stack.hasinput = origin->is_output();
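/* When the origin is such an unlinked input, presumably the default value is read from the
 * origin socket rather than from this socket. */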
if (origin->is_input()) {
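/* Presumably the branch for output sockets (socket->is_input() above is false): */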
stack.hasoutput = socket->is_logically_linked();
void ShaderNode::populate_inputs()
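/* One GPUNodeStack is reserved per input socket, plus one extra element, presumably as the
 * end-of-array marker that the GPU module functions expect. */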
const int num_input_sockets = node_->input_sockets().size();
inputs_.resize(num_input_sockets + 1);
for (int i = 0; i < num_input_sockets; i++) {
void ShaderNode::populate_outputs()
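/* Same as populate_inputs(), but for the node's output sockets. */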
const int num_output_sockets = node_->output_sockets().size();
outputs_.resize(num_output_sockets + 1);
for (int i = 0; i < num_output_sockets; i++) {
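/* Declarations related to the code above: */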
#define BLI_assert_unreachable()
MINLINE void copy_v4_v4(float r[4], const float a[4])
MINLINE void copy_v3_v3(float r[3], const float a[3])
MINLINE void copy_v3_fl(float r[3], float f)
MINLINE void copy_v4_fl(float r[4], float f)
GPUNodeLink * GPU_uniform(const float *num)
const T & last(const int64_t n=0) const
void resize(const int64_t new_size)
const bNodeSocket * bsocket() const
GPUNodeStack * get_inputs_array()
GPUNodeStack * get_outputs_array()
GPUNodeLink * get_input_link(StringRef identifier)
GPUNodeStack & get_input(StringRef identifier)
const DNode & node() const
GPUNodeStack & get_output(StringRef identifier)
const bNode & bnode() const
static void gpu_stack_vector_from_socket(GPUNodeStack &stack, const bNodeSocket *socket)
static void populate_gpu_node_stack(DSocket socket, GPUNodeStack &stack)
static eGPUType gpu_type_from_socket_type(eNodeSocketDatatype type)
DSocket get_input_origin_socket(DInputSocket input)
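/* A minimal sketch (not part of the excerpt above) of what gpu_type_from_socket_type() might
 * look like, assuming it only needs to map the float, vector and color socket types used by
 * the fragments above to their GPU counterparts, with all other types treated as unreachable. */
static eGPUType gpu_type_from_socket_type(eNodeSocketDatatype type)
{
  switch (type) {
    case SOCK_FLOAT:
      return GPU_FLOAT;
    case SOCK_VECTOR:
      return GPU_VEC3;
    case SOCK_RGBA:
      return GPU_VEC4;
    default:
      BLI_assert_unreachable();
      return GPU_NONE;
  }
}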