Blender V5.0
gpu_codegen.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2005 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
10
11#include "MEM_guardedalloc.h"
12
13#include "DNA_material_types.h"
14
15#include "BLI_span.hh"
16#include "BLI_string.h"
17#include "BLI_vector.hh"
18
19#include "BKE_cryptomatte.hh"
20
22
23#include "GPU_capabilities.hh"
24#include "GPU_shader.hh"
25#include "GPU_uniform_buffer.hh"
26#include "GPU_vertex_format.hh"
27
28#include "gpu_codegen.hh"
31
32#include <cstdarg>
33#include <cstring>
34
35using namespace blender;
36using namespace blender::gpu::shader;
37
38/* -------------------------------------------------------------------- */
41
42static std::ostream &operator<<(std::ostream &stream, const GPUInput *input)
43{
 /* Emit the GLSL variable / accessor name this input resolves to, keyed on its
  * data source. NOTE(review): several `case GPU_SOURCE_*:` labels were lost in
  * this extraction (they were hyperlinks); each `return` below belongs to one
  * source kind from the GPU_SOURCE_* enum. */
44 switch (input->source) {
47 return stream << (input->is_zone_io ? "zone" : "tmp") << input->id;
49 return stream << (input->is_zone_io ? "zone" : "cons") << input->id;
 /* Uniforms live inside the generated NodeTree UBO struct; duplicated zone
  * inputs get a "b" suffix to keep names unique. */
51 return stream << "node_tree.u" << input->id << (input->is_duplicate ? "b" : "");
52 case GPU_SOURCE_ATTR:
53 return stream << "var_attrs.v" << input->attr->id;
55 return stream << "UNI_ATTR(unf_attrs[resource_id].attr" << input->uniform_attr->id << ")";
57 return stream << "attr_load_layer(" << input->layer_attr->hash_code << ")";
59 return stream << "strct" << input->id;
60 case GPU_SOURCE_TEX:
61 return stream << input->texture->sampler_name;
63 return stream << input->texture->tiled_mapping_name;
64 default:
 /* Reaching here means an unhandled source kind: programming error. */
65 BLI_assert(0);
66 return stream;
67 }
68}
69
70static std::ostream &operator<<(std::ostream &stream, const GPUOutput *output)
71{
72 return stream << (output->is_zone_io ? "zone" : "tmp") << output->id;
73}
74
75/* Print data constructor (i.e: vec2(1.0f, 1.0f)). */
76static std::ostream &operator<<(std::ostream &stream, const Span<float> &span)
77{
78 stream << (GPUType)span.size() << "(";
79 /* Use uint representation to allow exact same bit pattern even if NaN. This is
80 * because we can pass UINTs as floats for constants. */
81 const Span<uint32_t> uint_span = span.cast<uint32_t>();
82 for (const uint32_t &element : uint_span) {
83 char formatted_float[32];
84 SNPRINTF(formatted_float, "uintBitsToFloat(%uu)", element);
85 stream << formatted_float;
86 if (&element != &uint_span.last()) {
87 stream << ", ";
88 }
89 }
90 stream << ")";
91 return stream;
92}
93
94/* Trick type to change overload and keep a somewhat nice syntax. */
95struct GPUConstant : public GPUInput {};
96
97static std::ostream &operator<<(std::ostream &stream, const GPUConstant *input)
98{
99 stream << Span<float>(input->vec, input->type);
100 return stream;
101}
102
103namespace blender::gpu::shader {
104/* Needed to use the << operators from nested namespaces. :(
105 * https://stackoverflow.com/questions/5195512/namespaces-and-operator-resolution */
106using ::operator<<;
107} // namespace blender::gpu::shader
108
110
111/* -------------------------------------------------------------------- */
114
 /* GPUCodegenCreateInfo::NameBuffer::append_sampler_name — the signature line
  * (`const char *append_sampler_name(const char name[32])`) was lost in this
  * extraction. Copies `name` into a freshly owned entry so the returned
  * pointer stays valid for the lifetime of the name buffer. */
116{
117 auto index = sampler_names.size();
 /* Each NameEntry owns fixed-size character storage. */
118 sampler_names.append(std::make_unique<NameEntry>());
119 char *name_buffer = sampler_names[index]->data();
 /* Copies a fixed 32 bytes: assumes the caller's buffer is at least 32 bytes,
  * matching the `const char name[32]` parameter contract. */
120 memcpy(name_buffer, name, 32);
121 return name_buffer;
122}
123
 /* Constructor: binds the codegen to its material and node graph and allocates
  * the shader create-info that will be filled during generation. */
124GPUCodegen::GPUCodegen(GPUMaterial *mat_, GPUNodeGraph *graph_, const char *debug_name)
125 : mat(*mat_), graph(*graph_)
126{
 /* NOTE(review): original lines 127-128 are missing from this extraction
  * (presumably hash state initialization — confirm against upstream). */
129 create_info = MEM_new<GPUCodegenCreateInfo>(__func__, debug_name);
 /* Expose the create-info through the opaque C handle type used by the GPU
  * module API; the double cast first adjusts to the ShaderCreateInfo base. */
130 output.create_info = reinterpret_cast<GPUShaderCreateInfo *>(
131 static_cast<ShaderCreateInfo *>(create_info));
132}
133
 /* Destructor — signature line (`GPUCodegen::~GPUCodegen()`) was lost in this
  * extraction. Releases everything the codegen owns. */
135{
136 MEM_SAFE_FREE(cryptomatte_input_);
137 MEM_delete(create_info);
 /* Frees the LinkData wrappers of the UBO input list; the wrapped GPUInput
  * pointers are owned by the node graph (except cryptomatte, freed above). */
138 BLI_freelistN(&ubo_inputs_);
139};
140
 /* should_optimize_heuristic — signature line lost in this extraction.
  * Decides whether the graph is large enough that compiling a second,
  * optimized shader variant is worth the cost. */
142{
143 /* If each of the maximal attributes are exceeded, we can optimize, but we should also ensure
144 * the baseline is met. */
 /* One of the "large graph" thresholds must trip AND the graph must meet a
  * minimum size baseline for optimization to pay off. */
145 bool do_optimize = (nodes_total_ >= 60 || textures_total_ >= 4 || uniforms_total_ >= 64) &&
146 (textures_total_ >= 1 && uniforms_total_ >= 8 && nodes_total_ >= 4);
147 return do_optimize;
148}
149
 /* Attribute generation — signature line lost in this extraction. Builds the
  * vertex-input declarations, the stage interface, and the GLSL snippet that
  * loads each material attribute into `var_attrs`.
  * NOTE(review): original lines 157 and 160 are missing here; line 160 must
  * declare the `iface` reference used below (confirm against upstream). */
151{
152 if (BLI_listbase_is_empty(&graph.attributes)) {
153 output.attr_load.clear();
154 return;
155 }
156
158
159 info.interface_generated = MEM_new<StageInterfaceInfo>(__func__, "codegen_iface", "var_attrs");
161 info.vertex_out(iface);
162
163 /* Input declaration, loading / assignment to interface and geometry shader passthrough. */
164 std::stringstream load_ss;
165
166 /* Index of the attribute as ordered in graph.attributes. */
167 int attr_n = 0;
 /* Attribute slots are assigned from the top (15) downward. */
168 int slot = 15;
169 LISTBASE_FOREACH (GPUMaterialAttribute *, attr, &graph.attributes) {
170 if (slot == -1) {
171 BLI_assert_msg(0, "Too many attributes");
172 break;
173 }
174 STRNCPY(info.name_buffer.attr_names[slot], attr->input_name);
175 SNPRINTF(info.name_buffer.var_names[slot], "v%d", attr->id);
176
177 StringRefNull attr_name = info.name_buffer.attr_names[slot];
178 StringRefNull var_name = info.name_buffer.var_names[slot];
179
180 GPUType input_type, iface_type;
181
182 load_ss << "var_attrs." << var_name;
183 if (attr->is_hair_length || attr->is_hair_intercept) {
184 iface_type = input_type = GPU_FLOAT;
185 load_ss << " = attr_load_" << input_type << "(domain, " << attr_name << ", " << attr_n
186 << ");\n";
187 }
188 else {
189 switch (attr->type) {
190 case CD_ORCO:
191 /* Need vec4 to detect usage of default attribute. */
192 input_type = GPU_VEC4;
193 iface_type = GPU_VEC3;
194 load_ss << " = attr_load_orco(domain, " << attr_name << ", " << attr_n << ");\n";
195 break;
196 case CD_TANGENT:
197 iface_type = input_type = GPU_VEC4;
198 load_ss << " = attr_load_tangent(domain, " << attr_name << ", " << attr_n << ");\n";
199 break;
200 default:
201 iface_type = input_type = GPU_VEC4;
202 load_ss << " = attr_load_" << input_type << "(domain, " << attr_name << ", " << attr_n
203 << ");\n";
204 break;
205 }
206 }
207 attr_n++;
208
209 info.vertex_in(slot--, to_type(input_type), attr_name);
210 iface.smooth(to_type(iface_type), var_name);
211 }
212
213 output.attr_load = load_ss.str();
214}
215
 /* Resource generation — signature line lost in this extraction. Declares
  * samplers for every material texture and emits the NodeTree / UniformAttrs
  * UBO struct sources into the create-info.
  * NOTE(review): original lines 218 (likely the `info` reference declaration)
  * and 263 (the NodeTree `uniform_buf` declaration, presumably using
  * GPU_NODE_TREE_UBO_SLOT / GPU_UBO_BLOCK_NAME) are missing — confirm against
  * upstream. */
217{
219
220 std::stringstream ss;
221
222 /* Textures. */
223 int slot = 0;
224 LISTBASE_FOREACH (GPUMaterialTexture *, tex, &graph.textures) {
225 if (tex->colorband) {
226 const char *name = info.name_buffer.append_sampler_name(tex->sampler_name);
227 info.sampler(slot++, ImageType::Float1DArray, name, Frequency::BATCH);
228 }
229 else if (tex->sky) {
 /* Sky textures are bound to a fixed slot 0 and do not consume a slot. */
230 const char *name = info.name_buffer.append_sampler_name(tex->sampler_name);
231 info.sampler(0, ImageType::Float2DArray, name, Frequency::BATCH);
232 }
233 else if (tex->tiled_mapping_name[0] != '\0') {
 /* UDIM tiles need two samplers: the tile array and the mapping table. */
234 const char *name = info.name_buffer.append_sampler_name(tex->sampler_name);
235 info.sampler(slot++, ImageType::Float2DArray, name, Frequency::BATCH);
236
237 const char *name_mapping = info.name_buffer.append_sampler_name(tex->tiled_mapping_name);
238 info.sampler(slot++, ImageType::Float1DArray, name_mapping, Frequency::BATCH);
239 }
240 else {
241 const char *name = info.name_buffer.append_sampler_name(tex->sampler_name);
242 info.sampler(slot++, ImageType::Float2D, name, Frequency::BATCH);
243 }
244 }
245
246 /* Increment heuristic. */
247 textures_total_ = slot;
248
249 if (!BLI_listbase_is_empty(&ubo_inputs_)) {
250 /* NOTE: generate_uniform_buffer() should have sorted the inputs before this. */
251 ss << "struct NodeTree {\n";
252 LISTBASE_FOREACH (LinkData *, link, &ubo_inputs_) {
253 GPUInput *input = (GPUInput *)(link->data);
254 if (input->source == GPU_SOURCE_CRYPTOMATTE) {
255 ss << input->type << " crypto_hash;\n";
256 }
257 else {
258 ss << input->type << " u" << input->id << (input->is_duplicate ? "b" : "") << ";\n";
259 }
260 }
261 ss << "};\n\n";
262
264 }
265
266 if (!BLI_listbase_is_empty(&graph.uniform_attrs.list)) {
267 ss << "struct UniformAttrs {\n";
268 LISTBASE_FOREACH (GPUUniformAttr *, attr, &graph.uniform_attrs.list) {
269 ss << "vec4 attr" << attr->id << ";\n";
270 }
271 ss << "};\n\n";
272
273 /* TODO(fclem): Use the macro for length. Currently not working for EEVEE. */
274 /* DRW_RESOURCE_CHUNK_LEN = 512 */
275 info.uniform_buf(2, "UniformAttrs", GPU_ATTRIBUTE_UBO_BLOCK_NAME "[512]", Frequency::BATCH);
276 }
277
278 if (!BLI_listbase_is_empty(&graph.layer_attrs)) {
279 info.additional_info("draw_layer_attributes");
280 }
281
282 info.typedef_source_generated = ss.str();
283}
284
 /* Serialize one graph node into GLSL: declare its inputs/outputs, then emit
  * the function call. Also records which library files the node's function
  * needs. NOTE(review): several `case GPU_SOURCE_*:` labels (original lines
  * 331, 334, 337, 342, 373) and lines 292 / 313 were lost in this extraction;
  * line 313 presumably fills `coefficients` (see
  * IMB_colormanagement_get_luminance_coefficients) — confirm upstream. */
285void GPUCodegen::node_serialize(Set<StringRefNull> &used_libraries,
286 std::stringstream &eval_ss,
287 const GPUNode *node)
288{
289 gpu_material_library_use_function(used_libraries, node->name);
290
 /* Emits a reference to the value feeding `input`, inserting a type
  * conversion wrapper when the source type does not match. */
291 auto source_reference = [&](GPUInput *input) {
293 /* These inputs can have non matching types. Do conversion. */
294 GPUType to = input->type;
295 GPUType from = (input->source == GPU_SOURCE_ATTR) ? input->attr->gputype :
296 input->link->output->type;
297 if (from != to) {
298 /* Use defines declared inside codegen_lib (e.g. vec4_from_float). */
299 eval_ss << to << "_from_" << from << "(";
300 }
301
302 if (input->source == GPU_SOURCE_ATTR) {
303 eval_ss << input;
304 }
305 else {
306 eval_ss << input->link->output;
307 }
308
309 if (from != to) {
310 /* Special case that needs luminance coefficients as argument. */
311 if (from == GPU_VEC4 && to == GPU_FLOAT) {
312 float coefficients[3];
314 eval_ss << ", " << blender::Span<float>(coefficients, 3);
315 }
316 eval_ss << ")";
317 }
318 };
319
320 /* Declare constants. */
321 LISTBASE_FOREACH (GPUInput *, input, &node->inputs) {
322 auto type = [&]() {
323 /* Don't declare zone io variables twice. */
324 std::stringstream ss;
325 if (!input->is_duplicate) {
326 ss << input->type;
327 }
328 return ss.str();
329 };
330 switch (input->source) {
332 eval_ss << type() << " " << input << "; " << input->function_call << input << ");\n";
333 break;
335 eval_ss << input->type << " " << input << " = CLOSURE_DEFAULT;\n";
336 break;
338 if (!input->is_duplicate) {
339 eval_ss << type() << " " << input << " = " << (GPUConstant *)input << ";\n";
340 }
341 break;
343 case GPU_SOURCE_ATTR:
344 if (input->is_zone_io) {
345 eval_ss << type() << " " << input << " = ";
346 source_reference(input);
347 eval_ss << ";\n";
348 }
349 break;
350 default:
351 if (input->is_zone_io && (!input->is_duplicate || !input->link)) {
352 eval_ss << type() << " zone" << input->id << " = " << input << ";\n";
353 }
354 break;
355 }
356 }
357 /* Declare temporary variables for node output storage. */
358 LISTBASE_FOREACH (GPUOutput *, output, &node->outputs) {
 /* Zone IO outputs terminate the declarable range; stop at the first one. */
359 if (output->is_zone_io) {
360 break;
361 }
362 eval_ss << output->type << " " << output << ";\n";
363 }
364
365 /* Function call. */
366 eval_ss << node->name << "(";
367 /* Input arguments. */
368 LISTBASE_FOREACH (GPUInput *, input, &node->inputs) {
369 if (input->is_zone_io) {
370 break;
371 }
372 switch (input->source) {
374 case GPU_SOURCE_ATTR: {
375 source_reference(input);
376 break;
377 }
378 default:
379 eval_ss << input;
380 break;
381 }
 /* Emit a separator unless this is the very last argument overall. */
382 GPUOutput *output = static_cast<GPUOutput *>(node->outputs.first);
383 if ((input->next && !input->next->is_zone_io) || (output && !output->is_zone_io)) {
384 eval_ss << ", ";
385 }
386 }
387 /* Output arguments. */
388 LISTBASE_FOREACH (GPUOutput *, output, &node->outputs) {
389 if (output->is_zone_io) {
390 break;
391 }
392 eval_ss << output;
393 if (output->next && !output->next->is_zone_io) {
394 eval_ss << ", ";
395 }
396 }
397 eval_ss << ");\n\n";
398
399 /* Increment heuristic. */
400 nodes_total_++;
401}
402
 /* set_to_vector_stable — signature line (`static Vector<StringRefNull>
  * set_to_vector_stable(Set<StringRefNull> &set)`) was lost in this
  * extraction. Copies a set into a vector with deterministic ordering. */
404{
405 Vector<StringRefNull> source_files;
406 for (const StringRefNull &str : set) {
407 source_files.append(str);
408 }
409 /* Sort dependencies to avoid random order causing shader caching to fail (see #108289). */
410 std::sort(source_files.begin(), source_files.end());
411 return source_files;
412}
413
414GPUGraphOutput GPUCodegen::graph_serialize(GPUNodeTag tree_tag,
415 GPUNodeLink *output_link,
416 const char *output_default)
417{
418 if (output_link == nullptr && output_default == nullptr) {
419 return {};
420 }
421
422 Set<StringRefNull> used_libraries;
423 std::stringstream eval_ss;
424 bool has_nodes = false;
425 /* NOTE: The node order is already top to bottom (or left to right in node editor)
426 * because of the evaluation order inside ntreeExecGPUNodes(). */
427 LISTBASE_FOREACH (GPUNode *, node, &graph.nodes) {
428 if ((node->tag & tree_tag) == 0) {
429 continue;
430 }
431 node_serialize(used_libraries, eval_ss, node);
432 has_nodes = true;
433 }
434
435 if (!has_nodes) {
436 return {};
437 }
438
439 if (output_link) {
440 eval_ss << "return " << output_link->output << ";\n";
441 }
442 else {
443 /* Default output in case there are only AOVs. */
444 eval_ss << "return " << output_default << ";\n";
445 }
446
447 std::string str = eval_ss.str();
448 BLI_hash_mm2a_add(&hm2a_, reinterpret_cast<const uchar *>(str.c_str()), str.size());
449 return {str, set_to_vector_stable(used_libraries)};
450}
451
452GPUGraphOutput GPUCodegen::graph_serialize(GPUNodeTag tree_tag)
453{
454 std::stringstream eval_ss;
455 Set<StringRefNull> used_libraries;
456 LISTBASE_FOREACH (GPUNode *, node, &graph.nodes) {
457 if (node->tag & tree_tag) {
458 node_serialize(used_libraries, eval_ss, node);
459 }
460 }
461 std::string str = eval_ss.str();
462 BLI_hash_mm2a_add(&hm2a_, reinterpret_cast<const uchar *>(str.c_str()), str.size());
463 return {str, set_to_vector_stable(used_libraries)};
464}
465
 /* Cryptomatte generation — signature line lost in this extraction. Builds a
  * float input holding the material's cryptomatte hash and appends it to the
  * UBO input list. NOTE(review): original lines 473 and 475 are missing;
  * they presumably fetch the Material (GPU_material_get_material) and start
  * the BKE cryptomatte hash construction — confirm against upstream. */
467{
468 cryptomatte_input_ = MEM_callocN<GPUInput>(__func__);
469 cryptomatte_input_->type = GPU_FLOAT;
470 cryptomatte_input_->source = GPU_SOURCE_CRYPTOMATTE;
471
472 float material_hash = 0.0f;
474 if (material) {
 /* `id.name + 2` skips the two-character ID code prefix of the name. */
476 BLI_strnlen(material->id.name + 2, MAX_NAME - 2));
477 material_hash = hash.float_encoded();
478 }
479 cryptomatte_input_->vec[0] = material_hash;
480
481 BLI_addtail(&ubo_inputs_, BLI_genericNodeN(cryptomatte_input_));
482}
483
 /* Uniform buffer generation — signature line lost in this extraction.
  * Collects all unlinked uniform inputs into `ubo_inputs_` and creates the
  * material UBO from them. NOTE(review): original line 488 (the inner
  * LISTBASE_FOREACH over `node->inputs` — evidenced by the orphan closing
  * brace below) and line 498 (presumably the
  * GPU_material_uniform_buffer_create() call) are missing — confirm
  * against upstream. */
485{
486 /* Extract uniform inputs. */
487 LISTBASE_FOREACH (GPUNode *, node, &graph.nodes) {
489 if (input->source == GPU_SOURCE_UNIFORM && !input->link) {
490 /* We handle the UBO uniforms separately. */
491 BLI_addtail(&ubo_inputs_, BLI_genericNodeN(input));
492 uniforms_total_++;
493 }
494 }
495 }
496 if (!BLI_listbase_is_empty(&ubo_inputs_)) {
497 /* This sorts the inputs based on size. */
499 }
500}
501
502/* Sets id for unique names for all inputs, resources and temp variables. */
503void GPUCodegen::set_unique_ids()
504{
507
508 int id = 1;
509 LISTBASE_FOREACH (GPUNode *, node, &graph.nodes) {
511 input->id = id++;
512 }
514 output->id = id++;
515 }
516 if (node->zone_index != -1) {
517 auto &map = node->is_zone_end ? zone_ends : zone_starts;
518 map.add(node->zone_index, node);
519 }
520 }
521
522 auto find_zone_io = [](auto first) {
523 while (first && !first->is_zone_io && first->next) {
524 first = first->next;
525 }
526 return first;
527 };
528
529 /* Assign the same id to inputs and outputs of start and end zones. */
530 for (GPUNode *end : zone_ends.values()) {
531
532 GPUInput *end_input = find_zone_io((GPUInput *)end->inputs.first);
533 GPUOutput *end_output = find_zone_io((GPUOutput *)end->outputs.first);
534
535 GPUNode *start = zone_starts.lookup(end->zone_index);
536
537 GPUInput *start_input = find_zone_io((GPUInput *)start->inputs.first);
538 GPUOutput *start_output = find_zone_io((GPUOutput *)start->outputs.first);
539
540 for (; start_input; start_input = start_input->next,
541 start_output = start_output->next,
542 end_input = end_input->next,
543 end_output = end_output->next)
544 {
545 start_output->id = start_input->id;
546 end_input->id = start_input->id;
547 end_output->id = start_input->id;
548 }
549 }
550}
551
 /* Graph generation — signature line lost in this extraction. Serializes all
  * output graphs (surface, volume, displacement, thickness, compositor) and
  * per-function subgraphs, then finalizes the shader hash.
  * NOTE(review): original line 571 — the statement inside the "untag every
  * node" loop — is missing; confirm against upstream. */
553{
554 set_unique_ids();
555
556 output.surface = graph_serialize(
557 GPU_NODE_TAG_SURFACE | GPU_NODE_TAG_AOV, graph.outlink_surface, "CLOSURE_DEFAULT");
558 output.volume = graph_serialize(GPU_NODE_TAG_VOLUME, graph.outlink_volume, "CLOSURE_DEFAULT");
559 output.displacement = graph_serialize(
560 GPU_NODE_TAG_DISPLACEMENT, graph.outlink_displacement, nullptr);
561 output.thickness = graph_serialize(GPU_NODE_TAG_THICKNESS, graph.outlink_thickness, nullptr);
562 if (!BLI_listbase_is_empty(&graph.outlink_compositor)) {
563 output.composite = graph_serialize(GPU_NODE_TAG_COMPOSITOR);
564 }
565
566 if (!BLI_listbase_is_empty(&graph.material_functions)) {
567 LISTBASE_FOREACH (GPUNodeGraphFunctionLink *, func_link, &graph.material_functions) {
568 std::stringstream eval_ss;
569 /* Untag every node in the graph to avoid serializing nodes from other functions */
570 LISTBASE_FOREACH (GPUNode *, node, &graph.nodes) {
572 }
573 /* Tag only the nodes needed for the current function */
574 gpu_nodes_tag(&graph, func_link->outlink, GPU_NODE_TAG_FUNCTION);
 /* Note: local `graph` shadows the member `graph` for the rest of this
  * loop iteration. */
575 GPUGraphOutput graph = graph_serialize(GPU_NODE_TAG_FUNCTION, func_link->outlink);
576 eval_ss << "float " << func_link->name << "() {\n" << graph.serialized << "}\n\n";
577 output.material_functions.append({eval_ss.str(), graph.dependencies});
578 }
579 /* Leave the function tags as they were before serialization */
580 LISTBASE_FOREACH (GPUNodeGraphFunctionLink *, funclink, &graph.material_functions) {
581 gpu_nodes_tag(&graph, funclink->outlink, GPU_NODE_TAG_FUNCTION);
582 }
583 }
584
 /* Attribute names participate in the hash so renames invalidate caches. */
585 LISTBASE_FOREACH (GPUMaterialAttribute *, attr, &graph.attributes) {
586 BLI_hash_mm2a_add(&hm2a_, (uchar *)attr->name, strlen(attr->name));
587 }
588
589 hash_ = BLI_hash_mm2a_end(&hm2a_);
590}
591
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:53
void BLI_hash_mm2a_init(BLI_HashMurmur2A *mm2, uint32_t seed)
Definition hash_mm2a.cc:62
void BLI_hash_mm2a_add(BLI_HashMurmur2A *mm2, const unsigned char *data, size_t len)
Definition hash_mm2a.cc:70
void BLI_hash_mm2a_add_int(BLI_HashMurmur2A *mm2, int data)
Definition hash_mm2a.cc:85
uint32_t BLI_hash_mm2a_end(BLI_HashMurmur2A *mm2)
Definition hash_mm2a.cc:90
LinkData * BLI_genericNodeN(void *data)
Definition listbase.cc:922
#define LISTBASE_FOREACH(type, var, list)
BLI_INLINE bool BLI_listbase_is_empty(const ListBase *lb)
void void BLI_freelistN(ListBase *listbase) ATTR_NONNULL(1)
Definition listbase.cc:497
void BLI_addtail(ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:111
#define SNPRINTF(dst, format,...)
Definition BLI_string.h:604
int char char int int int int size_t BLI_strnlen(const char *str, size_t maxlen) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition string.cc:913
char * STRNCPY(char(&dst)[N], const char *src)
Definition BLI_string.h:693
unsigned char uchar
#define ELEM(...)
#define MAX_NAME
Definition DNA_defs.h:50
uint64_t GPU_material_uuid_get(GPUMaterial *mat)
GPUType
@ GPU_VEC4
@ GPU_VEC3
@ GPU_FLOAT
Material * GPU_material_get_material(GPUMaterial *material)
eGPUMaterialFlag GPU_material_flag(const GPUMaterial *mat)
void GPU_material_uniform_buffer_create(GPUMaterial *material, ListBase *inputs)
#define GPU_UBO_BLOCK_NAME
static constexpr int GPU_NODE_TREE_UBO_SLOT
#define GPU_ATTRIBUTE_UBO_BLOCK_NAME
BLI_INLINE void IMB_colormanagement_get_luminance_coefficients(float r_rgb[3])
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
ATTR_WARN_UNUSED_RESULT const void * element
ValueIterator values() const &
Definition BLI_map.hh:884
bool add(const Key &key, const Value &value)
Definition BLI_map.hh:295
const Value & lookup(const Key &key) const
Definition BLI_map.hh:545
Span< NewT > constexpr cast() const
Definition BLI_span.hh:418
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr const T & last(const int64_t n=0) const
Definition BLI_span.hh:325
void append(const T &value)
GPUCodegenCreateInfo * create_info
GPUCodegen(GPUMaterial *mat_, GPUNodeGraph *graph_, const char *debug_name)
#define str(s)
static Vector< StringRefNull > set_to_vector_stable(Set< StringRefNull > &set)
void gpu_material_library_use_function(blender::Set< blender::StringRefNull > &used_libraries, const char *name)
void gpu_nodes_tag(GPUNodeGraph *graph, GPUNodeLink *link_start, GPUNodeTag tag)
@ GPU_SOURCE_CONSTANT
@ GPU_SOURCE_FUNCTION_CALL
@ GPU_SOURCE_ATTR
@ GPU_SOURCE_CRYPTOMATTE
@ GPU_SOURCE_UNIFORM
@ GPU_SOURCE_OUTPUT
@ GPU_SOURCE_TEX_TILED_MAPPING
@ GPU_SOURCE_UNIFORM_ATTR
@ GPU_SOURCE_LAYER_ATTR
@ GPU_SOURCE_STRUCT
@ GPU_SOURCE_TEX
GPUNodeTag
@ GPU_NODE_TAG_SURFACE
@ GPU_NODE_TAG_DISPLACEMENT
@ GPU_NODE_TAG_VOLUME
@ GPU_NODE_TAG_FUNCTION
@ GPU_NODE_TAG_COMPOSITOR
@ GPU_NODE_TAG_THICKNESS
@ GPU_NODE_TAG_AOV
#define input
#define output
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
static Type to_type(const GPUType type)
std::ostream & operator<<(std::ostream &stream, const eAlpha &space)
Definition BLI_color.cc:15
#define hash
Definition noise_c.cc:154
const char * name
GPUInput * next
GPUNodeTag tag
ListBase outputs
ListBase inputs
const char * name
bool is_zone_end
GPUOutput * next
char name[258]
Definition DNA_ID.h:432
void * first
char attr_names[16][GPU_MAX_SAFE_ATTR_NAME+1]
const char * append_sampler_name(const char name[32])
Vector< std::unique_ptr< NameEntry >, 16 > sampler_names
Describe inputs & outputs, stage interfaces, resources and sources of a shader. If all data is correc...
Self & vertex_in(int slot, Type type, StringRefNull name)
Self & additional_info(StringRefNull info_name)
Self & vertex_out(StageInterfaceInfo &interface)
Self & sampler(int slot, ImageType type, StringRefNull name, Frequency freq=Frequency::PASS, GPUSamplerState sampler=GPUSamplerState::internal_sampler())
Self & uniform_buf(int slot, StringRefNull type_name, StringRefNull name, Frequency freq=Frequency::PASS)
Self & smooth(Type type, StringRefNull _name)