Blender V4.5
mtl_shader.hh
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
8
9#pragma once
10
11#include "MEM_guardedalloc.h"
12
13#include "GPU_batch.hh"
14#include "GPU_capabilities.hh"
15#include "GPU_shader.hh"
16#include "GPU_vertex_format.hh"
17
18#include <Metal/Metal.h>
19#include <QuartzCore/QuartzCore.h>
20#include <functional>
21#include <unordered_map>
22
23#include <deque>
24#include <mutex>
25#include <thread>
26
27#include "mtl_framebuffer.hh"
29#include "mtl_shader_shared.hh"
30#include "mtl_state.hh"
31#include "mtl_texture.hh"
32
34#include "gpu_shader_private.hh"
35
36namespace blender::gpu {
37
39class MTLContext;
40
41/* Debug control. */
42#define MTL_SHADER_DEBUG_EXPORT_SOURCE 0
43#define MTL_SHADER_TRANSLATION_DEBUG_OUTPUT 0
44
45/* Separate print used only during development and debugging. */
46#if MTL_SHADER_TRANSLATION_DEBUG_OUTPUT
47# define shader_debug_printf printf
48#else
49# define shader_debug_printf(...) /* Null print. */
50#endif
51
52/* Offset base specialization constant ID for function constants declared in CreateInfo. */
53#define MTL_SHADER_SPECIALIZATION_CONSTANT_BASE_ID 30
54/* Maximum threshold for specialized shader variant count.
55 * This is a catch-all to prevent excessive PSO permutations from being created and also catch
56 * parameters which should ideally not be used for specialization. */
57#define MTL_SHADER_MAX_SPECIALIZED_PSOS 5
58
/* Desired reflection data for a buffer binding. */
/* NOTE(review): the opening `struct MTLBufferArgumentData {` line was lost in extraction
 * (the type name appears in the trailing member index) — restore it. */
  /* Buffer bind index — presumably the Metal `[[buffer(N)]]` argument slot; TODO confirm. */
  uint32_t index;
  /* Size of the bound buffer range, in bytes. */
  uint32_t size;
  /* Required alignment for the binding, in bytes. */
  uint32_t alignment;
  /* Whether the shader actively uses this binding. */
  bool active;
};
66
/* Metal Render Pipeline State Instance. */
/* NOTE(review): the opening `struct MTLRenderPipelineStateInstance {` line was lost in
 * extraction (the type is used as a return type in the member index) — restore it. */
  /* Function instances with specialization.
   * Required for argument encoder construction. */
  id<MTLFunction> vert;
  id<MTLFunction> frag;

  /* PSO handle. */
  id<MTLRenderPipelineState> pso;

  /* Unique index for PSO variant. */
  /* NOTE(review): member declaration lost in extraction. */
  /* Base bind index for binding uniform buffers, offset based on other
   * bound buffers such as vertex buffers, as the count can vary. */
  /* NOTE(review): member declaration lost in extraction. */
  /* Base bind index for binding storage buffers. */
  /* NOTE(review): member declaration lost in extraction. */
  /* buffer bind slot used for null attributes (-1 if not needed). */
  /* NOTE(review): member declaration lost in extraction. */
  /* Topology class. */
  MTLPrimitiveTopologyClass prim_type;

  /* NOTE(review): the two reflection-data members were lost in extraction — per the member
   * index: `blender::Vector<MTLBufferArgumentData> buffer_bindings_reflection_data_vert`
   * and `..._frag`. Restore them. */
};
100
/* Common compute pipeline state. */
/* NOTE(review): the opening `struct MTLComputePipelineStateCommon {` line and the
 * `threadgroup_x_len` / `threadgroup_y_len` / `threadgroup_z_len` member declarations
 * were lost in extraction (the setter below writes them) — restore them. */

  /* Thread-group information is common for all PSO variants. */

  /* Record the compute shader's fixed workgroup dimensions; this state is shared across
   * every PSO variant of the shader. */
  inline void set_compute_workgroup_size(int workgroup_size_x,
                                         int workgroup_size_y,
                                         int workgroup_size_z)
  {
    this->threadgroup_x_len = workgroup_size_x;
    this->threadgroup_y_len = workgroup_size_y;
    this->threadgroup_z_len = workgroup_size_z;
  }
};
118
/* Metal Compute Pipeline State instance per PSO. */
/* NOTE(review): the opening `struct MTLComputePipelineStateInstance {` line was lost in
 * extraction (the type is used as a return type in the member index) — restore it. */

  /* Unique index for PSO variant. */
  /* NOTE(review): member declaration lost in extraction. */
  /* Base bind index for binding uniform buffers, offset based on other
   * bound buffers such as vertex buffers, as the count can vary. */
  /* NOTE(review): member declaration lost in extraction. */
  /* Base bind index for binding storage buffers. */
  /* NOTE(review): member declaration lost in extraction. */

  /* Function instances with specialization.
   * Required for argument encoder construction. */
  id<MTLFunction> compute = nil;
  /* PSO handle. */
  id<MTLComputePipelineState> pso = nil;
};
137
/* #MTLShaderBuilder source wrapper used during initial compilation. */
/* NOTE(review): the opening `struct MTLShaderBuilder {` line was lost in extraction
 * (`MTLShaderBuilder *shd_builder_` is declared in #MTLShader below) — restore it. */
  /* Final MSL sources per stage, either translated from GLSL or supplied directly. */
  NSString *msl_source_vert_ = @"";
  NSString *msl_source_frag_ = @"";
  NSString *msl_source_compute_ = @"";

  /* Generated GLSL source used during compilation. */
  std::string glsl_vertex_source_ = "";
  std::string glsl_fragment_source_ = "";
  std::string glsl_compute_source_ = "";

  /* Indicates whether source code has been provided via MSL directly. */
  bool source_from_msl_ = false;
};
152
class MTLShader : public Shader {
  /* NOTE(review): this class body has gaps from the documentation extraction — several
   * member declarations and method signatures are missing below. Cross-check each gap
   * against the member index appended to this dump when restoring the header. */

 private:
  /* Context Handle. */
  MTLContext *context_ = nullptr;

  /* Transient source wrapper holding GLSL/MSL sources during initial compilation
   * (see #MTLShaderBuilder above). */
  MTLShaderBuilder *shd_builder_ = nullptr;
  NSString *vertex_function_name_ = @"";
  NSString *fragment_function_name_ = @"";
  NSString *compute_function_name_ = @"";

  /* Compiled Metal shader libraries per stage (nil until compiled). */
  id<MTLLibrary> shader_library_vert_ = nil;
  id<MTLLibrary> shader_library_frag_ = nil;
  id<MTLLibrary> shader_library_compute_ = nil;
  /* Presumably set once compilation/finalization succeeds — confirm in mtl_shader.mm. */
  bool valid_ = false;

  /* Metal API Descriptor used for creation of unique PSOs based on rendering state. */
  MTLRenderPipelineDescriptor *pso_descriptor_ = nil;
  /* Metal backend struct containing all high-level pipeline state parameters
   * which contribute to instantiation of a unique PSO. */
  MTLRenderPipelineStateDescriptor current_pipeline_state_;
  /* Cache of compiled PipelineStateObjects. */
  /* NOTE(review): the render PSO cache container declaration (guarded by
   * `pso_cache_lock_` below) was lost in extraction — restore it. */
  std::mutex pso_cache_lock_;

  MTLComputePipelineStateCommon compute_pso_common_state_;
  /* NOTE(review): the first line of the compute PSO cache declaration was lost in
   * extraction; only the trailing identifier remains. */
      compute_pso_cache_;

  /* True to enable multi-layered rendering support. */
  bool uses_gpu_layer = false;

  /* True to enable multi-viewport rendering support. */
  bool uses_gpu_viewport_index = false;

  /* Metal Shader Uniform data store.
   * This block is used to store current shader push_constant
   * data before it is submitted to the GPU. This is currently
   * stored per shader instance, though depending on GPU module
   * functionality, this could potentially be a global data store.
   * This data is associated with the PushConstantBlock, which is
   * always at index zero in the UBO list. */
  void *push_constant_data_ = nullptr;
  bool push_constant_modified_ = false;

  /* Special definition for Max TotalThreadsPerThreadgroup tuning. */
  uint maxTotalThreadsPerThreadgroup_Tuning_ = 0;

  /* Set to true when batch compiling. */
  bool async_compilation_ = false;

  /* Shared finalization implementation — definition not visible in this view. */
  bool finalize_shader(const shader::ShaderCreateInfo *info = nullptr);

 public:
  MTLShader(MTLContext *ctx, const char *name);
  /* NOTE(review): the first line(s) of this second constructor declaration were lost in
   * extraction; only the trailing parameters remain. */
          const char *name,
          NSString *input_vertex_source,
          NSString *input_fragment_source,
          NSString *vertex_function_name_,
          NSString *fragment_function_name_);
  ~MTLShader();

  void init(const shader::ShaderCreateInfo & /*info*/, bool is_batch_compilation) override;
  void init() override {}

  /* Assign GLSL source. */
  /* NOTE(review): the `vertex/geometry/fragment/compute_shader_from_glsl(
   * MutableSpan<StringRefNull>)` overrides were lost in extraction (all four are listed
   * in the member index) — restore them. */

  /* Compile and build - Return true if successful. */
  bool finalize(const shader::ShaderCreateInfo *info = nullptr) override;
  /* NOTE(review): `bool finalize_compute(const shader::ShaderCreateInfo *info)` was lost
   * here (present in the member index). */
  void warm_cache(int limit) override;

  /* Utility. */
  bool is_valid()
  {
    return valid_;
  }
  /* NOTE(review): accessor signature lost in extraction — the body tests whether a
   * compute library was compiled. */
  {
    return (shader_library_compute_ != nil);
  }
  /* NOTE(review): accessor signature lost in extraction — the body tests
   * `parent_shader_`, a member that is itself missing from this dump. */
  {
    return (parent_shader_ != nil);
  }
  /* NOTE(review): `MTLRenderPipelineStateDescriptor &get_current_pipeline_state()`
   * signature lost in extraction (see member index). */
  {
    return current_pipeline_state_;
  }
  /* NOTE(review): `MTLShaderInterface *get_interface()` signature lost in extraction
   * (see member index). */
  {
    return static_cast<MTLShaderInterface *>(this->interface);
  }
  /* NOTE(review): push-constant data accessor signature lost in extraction. */
  {
    return push_constant_data_;
  }

  /* Shader source generators from create-info.
   * These aren't all used by Metal, as certain parts of source code generation
   * for shader entry-points and resource mapping occur during `finalize`. */
  std::string resources_declare(const shader::ShaderCreateInfo &info) const override;
  std::string vertex_interface_declare(const shader::ShaderCreateInfo &info) const override;
  std::string fragment_interface_declare(const shader::ShaderCreateInfo &info) const override;
  std::string geometry_interface_declare(const shader::ShaderCreateInfo &info) const override;
  std::string geometry_layout_declare(const shader::ShaderCreateInfo &info) const override;
  std::string compute_layout_declare(const shader::ShaderCreateInfo &info) const override;

  void bind(const shader::SpecializationConstants *constants_state) override;
  void unbind() override;

  void uniform_float(int location, int comp_len, int array_size, const float *data) override;
  void uniform_int(int location, int comp_len, int array_size, const int *data) override;
  /* Mark the push-constant block dirty so it is re-submitted before the next draw. */
  void push_constant_bindstate_mark_dirty(bool is_dirty);

  /* Metal shader properties and source mapping. */
  void set_vertex_function_name(NSString *vetex_function_name);
  void set_fragment_function_name(NSString *fragment_function_name);
  void set_compute_function_name(NSString *compute_function_name);
  void shader_source_from_msl(NSString *input_vertex_source, NSString *input_fragment_source);
  void shader_compute_source_from_msl(NSString *input_compute_source);

  /* NOTE(review): the leading lines of the three `bake_*_pipeline_state` declarations
   * below were lost in extraction; the full signatures appear in the member index
   * (`bake_current_pipeline_state`, `bake_pipeline_state`, `bake_compute_pipeline_state`). */
          MTLPrimitiveTopologyClass prim_type);
          MTLContext *ctx,
          MTLPrimitiveTopologyClass prim_type,
          const MTLRenderPipelineStateDescriptor &pipeline_descriptor);

          MTLContext *ctx, MTLComputePipelineStateDescriptor &compute_pipeline_descriptor);

  /* NOTE(review): `const MTLComputePipelineStateCommon &get_compute_common_state()`
   * signature lost in extraction (see member index). */
  {
    return compute_pso_common_state_;
  }

 private:
  /* Generate MSL shader from GLSL source. */
  bool generate_msl_from_glsl(const shader::ShaderCreateInfo *info);
  bool generate_msl_from_glsl_compute(const shader::ShaderCreateInfo *info);

  MEM_CXX_CLASS_ALLOC_FUNCS("MTLShader");
};
324
/* NOTE(review): the class-head line was lost in extraction — per the member index this is
 * the Metal `ShaderCompiler` subclass (its constructor is listed there with
 * `threads_count`, `context_type` and `support_specializations` parameters). */
 public:
  /* NOTE(review): constructor declaration lost in extraction (see member index). */

  /* Compile a shader from its create-info; runs on the compiler's worker context. */
  Shader *compile_shader(const shader::ShaderCreateInfo &info) override;
  /* Build the PSO variant(s) for the given specialization-constant combination. */
  void specialize_shader(ShaderSpecialization &specialization) override;
};
332
333/* Vertex format conversion.
334 * Determines whether it is possible to resize a vertex attribute type
335 * during input assembly. A conversion is implied by the difference
336 * between the input vertex descriptor (from MTLBatch/MTLImmediate)
337 * and the type specified in the shader source.
338 *
339 * e.g. vec3 to vec4 expansion, or vec4 to vec2 truncation.
340 * NOTE: Vector expansion will replace empty elements with the values
341 * (0,0,0,1).
342 *
343 * If implicit format resize is not possible, this function
344 * returns false.
345 *
346 * Implicitly supported conversions in Metal are described here:
347 * https://developer.apple.com/documentation/metal/mtlvertexattributedescriptor/1516081-format?language=objc
348 */
/* Return the variant of `mtl_format` that has exactly `components` elements while keeping
 * the same component type (e.g. Float3 -> Float2). Returns MTLVertexFormatInvalid when
 * `mtl_format` is not one of the resizable scalar/vector families. */
inline MTLVertexFormat format_resize_comp(MTLVertexFormat mtl_format, uint32_t components)
{
/* One case-group per component type: matches any 1-4 component variant of `_type` and
 * returns the `components`-sized variant of the same type. */
#define RESIZE_TYPE(_type, _suffix) \
  case MTLVertexFormat##_type##_suffix: \
  case MTLVertexFormat##_type##2##_suffix: \
  case MTLVertexFormat##_type##3##_suffix: \
  case MTLVertexFormat##_type##4##_suffix: \
    switch (components) { \
      case 1: \
        return MTLVertexFormat##_type##_suffix; \
      case 2: \
        return MTLVertexFormat##_type##2##_suffix; \
      case 3: \
        return MTLVertexFormat##_type##3##_suffix; \
      case 4: \
        return MTLVertexFormat##_type##4##_suffix; \
    } \
    break;

  switch (mtl_format) {
    RESIZE_TYPE(Char, )
    RESIZE_TYPE(Char, Normalized)
    RESIZE_TYPE(UChar, )
    RESIZE_TYPE(UChar, Normalized)
    RESIZE_TYPE(Short, )
    RESIZE_TYPE(Short, Normalized)
    RESIZE_TYPE(UShort, )
    RESIZE_TYPE(UShort, Normalized)
    RESIZE_TYPE(Int, )
    RESIZE_TYPE(UInt, )
    RESIZE_TYPE(Half, )
    RESIZE_TYPE(Float, )
    default:
      /* Can only call this function on format that can be resized. */
      /* NOTE(review): a statement was lost here in extraction — presumably
       * `BLI_assert_unreachable();` (the macro appears in the trailing index). */
      break;
  }

#undef RESIZE_TYPE
  return MTLVertexFormatInvalid;
}
390
/* Extract the component (scalar) type of a vertex format by collapsing it to its
 * single-component variant, e.g. Float3 -> Float. */
inline MTLVertexFormat format_get_component_type(MTLVertexFormat mtl_format)
{
  const MTLVertexFormat single_component_format = format_resize_comp(mtl_format, 1);
  return single_component_format;
}
395
/* Map a GPU vertex attribute description (component type, fetch mode, component count) to
 * the natively supported MTLVertexFormat, or MTLVertexFormatInvalid when Metal has no
 * built-in loading mode and a manual conversion path is required. */
inline MTLVertexFormat to_mtl(GPUVertCompType component_type,
                              GPUVertFetchMode fetch_mode,
                              uint32_t component_len)
{
/* Select the 1-4 component variant of `_type` based on `component_len`. */
#define FORMAT_PER_COMP(_type, _suffix) \
  switch (component_len) { \
    case 1: \
      return MTLVertexFormat##_type##_suffix; \
    case 2: \
      return MTLVertexFormat##_type##2##_suffix; \
    case 3: \
      return MTLVertexFormat##_type##3##_suffix; \
    case 4: \
      return MTLVertexFormat##_type##4##_suffix; \
    default: \
      BLI_assert_msg(0, "Invalid attribute component count"); \
      break; \
  } \
  break;

/* 8/16-bit integer types: support direct INT fetch and normalized INT_TO_FLOAT_UNIT. */
#define FORMAT_PER_COMP_SMALL_INT(_type) \
  switch (fetch_mode) { \
    case GPU_FETCH_INT: \
      FORMAT_PER_COMP(_type, ) \
    case GPU_FETCH_INT_TO_FLOAT_UNIT: \
      FORMAT_PER_COMP(_type, Normalized) \
    case GPU_FETCH_FLOAT: \
      BLI_assert_msg(0, "Invalid fetch mode for integer attribute"); \
      break; \
  } \
  break;

/* 32-bit integer types: no normalized Metal variant exists, so INT_TO_FLOAT_UNIT falls
 * through to the caller's manual conversion path (returns Invalid below). */
#define FORMAT_PER_COMP_INT(_type) \
  switch (fetch_mode) { \
    case GPU_FETCH_INT: \
      FORMAT_PER_COMP(_type, ) \
    case GPU_FETCH_FLOAT: \
      BLI_assert_msg(0, "Invalid fetch mode for integer attribute"); \
      break; \
    case GPU_FETCH_INT_TO_FLOAT_UNIT: \
      /* Fallback to manual conversion */ \
      break; \
  } \
  break;

  switch (component_type) {
    case GPU_COMP_I8:
      /* NOTE(review): the macro invocations for the six integer cases below were lost in
       * extraction — presumably FORMAT_PER_COMP_SMALL_INT(Char/UChar/Short/UShort) and
       * FORMAT_PER_COMP_INT(Int/UInt). Restore them. */
    case GPU_COMP_U8:
    case GPU_COMP_I16:
    case GPU_COMP_U16:
    case GPU_COMP_I32:
    case GPU_COMP_U32:
    case GPU_COMP_F32:
      switch (fetch_mode) {
        case GPU_FETCH_FLOAT:
          FORMAT_PER_COMP(Float, )
          break;
        case GPU_FETCH_INT:
          /* NOTE(review): a case label (likely GPU_FETCH_INT_TO_FLOAT_UNIT) was lost
           * here in extraction. */
          BLI_assert_msg(0, "Invalid fetch mode for float attribute");
          break;
      }
    case GPU_COMP_I10:
      switch (fetch_mode) {
        /* NOTE(review): a `case GPU_FETCH_INT_TO_FLOAT_UNIT:` label was lost here. */
          return MTLVertexFormatInt1010102Normalized;
        case GPU_FETCH_FLOAT:
        case GPU_FETCH_INT:
          BLI_assert_msg(0, "Invalid fetch mode for compressed attribute");
          break;
      }
    case GPU_COMP_MAX:
      /* NOTE(review): a statement (presumably BLI_assert_unreachable()) was lost here. */
      break;
  }
#undef FORMAT_PER_COMP
  /* Loading mode not natively supported. */
  return MTLVertexFormatInvalid;
}
481
/* Return the number of components (1-4) encoded in a vertex format, or -1 for
 * MTLVertexFormatInvalid / unhandled formats. */
inline int mtl_format_component_len(MTLVertexFormat format)
{
/* One case-group per component count: matches every component type at that count and
 * returns `_value`. `_comp` is the numeric suffix ("" for the scalar variants). */
#define FORMAT_PER_TYPE(_comp, _value) \
  case MTLVertexFormatChar##_comp: \
  case MTLVertexFormatChar##_comp##Normalized: \
  case MTLVertexFormatUChar##_comp: \
  case MTLVertexFormatUChar##_comp##Normalized: \
  case MTLVertexFormatShort##_comp: \
  case MTLVertexFormatShort##_comp##Normalized: \
  case MTLVertexFormatUShort##_comp: \
  case MTLVertexFormatUShort##_comp##Normalized: \
  case MTLVertexFormatInt##_comp: \
  case MTLVertexFormatUInt##_comp: \
  case MTLVertexFormatHalf##_comp: \
  case MTLVertexFormatFloat##_comp: \
    return _value;

  switch (format) {
    FORMAT_PER_TYPE(, 1)
    FORMAT_PER_TYPE(2, 2)
    FORMAT_PER_TYPE(3, 3)
    FORMAT_PER_TYPE(4, 4)
    /* Packed formats handled explicitly — they don't follow the suffix naming scheme. */
    case MTLVertexFormatUInt1010102Normalized:
    case MTLVertexFormatInt1010102Normalized:
    case MTLVertexFormatUChar4Normalized_BGRA:
      return 4;
#if defined(MAC_OS_VERSION_14_0)
    case MTLVertexFormatFloatRG11B10:
      return 3;
    case MTLVertexFormatFloatRGB9E5:
      return 3;
#endif
    case MTLVertexFormatInvalid:
      return -1;
  }

#undef FORMAT_PER_TYPE
  return -1;
}
521
/* Return true when `format` is one of the 8/16-bit (signed or unsigned) normalized
 * vertex format variants; false otherwise. */
inline bool mtl_format_is_normalized(MTLVertexFormat format)
{
/* One case-group per component-count suffix, matching the normalized variants of every
 * small integer type. */
#define FORMAT_PER_TYPE(_comp) \
  case MTLVertexFormatChar##_comp##Normalized: \
  case MTLVertexFormatUChar##_comp##Normalized: \
  case MTLVertexFormatShort##_comp##Normalized: \
  case MTLVertexFormatUShort##_comp##Normalized: \
    return true;

  switch (format) {
    /* NOTE(review): the FORMAT_PER_TYPE invocations (for component counts 1-4) were lost
     * in extraction — restore them. */
    default:
      break;
  }

#undef FORMAT_PER_TYPE
  return false;
}
543
/* Resolve the MTLVertexFormat to declare in the vertex descriptor for an attribute whose
 * data layout is (`component_type`, `component_len`, `fetch_mode`) and whose shader-side
 * declaration is `shader_attr_format`. Relies on Metal's implicit attribute conversions
 * (component-count resize, normalized -> float); asserts when the pairing is one Metal
 * cannot convert implicitly. Returns MTLVertexFormatInvalid when no built-in loading mode
 * exists for the vertex data itself. */
inline MTLVertexFormat mtl_convert_vertex_format_ex(MTLVertexFormat shader_attr_format,
                                                    GPUVertCompType component_type,
                                                    uint32_t component_len,
                                                    GPUVertFetchMode fetch_mode)
{
  /* Format Metal would naturally use for the incoming vertex data. */
  const MTLVertexFormat data_format = to_mtl(component_type, fetch_mode, component_len);

  if (data_format == MTLVertexFormatInvalid) {
    /* No valid builtin conversion known or error. */
    return data_format;
  }

  if (data_format == shader_attr_format) {
    /* Everything matches. Nothing to do. */
    return data_format;
  }

  if (data_format == MTLVertexFormatInt1010102Normalized) {
    /* Packed 10_10_10_2 data is always read as float in the shader. */
    BLI_assert_msg(format_get_component_type(shader_attr_format) == MTLVertexFormatFloat,
                   "Vertex format is GPU_COMP_I10 but shader input is not float");
    return data_format;
  }

  /* Attribute type mismatch. Check if casting is supported. */
  const MTLVertexFormat shader_comp_type = format_get_component_type(shader_attr_format);
  const MTLVertexFormat data_comp_type = format_get_component_type(data_format);

  if (shader_comp_type == data_comp_type) {
    /* Conversion of vectors of different lengths is valid. */
    return data_format;
  }

  if (shader_comp_type != MTLVertexFormatFloat) {
    BLI_assert_msg(data_comp_type != MTLVertexFormatFloat,
                   "Vertex format is GPU_COMP_F32 but shader input is not float");
  }
  else {
    /* Casting normalized MTLVertexFormat types are only valid to float or half. */
    BLI_assert_msg(mtl_format_is_normalized(data_comp_type),
                   "Vertex format is INT_TO_FLOAT_UNIT but shader input is not float");
  }
  /* The sign of an integer MTLVertexFormat can not be cast to a shader argument with an
   * integer type of a different sign. */
  if (shader_comp_type == MTLVertexFormatInt) {
    BLI_assert_msg(ELEM(data_comp_type, MTLVertexFormatChar, MTLVertexFormatShort),
                   "Vertex format is either I8 or I16 but shader input is not float");
  }
  if (shader_comp_type == MTLVertexFormatUInt) {
    BLI_assert_msg(ELEM(data_comp_type, MTLVertexFormatUChar, MTLVertexFormatUShort),
                   "Vertex format is either U8 or U16 but shader input is not float");
  }
  /* Valid automatic conversion. */
  return data_format;
}
612
/* Convenience wrapper around #mtl_convert_vertex_format_ex: writes the resolved format to
 * `r_convertedFormat` and returns whether a valid (implicitly convertible) format exists. */
inline bool mtl_convert_vertex_format(MTLVertexFormat shader_attr_format,
                                      GPUVertCompType component_type,
                                      uint32_t component_len,
                                      GPUVertFetchMode fetch_mode,
                                      MTLVertexFormat *r_convertedFormat)
{
  const MTLVertexFormat resolved = mtl_convert_vertex_format_ex(
      shader_attr_format, component_type, component_len, fetch_mode);
  *r_convertedFormat = resolved;
  return resolved != MTLVertexFormatInvalid;
}
623
624} // namespace blender::gpu
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:53
unsigned int uint
#define ELEM(...)
GPUVertFetchMode
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT_TO_FLOAT_UNIT
@ GPU_FETCH_INT
GPUVertCompType
@ GPU_COMP_U16
@ GPU_COMP_MAX
@ GPU_COMP_I10
@ GPU_COMP_F32
@ GPU_COMP_I32
@ GPU_COMP_I8
@ GPU_COMP_U32
@ GPU_COMP_I16
@ GPU_COMP_U8
Read Guarded memory(de)allocation.
BMesh const char void * data
Shader * compile_shader(const shader::ShaderCreateInfo &info) override
void specialize_shader(ShaderSpecialization &specialization) override
void set_fragment_function_name(NSString *fragment_function_name)
void warm_cache(int limit) override
void geometry_shader_from_glsl(MutableSpan< StringRefNull > sources) override
MTLRenderPipelineStateInstance * bake_pipeline_state(MTLContext *ctx, MTLPrimitiveTopologyClass prim_type, const MTLRenderPipelineStateDescriptor &pipeline_descriptor)
const MTLComputePipelineStateCommon & get_compute_common_state()
std::string vertex_interface_declare(const shader::ShaderCreateInfo &info) const override
MTLComputePipelineStateInstance * bake_compute_pipeline_state(MTLContext *ctx, MTLComputePipelineStateDescriptor &compute_pipeline_descriptor)
std::string geometry_interface_declare(const shader::ShaderCreateInfo &info) const override
bool finalize_compute(const shader::ShaderCreateInfo *info)
void shader_compute_source_from_msl(NSString *input_compute_source)
void uniform_int(int location, int comp_len, int array_size, const int *data) override
void bind(const shader::SpecializationConstants *constants_state) override
bool finalize(const shader::ShaderCreateInfo *info=nullptr) override
void fragment_shader_from_glsl(MutableSpan< StringRefNull > sources) override
std::string compute_layout_declare(const shader::ShaderCreateInfo &info) const override
std::string fragment_interface_declare(const shader::ShaderCreateInfo &info) const override
void compute_shader_from_glsl(MutableSpan< StringRefNull > sources) override
void set_vertex_function_name(NSString *vetex_function_name)
void shader_source_from_msl(NSString *input_vertex_source, NSString *input_fragment_source)
void unbind() override
MTLShaderInterface * get_interface()
void uniform_float(int location, int comp_len, int array_size, const float *data) override
void vertex_shader_from_glsl(MutableSpan< StringRefNull > sources) override
void init() override
MTLRenderPipelineStateInstance * bake_current_pipeline_state(MTLContext *ctx, MTLPrimitiveTopologyClass prim_type)
MTLShader(MTLContext *ctx, const char *name)
Definition mtl_shader.mm:72
std::string geometry_layout_declare(const shader::ShaderCreateInfo &info) const override
std::string resources_declare(const shader::ShaderCreateInfo &info) const override
MTLRenderPipelineStateDescriptor & get_current_pipeline_state()
void set_interface(MTLShaderInterface *interface)
void push_constant_bindstate_mark_dirty(bool is_dirty)
void set_compute_function_name(NSString *compute_function_name)
ShaderCompiler(uint32_t threads_count=1, GPUWorker::ContextType context_type=GPUWorker::ContextType::PerThread, bool support_specializations=false)
ShaderInterface * interface
Shader(const char *name)
Definition gpu_shader.cc:56
format
#define FORMAT_PER_COMP_INT(_type)
#define RESIZE_TYPE(_type, _suffix)
#define FORMAT_PER_TYPE(_comp, _value)
#define FORMAT_PER_COMP_SMALL_INT(_type)
#define FORMAT_PER_COMP(_type, _suffix)
MTLVertexFormat format_resize_comp(MTLVertexFormat mtl_format, uint32_t components)
MTLVertexFormat format_get_component_type(MTLVertexFormat mtl_format)
bool mtl_convert_vertex_format(MTLVertexFormat shader_attr_format, GPUVertCompType component_type, uint32_t component_len, GPUVertFetchMode fetch_mode, MTLVertexFormat *r_convertedFormat)
MTLVertexFormat to_mtl(GPUVertCompType component_type, GPUVertFetchMode fetch_mode, uint32_t component_len)
MTLVertexFormat mtl_convert_vertex_format_ex(MTLVertexFormat shader_attr_format, GPUVertCompType component_type, uint32_t component_len, GPUVertFetchMode fetch_mode)
bool mtl_format_is_normalized(MTLVertexFormat format)
int mtl_format_component_len(MTLVertexFormat format)
void set_compute_workgroup_size(int workgroup_size_x, int workgroup_size_y, int workgroup_size_z)
blender::Vector< MTLBufferArgumentData > buffer_bindings_reflection_data_frag
Definition mtl_shader.hh:98
blender::Vector< MTLBufferArgumentData > buffer_bindings_reflection_data_vert
Definition mtl_shader.hh:97
Describe inputs & outputs, stage interfaces, resources and sources of a shader. If all data is correc...