Blender V5.0
mtl_shader.hh
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
8
9#pragma once
10
11#include "MEM_guardedalloc.h"
12
13#include "GPU_batch.hh"
14#include "GPU_capabilities.hh"
15#include "GPU_shader.hh"
16#include "GPU_vertex_format.hh"
17
18#include <Metal/Metal.h>
19#include <QuartzCore/QuartzCore.h>
20#include <functional>
21#include <unordered_map>
22
23#include <deque>
24#include <mutex>
25#include <thread>
26
27#include "mtl_framebuffer.hh"
29#include "mtl_shader_shared.hh"
30#include "mtl_state.hh"
31#include "mtl_texture.hh"
32
34#include "gpu_shader_private.hh"
35
36namespace blender::gpu {
37
39class MTLContext;
40
41/* Debug control. */
42#define MTL_SHADER_DEBUG_EXPORT_SOURCE 0
43#define MTL_SHADER_TRANSLATION_DEBUG_OUTPUT 0
44
45/* Separate print used only during development and debugging. */
46#if MTL_SHADER_TRANSLATION_DEBUG_OUTPUT
47# define shader_debug_printf printf
48#else
49# define shader_debug_printf(...) /* Null print. */
50#endif
51
52/* Offset base specialization constant ID for function constants declared in CreateInfo. */
53#define MTL_SHADER_SPECIALIZATION_CONSTANT_BASE_ID 30
54/* Maximum threshold for specialized shader variant count.
55 * This is a catch-all to prevent excessive PSO permutations from being created and also catch
56 * parameters which should ideally not be used for specialization. */
57#define MTL_SHADER_MAX_SPECIALIZED_PSOS 5
58
59/* Desired reflection data for a buffer binding. */
61 uint32_t index;
62 uint32_t size;
63 uint32_t alignment;
64 bool active;
65};
66
67/* Metal Render Pipeline State Instance. */
69 /* Function instances with specialization.
70 * Required for argument encoder construction. */
71 id<MTLFunction> vert;
72 id<MTLFunction> frag;
73
74 /* PSO handle. */
75 id<MTLRenderPipelineState> pso;
76
78 /* Unique index for PSO variant. */
80 /* Base bind index for binding uniform buffers, offset based on other
81 * bound buffers such as vertex buffers, as the count can vary. */
83 /* Base bind index for binding storage buffers. */
85 /* buffer bind slot used for null attributes (-1 if not needed). */
87 /* Topology class. */
88 MTLPrimitiveTopologyClass prim_type;
89
99};
100
101/* Common compute pipeline state. */
103
104 /* Thread-group information is common for all PSO variants. */
108
109 inline void set_compute_workgroup_size(int workgroup_size_x,
110 int workgroup_size_y,
111 int workgroup_size_z)
112 {
113 this->threadgroup_x_len = workgroup_size_x;
114 this->threadgroup_y_len = workgroup_size_y;
115 this->threadgroup_z_len = workgroup_size_z;
116 }
117};
118
119/* Metal Compute Pipeline State instance per PSO. */
121
123 /* Unique index for PSO variant. */
125 /* Base bind index for binding uniform buffers, offset based on other
126 * bound buffers such as vertex buffers, as the count can vary. */
128 /* Base bind index for binding storage buffers. */
130
131 /* Function instances with specialization.
132 * Required for argument encoder construction. */
133 id<MTLFunction> compute = nil;
134 /* PSO handle. */
135 id<MTLComputePipelineState> pso = nil;
136};
137
138/* #MTLShaderBuilder source wrapper used during initial compilation. */
140 NSString *msl_source_vert_ = @"";
141 NSString *msl_source_frag_ = @"";
142 NSString *msl_source_compute_ = @"";
143
144 /* Generated GLSL source used during compilation. */
145 std::string glsl_vertex_source_ = "";
146 std::string glsl_fragment_source_ = "";
147 std::string glsl_compute_source_ = "";
148
149 /* Indicates whether source code has been provided via MSL directly. */
150 bool source_from_msl_ = false;
151};
152
166class MTLShader : public Shader {
169
170 private:
171 /* Context Handle. */
172 MTLContext *context_ = nullptr;
173
175 MTLShaderBuilder *shd_builder_ = nullptr;
176 NSString *vertex_function_name_ = @"";
177 NSString *fragment_function_name_ = @"";
178 NSString *compute_function_name_ = @"";
179
181 id<MTLLibrary> shader_library_vert_ = nil;
182 id<MTLLibrary> shader_library_frag_ = nil;
183 id<MTLLibrary> shader_library_compute_ = nil;
184 bool valid_ = false;
185
187 /* Metal API Descriptor used for creation of unique PSOs based on rendering state. */
188 MTLRenderPipelineDescriptor *pso_descriptor_ = nil;
189 /* Metal backend struct containing all high-level pipeline state parameters
190 * which contribute to instantiation of a unique PSO. */
191 MTLRenderPipelineStateDescriptor current_pipeline_state_;
192 /* Cache of compiled PipelineStateObjects. */
194 std::mutex pso_cache_lock_;
195
197 MTLComputePipelineStateCommon compute_pso_common_state_;
199 compute_pso_cache_;
200
201 /* True to enable multi-layered rendering support. */
202 bool uses_gpu_layer = false;
203
204 /* True to enable multi-viewport rendering support. */
205 bool uses_gpu_viewport_index = false;
206
207 /* Metal Shader Uniform data store.
208 * This blocks is used to store current shader push_constant
209 * data before it is submitted to the GPU. This is currently
210 * stored per shader instance, though depending on GPU module
211 * functionality, this could potentially be a global data store.
212 * This data is associated with the PushConstantBlock, which is
213 * always at index zero in the UBO list. */
214 void *push_constant_data_ = nullptr;
215 bool push_constant_modified_ = false;
216
217 /* Special definition for Max TotalThreadsPerThreadgroup tuning. */
218 uint maxTotalThreadsPerThreadgroup_Tuning_ = 0;
219
220 /* Set to true when batch compiling */
221 bool async_compilation_ = false;
222
223 /* If greater than one, use argument buffer to support arbitrary number of samplers. */
224 int arg_buf_samplers_vert_ = 0;
225 int arg_buf_samplers_frag_ = 0;
226 int arg_buf_samplers_comp_ = 0;
227
228 bool finalize_shader(const shader::ShaderCreateInfo *info = nullptr);
229
230 public:
231 MTLShader(MTLContext *ctx, const char *name);
234 const char *name,
235 NSString *input_vertex_source,
236 NSString *input_fragment_source,
237 NSString *vertex_function_name_,
238 NSString *fragment_function_name_);
239 ~MTLShader();
240
241 void init(const shader::ShaderCreateInfo & /*info*/, bool is_batch_compilation) override;
242
243 /* Assign GLSL source. */
248
249 /* Compile and build - Return true if successful. */
250 bool finalize(const shader::ShaderCreateInfo *info = nullptr) override;
252 void warm_cache(int limit) override;
253
254 /* Utility. */
255 bool is_valid()
256 {
257 return valid_;
258 }
260 {
261 return (shader_library_compute_ != nil);
262 }
264 {
265 return (parent_shader_ != nil);
266 }
268 {
269 return current_pipeline_state_;
270 }
272 {
273 return static_cast<MTLShaderInterface *>(this->interface);
274 }
276 {
277 return push_constant_data_;
278 }
279
280 /* Shader source generators from create-info.
281 * These aren't all used by Metal, as certain parts of source code generation
282 * for shader entry-points and resource mapping occur during `finalize`. */
283 std::string resources_declare(const shader::ShaderCreateInfo &info) const override;
284 std::string vertex_interface_declare(const shader::ShaderCreateInfo &info) const override;
285 std::string fragment_interface_declare(const shader::ShaderCreateInfo &info) const override;
286 std::string geometry_interface_declare(const shader::ShaderCreateInfo &info) const override;
287 std::string geometry_layout_declare(const shader::ShaderCreateInfo &info) const override;
288 std::string compute_layout_declare(const shader::ShaderCreateInfo &info) const override;
289
290 void bind(const shader::SpecializationConstants *constants_state) override;
291 void unbind() override;
292
293 void uniform_float(int location, int comp_len, int array_size, const float *data) override;
294 void uniform_int(int location, int comp_len, int array_size, const int *data) override;
296 void push_constant_bindstate_mark_dirty(bool is_dirty);
297
298 /* Metal shader properties and source mapping. */
299 void set_vertex_function_name(NSString *vetex_function_name);
300 void set_fragment_function_name(NSString *fragment_function_name);
301 void set_compute_function_name(NSString *compute_function_name);
302 void shader_source_from_msl(NSString *input_vertex_source, NSString *input_fragment_source);
303 void shader_compute_source_from_msl(NSString *input_compute_source);
305
307 MTLPrimitiveTopologyClass prim_type);
309 MTLContext *ctx,
310 MTLPrimitiveTopologyClass prim_type,
311 const MTLRenderPipelineStateDescriptor &pipeline_descriptor);
312
314 MTLContext *ctx, MTLComputePipelineStateDescriptor &compute_pipeline_descriptor);
315
317 {
318 return compute_pso_common_state_;
319 }
320
321 private:
322 /* Generate MSL shader from GLSL source. */
323 bool generate_msl_from_glsl(const shader::ShaderCreateInfo *info);
324 bool generate_msl_from_glsl_compute(const shader::ShaderCreateInfo *info);
325
326 MEM_CXX_CLASS_ALLOC_FUNCS("MTLShader");
327};
328
330 public:
332
333 Shader *compile_shader(const shader::ShaderCreateInfo &info) override;
334 void specialize_shader(ShaderSpecialization &specialization) override;
335};
336
337/* Vertex format conversion.
338 * Determines whether it is possible to resize a vertex attribute type
339 * during input assembly. A conversion is implied by the difference
340 * between the input vertex descriptor (from MTLBatch/MTLImmediate)
341 * and the type specified in the shader source.
342 *
343 * e.g. vec3 to vec4 expansion, or vec4 to vec2 truncation.
344 * NOTE: Vector expansion will replace empty elements with the values
345 * (0,0,0,1).
346 *
347 * If implicit format resize is not possible, this function
348 * returns false.
349 *
350 * Implicitly supported conversions in Metal are described here:
351 * https://developer.apple.com/documentation/metal/mtlvertexattributedescriptor/1516081-format?language=objc
352 */
inline MTLVertexFormat format_resize_comp(MTLVertexFormat mtl_format, uint32_t components)
{
  /* Return the variant of `mtl_format` that keeps the same scalar base type but
   * has `components` (1-4) components, e.g. Float3 -> Float2.
   * Returns MTLVertexFormatInvalid for formats that are not resizable or for an
   * unsupported component count. */
#define RESIZE_TYPE(_type, _suffix) \
  case MTLVertexFormat##_type##_suffix: \
  case MTLVertexFormat##_type##2##_suffix: \
  case MTLVertexFormat##_type##3##_suffix: \
  case MTLVertexFormat##_type##4##_suffix: \
    switch (components) { \
      case 1: \
        return MTLVertexFormat##_type##_suffix; \
      case 2: \
        return MTLVertexFormat##_type##2##_suffix; \
      case 3: \
        return MTLVertexFormat##_type##3##_suffix; \
      case 4: \
        return MTLVertexFormat##_type##4##_suffix; \
    } \
    break;

  switch (mtl_format) {
    /* NOTE(review): relative to the macro's coverage, invocations for the plain
     * (non-normalized) Char/Short/Int/Float families appear to be missing from
     * this extracted listing — verify against the upstream file. */
    RESIZE_TYPE(Char, Normalized)
    RESIZE_TYPE(UChar, )
    RESIZE_TYPE(UChar, Normalized)
    RESIZE_TYPE(Short, Normalized)
    RESIZE_TYPE(UShort, )
    RESIZE_TYPE(UShort, Normalized)
    RESIZE_TYPE(UInt, )
    RESIZE_TYPE(Half, )
    default:
      /* Can only call this function on format that can be resized. */
      /* NOTE(review): an unreachable-assert statement appears to have been
       * dropped here by the extraction — confirm upstream. */
      break;
  }

#undef RESIZE_TYPE
  return MTLVertexFormatInvalid;
}
394
/* Reduce `mtl_format` to its scalar (single-component) base format,
 * e.g. Float3 -> Float. Yields MTLVertexFormatInvalid for non-resizable
 * formats, mirroring `format_resize_comp`. */
inline MTLVertexFormat format_get_component_type(MTLVertexFormat mtl_format)
{
  const MTLVertexFormat scalar_format = format_resize_comp(mtl_format, 1);
  return scalar_format;
}
399
/* Map a GPU vertex attribute description (component type, fetch mode and
 * component count) onto a native MTLVertexFormat, or return
 * MTLVertexFormatInvalid when Metal has no direct equivalent and a manual
 * conversion is required. */
inline MTLVertexFormat to_mtl(GPUVertCompType component_type,
                              GPUVertFetchMode fetch_mode,
                              uint32_t component_len)
{
  /* Expand to a `return` of the 1/2/3/4-component variant of `_type`. */
#define FORMAT_PER_COMP(_type, _suffix) \
  switch (component_len) { \
    case 1: \
      return MTLVertexFormat##_type##_suffix; \
    case 2: \
      return MTLVertexFormat##_type##2##_suffix; \
    case 3: \
      return MTLVertexFormat##_type##3##_suffix; \
    case 4: \
      return MTLVertexFormat##_type##4##_suffix; \
    default: \
      BLI_assert_msg(0, "Invalid attribute component count"); \
      break; \
  } \
  break;

  /* 8/16-bit integer types: both raw-int and int-to-normalized-float fetch are
   * supported natively. */
#define FORMAT_PER_COMP_SMALL_INT(_type) \
  switch (fetch_mode) { \
    case GPU_FETCH_INT: \
      FORMAT_PER_COMP(_type, ) \
    case GPU_FETCH_INT_TO_FLOAT_UNIT: \
      FORMAT_PER_COMP(_type, Normalized) \
    case GPU_FETCH_FLOAT: \
      BLI_assert_msg(0, "Invalid fetch mode for integer attribute"); \
      break; \
  } \
  break;

  /* 32-bit integer types: no native normalized fetch; INT_TO_FLOAT_UNIT falls
   * through to the MTLVertexFormatInvalid return (manual conversion). */
#define FORMAT_PER_COMP_INT(_type) \
  switch (fetch_mode) { \
    case GPU_FETCH_INT: \
      FORMAT_PER_COMP(_type, ) \
    case GPU_FETCH_FLOAT: \
      BLI_assert_msg(0, "Invalid fetch mode for integer attribute"); \
      break; \
    case GPU_FETCH_INT_TO_FLOAT_UNIT: \
      /* Fallback to manual conversion */ \
      break; \
  } \
  break;

  switch (component_type) {
    /* NOTE(review): the FORMAT_PER_COMP_* macro invocations that normally
     * follow each GPU_COMP_* label (e.g. FORMAT_PER_COMP_SMALL_INT(Char) after
     * GPU_COMP_I8, FORMAT_PER_COMP(Float, ) after GPU_FETCH_FLOAT) are missing
     * from this extracted listing — verify against the upstream file before
     * relying on this body. */
    case GPU_COMP_I8:
    case GPU_COMP_U8:
    case GPU_COMP_I16:
    case GPU_COMP_U16:
    case GPU_COMP_I32:
    case GPU_COMP_U32:
    case GPU_COMP_F32:
      switch (fetch_mode) {
        case GPU_FETCH_FLOAT:
          break;
        case GPU_FETCH_INT:
          BLI_assert_msg(0, "Invalid fetch mode for float attribute");
          break;
      }
    case GPU_COMP_I10:
      /* Packed 10_10_10_2; only normalized-float fetch is representable. */
      switch (fetch_mode) {
          return MTLVertexFormatInt1010102Normalized;
        case GPU_FETCH_FLOAT:
        case GPU_FETCH_INT:
          BLI_assert_msg(0, "Invalid fetch mode for compressed attribute");
          break;
      }
    case GPU_COMP_MAX:
      break;
  }
#undef FORMAT_PER_COMP
  /* Loading mode not natively supported. */
  return MTLVertexFormatInvalid;
}
485
/* Number of components carried by `format` (1-4), or -1 when the format is
 * invalid or not handled here. */
inline int mtl_format_component_len(MTLVertexFormat format)
{
  /* Each expansion lists every scalar base type at one component width. */
#define COMPONENT_COUNT_CASES(_comp, _count) \
  case MTLVertexFormatChar##_comp: \
  case MTLVertexFormatChar##_comp##Normalized: \
  case MTLVertexFormatUChar##_comp: \
  case MTLVertexFormatUChar##_comp##Normalized: \
  case MTLVertexFormatShort##_comp: \
  case MTLVertexFormatShort##_comp##Normalized: \
  case MTLVertexFormatUShort##_comp: \
  case MTLVertexFormatUShort##_comp##Normalized: \
  case MTLVertexFormatInt##_comp: \
  case MTLVertexFormatUInt##_comp: \
  case MTLVertexFormatHalf##_comp: \
  case MTLVertexFormatFloat##_comp: \
    return _count;

  switch (format) {
    COMPONENT_COUNT_CASES(, 1)
    COMPONENT_COUNT_CASES(2, 2)
    COMPONENT_COUNT_CASES(3, 3)
    COMPONENT_COUNT_CASES(4, 4)
    /* Packed 32-bit formats always expose four components. */
    case MTLVertexFormatUInt1010102Normalized:
    case MTLVertexFormatInt1010102Normalized:
    case MTLVertexFormatUChar4Normalized_BGRA:
      return 4;
#if defined(MAC_OS_VERSION_14_0)
    /* Packed three-component float formats (macOS 14+ SDK only). */
    case MTLVertexFormatFloatRG11B10:
    case MTLVertexFormatFloatRGB9E5:
      return 3;
#endif
    case MTLVertexFormatInvalid:
      return -1;
  }

#undef COMPONENT_COUNT_CASES
  /* Unhandled format. */
  return -1;
}
525
/* Return true when `format` is one of the `*Normalized` integer vertex
 * formats, i.e. integer data that Metal exposes to the shader as normalized
 * floating-point values. */
inline bool mtl_format_is_normalized(MTLVertexFormat format)
{
#define FORMAT_PER_TYPE(_comp) \
  case MTLVertexFormatChar##_comp##Normalized: \
  case MTLVertexFormatUChar##_comp##Normalized: \
  case MTLVertexFormatShort##_comp##Normalized: \
  case MTLVertexFormatUShort##_comp##Normalized: \
    return true;

  switch (format) {
    /* NOTE(review): the FORMAT_PER_TYPE(...) invocations that should populate
     * this switch are missing from this extracted listing — as shown, every
     * format would fall through to `false`. Verify against the upstream file. */
    default:
      break;
  }

#undef FORMAT_PER_TYPE
  return false;
}
547
/* Resolve the MTLVertexFormat used to feed a shader input of type
 * `shader_attr_format` from a vertex buffer described by `component_type`,
 * `component_len` and `fetch_mode`.
 * Returns MTLVertexFormatInvalid when no native fetch exists; otherwise
 * returns the vertex-side format, asserting (debug builds) that Metal's
 * implicit attribute-conversion rules permit the cast to the shader type. */
inline MTLVertexFormat mtl_convert_vertex_format_ex(MTLVertexFormat shader_attr_format,
                                                    GPUVertCompType component_type,
                                                    uint32_t component_len,
                                                    GPUVertFetchMode fetch_mode)
{
  const MTLVertexFormat vertex_attr_format = to_mtl(component_type, fetch_mode, component_len);

  /* Either no native format exists for this attribute description, or it
   * already matches the shader exactly: nothing further to validate. */
  if (vertex_attr_format == MTLVertexFormatInvalid ||
      vertex_attr_format == shader_attr_format)
  {
    return vertex_attr_format;
  }

  /* Packed 10_10_10_2 data must feed a float shader input. */
  if (vertex_attr_format == MTLVertexFormatInt1010102Normalized) {
    BLI_assert_msg(format_get_component_type(shader_attr_format) == MTLVertexFormatFloat,
                   "Vertex format is GPU_COMP_I10 but shader input is not float");
    return vertex_attr_format;
  }

  /* Attribute type mismatch: compare scalar base types to decide whether an
   * implicit cast is supported. */
  const MTLVertexFormat shader_scalar_type = format_get_component_type(shader_attr_format);
  const MTLVertexFormat vertex_scalar_type = format_get_component_type(vertex_attr_format);

  if (shader_scalar_type == vertex_scalar_type) {
    /* Only the vector length differs; Metal resizes implicitly. */
    return vertex_attr_format;
  }

  if (shader_scalar_type != MTLVertexFormatFloat) {
    BLI_assert_msg(vertex_scalar_type != MTLVertexFormatFloat,
                   "Vertex format is GPU_COMP_F32 but shader input is not float");
  }
  else {
    /* Only normalized integer formats may be cast to float/half inputs. */
    BLI_assert_msg(mtl_format_is_normalized(vertex_scalar_type),
                   "Vertex format is INT_TO_FLOAT_UNIT but shader input is not float");
  }
  /* Integer casts must preserve signedness: signed data to signed inputs,
   * unsigned data to unsigned inputs. */
  if (shader_scalar_type == MTLVertexFormatInt) {
    BLI_assert_msg(ELEM(vertex_scalar_type, MTLVertexFormatChar, MTLVertexFormatShort),
                   "Vertex format is either I8 or I16 but shader input is not float");
  }
  if (shader_scalar_type == MTLVertexFormatUInt) {
    BLI_assert_msg(ELEM(vertex_scalar_type, MTLVertexFormatUChar, MTLVertexFormatUShort),
                   "Vertex format is either U8 or U16 but shader input is not float");
  }
  /* Valid automatic conversion. */
  return vertex_attr_format;
}
616
/* Convenience wrapper around `mtl_convert_vertex_format_ex`: writes the
 * resolved format to `r_convertedFormat` and returns whether a valid
 * (native or implicitly convertible) format was found. */
inline bool mtl_convert_vertex_format(MTLVertexFormat shader_attr_format,
                                      GPUVertCompType component_type,
                                      uint32_t component_len,
                                      GPUVertFetchMode fetch_mode,
                                      MTLVertexFormat *r_convertedFormat)
{
  const MTLVertexFormat converted = mtl_convert_vertex_format_ex(
      shader_attr_format, component_type, component_len, fetch_mode);
  *r_convertedFormat = converted;
  return converted != MTLVertexFormatInvalid;
}
627
628} // namespace blender::gpu
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:53
unsigned int uint
#define ELEM(...)
GPUVertFetchMode
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT_TO_FLOAT_UNIT
@ GPU_FETCH_INT
GPUVertCompType
@ GPU_COMP_U16
@ GPU_COMP_MAX
@ GPU_COMP_I10
@ GPU_COMP_F32
@ GPU_COMP_I32
@ GPU_COMP_I8
@ GPU_COMP_U32
@ GPU_COMP_I16
@ GPU_COMP_U8
Read Guarded memory(de)allocation.
BMesh const char void * data
void init()
Shader * compile_shader(const shader::ShaderCreateInfo &info) override
void specialize_shader(ShaderSpecialization &specialization) override
void set_fragment_function_name(NSString *fragment_function_name)
void warm_cache(int limit) override
void geometry_shader_from_glsl(MutableSpan< StringRefNull > sources) override
MTLRenderPipelineStateInstance * bake_pipeline_state(MTLContext *ctx, MTLPrimitiveTopologyClass prim_type, const MTLRenderPipelineStateDescriptor &pipeline_descriptor)
const MTLComputePipelineStateCommon & get_compute_common_state()
std::string vertex_interface_declare(const shader::ShaderCreateInfo &info) const override
MTLComputePipelineStateInstance * bake_compute_pipeline_state(MTLContext *ctx, MTLComputePipelineStateDescriptor &compute_pipeline_descriptor)
std::string geometry_interface_declare(const shader::ShaderCreateInfo &info) const override
bool finalize_compute(const shader::ShaderCreateInfo *info)
void shader_compute_source_from_msl(NSString *input_compute_source)
void uniform_int(int location, int comp_len, int array_size, const int *data) override
void bind(const shader::SpecializationConstants *constants_state) override
bool finalize(const shader::ShaderCreateInfo *info=nullptr) override
void fragment_shader_from_glsl(MutableSpan< StringRefNull > sources) override
std::string compute_layout_declare(const shader::ShaderCreateInfo &info) const override
std::string fragment_interface_declare(const shader::ShaderCreateInfo &info) const override
void compute_shader_from_glsl(MutableSpan< StringRefNull > sources) override
void set_vertex_function_name(NSString *vetex_function_name)
void shader_source_from_msl(NSString *input_vertex_source, NSString *input_fragment_source)
void unbind() override
MTLShaderInterface * get_interface()
void uniform_float(int location, int comp_len, int array_size, const float *data) override
void vertex_shader_from_glsl(MutableSpan< StringRefNull > sources) override
MTLRenderPipelineStateInstance * bake_current_pipeline_state(MTLContext *ctx, MTLPrimitiveTopologyClass prim_type)
MTLShader(MTLContext *ctx, const char *name)
Definition mtl_shader.mm:71
std::string geometry_layout_declare(const shader::ShaderCreateInfo &info) const override
std::string resources_declare(const shader::ShaderCreateInfo &info) const override
MTLRenderPipelineStateDescriptor & get_current_pipeline_state()
void set_interface(MTLShaderInterface *interface)
void push_constant_bindstate_mark_dirty(bool is_dirty)
void set_compute_function_name(NSString *compute_function_name)
ShaderCompiler(uint32_t threads_count=1, GPUWorker::ContextType context_type=GPUWorker::ContextType::PerThread, bool support_specializations=false)
ShaderInterface * interface
Shader(const char *name)
Definition gpu_shader.cc:57
format
#define FORMAT_PER_COMP_INT(_type)
#define RESIZE_TYPE(_type, _suffix)
#define FORMAT_PER_TYPE(_comp, _value)
#define FORMAT_PER_COMP_SMALL_INT(_type)
#define FORMAT_PER_COMP(_type, _suffix)
MTLVertexFormat format_resize_comp(MTLVertexFormat mtl_format, uint32_t components)
MTLVertexFormat format_get_component_type(MTLVertexFormat mtl_format)
bool mtl_convert_vertex_format(MTLVertexFormat shader_attr_format, GPUVertCompType component_type, uint32_t component_len, GPUVertFetchMode fetch_mode, MTLVertexFormat *r_convertedFormat)
MTLVertexFormat to_mtl(GPUVertCompType component_type, GPUVertFetchMode fetch_mode, uint32_t component_len)
MTLVertexFormat mtl_convert_vertex_format_ex(MTLVertexFormat shader_attr_format, GPUVertCompType component_type, uint32_t component_len, GPUVertFetchMode fetch_mode)
bool mtl_format_is_normalized(MTLVertexFormat format)
int mtl_format_component_len(MTLVertexFormat format)
void set_compute_workgroup_size(int workgroup_size_x, int workgroup_size_y, int workgroup_size_z)
blender::Vector< MTLBufferArgumentData > buffer_bindings_reflection_data_frag
Definition mtl_shader.hh:98
blender::Vector< MTLBufferArgumentData > buffer_bindings_reflection_data_vert
Definition mtl_shader.hh:97
Describe inputs & outputs, stage interfaces, resources and sources of a shader. If all data is correc...