Blender V4.3
mtl_shader.hh
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
9#pragma once
10
11#include "MEM_guardedalloc.h"
12
13#include "GPU_batch.hh"
14#include "GPU_capabilities.hh"
15#include "GPU_shader.hh"
16#include "GPU_vertex_format.hh"
17
18#include <Metal/Metal.h>
19#include <QuartzCore/QuartzCore.h>
20#include <functional>
21#include <unordered_map>
22
23#include <deque>
24#include <mutex>
25#include <thread>
26
27#include "mtl_framebuffer.hh"
29#include "mtl_shader_shared.h"
30#include "mtl_state.hh"
31#include "mtl_texture.hh"
32
34#include "gpu_shader_private.hh"
35
36namespace blender::gpu {
37
38class MTLShaderInterface;
39class MTLContext;
40
41/* Debug control. */
42#define MTL_SHADER_DEBUG_EXPORT_SOURCE 0
43#define MTL_SHADER_TRANSLATION_DEBUG_OUTPUT 0
44
45/* Separate print used only during development and debugging. */
46#if MTL_SHADER_TRANSLATION_DEBUG_OUTPUT
47# define shader_debug_printf printf
48#else
49# define shader_debug_printf(...) /* Null print. */
50#endif
51
52/* Offset base specialization constant ID for function constants declared in CreateInfo. */
53#define MTL_SHADER_SPECIALIZATION_CONSTANT_BASE_ID 30
54/* Maximum threshold for specialized shader variant count.
55 * This is a catch-all to prevent excessive PSO permutations from being created and also catch
56 * parameters which should ideally not be used for specialization. */
57#define MTL_SHADER_MAX_SPECIALIZED_PSOS 5
58
59/* Desired reflection data for a buffer binding. */
66
67/* Metal Render Pipeline State Instance. */
69 /* Function instances with specialization.
70 * Required for argument encoder construction. */
71 id<MTLFunction> vert;
72 id<MTLFunction> frag;
73
74 /* PSO handle. */
75 id<MTLRenderPipelineState> pso;
76
78 /* Unique index for PSO variant. */
80 /* Base bind index for binding uniform buffers, offset based on other
81 * bound buffers such as vertex buffers, as the count can vary. */
83 /* Base bind index for binding storage buffers. */
85 /* buffer bind slot used for null attributes (-1 if not needed). */
87 /* buffer bind used for transform feedback output buffer. */
89 /* Topology class. */
90 MTLPrimitiveTopologyClass prim_type;
91
101};
102
103/* Common compute pipeline state. */
105
106 /* Thread-group information is common for all PSO variants. */
110
111 inline void set_compute_workgroup_size(int workgroup_size_x,
112 int workgroup_size_y,
113 int workgroup_size_z)
114 {
115 this->threadgroup_x_len = workgroup_size_x;
116 this->threadgroup_y_len = workgroup_size_y;
117 this->threadgroup_z_len = workgroup_size_z;
118 }
119};
120
121/* Metal Compute Pipeline State instance per PSO. */
123
125 /* Unique index for PSO variant. */
127 /* Base bind index for binding uniform buffers, offset based on other
128 * bound buffers such as vertex buffers, as the count can vary. */
130 /* Base bind index for binding storage buffers. */
132
133 /* Function instances with specialization.
134 * Required for argument encoder construction. */
135 id<MTLFunction> compute = nil;
136 /* PSO handle. */
137 id<MTLComputePipelineState> pso = nil;
138};
139
140/* #MTLShaderBuilder source wrapper used during initial compilation. */
142 NSString *msl_source_vert_ = @"";
143 NSString *msl_source_frag_ = @"";
144 NSString *msl_source_compute_ = @"";
145
146 /* Generated GLSL source used during compilation. */
147 std::string glsl_vertex_source_ = "";
148 std::string glsl_fragment_source_ = "";
149 std::string glsl_compute_source_ = "";
150
151 /* Indicates whether source code has been provided via MSL directly. */
152 bool source_from_msl_ = false;
153};
154
168class MTLShader : public Shader {
171
172 public:
173 /* Cached SSBO vertex fetch attribute uniform locations. */
179
180 private:
181 /* Context Handle. */
182 MTLContext *context_ = nullptr;
183
185 /* Transform feedback mode. */
186 eGPUShaderTFBType transform_feedback_type_ = GPU_SHADER_TFB_NONE;
187 /* Transform feedback outputs written to TFB buffer. */
188 blender::Vector<std::string> tf_output_name_list_;
189 /* Whether transform feedback is currently active. */
190 bool transform_feedback_active_ = false;
191 /* Vertex buffer to write transform feedback data into. */
192 VertBuf *transform_feedback_vertbuf_ = nullptr;
193
195 MTLShaderBuilder *shd_builder_ = nullptr;
196 NSString *vertex_function_name_ = @"";
197 NSString *fragment_function_name_ = @"";
198 NSString *compute_function_name_ = @"";
199
201 id<MTLLibrary> shader_library_vert_ = nil;
202 id<MTLLibrary> shader_library_frag_ = nil;
203 id<MTLLibrary> shader_library_compute_ = nil;
204 bool valid_ = false;
205
207 /* Metal API Descriptor used for creation of unique PSOs based on rendering state. */
208 MTLRenderPipelineDescriptor *pso_descriptor_ = nil;
209 /* Metal backend struct containing all high-level pipeline state parameters
210 * which contribute to instantiation of a unique PSO. */
211 MTLRenderPipelineStateDescriptor current_pipeline_state_;
212 /* Cache of compiled PipelineStateObjects. */
214 std::mutex pso_cache_lock_;
215
217 MTLComputePipelineStateCommon compute_pso_common_state_;
219 compute_pso_cache_;
220
221 /* True to enable multi-layered rendering support. */
222 bool uses_gpu_layer = false;
223
224 /* True to enable multi-viewport rendering support. */
225 bool uses_gpu_viewport_index = false;
226
228 /* Indicates whether to pass in VertexBuffer's as regular buffer bindings
229 * and perform vertex assembly manually, rather than using Stage-in.
230 * This is used to give a vertex shader full access to all of the
231 * vertex data.
232 * This is primarily used for optimization techniques and
233 * alternative solutions for Geometry-shaders which are unsupported
234 * by Metal. */
235 bool use_ssbo_vertex_fetch_mode_ = false;
  /* Output primitive type when rendering using ssbo_vertex_fetch. */
237 MTLPrimitiveType ssbo_vertex_fetch_output_prim_type_;
238
239 /* Output vertices per original vertex shader instance.
240 * This number will be multiplied by the number of input primitives
241 * from the source draw call. */
242 uint32_t ssbo_vertex_fetch_output_num_verts_ = 0;
243
244 bool ssbo_vertex_attribute_bind_active_ = false;
245 int ssbo_vertex_attribute_bind_mask_ = 0;
246 bool ssbo_vbo_slot_used_[MTL_SSBO_VERTEX_FETCH_MAX_VBOS];
247
248 struct ShaderSSBOAttributeBinding {
249 int attribute_index = -1;
250 int uniform_stride;
251 int uniform_offset;
252 int uniform_fetchmode;
253 int uniform_vbo_id;
254 int uniform_attr_type;
255 };
256 ShaderSSBOAttributeBinding cached_ssbo_attribute_bindings_[MTL_MAX_VERTEX_INPUT_ATTRIBUTES] = {};
257
258 /* Metal Shader Uniform data store.
   * This block is used to store the current shader push_constant
260 * data before it is submitted to the GPU. This is currently
261 * stored per shader instance, though depending on GPU module
262 * functionality, this could potentially be a global data store.
263 * This data is associated with the PushConstantBlock, which is
264 * always at index zero in the UBO list. */
265 void *push_constant_data_ = nullptr;
266 bool push_constant_modified_ = false;
267
268 /* Special definition for Max TotalThreadsPerThreadgroup tuning. */
269 uint maxTotalThreadsPerThreadgroup_Tuning_ = 0;
270
271 /* Set to true when batch compiling */
272 bool async_compilation_ = false;
273
274 bool finalize_shader(const shader::ShaderCreateInfo *info = nullptr);
275
276 public:
277 MTLShader(MTLContext *ctx, const char *name);
278 MTLShader(MTLContext *ctx,
279 MTLShaderInterface *interface,
280 const char *name,
281 NSString *input_vertex_source,
282 NSString *input_fragment_source,
283 NSString *vertex_function_name_,
284 NSString *fragment_function_name_);
285 ~MTLShader();
286
287 void init(const shader::ShaderCreateInfo & /*info*/, bool is_batch_compilation) override;
288
289 /* Assign GLSL source. */
294
295 /* Compile and build - Return true if successful. */
296 bool finalize(const shader::ShaderCreateInfo *info = nullptr) override;
298 void warm_cache(int limit) override;
299
300 /* Utility. */
  /* Whether the shader was built successfully and is safe to bind.
   * NOTE(review): `valid_` appears to be set during `finalize` -- confirm. */
  bool is_valid()
  {
    return valid_;
  }
306 {
307 return (shader_library_compute_ != nil);
308 }
310 {
311 return (parent_shader_ != nil);
312 }
314 {
315 return current_pipeline_state_;
316 }
318 {
319 return static_cast<MTLShaderInterface *>(this->interface);
320 }
322 {
323 return push_constant_data_;
324 }
325
326 /* Shader source generators from create-info.
327 * These aren't all used by Metal, as certain parts of source code generation
328 * for shader entry-points and resource mapping occur during `finalize`. */
329 std::string resources_declare(const shader::ShaderCreateInfo &info) const override;
330 std::string vertex_interface_declare(const shader::ShaderCreateInfo &info) const override;
331 std::string fragment_interface_declare(const shader::ShaderCreateInfo &info) const override;
332 std::string geometry_interface_declare(const shader::ShaderCreateInfo &info) const override;
333 std::string geometry_layout_declare(const shader::ShaderCreateInfo &info) const override;
334 std::string compute_layout_declare(const shader::ShaderCreateInfo &info) const override;
335
337 const eGPUShaderTFBType geom_type) override;
338 bool transform_feedback_enable(VertBuf *buf) override;
339 void transform_feedback_disable() override;
340
341 void bind() override;
342 void unbind() override;
343
344 void uniform_float(int location, int comp_len, int array_size, const float *data) override;
345 void uniform_int(int location, int comp_len, int array_size, const int *data) override;
347 void push_constant_bindstate_mark_dirty(bool is_dirty);
348
349 /* SSBO vertex fetch draw parameters. */
  bool get_uses_ssbo_vertex_fetch() const override
  {
    /* True when vertex buffers are bound as regular buffers and vertex
     * assembly is performed manually in the shader instead of via Stage-in
     * (used e.g. as an alternative to unsupported geometry shaders). */
    return use_ssbo_vertex_fetch_mode_;
  }
355 {
356 return ssbo_vertex_fetch_output_num_verts_;
357 }
358
  /* DEPRECATED: Kept only because of BGL API. (Returning -1 in METAL). */
  int program_handle_get() const override
  {
    /* Metal has no GL-style program handle equivalent. */
    return -1;
  }
364
366 {
367 return ssbo_vertex_fetch_output_prim_type_;
368 }
369 static int ssbo_vertex_type_to_attr_type(MTLVertexFormat attribute_type);
371
372 /* SSBO Vertex Bindings Utility functions. */
375 void ssbo_vertex_fetch_bind_attributes_end(id<MTLRenderCommandEncoder> active_encoder);
376
377 /* Metal shader properties and source mapping. */
378 void set_vertex_function_name(NSString *vetex_function_name);
379 void set_fragment_function_name(NSString *fragment_function_name);
380 void set_compute_function_name(NSString *compute_function_name);
381 void shader_source_from_msl(NSString *input_vertex_source, NSString *input_fragment_source);
382 void shader_compute_source_from_msl(NSString *input_compute_source);
384
386 MTLPrimitiveTopologyClass prim_type);
388 MTLContext *ctx,
389 MTLPrimitiveTopologyClass prim_type,
390 const MTLRenderPipelineStateDescriptor &pipeline_descriptor);
391
393 MTLContext *ctx, MTLComputePipelineStateDescriptor &compute_pipeline_descriptor);
394
396 {
397 return compute_pso_common_state_;
398 }
399 /* Transform Feedback. */
401 bool has_transform_feedback_varying(std::string str);
402
403 private:
404 /* Generate MSL shader from GLSL source. */
405 bool generate_msl_from_glsl(const shader::ShaderCreateInfo *info);
406 bool generate_msl_from_glsl_compute(const shader::ShaderCreateInfo *info);
407
408 MEM_CXX_CLASS_ALLOC_FUNCS("MTLShader");
409};
410
412 private:
413 enum ParallelWorkType {
414 PARALLELWORKTYPE_UNSPECIFIED,
415 PARALLELWORKTYPE_COMPILE_SHADER,
416 PARALLELWORKTYPE_BAKE_PSO,
417 };
418
419 struct ParallelWork {
420 const shader::ShaderCreateInfo *info = nullptr;
421 class MTLShaderCompiler *shader_compiler = nullptr;
422 MTLShader *shader = nullptr;
423 Vector<Shader::Constants::Value> specialization_values;
424
425 ParallelWorkType work_type = PARALLELWORKTYPE_UNSPECIFIED;
426 bool is_ready = false;
427 };
428
429 struct Batch {
431 bool is_ready = false;
432 };
433
434 std::mutex batch_mutex;
435 BatchHandle next_batch_handle = 1;
437
438 std::vector<std::thread> compile_threads;
439
440 volatile bool terminate_compile_threads;
441 std::condition_variable cond_var;
442 std::mutex queue_mutex;
443 std::deque<ParallelWork *> parallel_work_queue;
444
445 void parallel_compilation_thread_func(GPUContext *blender_gpu_context);
446 BatchHandle create_batch(size_t batch_size);
447 void add_item_to_batch(ParallelWork *work_item, BatchHandle batch_handle);
448 void add_parallel_item_to_queue(ParallelWork *add_parallel_item_to_queuework_item,
449 BatchHandle batch_handle);
450
451 std::atomic<int> ref_count = 1;
452
453 public:
456
460 bool batch_is_ready(BatchHandle handle);
462
465
467 {
468 ref_count++;
469 }
471 {
472 BLI_assert(ref_count > 0);
473 ref_count--;
474 }
476 {
477 return ref_count;
478 }
479};
480
482 private:
483 MTLParallelShaderCompiler *parallel_shader_compiler;
484
485 public:
487 virtual ~MTLShaderCompiler() override;
488
490 virtual bool batch_is_ready(BatchHandle handle) override;
491 virtual Vector<Shader *> batch_finalize(BatchHandle &handle) override;
492
494 Span<ShaderSpecialization> specializations) override;
495 virtual bool specialization_batch_is_ready(SpecializationBatchHandle &handle) override;
496
498};
499
500/* Vertex format conversion.
501 * Determines whether it is possible to resize a vertex attribute type
502 * during input assembly. A conversion is implied by the difference
503 * between the input vertex descriptor (from MTLBatch/MTLImmediate)
504 * and the type specified in the shader source.
505 *
506 * e.g. vec3 to vec4 expansion, or vec4 to vec2 truncation.
507 * NOTE: Vector expansion will replace empty elements with the values
508 * (0,0,0,1).
509 *
510 * If implicit format resize is not possible, this function
511 * returns false.
512 *
513 * Implicitly supported conversions in Metal are described here:
514 * https://developer.apple.com/documentation/metal/mtlvertexattributedescriptor/1516081-format?language=objc
515 */
516inline bool mtl_vertex_format_resize(MTLVertexFormat mtl_format,
517 uint32_t components,
518 MTLVertexFormat *r_convertedFormat)
519{
520 MTLVertexFormat out_vert_format = MTLVertexFormatInvalid;
521 switch (mtl_format) {
522 /* Char. */
523 case MTLVertexFormatChar:
524 case MTLVertexFormatChar2:
525 case MTLVertexFormatChar3:
526 case MTLVertexFormatChar4:
527 switch (components) {
528 case 1:
529 out_vert_format = MTLVertexFormatChar;
530 break;
531 case 2:
532 out_vert_format = MTLVertexFormatChar2;
533 break;
534 case 3:
535 out_vert_format = MTLVertexFormatChar3;
536 break;
537 case 4:
538 out_vert_format = MTLVertexFormatChar4;
539 break;
540 }
541 break;
542
543 /* Normalized Char. */
544 case MTLVertexFormatCharNormalized:
545 case MTLVertexFormatChar2Normalized:
546 case MTLVertexFormatChar3Normalized:
547 case MTLVertexFormatChar4Normalized:
548 switch (components) {
549 case 1:
550 out_vert_format = MTLVertexFormatCharNormalized;
551 break;
552 case 2:
553 out_vert_format = MTLVertexFormatChar2Normalized;
554 break;
555 case 3:
556 out_vert_format = MTLVertexFormatChar3Normalized;
557 break;
558 case 4:
559 out_vert_format = MTLVertexFormatChar4Normalized;
560 break;
561 }
562 break;
563
564 /* Unsigned Char. */
565 case MTLVertexFormatUChar:
566 case MTLVertexFormatUChar2:
567 case MTLVertexFormatUChar3:
568 case MTLVertexFormatUChar4:
569 switch (components) {
570 case 1:
571 out_vert_format = MTLVertexFormatUChar;
572 break;
573 case 2:
574 out_vert_format = MTLVertexFormatUChar2;
575 break;
576 case 3:
577 out_vert_format = MTLVertexFormatUChar3;
578 break;
579 case 4:
580 out_vert_format = MTLVertexFormatUChar4;
581 break;
582 }
583 break;
584
585 /* Normalized Unsigned char */
586 case MTLVertexFormatUCharNormalized:
587 case MTLVertexFormatUChar2Normalized:
588 case MTLVertexFormatUChar3Normalized:
589 case MTLVertexFormatUChar4Normalized:
590 switch (components) {
591 case 1:
592 out_vert_format = MTLVertexFormatUCharNormalized;
593 break;
594 case 2:
595 out_vert_format = MTLVertexFormatUChar2Normalized;
596 break;
597 case 3:
598 out_vert_format = MTLVertexFormatUChar3Normalized;
599 break;
600 case 4:
601 out_vert_format = MTLVertexFormatUChar4Normalized;
602 break;
603 }
604 break;
605
606 /* Short. */
607 case MTLVertexFormatShort:
608 case MTLVertexFormatShort2:
609 case MTLVertexFormatShort3:
610 case MTLVertexFormatShort4:
611 switch (components) {
612 case 1:
613 out_vert_format = MTLVertexFormatShort;
614 break;
615 case 2:
616 out_vert_format = MTLVertexFormatShort2;
617 break;
618 case 3:
619 out_vert_format = MTLVertexFormatShort3;
620 break;
621 case 4:
622 out_vert_format = MTLVertexFormatShort4;
623 break;
624 }
625 break;
626
627 /* Normalized Short. */
628 case MTLVertexFormatShortNormalized:
629 case MTLVertexFormatShort2Normalized:
630 case MTLVertexFormatShort3Normalized:
631 case MTLVertexFormatShort4Normalized:
632 switch (components) {
633 case 1:
634 out_vert_format = MTLVertexFormatShortNormalized;
635 break;
636 case 2:
637 out_vert_format = MTLVertexFormatShort2Normalized;
638 break;
639 case 3:
640 out_vert_format = MTLVertexFormatShort3Normalized;
641 break;
642 case 4:
643 out_vert_format = MTLVertexFormatShort4Normalized;
644 break;
645 }
646 break;
647
648 /* Unsigned Short. */
649 case MTLVertexFormatUShort:
650 case MTLVertexFormatUShort2:
651 case MTLVertexFormatUShort3:
652 case MTLVertexFormatUShort4:
653 switch (components) {
654 case 1:
655 out_vert_format = MTLVertexFormatUShort;
656 break;
657 case 2:
658 out_vert_format = MTLVertexFormatUShort2;
659 break;
660 case 3:
661 out_vert_format = MTLVertexFormatUShort3;
662 break;
663 case 4:
664 out_vert_format = MTLVertexFormatUShort4;
665 break;
666 }
667 break;
668
669 /* Normalized Unsigned Short. */
670 case MTLVertexFormatUShortNormalized:
671 case MTLVertexFormatUShort2Normalized:
672 case MTLVertexFormatUShort3Normalized:
673 case MTLVertexFormatUShort4Normalized:
674 switch (components) {
675 case 1:
676 out_vert_format = MTLVertexFormatUShortNormalized;
677 break;
678 case 2:
679 out_vert_format = MTLVertexFormatUShort2Normalized;
680 break;
681 case 3:
682 out_vert_format = MTLVertexFormatUShort3Normalized;
683 break;
684 case 4:
685 out_vert_format = MTLVertexFormatUShort4Normalized;
686 break;
687 }
688 break;
689
690 /* Integer. */
691 case MTLVertexFormatInt:
692 case MTLVertexFormatInt2:
693 case MTLVertexFormatInt3:
694 case MTLVertexFormatInt4:
695 switch (components) {
696 case 1:
697 out_vert_format = MTLVertexFormatInt;
698 break;
699 case 2:
700 out_vert_format = MTLVertexFormatInt2;
701 break;
702 case 3:
703 out_vert_format = MTLVertexFormatInt3;
704 break;
705 case 4:
706 out_vert_format = MTLVertexFormatInt4;
707 break;
708 }
709 break;
710
711 /* Unsigned Integer. */
712 case MTLVertexFormatUInt:
713 case MTLVertexFormatUInt2:
714 case MTLVertexFormatUInt3:
715 case MTLVertexFormatUInt4:
716 switch (components) {
717 case 1:
718 out_vert_format = MTLVertexFormatUInt;
719 break;
720 case 2:
721 out_vert_format = MTLVertexFormatUInt2;
722 break;
723 case 3:
724 out_vert_format = MTLVertexFormatUInt3;
725 break;
726 case 4:
727 out_vert_format = MTLVertexFormatUInt4;
728 break;
729 }
730 break;
731
732 /* Half. */
733 case MTLVertexFormatHalf:
734 case MTLVertexFormatHalf2:
735 case MTLVertexFormatHalf3:
736 case MTLVertexFormatHalf4:
737 switch (components) {
738 case 1:
739 out_vert_format = MTLVertexFormatHalf;
740 break;
741 case 2:
742 out_vert_format = MTLVertexFormatHalf2;
743 break;
744 case 3:
745 out_vert_format = MTLVertexFormatHalf3;
746 break;
747 case 4:
748 out_vert_format = MTLVertexFormatHalf4;
749 break;
750 }
751 break;
752
753 /* Float. */
754 case MTLVertexFormatFloat:
755 case MTLVertexFormatFloat2:
756 case MTLVertexFormatFloat3:
757 case MTLVertexFormatFloat4:
758 switch (components) {
759 case 1:
760 out_vert_format = MTLVertexFormatFloat;
761 break;
762 case 2:
763 out_vert_format = MTLVertexFormatFloat2;
764 break;
765 case 3:
766 out_vert_format = MTLVertexFormatFloat3;
767 break;
768 case 4:
769 out_vert_format = MTLVertexFormatFloat4;
770 break;
771 }
772 break;
773
774 /* Other formats */
775 default:
776 out_vert_format = mtl_format;
777 break;
778 }
779 *r_convertedFormat = out_vert_format;
780 return out_vert_format != MTLVertexFormatInvalid;
781}
782
797inline bool mtl_convert_vertex_format(MTLVertexFormat shader_attrib_format,
798 GPUVertCompType component_type,
799 uint32_t component_length,
800 GPUVertFetchMode fetch_mode,
801 MTLVertexFormat *r_convertedFormat)
802{
803 bool normalized = (fetch_mode == GPU_FETCH_INT_TO_FLOAT_UNIT);
804 MTLVertexFormat out_vert_format = MTLVertexFormatInvalid;
805
806 switch (component_type) {
807
808 case GPU_COMP_I8:
809 switch (fetch_mode) {
810 case GPU_FETCH_INT:
811 if (shader_attrib_format == MTLVertexFormatChar ||
812 shader_attrib_format == MTLVertexFormatChar2 ||
813 shader_attrib_format == MTLVertexFormatChar3 ||
814 shader_attrib_format == MTLVertexFormatChar4)
815 {
816
817 /* No conversion Needed (as type matches) - Just a vector resize if needed. */
818 bool can_convert = mtl_vertex_format_resize(
819 shader_attrib_format, component_type, &out_vert_format);
820
821 /* Ensure format resize successful. */
822 BLI_assert(can_convert);
823 UNUSED_VARS_NDEBUG(can_convert);
824 }
825 else if (shader_attrib_format == MTLVertexFormatInt4 && component_length == 4) {
826 /* Allow type expansion - Shader expects MTLVertexFormatInt4, we can supply a type
827 * with fewer bytes if component count is the same. Sign must also match original type
828 * -- which is not a problem in this case. */
829 out_vert_format = MTLVertexFormatChar4;
830 }
831 else if (shader_attrib_format == MTLVertexFormatInt3 && component_length == 3) {
832 /* Same as above case for matching length and signage (Len=3). */
833 out_vert_format = MTLVertexFormatChar3;
834 }
835 else if (shader_attrib_format == MTLVertexFormatInt2 && component_length == 2) {
836 /* Same as above case for matching length and signage (Len=2). */
837 out_vert_format = MTLVertexFormatChar2;
838 }
839 else if (shader_attrib_format == MTLVertexFormatInt && component_length == 1) {
840 /* Same as above case for matching length and signage (Len=1). */
841 out_vert_format = MTLVertexFormatChar;
842 }
843 else if (shader_attrib_format == MTLVertexFormatInt && component_length == 4) {
844 /* Special case here, format has been specified as GPU_COMP_U8 with 4 components, which
845 * is equivalent to an Int -- so data will be compatible with the shader interface. */
846 out_vert_format = MTLVertexFormatInt;
847 }
848 else {
849 BLI_assert_msg(false,
850 "Source vertex data format is either Char, Char2, Char3, Char4 but "
851 "format in shader interface is NOT compatible.\n");
852 out_vert_format = MTLVertexFormatInvalid;
853 }
854 break;
855
856 /* Source vertex data is integer type, but shader interface type is floating point.
857 * If the input attribute is specified as normalized, we can convert. */
858 case GPU_FETCH_FLOAT:
861 if (normalized) {
862 switch (component_length) {
863 case 1:
864 out_vert_format = MTLVertexFormatCharNormalized;
865 break;
866 case 2:
867 out_vert_format = MTLVertexFormatChar2Normalized;
868 break;
869 case 3:
870 out_vert_format = MTLVertexFormatChar3Normalized;
871 break;
872 case 4:
873 out_vert_format = MTLVertexFormatChar4Normalized;
874 break;
875 default:
876 BLI_assert_msg(false, "invalid vertex format");
877 out_vert_format = MTLVertexFormatInvalid;
878 }
879 }
880 else {
881 /* Cannot convert. */
882 out_vert_format = MTLVertexFormatInvalid;
883 }
884 break;
885 }
886 break;
887
888 case GPU_COMP_U8:
889 switch (fetch_mode) {
890 /* Fetching INT: Check backing shader format matches source input. */
891 case GPU_FETCH_INT:
892 if (shader_attrib_format == MTLVertexFormatUChar ||
893 shader_attrib_format == MTLVertexFormatUChar2 ||
894 shader_attrib_format == MTLVertexFormatUChar3 ||
895 shader_attrib_format == MTLVertexFormatUChar4)
896 {
897
898 /* No conversion Needed (as type matches) - Just a vector resize if needed. */
899 bool can_convert = mtl_vertex_format_resize(
900 shader_attrib_format, component_length, &out_vert_format);
901
902 /* Ensure format resize successful. */
903 BLI_assert(can_convert);
904 UNUSED_VARS_NDEBUG(can_convert);
905 /* TODO(Metal): Add other format conversions if needed. Currently no attributes hit
906 * this path. */
907 }
908 else if (shader_attrib_format == MTLVertexFormatUInt4 && component_length == 4) {
909 /* Allow type expansion - Shader expects MTLVertexFormatUInt4, we can supply a type
910 * with fewer bytes if component count is the same. */
911 out_vert_format = MTLVertexFormatUChar4;
912 }
913 else if (shader_attrib_format == MTLVertexFormatUInt3 && component_length == 3) {
914 /* Same as above case for matching length and signage (Len=3). */
915 out_vert_format = MTLVertexFormatUChar3;
916 }
917 else if (shader_attrib_format == MTLVertexFormatUInt2 && component_length == 2) {
918 /* Same as above case for matching length and signage (Len=2). */
919 out_vert_format = MTLVertexFormatUChar2;
920 }
921 else if (shader_attrib_format == MTLVertexFormatUInt && component_length == 1) {
922 /* Same as above case for matching length and signage (Len=1). */
923 out_vert_format = MTLVertexFormatUChar;
924 }
925 else if (shader_attrib_format == MTLVertexFormatInt && component_length == 4) {
926 /* Special case here, format has been specified as GPU_COMP_U8 with 4 components, which
927 * is equivalent to an Int-- so data will be compatible with shader interface. */
928 out_vert_format = MTLVertexFormatInt;
929 }
930 else if (shader_attrib_format == MTLVertexFormatUInt && component_length == 4) {
931 /* Special case here, format has been specified as GPU_COMP_U8 with 4 components, which
932 * is equivalent to a UInt-- so data will be compatible with shader interface. */
933 out_vert_format = MTLVertexFormatUInt;
934 }
935 else {
936 BLI_assert_msg(false,
937 "Source vertex data format is either UChar, UChar2, UChar3, UChar4 but "
938 "format in shader interface is NOT compatible.\n");
939 out_vert_format = MTLVertexFormatInvalid;
940 }
941 break;
942
943 /* Source vertex data is integral type, but shader interface type is floating point.
944 * If the input attribute is specified as normalized, we can convert. */
945 case GPU_FETCH_FLOAT:
948 if (normalized) {
949 switch (component_length) {
950 case 1:
951 out_vert_format = MTLVertexFormatUCharNormalized;
952 break;
953 case 2:
954 out_vert_format = MTLVertexFormatUChar2Normalized;
955 break;
956 case 3:
957 out_vert_format = MTLVertexFormatUChar3Normalized;
958 break;
959 case 4:
960 out_vert_format = MTLVertexFormatUChar4Normalized;
961 break;
962 default:
963 BLI_assert_msg(false, "invalid vertex format");
964 out_vert_format = MTLVertexFormatInvalid;
965 }
966 }
967 else {
968 /* Cannot convert. */
969 out_vert_format = MTLVertexFormatInvalid;
970 }
971 break;
972 }
973 break;
974
975 case GPU_COMP_I16:
976 switch (fetch_mode) {
977 case GPU_FETCH_INT:
978 if (shader_attrib_format == MTLVertexFormatShort ||
979 shader_attrib_format == MTLVertexFormatShort2 ||
980 shader_attrib_format == MTLVertexFormatShort3 ||
981 shader_attrib_format == MTLVertexFormatShort4)
982 {
983 /* No conversion Needed (as type matches) - Just a vector resize if needed. */
984 bool can_convert = mtl_vertex_format_resize(
985 shader_attrib_format, component_length, &out_vert_format);
986
987 /* Ensure conversion successful. */
988 BLI_assert(can_convert);
989 UNUSED_VARS_NDEBUG(can_convert);
990 }
991 else {
992 BLI_assert_msg(false,
993 "Source vertex data format is either Short, Short2, Short3, Short4 but "
994 "format in shader interface is NOT compatible.\n");
995 out_vert_format = MTLVertexFormatInvalid;
996 }
997 break;
998
999 /* Source vertex data is integral type, but shader interface type is floating point.
1000 * If the input attribute is specified as normalized, we can convert. */
1001 case GPU_FETCH_FLOAT:
1004 if (normalized) {
1005 switch (component_length) {
1006 case 1:
1007 out_vert_format = MTLVertexFormatShortNormalized;
1008 break;
1009 case 2:
1010 out_vert_format = MTLVertexFormatShort2Normalized;
1011 break;
1012 case 3:
1013 out_vert_format = MTLVertexFormatShort3Normalized;
1014 break;
1015 case 4:
1016 out_vert_format = MTLVertexFormatShort4Normalized;
1017 break;
1018 default:
1019 BLI_assert_msg(false, "invalid vertex format");
1020 out_vert_format = MTLVertexFormatInvalid;
1021 }
1022 }
1023 else {
1024 /* Cannot convert. */
1025 out_vert_format = MTLVertexFormatInvalid;
1026 }
1027 break;
1028 }
1029 break;
1030
1031 case GPU_COMP_U16:
1032 switch (fetch_mode) {
1033 case GPU_FETCH_INT:
1034 if (shader_attrib_format == MTLVertexFormatUShort ||
1035 shader_attrib_format == MTLVertexFormatUShort2 ||
1036 shader_attrib_format == MTLVertexFormatUShort3 ||
1037 shader_attrib_format == MTLVertexFormatUShort4)
1038 {
1039 /* No conversion Needed (as type matches) - Just a vector resize if needed. */
1040 bool can_convert = mtl_vertex_format_resize(
1041 shader_attrib_format, component_length, &out_vert_format);
1042
1043 /* Ensure format resize successful. */
1044 BLI_assert(can_convert);
1045 UNUSED_VARS_NDEBUG(can_convert);
1046 }
1047 else {
1048 BLI_assert_msg(false,
1049 "Source vertex data format is either UShort, UShort2, UShort3, UShort4 "
1050 "but format in shader interface is NOT compatible.\n");
1051 out_vert_format = MTLVertexFormatInvalid;
1052 }
1053 break;
1054
1055 /* Source vertex data is integral type, but shader interface type is floating point.
1056 * If the input attribute is specified as normalized, we can convert. */
1057 case GPU_FETCH_FLOAT:
1060 if (normalized) {
1061 switch (component_length) {
1062 case 1:
1063 out_vert_format = MTLVertexFormatUShortNormalized;
1064 break;
1065 case 2:
1066 out_vert_format = MTLVertexFormatUShort2Normalized;
1067 break;
1068 case 3:
1069 out_vert_format = MTLVertexFormatUShort3Normalized;
1070 break;
1071 case 4:
1072 out_vert_format = MTLVertexFormatUShort4Normalized;
1073 break;
1074 default:
1075 BLI_assert_msg(false, "invalid vertex format");
1076 out_vert_format = MTLVertexFormatInvalid;
1077 }
1078 }
1079 else {
1080 /* Cannot convert. */
1081 out_vert_format = MTLVertexFormatInvalid;
1082 }
1083 break;
1084 }
1085 break;
1086
1087 case GPU_COMP_I32:
1088 switch (fetch_mode) {
1089 case GPU_FETCH_INT:
1090 if (shader_attrib_format == MTLVertexFormatInt ||
1091 shader_attrib_format == MTLVertexFormatInt2 ||
1092 shader_attrib_format == MTLVertexFormatInt3 ||
1093 shader_attrib_format == MTLVertexFormatInt4)
1094 {
1095 /* No conversion Needed (as type matches) - Just a vector resize if needed. */
1096 bool can_convert = mtl_vertex_format_resize(
1097 shader_attrib_format, component_length, &out_vert_format);
1098
1099 /* Verify conversion successful. */
1100 BLI_assert(can_convert);
1101 UNUSED_VARS_NDEBUG(can_convert);
1102 }
1103 else {
1104 BLI_assert_msg(false,
1105 "Source vertex data format is either Int, Int2, Int3, Int4 but format "
1106 "in shader interface is NOT compatible.\n");
1107 out_vert_format = MTLVertexFormatInvalid;
1108 }
1109 break;
1110 case GPU_FETCH_FLOAT:
1113 /* Unfortunately we cannot implicitly convert between Int and Float in METAL. */
1114 out_vert_format = MTLVertexFormatInvalid;
1115 break;
1116 }
1117 break;
1118
1119 case GPU_COMP_U32:
1120 switch (fetch_mode) {
1121 case GPU_FETCH_INT:
1122 if (shader_attrib_format == MTLVertexFormatUInt ||
1123 shader_attrib_format == MTLVertexFormatUInt2 ||
1124 shader_attrib_format == MTLVertexFormatUInt3 ||
1125 shader_attrib_format == MTLVertexFormatUInt4)
1126 {
1127 /* No conversion Needed (as type matches) - Just a vector resize if needed. */
1128 bool can_convert = mtl_vertex_format_resize(
1129 shader_attrib_format, component_length, &out_vert_format);
1130
1131 /* Verify conversion successful. */
1132 BLI_assert(can_convert);
1133 UNUSED_VARS_NDEBUG(can_convert);
1134 }
1135 else {
1136 BLI_assert_msg(false,
1137 "Source vertex data format is either UInt, UInt2, UInt3, UInt4 but "
1138 "format in shader interface is NOT compatible.\n");
1139 out_vert_format = MTLVertexFormatInvalid;
1140 }
1141 break;
1142 case GPU_FETCH_FLOAT:
1145 /* Unfortunately we cannot convert between UInt and Float in METAL */
1146 out_vert_format = MTLVertexFormatInvalid;
1147 break;
1148 }
1149 break;
1150
1151 case GPU_COMP_F32:
1152 switch (fetch_mode) {
1153
1154 /* Source data is float. This will be compatible
1155 * if type specified in shader is also float. */
1156 case GPU_FETCH_FLOAT:
1159 if (shader_attrib_format == MTLVertexFormatFloat ||
1160 shader_attrib_format == MTLVertexFormatFloat2 ||
1161 shader_attrib_format == MTLVertexFormatFloat3 ||
1162 shader_attrib_format == MTLVertexFormatFloat4)
1163 {
1164 /* No conversion Needed (as type matches) - Just a vector resize, if needed. */
1165 bool can_convert = mtl_vertex_format_resize(
1166 shader_attrib_format, component_length, &out_vert_format);
1167
1168 /* Verify conversion successful. */
1169 BLI_assert(can_convert);
1170 UNUSED_VARS_NDEBUG(can_convert);
1171 }
1172 else {
1173 BLI_assert_msg(false,
1174 "Source vertex data format is either Float, Float2, Float3, Float4 but "
1175 "format in shader interface is NOT compatible.\n");
1176 out_vert_format = MTLVertexFormatInvalid;
1177 }
1178 break;
1179
1180 case GPU_FETCH_INT:
1181 /* Unfortunately we cannot convert between Float and Int implicitly in METAL. */
1182 out_vert_format = MTLVertexFormatInvalid;
1183 break;
1184 }
1185 break;
1186
1187 case GPU_COMP_I10:
1188 out_vert_format = MTLVertexFormatInt1010102Normalized;
1189 break;
1190 case GPU_COMP_MAX:
1192 break;
1193 }
1194 *r_convertedFormat = out_vert_format;
1195 return (out_vert_format != MTLVertexFormatInvalid);
1196}
1197
1198inline uint comp_count_from_vert_format(MTLVertexFormat vert_format)
1199{
1200 switch (vert_format) {
1201 case MTLVertexFormatFloat:
1202 case MTLVertexFormatInt:
1203 case MTLVertexFormatUInt:
1204 case MTLVertexFormatShort:
1205 case MTLVertexFormatUChar:
1206 case MTLVertexFormatUCharNormalized:
1207 return 1;
1208 case MTLVertexFormatUChar2:
1209 case MTLVertexFormatUInt2:
1210 case MTLVertexFormatFloat2:
1211 case MTLVertexFormatInt2:
1212 case MTLVertexFormatUChar2Normalized:
1213 return 2;
1214 case MTLVertexFormatUChar3:
1215 case MTLVertexFormatUInt3:
1216 case MTLVertexFormatFloat3:
1217 case MTLVertexFormatInt3:
1218 case MTLVertexFormatShort3Normalized:
1219 case MTLVertexFormatUChar3Normalized:
1220 return 3;
1221 case MTLVertexFormatUChar4:
1222 case MTLVertexFormatFloat4:
1223 case MTLVertexFormatUInt4:
1224 case MTLVertexFormatInt4:
1225 case MTLVertexFormatUChar4Normalized:
1226 case MTLVertexFormatInt1010102Normalized:
1227
1228 default:
1229 BLI_assert_msg(false, "Unrecognized attribute type. Add types to switch as needed.");
1230 return 0;
1231 }
1232}
1233
1234inline GPUVertFetchMode fetchmode_from_vert_format(MTLVertexFormat vert_format)
1235{
1236 switch (vert_format) {
1237 case MTLVertexFormatFloat:
1238 case MTLVertexFormatFloat2:
1239 case MTLVertexFormatFloat3:
1240 case MTLVertexFormatFloat4:
1241 return GPU_FETCH_FLOAT;
1242
1243 case MTLVertexFormatUChar:
1244 case MTLVertexFormatUChar2:
1245 case MTLVertexFormatUChar3:
1246 case MTLVertexFormatUChar4:
1247 case MTLVertexFormatChar:
1248 case MTLVertexFormatChar2:
1249 case MTLVertexFormatChar3:
1250 case MTLVertexFormatChar4:
1251 case MTLVertexFormatUShort:
1252 case MTLVertexFormatUShort2:
1253 case MTLVertexFormatUShort3:
1254 case MTLVertexFormatUShort4:
1255 case MTLVertexFormatShort:
1256 case MTLVertexFormatShort2:
1257 case MTLVertexFormatShort3:
1258 case MTLVertexFormatShort4:
1259 case MTLVertexFormatUInt:
1260 case MTLVertexFormatUInt2:
1261 case MTLVertexFormatUInt3:
1262 case MTLVertexFormatUInt4:
1263 case MTLVertexFormatInt:
1264 case MTLVertexFormatInt2:
1265 case MTLVertexFormatInt3:
1266 case MTLVertexFormatInt4:
1267 return GPU_FETCH_INT;
1268
1269 case MTLVertexFormatUCharNormalized:
1270 case MTLVertexFormatUChar2Normalized:
1271 case MTLVertexFormatUChar3Normalized:
1272 case MTLVertexFormatUChar4Normalized:
1273 case MTLVertexFormatCharNormalized:
1274 case MTLVertexFormatChar2Normalized:
1275 case MTLVertexFormatChar3Normalized:
1276 case MTLVertexFormatChar4Normalized:
1277 case MTLVertexFormatUShortNormalized:
1278 case MTLVertexFormatUShort2Normalized:
1279 case MTLVertexFormatUShort3Normalized:
1280 case MTLVertexFormatUShort4Normalized:
1281 case MTLVertexFormatShortNormalized:
1282 case MTLVertexFormatShort2Normalized:
1283 case MTLVertexFormatShort3Normalized:
1284 case MTLVertexFormatShort4Normalized:
1285 case MTLVertexFormatInt1010102Normalized:
1287
1288 default:
1289 BLI_assert_msg(false, "Unrecognized attribute type. Add types to switch as needed.");
1290 return GPU_FETCH_FLOAT;
1291 }
1292}
1293
1294inline GPUVertCompType comp_type_from_vert_format(MTLVertexFormat vert_format)
1295{
1296 switch (vert_format) {
1297 case MTLVertexFormatUChar:
1298 case MTLVertexFormatUChar2:
1299 case MTLVertexFormatUChar3:
1300 case MTLVertexFormatUChar4:
1301 case MTLVertexFormatUCharNormalized:
1302 case MTLVertexFormatUChar2Normalized:
1303 case MTLVertexFormatUChar3Normalized:
1304 case MTLVertexFormatUChar4Normalized:
1305 return GPU_COMP_U8;
1306
1307 case MTLVertexFormatChar:
1308 case MTLVertexFormatChar2:
1309 case MTLVertexFormatChar3:
1310 case MTLVertexFormatChar4:
1311 case MTLVertexFormatCharNormalized:
1312 case MTLVertexFormatChar2Normalized:
1313 case MTLVertexFormatChar3Normalized:
1314 case MTLVertexFormatChar4Normalized:
1315 return GPU_COMP_I8;
1316
1317 case MTLVertexFormatShort:
1318 case MTLVertexFormatShort2:
1319 case MTLVertexFormatShort3:
1320 case MTLVertexFormatShort4:
1321 case MTLVertexFormatShortNormalized:
1322 case MTLVertexFormatShort2Normalized:
1323 case MTLVertexFormatShort3Normalized:
1324 case MTLVertexFormatShort4Normalized:
1325 return GPU_COMP_I16;
1326
1327 case MTLVertexFormatUShort:
1328 case MTLVertexFormatUShort2:
1329 case MTLVertexFormatUShort3:
1330 case MTLVertexFormatUShort4:
1331 case MTLVertexFormatUShortNormalized:
1332 case MTLVertexFormatUShort2Normalized:
1333 case MTLVertexFormatUShort3Normalized:
1334 case MTLVertexFormatUShort4Normalized:
1335 return GPU_COMP_U16;
1336
1337 case MTLVertexFormatInt:
1338 case MTLVertexFormatInt2:
1339 case MTLVertexFormatInt3:
1340 case MTLVertexFormatInt4:
1341 return GPU_COMP_I32;
1342
1343 case MTLVertexFormatUInt:
1344 case MTLVertexFormatUInt2:
1345 case MTLVertexFormatUInt3:
1346 case MTLVertexFormatUInt4:
1347 return GPU_COMP_U32;
1348
1349 case MTLVertexFormatFloat:
1350 case MTLVertexFormatFloat2:
1351 case MTLVertexFormatFloat3:
1352 case MTLVertexFormatFloat4:
1353 return GPU_COMP_F32;
1354
1355 case MTLVertexFormatInt1010102Normalized:
1356 return GPU_COMP_I10;
1357
1358 default:
1359 BLI_assert_msg(false, "Unrecognized attribute type. Add types to switch as needed.");
1360 return GPU_COMP_F32;
1361 }
1362}
1363
1364} // namespace blender::gpu
#define BLI_assert_unreachable()
Definition BLI_assert.h:97
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:57
unsigned int uint
#define UNUSED_VARS_NDEBUG(...)
int64_t BatchHandle
Definition GPU_shader.hh:68
eGPUShaderTFBType
@ GPU_SHADER_TFB_NONE
int64_t SpecializationBatchHandle
GPUVertFetchMode
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT_TO_FLOAT_UNIT
@ GPU_FETCH_INT
@ GPU_FETCH_INT_TO_FLOAT
GPUVertCompType
@ GPU_COMP_U16
@ GPU_COMP_MAX
@ GPU_COMP_I10
@ GPU_COMP_F32
@ GPU_COMP_I32
@ GPU_COMP_I8
@ GPU_COMP_U32
@ GPU_COMP_I16
@ GPU_COMP_U8
Read Guarded memory(de)allocation.
struct GPUContext GPUContext
void init()
SIMD_FORCE_INLINE btVector3 normalized() const
Return a normalized version of this vector.
bool specialization_batch_is_ready(SpecializationBatchHandle &handle)
Vector< Shader * > batch_finalize(BatchHandle &handle)
bool batch_is_ready(BatchHandle handle)
BatchHandle batch_compile(MTLShaderCompiler *shade_compiler, Span< const shader::ShaderCreateInfo * > &infos)
SpecializationBatchHandle precompile_specializations(Span< ShaderSpecialization > specializations)
virtual SpecializationBatchHandle precompile_specializations(Span< ShaderSpecialization > specializations) override
virtual Vector< Shader * > batch_finalize(BatchHandle &handle) override
virtual ~MTLShaderCompiler() override
virtual bool specialization_batch_is_ready(SpecializationBatchHandle &handle) override
virtual BatchHandle batch_compile(Span< const shader::ShaderCreateInfo * > &infos) override
virtual bool batch_is_ready(BatchHandle handle) override
void ssbo_vertex_fetch_bind_attributes_begin()
void set_fragment_function_name(NSString *fragment_function_name)
bool transform_feedback_enable(VertBuf *buf) override
void warm_cache(int limit) override
void ssbo_vertex_fetch_bind_attribute(const MTLSSBOAttribute &ssbo_attr)
void transform_feedback_names_set(Span< const char * > name_list, const eGPUShaderTFBType geom_type) override
int get_ssbo_vertex_fetch_output_num_verts() const override
MTLRenderPipelineStateInstance * bake_pipeline_state(MTLContext *ctx, MTLPrimitiveTopologyClass prim_type, const MTLRenderPipelineStateDescriptor &pipeline_descriptor)
const MTLComputePipelineStateCommon & get_compute_common_state()
void geometry_shader_from_glsl(MutableSpan< const char * > sources) override
std::string vertex_interface_declare(const shader::ShaderCreateInfo &info) const override
MTLComputePipelineStateInstance * bake_compute_pipeline_state(MTLContext *ctx, MTLComputePipelineStateDescriptor &compute_pipeline_descriptor)
std::string geometry_interface_declare(const shader::ShaderCreateInfo &info) const override
bool finalize_compute(const shader::ShaderCreateInfo *info)
void shader_compute_source_from_msl(NSString *input_compute_source)
void uniform_int(int location, int comp_len, int array_size, const int *data) override
bool finalize(const shader::ShaderCreateInfo *info=nullptr) override
MTLPrimitiveType get_ssbo_vertex_fetch_output_prim_type()
std::string compute_layout_declare(const shader::ShaderCreateInfo &info) const override
void transform_feedback_disable() override
std::string fragment_interface_declare(const shader::ShaderCreateInfo &info) const override
void fragment_shader_from_glsl(MutableSpan< const char * > sources) override
void vertex_shader_from_glsl(MutableSpan< const char * > sources) override
static int ssbo_vertex_type_to_attr_type(MTLVertexFormat attribute_type)
bool has_transform_feedback_varying(std::string str)
void set_vertex_function_name(NSString *vetex_function_name)
void bind() override
void shader_source_from_msl(NSString *input_vertex_source, NSString *input_fragment_source)
VertBuf * get_transform_feedback_active_buffer()
void unbind() override
MTLShaderInterface * get_interface()
void uniform_float(int location, int comp_len, int array_size, const float *data) override
int program_handle_get() const override
MTLRenderPipelineStateInstance * bake_current_pipeline_state(MTLContext *ctx, MTLPrimitiveTopologyClass prim_type)
void ssbo_vertex_fetch_bind_attributes_end(id< MTLRenderCommandEncoder > active_encoder)
MTLShader(MTLContext *ctx, const char *name)
Definition mtl_shader.mm:72
void compute_shader_from_glsl(MutableSpan< const char * > sources) override
std::string geometry_layout_declare(const shader::ShaderCreateInfo &info) const override
std::string resources_declare(const shader::ShaderCreateInfo &info) const override
MTLRenderPipelineStateDescriptor & get_current_pipeline_state()
bool get_uses_ssbo_vertex_fetch() const override
void set_interface(MTLShaderInterface *interface)
void push_constant_bindstate_mark_dirty(bool is_dirty)
void set_compute_function_name(NSString *compute_function_name)
ShaderInterface * interface
#define str(s)
#define MTL_MAX_VERTEX_INPUT_ATTRIBUTES
#define MTL_SSBO_VERTEX_FETCH_MAX_VBOS
GPUVertFetchMode fetchmode_from_vert_format(MTLVertexFormat vert_format)
bool mtl_vertex_format_resize(MTLVertexFormat mtl_format, uint32_t components, MTLVertexFormat *r_convertedFormat)
uint comp_count_from_vert_format(MTLVertexFormat vert_format)
GPUVertCompType comp_type_from_vert_format(MTLVertexFormat vert_format)
bool mtl_convert_vertex_format(MTLVertexFormat shader_attrib_format, GPUVertCompType component_type, uint32_t component_length, GPUVertFetchMode fetch_mode, MTLVertexFormat *r_convertedFormat)
unsigned int uint32_t
Definition stdint.h:80
void set_compute_workgroup_size(int workgroup_size_x, int workgroup_size_y, int workgroup_size_z)
blender::Vector< MTLBufferArgumentData > buffer_bindings_reflection_data_frag
blender::Vector< MTLBufferArgumentData > buffer_bindings_reflection_data_vert
Definition mtl_shader.hh:99
Describe inputs & outputs, stage interfaces, resources and sources of a shader. If all data is correc...