Blender V4.3
gpu_shader_create_info.hh
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2021 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
14#pragma once
15
16#include "BLI_hash.hh"
17#include "BLI_string_ref.hh"
18#include "BLI_vector.hh"
19#include "GPU_common_types.hh"
20#include "GPU_material.hh"
21#include "GPU_texture.hh"
22
23#include <iostream>
24
25/* Force enable `printf` support in release build. */
26#define GPU_FORCE_ENABLE_SHADER_PRINTF 0
27
28#if !defined(NDEBUG) || GPU_FORCE_ENABLE_SHADER_PRINTF
29# define GPU_SHADER_PRINTF_ENABLE 1
30#else
31# define GPU_SHADER_PRINTF_ENABLE 0
32#endif
33#define GPU_SHADER_PRINTF_SLOT 13
34#define GPU_SHADER_PRINTF_MAX_CAPACITY (1024 * 4)
35
36/* Used for primitive expansion. */
37#define GPU_SSBO_INDEX_BUF_SLOT 7
38
39namespace blender::gpu::shader {
40
41/* Helps intellisense / auto-completion. */
42#ifndef GPU_SHADER_CREATE_INFO
43# define GPU_SHADER_INTERFACE_INFO(_interface, _inst_name) \
44 StageInterfaceInfo _interface(#_interface, _inst_name); \
45 _interface
46# define GPU_SHADER_CREATE_INFO(_info) \
47 ShaderCreateInfo _info(#_info); \
48 _info
49#endif
50
/* All of these functions are a bit out of place. */
52static inline Type to_type(const eGPUType type)
53{
54 switch (type) {
55 case GPU_FLOAT:
56 return Type::FLOAT;
57 case GPU_VEC2:
58 return Type::VEC2;
59 case GPU_VEC3:
60 return Type::VEC3;
61 case GPU_VEC4:
62 return Type::VEC4;
63 case GPU_MAT3:
64 return Type::MAT3;
65 case GPU_MAT4:
66 return Type::MAT4;
67 default:
68 BLI_assert_msg(0, "Error: Cannot convert eGPUType to shader::Type.");
69 return Type::FLOAT;
70 }
71}
72
73static inline std::ostream &operator<<(std::ostream &stream, const Type type)
74{
75 switch (type) {
76 case Type::FLOAT:
77 return stream << "float";
78 case Type::VEC2:
79 return stream << "vec2";
80 case Type::VEC3:
81 return stream << "vec3";
82 case Type::VEC4:
83 return stream << "vec4";
84 case Type::MAT3:
85 return stream << "mat3";
86 case Type::MAT4:
87 return stream << "mat4";
89 return stream << "vec3_1010102_Inorm";
90 case Type::UCHAR:
91 return stream << "uchar";
92 case Type::UCHAR2:
93 return stream << "uchar2";
94 case Type::UCHAR3:
95 return stream << "uchar3";
96 case Type::UCHAR4:
97 return stream << "uchar4";
98 case Type::CHAR:
99 return stream << "char";
100 case Type::CHAR2:
101 return stream << "char2";
102 case Type::CHAR3:
103 return stream << "char3";
104 case Type::CHAR4:
105 return stream << "char4";
106 case Type::INT:
107 return stream << "int";
108 case Type::IVEC2:
109 return stream << "ivec2";
110 case Type::IVEC3:
111 return stream << "ivec3";
112 case Type::IVEC4:
113 return stream << "ivec4";
114 case Type::UINT:
115 return stream << "uint";
116 case Type::UVEC2:
117 return stream << "uvec2";
118 case Type::UVEC3:
119 return stream << "uvec3";
120 case Type::UVEC4:
121 return stream << "uvec4";
122 case Type::USHORT:
123 return stream << "ushort";
124 case Type::USHORT2:
125 return stream << "ushort2";
126 case Type::USHORT3:
127 return stream << "ushort3";
128 case Type::USHORT4:
129 return stream << "ushort4";
130 case Type::SHORT:
131 return stream << "short";
132 case Type::SHORT2:
133 return stream << "short2";
134 case Type::SHORT3:
135 return stream << "short3";
136 case Type::SHORT4:
137 return stream << "short4";
138 case Type::BOOL:
139 return stream << "bool";
140 default:
141 BLI_assert(0);
142 return stream;
143 }
144}
145
146static inline std::ostream &operator<<(std::ostream &stream, const eGPUType type)
147{
148 switch (type) {
149 case GPU_CLOSURE:
150 return stream << "Closure";
151 default:
152 return stream << to_type(type);
153 }
154}
155
/**
 * Shader built-in inputs/outputs and feature flags the create-info can
 * request. Values are bit-flags combined via `builtins_` (see #builtins()).
 * NOTE(review): bits 1 and 3 are unused — presumably removed/reserved
 * builtins; confirm before reusing them.
 */
enum class BuiltinBits {
  NONE = 0,
  BARYCENTRIC_COORD = (1 << 0),
  FRAG_COORD = (1 << 2),
  FRONT_FACING = (1 << 4),
  GLOBAL_INVOCATION_ID = (1 << 5),
  INSTANCE_ID = (1 << 6),
  LAYER = (1 << 7),
  LOCAL_INVOCATION_ID = (1 << 8),
  LOCAL_INVOCATION_INDEX = (1 << 9),
  NUM_WORK_GROUP = (1 << 10),
  POINT_COORD = (1 << 11),
  POINT_SIZE = (1 << 12),
  PRIMITIVE_ID = (1 << 13),
  VERTEX_ID = (1 << 14),
  WORK_GROUP_ID = (1 << 15),
  WORK_GROUP_SIZE = (1 << 16),
  VIEWPORT_INDEX = (1 << 17),

  /* Texture atomics requires usage options to alter compilation flag. */
  TEXTURE_ATOMIC = (1 << 18),

  /* Not a builtin but a flag we use to tag shaders that use the debug features. */
  USE_PRINTF = (1 << 28),
  USE_DEBUG_DRAW = (1 << 29),
  USE_DEBUG_PRINT = (1 << 30),
};
196
/**
 * Describes how the fragment shader will write to `gl_FragDepth`
 * (set via #ShaderCreateInfo::depth_write()).
 */
enum class DepthWrite {
  /* UNCHANGED specified as default to indicate gl_FragDepth is not used. */
  UNCHANGED = 0,
  ANY,
  GREATER,
  LESS,
};
208
209/* Samplers & images. */
260
261/* Storage qualifiers. */
/* Storage qualifiers for storage-buffer and image resources. Bit-flags. */
enum class Qualifier {
  /* Removes the `restrict` qualifier — presumably resources are `restrict`
   * by default; TODO confirm against the GLSL generation code. */
  NO_RESTRICT = (1 << 0),
  READ = (1 << 1),
  WRITE = (1 << 2),
  /* Mask covering every qualifier bit above (used as the enum bound). */
  QUALIFIER_MAX = (WRITE << 1) - 1,
};
272
/**
 * Binding frequency of a resource. Selects which resource list the resource
 * is stored in (see #ShaderCreateInfo::resources_get_()).
 */
enum class Frequency {
  BATCH = 0,
  PASS,
  /* Resources stored in `geometry_resources_`. */
  GEOMETRY,
};
280
/**
 * Dual-source blending index for a fragment output.
 * NONE disables dual-source blending for that output.
 */
enum class DualBlend {
  NONE = 0,
  SRC_0,
  SRC_1,
};
287
/**
 * Interpolation qualifiers for stage-interface members.
 * NOTE(review): the NO_PERSPECTIVE enumerator had been lost in extraction;
 * restored here since #StageInterfaceInfo::no_perspective() requires it.
 */
enum class Interpolation {
  SMOOTH = 0,
  FLAT,
  NO_PERSPECTIVE,
};
294
/**
 * Input primitive type of the geometry shader stage.
 * NOTE(review): two enumerator lines were dropped by extraction here
 * (likely the adjacency variants) — confirm against upstream before
 * depending on enumerator values.
 */
enum class PrimitiveIn {
  POINTS = 0,
  LINES,
  TRIANGLES,
};
303
/**
 * Output primitive type of the geometry shader stage.
 * NOTE(review): two enumerator lines were dropped by extraction here
 * (likely the strip variants) — confirm against upstream before depending
 * on enumerator values.
 */
enum class PrimitiveOut {
  POINTS = 0,
  LINES,
  TRIANGLES,
};
312
319
328
329 StageInterfaceInfo(const char *name_, const char *instance_name_)
330 : name(name_), instance_name(instance_name_){};
332
334
336 {
337 inouts.append({Interpolation::SMOOTH, type, _name});
338 return *(Self *)this;
339 }
340
342 {
343 inouts.append({Interpolation::FLAT, type, _name});
344 return *(Self *)this;
345 }
346
348 {
349 inouts.append({Interpolation::NO_PERSPECTIVE, type, _name});
350 return *(Self *)this;
351 }
352};
353
368 bool finalized_ = false;
388 std::string vertex_source_generated = "";
390 std::string compute_source_generated = "";
392 std::string typedef_source_generated = "";
395
/* Compare one member of `a` and `b`; early-returns `false` from the enclosing
 * bool-returning `operator==` on mismatch. Undefined again at end of class. */
#define TEST_EQUAL(a, b, _member) \
  if (!((a)._member == (b)._member)) { \
    return false; \
  }

/* Compare two vectors element-wise (size first) using TEST_EQUAL. */
#define TEST_VECTOR_EQUAL(a, b, _vector) \
  TEST_EQUAL(a, b, _vector.size()); \
  for (auto i : _vector.index_range()) { \
    TEST_EQUAL(a, b, _vector[i]); \
  }
406
407 struct VertIn {
408 int index;
411
412 bool operator==(const VertIn &b) const
413 {
414 TEST_EQUAL(*this, b, index);
415 TEST_EQUAL(*this, b, type);
416 TEST_EQUAL(*this, b, name);
417 return true;
418 }
419 };
421
427 int max_vertices = -1;
428
430 {
431 TEST_EQUAL(*this, b, primitive_in);
432 TEST_EQUAL(*this, b, invocations);
433 TEST_EQUAL(*this, b, primitive_out);
434 TEST_EQUAL(*this, b, max_vertices);
435 return true;
436 }
437 };
439
441 int local_size_x = -1;
442 int local_size_y = -1;
443 int local_size_z = -1;
444
445 bool operator==(const ComputeStageLayout &b) const
446 {
447 TEST_EQUAL(*this, b, local_size_x);
448 TEST_EQUAL(*this, b, local_size_y);
449 TEST_EQUAL(*this, b, local_size_z);
450 return true;
451 }
452 };
454
455 struct FragOut {
456 int index;
460 /* NOTE: Currently only supported by Metal. */
462
463 bool operator==(const FragOut &b) const
464 {
465 TEST_EQUAL(*this, b, index);
466 TEST_EQUAL(*this, b, type);
467 TEST_EQUAL(*this, b, blend);
468 TEST_EQUAL(*this, b, name);
470 return true;
471 }
472 };
474
477
479
485
492
497
503
  /* A bound shader resource (UBO, SSBO, sampler or image), tagged by its
   * bind type with per-kind data in the union below.
   * NOTE(review): extraction dropped the BindType enum body (presumably
   * UNIFORM_BUFFER / STORAGE_BUFFER / SAMPLER / IMAGE, given the switch
   * below), the `bind_type` member line, and the union's member
   * declarations (`uniformbuf`, `storagebuf`, `sampler`, `image` judging by
   * the accesses in operator==) — confirm against upstream. */
  struct Resource {
    int slot;
    union {
    };

    Resource(BindType type, int _slot) : bind_type(type), slot(_slot){};

    /* Equality is bind-type-aware: only the union member matching
     * `bind_type` is compared. NOTE(review): some comparison lines appear to
     * have been dropped by extraction as well (e.g. `uniformbuf.type_name`). */
    bool operator==(const Resource &b) const
    {
      TEST_EQUAL(*this, b, bind_type);
      TEST_EQUAL(*this, b, slot);
      switch (bind_type) {
        case UNIFORM_BUFFER:
          TEST_EQUAL(*this, b, uniformbuf.name);
          break;
        case STORAGE_BUFFER:
          TEST_EQUAL(*this, b, storagebuf.name);
          break;
        case SAMPLER:
          TEST_EQUAL(*this, b, sampler.type);
          TEST_EQUAL(*this, b, sampler.sampler);
          TEST_EQUAL(*this, b, sampler.name);
          break;
        case IMAGE:
          TEST_EQUAL(*this, b, image.format);
          TEST_EQUAL(*this, b, image.type);
          TEST_EQUAL(*this, b, image.qualifiers);
          TEST_EQUAL(*this, b, image.name);
          break;
      }
      return true;
    }
  };
560
562 {
563 switch (freq) {
564 case Frequency::PASS:
565 return pass_resources_;
566 case Frequency::BATCH:
567 return batch_resources_;
569 return geometry_resources_;
570 }
572 return pass_resources_;
573 }
574
575 /* Return all resources regardless of their frequency. */
577 {
578 Vector<Resource> all_resources;
579 all_resources.extend(pass_resources_);
580 all_resources.extend(batch_resources_);
581 all_resources.extend(geometry_resources_);
582 return all_resources;
583 }
584
587
588 struct PushConst {
592
593 bool operator==(const PushConst &b) const
594 {
595 TEST_EQUAL(*this, b, type);
596 TEST_EQUAL(*this, b, name);
597 TEST_EQUAL(*this, b, array_size);
598 return true;
599 }
600 };
601
603
604 /* Sources for resources type definitions. */
606
608
615
616 /* Transform feedback properties. */
619
620 /* Api-specific parameters. */
621#ifdef WITH_METAL_BACKEND
622 ushort mtl_max_threads_per_threadgroup_ = 0;
623#endif
624
625 public:
626 ShaderCreateInfo(const char *name) : name_(name){};
628
630
631 /* -------------------------------------------------------------------- */
635 Self &vertex_in(int slot, Type type, StringRefNull name)
636 {
637 vertex_inputs_.append({slot, type, name});
638 interface_names_size_ += name.size() + 1;
639 return *(Self *)this;
640 }
641
643 {
644 vertex_out_interfaces_.append(&interface);
645 return *(Self *)this;
646 }
647
649 PrimitiveOut prim_out,
650 int max_vertices,
651 int invocations = -1)
652 {
655 geometry_layout_.max_vertices = max_vertices;
656 geometry_layout_.invocations = invocations;
657 return *(Self *)this;
658 }
659
660 Self &local_group_size(int local_size_x = -1, int local_size_y = -1, int local_size_z = -1)
661 {
662 compute_layout_.local_size_x = local_size_x;
663 compute_layout_.local_size_y = local_size_y;
664 compute_layout_.local_size_z = local_size_z;
665 return *(Self *)this;
666 }
667
673 {
674 early_fragment_test_ = enable;
675 return *(Self *)this;
676 }
677
685 {
686 geometry_out_interfaces_.append(&interface);
687 return *(Self *)this;
688 }
689
691 Type type,
692 StringRefNull name,
694 int raster_order_group = -1)
695 {
696 fragment_outputs_.append({slot, type, blend, name, raster_order_group});
697 return *(Self *)this;
698 }
699
  /* Declare a subpass input at the given slot. Reuses the FragOut layout;
   * blending does not apply to inputs so it is fixed to DualBlend::NONE. */
  Self &subpass_in(int slot, Type type, StringRefNull name, int raster_order_group = -1)
  {
    subpass_inputs_.append({slot, type, DualBlend::NONE, name, raster_order_group});
    return *(Self *)this;
  }
719
722 /* -------------------------------------------------------------------- */
726 /* Adds a specialization constant which is a dynamically modifiable value, which will be
727 * statically compiled into a PSO configuration to provide optimal runtime performance,
728 * with a reduced re-compilation cost vs Macro's with easier generation of unique permutations
729 * based on run-time values.
730 *
731 * Tip: To evaluate use-cases of where specialization constants can provide a performance
732 * gain, benchmark a given shader in its default case. Attempt to statically disable branches or
733 * conditions which rely on uniform look-ups and measure if there is a marked improvement in
734 * performance and/or reduction in memory bandwidth/register pressure.
735 *
736 * NOTE: Specialization constants will incur new compilation of PSOs and thus can incur an
737 * unexpected cost. Specialization constants should be reserved for infrequently changing
738 * parameters (e.g. user setting parameters such as toggling of features or quality level
739 * presets), or those with a low set of possible runtime permutations.
740 *
741 * Specialization constants are assigned at runtime using:
742 * - `GPU_shader_constant_*(shader, name, value)`
743 * or
744 * - `DrawPass::specialize_constant(shader, name, value)`
745 *
746 * All constants **MUST** be specified before binding a shader.
747 */
  /* Declare a specialization constant of scalar `type` named `name`, storing
   * `default_value` cast into the union member matching the type. See the
   * block comment above for usage guidance. Non-scalar types assert. */
  Self &specialization_constant(Type type, StringRefNull name, double default_value)
  {
    SpecializationConstant constant;
    constant.type = type;
    constant.name = name;
    /* Store the default into the union member matching the scalar type. */
    switch (type) {
      case Type::INT:
        constant.value.i = static_cast<int>(default_value);
        break;
      case Type::BOOL:
      case Type::UINT:
        /* BOOL shares the unsigned storage. */
        constant.value.u = static_cast<uint>(default_value);
        break;
      case Type::FLOAT:
        constant.value.f = static_cast<float>(default_value);
        break;
      default:
        BLI_assert_msg(0, "Only scalar types can be used as constants");
        break;
    }
    specialization_constants_.append(constant);
    /* Track name length (+1, presumably for the null terminator). */
    interface_names_size_ += name.size() + 1;
    return *(Self *)this;
  }
772
773 /* TODO: Add API to specify unique specialization config permutations in CreateInfo, allowing
774 * specialized compilation to be primed and handled in the background at start-up, rather than
775 * waiting for a given permutation to occur dynamically. */
776
779 /* -------------------------------------------------------------------- */
783 Self &uniform_buf(int slot,
784 StringRefNull type_name,
785 StringRefNull name,
787 {
789 res.uniformbuf.name = name;
790 res.uniformbuf.type_name = type_name;
791 resources_get_(freq).append(res);
792 interface_names_size_ += name.size() + 1;
793 return *(Self *)this;
794 }
795
796 Self &storage_buf(int slot,
797 Qualifier qualifiers,
798 StringRefNull type_name,
799 StringRefNull name,
801 {
803 res.storagebuf.qualifiers = qualifiers;
804 res.storagebuf.type_name = type_name;
805 res.storagebuf.name = name;
806 resources_get_(freq).append(res);
807 interface_names_size_ += name.size() + 1;
808 return *(Self *)this;
809 }
810
811 Self &image(int slot,
813 Qualifier qualifiers,
814 ImageType type,
815 StringRefNull name,
817 {
819 res.image.format = format;
820 res.image.qualifiers = qualifiers;
821 res.image.type = type;
822 res.image.name = name;
823 resources_get_(freq).append(res);
824 interface_names_size_ += name.size() + 1;
825 return *(Self *)this;
826 }
827
828 Self &sampler(int slot,
829 ImageType type,
830 StringRefNull name,
833 {
835 res.sampler.type = type;
836 res.sampler.name = name;
837 /* Produces ASAN errors for the moment. */
838 // res.sampler.sampler = sampler;
840 resources_get_(freq).append(res);
841 interface_names_size_ += name.size() + 1;
842 return *(Self *)this;
843 }
844
847 /* -------------------------------------------------------------------- */
852 {
853 vertex_source_ = filename;
854 return *(Self *)this;
855 }
856
858 {
859 geometry_source_ = filename;
860 return *(Self *)this;
861 }
862
864 {
865 fragment_source_ = filename;
866 return *(Self *)this;
867 }
868
870 {
871 compute_source_ = filename;
872 return *(Self *)this;
873 }
874
877 /* -------------------------------------------------------------------- */
  /* Declare a push constant. `array_size` == 0 means a non-array constant;
   * array syntax inside `name` is rejected by the assert below. */
  Self &push_constant(Type type, StringRefNull name, int array_size = 0)
  {
    /* We don't have support for UINT push constants yet, use INT instead. */
    BLI_assert(type != Type::UINT);
    BLI_assert_msg(name.find("[") == -1,
                   "Array syntax is forbidden for push constants."
                   "Use the array_size parameter instead.");
    push_constants_.append({type, name, array_size});
    interface_names_size_ += name.size() + 1;
    return *(Self *)this;
  }
894
897 /* -------------------------------------------------------------------- */
902 {
903 defines_.append({name, value});
904 return *(Self *)this;
905 }
906
909 /* -------------------------------------------------------------------- */
914 {
916 return *(Self *)this;
917 }
918
920 {
921 builtins_ |= builtin;
922 return *(Self *)this;
923 }
924
925 /* Defines how the fragment shader will write to gl_FragDepth. */
927 {
928 depth_write_ = value;
929 return *(Self *)this;
930 }
931
933 {
935 return *(Self *)this;
936 }
937
939 {
941 return *(Self *)this;
942 }
943
945 {
947 return *(Self *)this;
948 }
949
952 /* -------------------------------------------------------------------- */
959 {
960 additional_infos_.append(info_name);
961 return *(Self *)this;
962 }
963
  /* Variadic overload: append several additional create-info names at once,
   * recursing pairwise through the single-name overload. */
  template<typename... Args> Self &additional_info(StringRefNull info_name, Args... args)
  {
    additional_info(info_name);
    additional_info(args...);
    return *(Self *)this;
  }
970
973 /* -------------------------------------------------------------------- */
982 {
983 typedef_sources_.append(filename);
984 return *(Self *)this;
985 }
986
989 /* -------------------------------------------------------------------- */
996 {
998 tf_type_ = tf_mode;
999 return *(Self *)this;
1000 }
1001
1003 {
1005 tf_names_.append(name);
1006 return *(Self *)this;
1007 }
1010 /* -------------------------------------------------------------------- */
  /* \name mtl_max_total_threads_per_threadgroup
   * \a max_total_threads_per_threadgroup - Provides compiler hint for maximum
   * threadgroup size up front. Maximum value is 1024.
   * No-op when the Metal backend is not compiled in. */
  Self &mtl_max_total_threads_per_threadgroup(ushort max_total_threads_per_threadgroup)
  {
#ifdef WITH_METAL_BACKEND
    mtl_max_threads_per_threadgroup_ = max_total_threads_per_threadgroup;
#else
    UNUSED_VARS(max_total_threads_per_threadgroup);
#endif
    return *(Self *)this;
  }
1030
1033 /* -------------------------------------------------------------------- */
1040 /* WARNING: Recursive evaluation is not thread safe.
1041 * Non-recursive evaluation expects their dependencies to be already finalized.
1042 * (All statically declared CreateInfos are automatically finalized at startup) */
1043 void finalize(const bool recursive = false);
1044
1045 std::string check_error() const;
1046 bool is_vulkan_compatible() const;
1047
1049 void validate_merge(const ShaderCreateInfo &other_info);
1050 void validate_vertex_attributes(const ShaderCreateInfo *other_info = nullptr);
1051
1054 /* -------------------------------------------------------------------- */
1059 /* Comparison operator for GPUPass cache. We only compare if it will create the same shader
1060 * code. So we do not compare name and some other internal stuff. */
1088
1090 friend std::ostream &operator<<(std::ostream &stream, const ShaderCreateInfo &info)
1091 {
1092 /* TODO(@fclem): Complete print. */
1093
1094 auto print_resource = [&](const Resource &res) {
1095 switch (res.bind_type) {
1097 stream << "UNIFORM_BUFFER(" << res.slot << ", " << res.uniformbuf.name << ")"
1098 << std::endl;
1099 break;
1101 stream << "STORAGE_BUFFER(" << res.slot << ", " << res.storagebuf.name << ")"
1102 << std::endl;
1103 break;
1105 stream << "SAMPLER(" << res.slot << ", " << res.sampler.name << ")" << std::endl;
1106 break;
1108 stream << "IMAGE(" << res.slot << ", " << res.image.name << ")" << std::endl;
1109 break;
1110 }
1111 };
1112
1113 /* TODO(@fclem): Order the resources. */
1114 for (auto &res : info.batch_resources_) {
1115 print_resource(res);
1116 }
1117 for (auto &res : info.pass_resources_) {
1118 print_resource(res);
1119 }
1120 for (auto &res : info.geometry_resources_) {
1121 print_resource(res);
1122 }
1123 return stream;
1124 }
1125
1127 {
1128 for (auto &res : batch_resources_) {
1129 if (res.bind_type == bind_type) {
1130 return true;
1131 }
1132 }
1133 for (auto &res : pass_resources_) {
1134 if (res.bind_type == bind_type) {
1135 return true;
1136 }
1137 }
1138 for (auto &res : geometry_resources_) {
1139 if (res.bind_type == bind_type) {
1140 return true;
1141 }
1142 }
1143 return false;
1144 }
1145
1147 {
1149 }
1150
1153#undef TEST_EQUAL
1154#undef TEST_VECTOR_EQUAL
1155};
1156
1157} // namespace blender::gpu::shader
1158
1159namespace blender {
1162 {
1163 uint64_t hash = 0;
1165 hash = hash * 33 ^ uint64_t(value.u);
1166 }
1167 return hash;
1168 }
1169};
1170} // namespace blender
#define BLI_assert_unreachable()
Definition BLI_assert.h:97
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:57
unsigned short ushort
unsigned int uint
#define UNUSED_VARS(...)
#define ENUM_OPERATORS(_type, _max)
eGPUType
@ GPU_VEC2
@ GPU_MAT4
@ GPU_VEC4
@ GPU_CLOSURE
@ GPU_VEC3
@ GPU_MAT3
@ GPU_FLOAT
eGPUShaderTFBType
@ GPU_SHADER_TFB_NONE
eGPUTextureFormat
constexpr int64_t size() const
void append(const T &value)
void extend(Span< T > array)
local_group_size(16, 16) .push_constant(Type b
#define TEST_VECTOR_EQUAL(a, b, _vector)
#define TEST_EQUAL(a, b, _member)
format
static std::ostream & operator<<(std::ostream &stream, const Type type)
static Type to_type(const eGPUType type)
static void print_resource(std::ostream &os, const ShaderCreateInfo::Resource &res)
#define hash
Definition noise.c:154
unsigned __int64 uint64_t
Definition stdint.h:90
static constexpr GPUSamplerState internal_sampler()
uint64_t operator()(const Vector< blender::gpu::shader::SpecializationConstant::Value > &key) const
Describe inputs & outputs, stage interfaces, resources and sources of a shader. If all data is correc...
Vector< StageInterfaceInfo * > vertex_out_interfaces_
Self & image(int slot, eGPUTextureFormat format, Qualifier qualifiers, ImageType type, StringRefNull name, Frequency freq=Frequency::PASS)
Self & mtl_max_total_threads_per_threadgroup(ushort max_total_threads_per_threadgroup)
Self & compute_source(StringRefNull filename)
Self & geometry_layout(PrimitiveIn prim_in, PrimitiveOut prim_out, int max_vertices, int invocations=-1)
Self & fragment_source(StringRefNull filename)
Vector< std::array< StringRefNull, 2 > > defines_
void validate_vertex_attributes(const ShaderCreateInfo *other_info=nullptr)
Vector< Resource > & resources_get_(Frequency freq)
Self & vertex_in(int slot, Type type, StringRefNull name)
void finalize(const bool recursive=false)
Self & push_constant(Type type, StringRefNull name, int array_size=0)
Self & geometry_out(StageInterfaceInfo &interface)
Self & transform_feedback_mode(eGPUShaderTFBType tf_mode)
bool operator==(const ShaderCreateInfo &b) const
Self & additional_info(StringRefNull info_name)
Self & typedef_source(StringRefNull filename)
Self & fragment_out(int slot, Type type, StringRefNull name, DualBlend blend=DualBlend::NONE, int raster_order_group=-1)
Self & local_group_size(int local_size_x=-1, int local_size_y=-1, int local_size_z=-1)
Self & vertex_out(StageInterfaceInfo &interface)
Vector< StageInterfaceInfo * > geometry_out_interfaces_
Self & storage_buf(int slot, Qualifier qualifiers, StringRefNull type_name, StringRefNull name, Frequency freq=Frequency::PASS)
Self & geometry_source(StringRefNull filename)
bool has_resource_type(Resource::BindType bind_type) const
Self & vertex_source(StringRefNull filename)
Self & sampler(int slot, ImageType type, StringRefNull name, Frequency freq=Frequency::PASS, GPUSamplerState sampler=GPUSamplerState::internal_sampler())
Self & additional_info(StringRefNull info_name, Args... args)
Self & uniform_buf(int slot, StringRefNull type_name, StringRefNull name, Frequency freq=Frequency::PASS)
Self & subpass_in(int slot, Type type, StringRefNull name, int raster_order_group=-1)
Self & specialization_constant(Type type, StringRefNull name, double default_value)
friend std::ostream & operator<<(std::ostream &stream, const ShaderCreateInfo &info)
void validate_merge(const ShaderCreateInfo &other_info)
Self & define(StringRefNull name, StringRefNull value="")
Vector< SpecializationConstant > specialization_constants_
Self & smooth(Type type, StringRefNull _name)
StageInterfaceInfo(const char *name_, const char *instance_name_)
Self & no_perspective(Type type, StringRefNull _name)
Self & flat(Type type, StringRefNull _name)
uint8_t flag
Definition wm_window.cc:138