/* SPDX-FileCopyrightText: 2022-2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file mtl_batch.mm (Blender V4.3). */
11#include "BLI_assert.h"
12#include "BLI_span.hh"
13
14#include "BKE_global.hh"
15
16#include "GPU_batch.hh"
17#include "GPU_common.hh"
18#include "gpu_shader_private.hh"
19
20#include "mtl_batch.hh"
21#include "mtl_context.hh"
22#include "mtl_debug.hh"
23#include "mtl_index_buffer.hh"
24#include "mtl_shader.hh"
25#include "mtl_storage_buffer.hh"
26#include "mtl_vertex_buffer.hh"
27
28#include <string>
29
30namespace blender::gpu {
31
32/* -------------------------------------------------------------------- */
void MTLBatch::draw(int v_first, int v_count, int i_first, int i_count)
{
  /* An invalidated batch must not reuse a stale shader binding: clearing
   * `shader_in_use_` forces the next bind to re-bind the shader. */
  const bool batch_invalid = (this->flag & GPU_BATCH_INVALID) != 0;
  if (batch_invalid) {
    this->shader_in_use_ = false;
  }
  this->draw_advanced(v_first, v_count, i_first, i_count);
}
42
void MTLBatch::draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset)
{
  /* Same invalidation handling as MTLBatch::draw: reset the shader-bound flag
   * so the subsequent bind performs a full shader re-bind. */
  const bool batch_invalid = (this->flag & GPU_BATCH_INVALID) != 0;
  if (batch_invalid) {
    this->shader_in_use_ = false;
  }
  this->draw_advanced_indirect(indirect_buf, offset);
}
50
void MTLBatch::shader_bind()
{
  /* Only bind when a valid Metal shader is attached; otherwise leave the
   * `shader_in_use_` flag untouched. */
  if (active_shader_ == nullptr || !active_shader_->is_valid()) {
    return;
  }
  active_shader_->bind();
  shader_in_use_ = true;
}
58
void MTLBatch::MTLVertexDescriptorCache::vertex_descriptor_cache_init(MTLContext *ctx)
{
  /* Re-associate the cache with a context. Clearing first invalidates every
   * previously cached descriptor (clear also nulls `cache_context_`, so the
   * assignment must come after it). */
  BLI_assert(ctx != nullptr);
  this->vertex_descriptor_cache_clear();
  cache_context_ = ctx;
}
65
void MTLBatch::MTLVertexDescriptorCache::vertex_descriptor_cache_clear()
{
  /* Bumping the generation counter implicitly invalidates all entries: an
   * entry is only treated as live while its stored life index matches. */
  cache_life_index_++;
  cache_context_ = nullptr;
}
71
void MTLBatch::MTLVertexDescriptorCache::vertex_descriptor_cache_ensure()
{
  /* A dirty batch invalidates all cached vertex descriptor bindings. */
  if (cache_context_ != nullptr && (batch_->flag & GPU_BATCH_DIRTY)) {
    batch_->flag &= ~GPU_BATCH_DIRTY;
    this->vertex_descriptor_cache_clear();
  }

  /* (Re-)bind the cache to the active context when currently unbound. */
  if (cache_context_ == nullptr) {
    this->vertex_descriptor_cache_init(MTLContext::get());
  }
}
88
MTLBatch::VertexDescriptorShaderInterfacePair *MTLBatch::MTLVertexDescriptorCache::find(
    const ShaderInterface *interface)
{
  /* Linear scan is fine here: the cache holds only GPU_VAO_STATIC_LEN slots.
   * A slot is a hit only when it matches the interface AND belongs to the
   * current cache generation. */
  this->vertex_descriptor_cache_ensure();
  for (int slot = 0; slot < GPU_VAO_STATIC_LEN; slot++) {
    const bool matches = (cache_[slot].interface == interface) &&
                         (cache_[slot].cache_life_index == cache_life_index_);
    if (matches) {
      return &cache_[slot];
    }
  }
  return nullptr;
}
100
bool MTLBatch::MTLVertexDescriptorCache::insert(
    MTLBatch::VertexDescriptorShaderInterfacePair &data)
{
  /* Store `data` in the first free slot. A slot is free when it was never
   * populated, or when its generation index is stale. Returns false when the
   * cache is full. */
  this->vertex_descriptor_cache_ensure();
  for (int slot = 0; slot < GPU_VAO_STATIC_LEN; slot++) {
    const bool slot_occupied = (cache_[slot].interface != nullptr) &&
                               (cache_[slot].cache_life_index == cache_life_index_);
    if (slot_occupied) {
      continue;
    }
    cache_[slot] = data;
    cache_[slot].cache_life_index = cache_life_index_;
    return true;
  }
  return false;
}
114
/**
 * Resolve vertex attribute bindings for one vertex (or instance) buffer against the
 * attributes declared in the active shader's interface.
 *
 * Matching attributes are written into the pipeline state descriptor `desc` and their
 * bits are cleared from `attr_mask` (the mask of attribute slots still awaiting a
 * binding).
 *
 * \param verts: Vertex buffer to source attribute data from.
 * \param desc: Pipeline state descriptor receiving attribute/buffer-layout entries.
 * \param interface: Shader interface whose attribute names are matched.
 * \param attr_mask: In/out mask of unbound attribute locations; matched bits cleared.
 * \param instanced: True when `verts` provides per-instance (not per-vertex) data.
 * \return Buffer-layout binding index assigned to `verts`, or -1 if nothing matched.
 *
 * NOTE(review): several continuation lines (assert/log call heads, one SSBO attribute
 * type argument) were dropped from this function during extraction; they have been
 * restored below and should be verified against the upstream sources.
 */
int MTLBatch::prepare_vertex_binding(MTLVertBuf *verts,
                                     MTLRenderPipelineStateDescriptor &desc,
                                     const MTLShaderInterface *interface,
                                     uint16_t &attr_mask,
                                     bool instanced)
{
  const GPUVertFormat *format = &verts->format;
  /* Whether the current vertex buffer has been added to the buffer layout descriptor. */
  bool buffer_added = false;
  /* Per-vertex stride of current vertex buffer. */
  int buffer_stride = format->stride;
  /* Buffer binding index of the vertex buffer once added to the buffer layout descriptor. */
  int buffer_index = -1;
  int attribute_offset = 0;

  if (!active_shader_->get_uses_ssbo_vertex_fetch()) {
    BLI_assert(
        buffer_stride >= 4 &&
        "In Metal, Vertex buffer stride should be 4. SSBO Vertex fetch is not affected by this");
  }

  /* Iterate over VertBuf vertex format and find attributes matching those in the active
   * shader's interface. */
  for (uint32_t a_idx = 0; a_idx < format->attr_len; a_idx++) {
    const GPUVertAttr *a = &format->attrs[a_idx];

    if (format->deinterleaved) {
      /* De-interleaved layout: each attribute occupies its own contiguous region. */
      attribute_offset += ((a_idx == 0) ? 0 : format->attrs[a_idx - 1].size) * verts->vertex_len;
      buffer_stride = a->size;
    }
    else {
      attribute_offset = a->offset;
    }

    /* Find attribute with the matching name. Attributes may have multiple compatible
     * name aliases. */
    for (uint32_t n_idx = 0; n_idx < a->name_len; n_idx++) {
      const char *name = GPU_vertformat_attr_name_get(format, a, n_idx);
      const ShaderInput *input = interface->attr_get(name);

      if (input == nullptr || input->location == -1) {
        /* Vertex/instance buffers provided have attribute data for attributes which are not needed
         * by this particular shader. This shader only needs binding information for the attributes
         * has in the shader interface. */
        if (StringRefNull(name) != "dummy") {
          MTL_LOG_WARNING(
              "MTLBatch: Could not find attribute with name '%s' (defined in active vertex "
              "format) "
              "in the shader interface for shader '%s'",
              name,
              interface->get_name());
        }
        continue;
      }

      /* Fetch metal attribute information (ShaderInput->binding is used to fetch the corresponding
       * slot. */
      const MTLShaderInputAttribute &mtl_attr = interface->get_attribute(input->binding);
      BLI_assert(mtl_attr.location >= 0);
      /* Verify that the attribute location from the shader interface
       * matches the attribute location returned in the input table. These should always be the
       * same. */
      BLI_assert(mtl_attr.location == input->location);

      /* Check if attribute is already present in the given slot. */
      if ((~attr_mask) & (1 << mtl_attr.location)) {
        MTL_LOG_INFO(
            " -- [Batch] Skipping attribute with input location %d (As one is already bound)",
            mtl_attr.location);
      }
      else {

        /* Update attribute used-slot mask. */
        attr_mask &= ~(1 << mtl_attr.location);

        /* Add buffer layout entry in descriptor if it has not yet been added
         * for current vertex buffer. */
        if (!buffer_added) {
          buffer_index = desc.vertex_descriptor.num_vert_buffers;
          desc.vertex_descriptor.buffer_layouts[buffer_index].step_function =
              (instanced) ? MTLVertexStepFunctionPerInstance : MTLVertexStepFunctionPerVertex;
          desc.vertex_descriptor.buffer_layouts[buffer_index].step_rate = 1;
          desc.vertex_descriptor.buffer_layouts[buffer_index].stride = buffer_stride;
          desc.vertex_descriptor.num_vert_buffers++;
          buffer_added = true;

          MTL_LOG_INFO(" -- [Batch] Adding source %s buffer (Index: %d, Stride: %d)",
                       (instanced) ? "instance" : "vertex",
                       buffer_index,
                       buffer_stride);
        }
        else {
          /* Ensure stride is correct for de-interleaved attributes. */
          desc.vertex_descriptor.buffer_layouts[buffer_index].stride = buffer_stride;
        }

        /* Handle Matrix/Array vertex attribute types.
         * Metal does not natively support these as attribute types, so we handle these cases
         * by stacking together compatible types (e.g. 4xVec4 for Mat4) and combining
         * the data in the shader.
         * The generated Metal shader will contain a generated input binding, which reads
         * in individual attributes and merges them into the desired type after vertex
         * assembly. e.g. a Mat4 (Float4x4) will generate 4 Float4 attributes. */
        if (a->comp_len == 16 || a->comp_len == 12 || a->comp_len == 8) {
          BLI_assert_msg(
              a->comp_len == 16,
              "only mat4 attributes currently supported -- Not ready to handle other long "
              "component length attributes yet");

          /* SSBO Vertex Fetch Attribute safety checks. */
          if (active_shader_->get_uses_ssbo_vertex_fetch()) {
            /* When using SSBO vertex fetch, we do not need to expose split attributes,
             * A matrix can be read directly as a whole block of contiguous data.
             * NOTE(review): attribute-type argument restored from a dropped line. */
            MTLSSBOAttribute ssbo_attr(mtl_attr.index,
                                       buffer_index,
                                       attribute_offset,
                                       buffer_stride,
                                       GPU_SHADER_ATTR_TYPE_MAT4,
                                       instanced);
            active_shader_->ssbo_vertex_fetch_bind_attribute(ssbo_attr);
            desc.vertex_descriptor.ssbo_attributes[desc.vertex_descriptor.num_ssbo_attributes] =
                ssbo_attr;
            desc.vertex_descriptor.num_ssbo_attributes++;
          }
          else {

            /* Handle Mat4 attributes. */
            if (a->comp_len == 16) {
              /* Debug safety checks. */
              BLI_assert_msg(mtl_attr.matrix_element_count == 4,
                             "mat4 type expected but there are fewer components");
              BLI_assert_msg(mtl_attr.size == 16, "Expecting subtype 'vec4' with 16 bytes");
              BLI_assert_msg(
                  mtl_attr.format == MTLVertexFormatFloat4,
                  "Per-attribute vertex format MUST be float4 for an input type of 'mat4'");

              /* We have found the 'ROOT' attribute. A mat4 contains 4 consecutive float4 attribute
               * locations we must map to. */
              for (int i = 0; i < a->comp_len / 4; i++) {
                desc.vertex_descriptor.attributes[mtl_attr.location + i].format =
                    MTLVertexFormatFloat4;
                /* Data is consecutive in the buffer for the whole matrix, each float4 will shift
                 * the offset by 16 bytes. */
                desc.vertex_descriptor.attributes[mtl_attr.location + i].offset =
                    attribute_offset + i * 16;
                /* All source data for a matrix is in the same singular buffer. */
                desc.vertex_descriptor.attributes[mtl_attr.location + i].buffer_index =
                    buffer_index;

                /* Update total attribute account. */
                desc.vertex_descriptor.total_attributes++;
                desc.vertex_descriptor.max_attribute_value = max_ii(
                    mtl_attr.location + i, desc.vertex_descriptor.max_attribute_value);
                MTL_LOG_INFO("-- Sub-Attrib Location: %d, offset: %d, buffer index: %d",
                             mtl_attr.location + i,
                             attribute_offset + i * 16,
                             buffer_index);

                /* Update attribute used-slot mask for array elements. */
                attr_mask &= ~(1 << (mtl_attr.location + i));
              }
              MTL_LOG_INFO(
                  "Float4x4 attribute type added for '%s' at attribute locations: %d to %d",
                  name,
                  mtl_attr.location,
                  mtl_attr.location + 3);
            }

            /* Ensure we are not exceeding the attribute limit.
             * NOTE(review): limit constant restored from a dropped line -- verify upstream. */
            BLI_assert(desc.vertex_descriptor.max_attribute_value < MTL_MAX_VERTEX_ATTRIBUTES);
          }
        }
        else {

          /* Handle Any required format conversions.
           * NOTE(Metal): If there is a mis-match between the format of an attribute
           * in the shader interface, and the specified format in the VertexBuffer VertexFormat,
           * we need to perform a format conversion.
           *
           * The Metal API can perform certain conversions internally during vertex assembly:
           * - Type Normalization e.g short2 to float2 between 0.0 to 1.0.
           * - Type Truncation e.g. Float4 to Float2.
           * - Type expansion e,g, Float3 to Float4 (Following 0,0,0,1 for assignment to empty
           *   elements).
           *
           * Certain conversion cannot be performed however, and in these cases, we need to
           * instruct the shader to generate a specialized version with a conversion routine upon
           * attribute read.
           * - This handles cases such as conversion between types e.g. Integer to float without
           *   normalization.
           *
           * For more information on the supported and unsupported conversions, see:
           * https://developer.apple.com/documentation/metal/mtlvertexattributedescriptor/1516081-format?language=objc
           */
          MTLVertexFormat converted_format;
          bool can_use_internal_conversion = mtl_convert_vertex_format(
              mtl_attr.format,
              (GPUVertCompType)a->comp_type,
              a->comp_len,
              (GPUVertFetchMode)a->fetch_mode,
              &converted_format);
          bool is_floating_point_format = (a->comp_type == GPU_COMP_F32);

          if (can_use_internal_conversion) {
            desc.vertex_descriptor.attributes[mtl_attr.location].format = converted_format;
            desc.vertex_descriptor.attributes[mtl_attr.location].format_conversion_mode =
                is_floating_point_format ? (GPUVertFetchMode)GPU_FETCH_FLOAT :
                                           (GPUVertFetchMode)GPU_FETCH_INT;
            BLI_assert(converted_format != MTLVertexFormatInvalid);
          }
          else {
            /* The internal implicit conversion is not supported.
             * In this case, we need to handle conversion inside the shader.
             * This is handled using `format_conversion_mode`.
             * `format_conversion_mode` is assigned the blender-specified fetch mode (GPU_FETCH_*).
             * This then controls how a given attribute is interpreted. The data will be read
             * as specified and then converted appropriately to the correct form.
             *
             * e.g. if `GPU_FETCH_INT_TO_FLOAT` is specified, the specialized read-routine
             * in the shader will read the data as an int, and cast this to floating point
             * representation. (Rather than reading the source data as float).
             *
             * NOTE: Even if full conversion is not supported, we may still partially perform an
             * implicit conversion where possible, such as vector truncation or expansion. */
            MTLVertexFormat converted_format;
            bool can_convert = mtl_vertex_format_resize(
                mtl_attr.format, a->comp_len, &converted_format);
            desc.vertex_descriptor.attributes[mtl_attr.location].format = can_convert ?
                                                                              converted_format :
                                                                              mtl_attr.format;
            desc.vertex_descriptor.attributes[mtl_attr.location].format_conversion_mode =
                (GPUVertFetchMode)a->fetch_mode;
            BLI_assert(desc.vertex_descriptor.attributes[mtl_attr.location].format !=
                       MTLVertexFormatInvalid);
          }
          desc.vertex_descriptor.attributes[mtl_attr.location].offset = attribute_offset;
          desc.vertex_descriptor.attributes[mtl_attr.location].buffer_index = buffer_index;
          desc.vertex_descriptor.max_attribute_value =
              ((mtl_attr.location) > desc.vertex_descriptor.max_attribute_value) ?
                  (mtl_attr.location) :
                  desc.vertex_descriptor.max_attribute_value;
          desc.vertex_descriptor.total_attributes++;
          /* SSBO Vertex Fetch attribute bind. */
          if (active_shader_->get_uses_ssbo_vertex_fetch()) {
            BLI_assert_msg(desc.vertex_descriptor.attributes[mtl_attr.location].format ==
                               mtl_attr.format,
                           "SSBO Vertex Fetch does not support attribute conversion.");

            /* NOTE(review): attribute-type conversion call restored from a dropped line. */
            MTLSSBOAttribute ssbo_attr(
                mtl_attr.index,
                buffer_index,
                attribute_offset,
                buffer_stride,
                MTLShader::ssbo_vertex_type_to_attr_type(
                    desc.vertex_descriptor.attributes[mtl_attr.location].format),
                instanced);

            active_shader_->ssbo_vertex_fetch_bind_attribute(ssbo_attr);
            desc.vertex_descriptor.ssbo_attributes[desc.vertex_descriptor.num_ssbo_attributes] =
                ssbo_attr;
            desc.vertex_descriptor.num_ssbo_attributes++;
          }

          /* NOTE: We are setting max_attribute_value to be up to the maximum found index, because
           * of this, it is possible that we may skip over certain attributes if they were not in
           * the source GPUVertFormat. */
          MTL_LOG_INFO(
              " -- Batch Attribute(%d): ORIG Shader Format: %d, ORIG Vert format: %d, Vert "
              "components: %d, Fetch Mode %d --> FINAL FORMAT: %d",
              mtl_attr.location,
              (int)mtl_attr.format,
              (int)a->comp_type,
              (int)a->comp_len,
              (int)a->fetch_mode,
              (int)desc.vertex_descriptor.attributes[mtl_attr.location].format);

          MTL_LOG_INFO(
              " -- [Batch] matching %s attribute '%s' (Attribute Index: %d, Buffer index: %d, "
              "offset: %d)",
              (instanced) ? "instance" : "vertex",
              name,
              mtl_attr.location,
              buffer_index,
              attribute_offset);
        }
      }
    }
  }
  if (buffer_added) {
    return buffer_index;
  }
  return -1;
}
410
/**
 * Perform all per-draw setup for this batch: resolve the active shader, build the
 * vertex descriptor, upload/bind vertex and index buffers, and ensure the render
 * pipeline state is ready.
 *
 * \param v_count: Vertex (or index) count for the upcoming draw; forwarded to
 * SSBO-vertex-fetch uniform setup when that mode is active.
 * \return The active render command encoder, or nil when no draw can proceed.
 *
 * NOTE(review): several lines were dropped from this function during extraction
 * (context fetch, `buffers` declaration, render-pass-state fetch, SSBO index-buffer
 * bind, cached-binding guard, assert heads). They have been restored below and should
 * be verified against the upstream sources.
 */
id<MTLRenderCommandEncoder> MTLBatch::bind(uint v_count)
{
  /* Setup draw call and render pipeline state here. Called by every draw, but setup here so that
   * MTLDrawList only needs to perform setup a single time. */
  BLI_assert(this);

  /* Fetch active Metal context. */
  MTLContext *ctx = MTLContext::get();
  if (!ctx) {
    BLI_assert_msg(false, "No context available for rendering.");
    return nil;
  }

  /* Fetch bound shader from context. */
  active_shader_ = static_cast<MTLShader *>(ctx->shader);

  if (active_shader_ == nullptr || !active_shader_->is_valid()) {
    /* Skip drawing if there is no valid Metal shader.
     * This will occur if the path through which the shader is prepared
     * is invalid (e.g. Python without create-info), or, the source shader uses a geometry pass. */
    BLI_assert_msg(false, "No valid Metal shader!");
    return nil;
  }

  /* Check if using SSBO Fetch Mode.
   * This is an alternative drawing mode to geometry shaders, wherein vertex buffers
   * are bound as readable (random-access) GPU buffers and certain descriptor properties
   * are passed using Shader uniforms. */
  bool uses_ssbo_fetch = active_shader_->get_uses_ssbo_vertex_fetch();

  /* Prepare Vertex Descriptor and extract VertexBuffers to bind. */
  MTLVertBuf *buffers[GPU_BATCH_VBO_MAX_LEN] = {nullptr};
  int num_buffers = 0;

  /* Ensure Index Buffer is ready. */
  MTLIndexBuf *mtl_elem = static_cast<MTLIndexBuf *>(reinterpret_cast<IndexBuf *>(this->elem));
  if (mtl_elem != nullptr) {
    mtl_elem->upload_data();
  }

  /* Populate vertex descriptor with attribute binding information.
   * The vertex descriptor and buffer layout descriptors describe
   * how vertex data from bound vertex buffers maps to the
   * shader's input.
   * A unique vertex descriptor will result in a new PipelineStateObject
   * being generated for the currently bound shader. */
  prepare_vertex_descriptor_and_bindings(buffers, num_buffers);

  /* Prepare Vertex Buffers - Run before RenderCommandEncoder in case BlitCommandEncoder buffer
   * data operations are required. */
  for (int i = 0; i < num_buffers; i++) {
    MTLVertBuf *buf_at_index = buffers[i];
    if (buf_at_index == nullptr) {
      BLI_assert_msg(
          false,
          "Total buffer count does not match highest buffer index, could be gaps in bindings");
      continue;
    }
    /* `buffers` already stores `MTLVertBuf *`, so no cast round-trip is needed. */
    buf_at_index->bind();
  }

  /* Ensure render pass is active and fetch active RenderCommandEncoder. */
  id<MTLRenderCommandEncoder> rec = ctx->ensure_begin_render_pass();

  /* Fetch RenderPassState to enable resource binding for active pass. */
  MTLRenderPassState &rps = ctx->main_command_buffer.get_render_pass_state();

  /* Debug Check: Ensure Frame-buffer instance is not dirty.
   * NOTE(review): the assert that performed this check was dropped during extraction
   * and could not be reconstructed confidently -- restore from upstream. */

  /* Bind Shader. */
  this->shader_bind();

  /* GPU debug markers. */
  if (G.debug & G_DEBUG_GPU) {
    [rec pushDebugGroup:[NSString stringWithFormat:@"Draw Commands%@ (GPUShader: %s)",
                                                   this->elem ? @"(indexed)" : @"",
                                                   active_shader_->get_interface()->get_name()]];
    [rec insertDebugSignpost:[NSString
                                 stringWithFormat:@"Draw Commands %@ (GPUShader: %s)",
                                                  this->elem ? @"(indexed)" : @"",
                                                  active_shader_->get_interface()->get_name()]];
  }

  /*** Bind Vertex Buffers and Index Buffers **/

  /* SSBO Vertex Fetch Buffer bindings. */
  if (uses_ssbo_fetch) {

    /* SSBO Vertex Fetch - Bind Index Buffer to appropriate slot -- if used. */
    id<MTLBuffer> idx_buffer = nil;
    GPUPrimType final_prim_type = this->prim_type;

    if (mtl_elem != nullptr) {

      /* Fetch index buffer. This function can situationally return an optimized
       * index buffer of a different primitive type. If this is the case, `final_prim_type`
       * and `v_count` will be updated with the new format.
       * NOTE: For indexed rendering, v_count represents the number of indices. */
      idx_buffer = mtl_elem->get_index_buffer(final_prim_type, v_count);
      BLI_assert(idx_buffer != nil);

      /* Update uniforms for SSBO-vertex-fetch-mode indexed rendering to flag usage. */
      int &uniform_ssbo_index_mode_u16 = active_shader_->uni_ssbo_uses_index_mode_u16;
      BLI_assert(uniform_ssbo_index_mode_u16 != -1);
      int uses_index_mode_u16 = (mtl_elem->index_type_ == GPU_INDEX_U16) ? 1 : 0;
      active_shader_->uniform_int(uniform_ssbo_index_mode_u16, 1, 1, &uses_index_mode_u16);

      BLI_assert(active_shader_->uni_ssbo_index_base_loc != -1);
      int index_base = (int)mtl_elem->index_base_;
      active_shader_->uniform_int(active_shader_->uni_ssbo_index_base_loc, 1, 1, &index_base);
    }
    else {
      idx_buffer = ctx->get_null_buffer();
    }
    /* NOTE(review): restored from a dropped line -- binds the (possibly null) index
     * buffer to its dedicated SSBO-vertex-fetch slot. Verify against upstream. */
    rps.bind_vertex_buffer(idx_buffer, 0, MTL_SSBO_VERTEX_FETCH_IBO_INDEX);

    /* Ensure all attributes are set. */
    active_shader_->ssbo_vertex_fetch_bind_attributes_end(rec);

    /* Bind NULL Buffers for unused vertex data slots. */
    id<MTLBuffer> null_buffer = ctx->get_null_buffer();
    BLI_assert(null_buffer != nil);
    for (int i = num_buffers; i < MTL_SSBO_VERTEX_FETCH_MAX_VBOS; i++) {
      /* NOTE(review): guard restored from a dropped line -- only bind the null buffer
       * where no buffer is already cached. Verify against upstream. */
      if (rps.cached_vertex_buffer_bindings[i].metal_buffer == nil) {
        rps.bind_vertex_buffer(null_buffer, 0, i);
      }
    }

    /* Flag whether Indexed rendering is used or not. */
    int &uniform_ssbo_use_indexed = active_shader_->uni_ssbo_uses_indexed_rendering;
    BLI_assert(uniform_ssbo_use_indexed != -1);
    int uses_indexed_rendering = (mtl_elem != nullptr) ? 1 : 0;
    active_shader_->uniform_int(uniform_ssbo_use_indexed, 1, 1, &uses_indexed_rendering);

    /* Set SSBO-fetch-mode status uniforms. */
    BLI_assert(active_shader_->uni_ssbo_input_prim_type_loc != -1);
    BLI_assert(active_shader_->uni_ssbo_input_vert_count_loc != -1);
    GPU_shader_uniform_int_ex(reinterpret_cast<GPUShader *>(wrap(active_shader_)),
                              active_shader_->uni_ssbo_input_prim_type_loc,
                              1,
                              1,
                              (const int *)(&final_prim_type));
    GPU_shader_uniform_int_ex(reinterpret_cast<GPUShader *>(wrap(active_shader_)),
                              active_shader_->uni_ssbo_input_vert_count_loc,
                              1,
                              1,
                              (const int *)(&v_count));
  }

  /* Ensure Context Render Pipeline State is fully setup and ready to execute the draw.
   * This should happen after all other final rendering setup is complete. */
  MTLPrimitiveType mtl_prim_type = gpu_prim_type_to_metal(this->prim_type);
  if (!ctx->ensure_render_pipeline_state(mtl_prim_type)) {
    MTL_LOG_ERROR("Failed to prepare and apply render pipeline state.");
    BLI_assert(false);
    return nil;
  }

  /* Bind Vertex Buffers. */
  for (int i = 0; i < num_buffers; i++) {
    MTLVertBuf *buf_at_index = buffers[i];
    if (buf_at_index == nullptr) {
      BLI_assert_msg(
          false,
          "Total buffer count does not match highest buffer index, could be gaps in bindings");
      continue;
    }
    /* Flag usage for resource lifetime tracking. */
    buf_at_index->flag_used();

    /* Fetch buffer from MTLVertexBuffer and bind. */
    id<MTLBuffer> mtl_buffer = buf_at_index->get_metal_buffer();

    BLI_assert(mtl_buffer != nil);
    rps.bind_vertex_buffer(mtl_buffer, 0, i);
  }

  /* Return Render Command Encoder used with setup. */
  return rec;
}
595
void MTLBatch::unbind(id<MTLRenderCommandEncoder> rec)
{
  /* Close the debug group opened in MTLBatch::bind (GPU debugging builds only). */
  if ((G.debug & G_DEBUG_GPU) == 0) {
    return;
  }
  [rec popDebugGroup];
}
603
/**
 * Populate the context's vertex descriptor for the active shader and resolve which
 * vertex/instance buffers must be bound, using the batch's VAO cache when possible.
 *
 * \param buffers: Out array (GPU_BATCH_VBO_MAX_LEN entries) receiving the buffers to bind.
 * \param num_buffers: Out count of buffer binding slots in use (highest index + 1).
 *
 * NOTE(review): several lines were dropped during extraction (pipeline-descriptor
 * fetch, descriptor reset, Span length arguments, SSBO begin call and bind loop,
 * printf argument, log-call head). They have been restored below and should be
 * verified against the upstream sources.
 */
void MTLBatch::prepare_vertex_descriptor_and_bindings(MTLVertBuf **buffers, int &num_buffers)
{

  /* Here we populate the MTLContext vertex descriptor and resolve which buffers need to be bound.
   */
  MTLStateManager *state_manager = static_cast<MTLStateManager *>(
      MTLContext::get()->state_manager);
  MTLRenderPipelineStateDescriptor &desc = state_manager->get_pipeline_descriptor();
  const MTLShaderInterface *interface = active_shader_->get_interface();
  uint16_t attr_mask = interface->get_enabled_attribute_mask();

  /* Reset vertex descriptor to default state. */
  desc.reset_vertex_descriptor();

  /* Fetch Vertex and Instance Buffers. */
  Span<MTLVertBuf *> mtl_verts(reinterpret_cast<MTLVertBuf **>(this->verts),
                               GPU_BATCH_VBO_MAX_LEN);
  Span<MTLVertBuf *> mtl_inst(reinterpret_cast<MTLVertBuf **>(this->inst),
                              GPU_BATCH_INST_VBO_MAX_LEN);

  /* SSBO Vertex fetch also passes vertex descriptor information into the shader. */
  if (active_shader_->get_uses_ssbo_vertex_fetch()) {
    active_shader_->ssbo_vertex_fetch_bind_attributes_begin();
  }

  /* Resolve Metal vertex buffer bindings. */
  /* Vertex Descriptors
   * ------------------
   * Vertex Descriptors are required to generate a pipeline state, based on the current Batch's
   * buffer bindings. These bindings are a unique matching, depending on what input attributes a
   * batch has in its buffers, and those which are supported by the shader interface.
   *
   * We iterate through the buffers and resolve which attributes satisfy the requirements of the
   * currently bound shader. We cache this data, for a given Batch<->ShderInterface pairing in a
   * VAO cache to avoid the need to recalculate this data. */
  bool buffer_is_instanced[GPU_BATCH_VBO_MAX_LEN] = {false};

  VertexDescriptorShaderInterfacePair *descriptor = this->vao_cache.find(interface);
  if (descriptor) {
    desc.vertex_descriptor = descriptor->vertex_descriptor;
    attr_mask = descriptor->attr_mask;
    num_buffers = descriptor->num_buffers;

    for (int bid = 0; bid < GPU_BATCH_VBO_MAX_LEN; ++bid) {
      if (descriptor->bufferIds[bid].used) {
        if (descriptor->bufferIds[bid].is_instance) {
          buffers[bid] = mtl_inst[descriptor->bufferIds[bid].id];
          buffer_is_instanced[bid] = true;
        }
        else {
          buffers[bid] = mtl_verts[descriptor->bufferIds[bid].id];
          buffer_is_instanced[bid] = false;
        }
      }
    }

    /* Use cached ssbo attribute binding data.
     * NOTE(review): loop restored from dropped lines -- verify against upstream. */
    if (active_shader_->get_uses_ssbo_vertex_fetch()) {
      for (int attr_id = 0; attr_id < desc.vertex_descriptor.num_ssbo_attributes; attr_id++) {
        active_shader_->ssbo_vertex_fetch_bind_attribute(
            desc.vertex_descriptor.ssbo_attributes[attr_id]);
      }
    }
  }
  else {
    VertexDescriptorShaderInterfacePair pair{};
    pair.interface = interface;

    for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN; ++i) {
      pair.bufferIds[i].id = -1;
      pair.bufferIds[i].is_instance = 0;
      pair.bufferIds[i].used = 0;
    }
    /* NOTE: Attribute extraction order from buffer is the reverse of the OpenGL as we flag once an
     * attribute is found, rather than pre-setting the mask. */
    /* Extract Instance attributes (These take highest priority). */
    for (int v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
      if (mtl_inst[v]) {
        MTL_LOG_INFO(" -- [Batch] Checking bindings for bound instance buffer %p", mtl_inst[v]);
        int buffer_ind = this->prepare_vertex_binding(
            mtl_inst[v], desc, interface, attr_mask, true);
        if (buffer_ind >= 0) {
          buffers[buffer_ind] = mtl_inst[v];
          buffer_is_instanced[buffer_ind] = true;

          pair.bufferIds[buffer_ind].id = v;
          pair.bufferIds[buffer_ind].used = 1;
          pair.bufferIds[buffer_ind].is_instance = 1;
          num_buffers = ((buffer_ind + 1) > num_buffers) ? (buffer_ind + 1) : num_buffers;
        }
      }
    }

    /* Extract Vertex attributes (First-bound vertex buffer takes priority). */
    for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
      if (mtl_verts[v] != nullptr) {
        MTL_LOG_INFO(" -- [Batch] Checking bindings for bound vertex buffer %p", mtl_verts[v]);
        int buffer_ind = this->prepare_vertex_binding(
            mtl_verts[v], desc, interface, attr_mask, false);
        if (buffer_ind >= 0) {
          buffers[buffer_ind] = mtl_verts[v];
          buffer_is_instanced[buffer_ind] = false;

          pair.bufferIds[buffer_ind].id = v;
          pair.bufferIds[buffer_ind].used = 1;
          pair.bufferIds[buffer_ind].is_instance = 0;
          num_buffers = ((buffer_ind + 1) > num_buffers) ? (buffer_ind + 1) : num_buffers;
        }
      }
    }

    /* Add to VertexDescriptor cache. */
    pair.attr_mask = attr_mask;
    pair.vertex_descriptor = desc.vertex_descriptor;
    pair.num_buffers = num_buffers;
    if (!this->vao_cache.insert(pair)) {
      printf(
          "[Performance Warning] cache is full (Size: %d), vertex descriptor will not be cached\n",
          GPU_VAO_STATIC_LEN);
    }
  }

/* DEBUG: verify if our attribute bindings have been fully provided as expected. */
#if MTL_DEBUG_SHADER_ATTRIBUTES == 1
  if (attr_mask != 0) {
    /* Attributes are not necessarily contiguous. */
    for (int i = 0; i < active_shader_->get_interface()->get_total_attributes(); i++) {
      const MTLShaderInputAttribute &attr = active_shader_->get_interface()->get_attribute(i);
      if (attr_mask & (1 << attr.location)) {
        MTL_LOG_WARNING(
            "Warning: Missing expected attribute '%s' with location: %u in shader %s (attr "
            "number: %u)",
            active_shader_->get_interface()->get_name_at_offset(attr.name_offset),
            attr.location,
            active_shader_->name_get(),
            i);

        /* If an attribute is not included, then format in vertex descriptor should be invalid due
         * to nil assignment. */
        BLI_assert(desc.vertex_descriptor.attributes[attr.location].format ==
                   MTLVertexFormatInvalid);
      }
    }
  }
#endif
}
752
/**
 * Issue the actual draw for this batch, choosing between SSBO-vertex-fetch,
 * non-indexed (with optional topology emulation) and indexed submission paths.
 *
 * \param v_first: First vertex (or index offset for indexed draws).
 * \param v_count: Vertex count (index count for indexed draws).
 * \param i_first: First instance.
 * \param i_count: Instance count.
 *
 * NOTE(review): the SSBO-vertex-fetch output vertex-count expression and its assert
 * were dropped during extraction and have been restored below -- verify against the
 * upstream sources.
 */
void MTLBatch::draw_advanced(int v_first, int v_count, int i_first, int i_count)
{

#if TRUST_NO_ONE
  BLI_assert(v_count > 0 && i_count > 0);
#endif

  /* Setup RenderPipelineState for batch. */
  MTLContext *ctx = MTLContext::get();
  id<MTLRenderCommandEncoder> rec = this->bind(v_count);
  if (rec == nil) {
    /* End of draw. */
    this->unbind(rec);
    return;
  }

  /* Fetch IndexBuffer and resolve primitive type. */
  MTLIndexBuf *mtl_elem = static_cast<MTLIndexBuf *>(reinterpret_cast<IndexBuf *>(this->elem));
  MTLPrimitiveType mtl_prim_type = gpu_prim_type_to_metal(this->prim_type);

  /* Render using SSBO Vertex Fetch. */
  if (active_shader_->get_uses_ssbo_vertex_fetch()) {

    /* Submit draw call with modified vertex count, which reflects vertices per primitive defined
     * in the USE_SSBO_VERTEX_FETCH pragma. */
    int num_input_primitives = gpu_get_prim_count_from_type(v_count, this->prim_type);
    int output_num_verts = num_input_primitives *
                           active_shader_->get_ssbo_vertex_fetch_output_num_verts();
    BLI_assert_msg(
        mtl_vertex_count_fits_primitive_type(
            output_num_verts, active_shader_->get_ssbo_vertex_fetch_output_prim_type()),
        "Output Vertex count is not compatible with the requested output vertex primitive type");

    /* Set depth stencil state (requires knowledge of primitive type). */
    ctx->ensure_depth_stencil_state(active_shader_->get_ssbo_vertex_fetch_output_prim_type());

    [rec drawPrimitives:active_shader_->get_ssbo_vertex_fetch_output_prim_type()
            vertexStart:0
            vertexCount:output_num_verts
          instanceCount:i_count
           baseInstance:i_first];
    ctx->main_command_buffer.register_draw_counters(output_num_verts * i_count);
  }
  /* Perform regular draw. */
  else if (mtl_elem == nullptr) {

    /* Primitive Type toplogy emulation. */
    if (mtl_needs_topology_emulation(this->prim_type)) {

      /* Generate index buffer for primitive types requiring emulation. */
      GPUPrimType emulated_prim_type = this->prim_type;
      uint32_t emulated_v_count = v_count;
      id<MTLBuffer> generated_index_buffer = this->get_emulated_toplogy_buffer(emulated_prim_type,
                                                                               emulated_v_count);
      BLI_assert(generated_index_buffer != nil);

      MTLPrimitiveType emulated_mtl_prim_type = gpu_prim_type_to_metal(emulated_prim_type);

      /* Temp: Disable culling for emulated primitive types.
       * TODO(Metal): Support face winding in topology buffer. */
      [rec setCullMode:MTLCullModeNone];

      if (generated_index_buffer != nil) {
        BLI_assert(emulated_mtl_prim_type == MTLPrimitiveTypeTriangle ||
                   emulated_mtl_prim_type == MTLPrimitiveTypeLine);
        if (emulated_mtl_prim_type == MTLPrimitiveTypeTriangle) {
          BLI_assert(emulated_v_count % 3 == 0);
        }
        if (emulated_mtl_prim_type == MTLPrimitiveTypeLine) {
          BLI_assert(emulated_v_count % 2 == 0);
        }

        /* Set depth stencil state (requires knowledge of primitive type). */
        ctx->ensure_depth_stencil_state(emulated_mtl_prim_type);

        [rec drawIndexedPrimitives:emulated_mtl_prim_type
                        indexCount:emulated_v_count
                         indexType:MTLIndexTypeUInt32
                       indexBuffer:generated_index_buffer
                 indexBufferOffset:0
                     instanceCount:i_count
                        baseVertex:v_first
                      baseInstance:i_first];
      }
      else {
        printf("[Note] Cannot draw batch -- Emulated Topology mode: %u not yet supported\n",
               this->prim_type);
      }
    }
    else {
      /* Set depth stencil state (requires knowledge of primitive type). */
      ctx->ensure_depth_stencil_state(mtl_prim_type);

      /* Issue draw call. */
      [rec drawPrimitives:mtl_prim_type
              vertexStart:v_first
              vertexCount:v_count
            instanceCount:i_count
             baseInstance:i_first];
    }
    ctx->main_command_buffer.register_draw_counters(v_count * i_count);
  }
  /* Perform indexed draw. */
  else {

    MTLIndexType index_type = MTLIndexBuf::gpu_index_type_to_metal(mtl_elem->index_type_);
    uint32_t base_index = mtl_elem->index_base_;
    uint32_t index_size = (mtl_elem->index_type_ == GPU_INDEX_U16) ? 2 : 4;
    uint32_t v_first_ofs = ((v_first + mtl_elem->index_start_) * index_size);
    BLI_assert_msg((v_first_ofs % index_size) == 0,
                   "Index offset is not 2/4-byte aligned as per METAL spec");

    /* Fetch index buffer. May return an index buffer of a differing format,
     * if index buffer optimization is used. In these cases, final_prim_type and
     * index_count get updated with the new properties. */
    GPUPrimType final_prim_type = this->prim_type;
    uint index_count = v_count;

    id<MTLBuffer> index_buffer = mtl_elem->get_index_buffer(final_prim_type, index_count);
    mtl_prim_type = gpu_prim_type_to_metal(final_prim_type);
    BLI_assert(index_buffer != nil);

    if (index_buffer != nil) {

      /* Set depth stencil state (requires knowledge of primitive type). */
      ctx->ensure_depth_stencil_state(mtl_prim_type);

      /* Issue draw call. */
      [rec drawIndexedPrimitives:mtl_prim_type
                      indexCount:index_count
                       indexType:index_type
                     indexBuffer:index_buffer
               indexBufferOffset:v_first_ofs
                   instanceCount:i_count
                      baseVertex:base_index
                    baseInstance:i_first];
      ctx->main_command_buffer.register_draw_counters(index_count * i_count);
    }
    else {
      BLI_assert_msg(false, "Index buffer does not have backing Metal buffer");
    }
  }

  /* End of draw. */
  this->unbind(rec);
}
899
900void MTLBatch::draw_advanced_indirect(GPUStorageBuf *indirect_buf, intptr_t offset)
901{
902 /* Setup RenderPipelineState for batch. */
903 MTLContext *ctx = MTLContext::get();
904 id<MTLRenderCommandEncoder> rec = this->bind(0);
905 if (rec == nil) {
906 printf("Failed to open Render Command encoder for DRAW INDIRECT\n");
907
908 /* End of draw. */
909 this->unbind(rec);
910 return;
911 }
912
913 /* Fetch indirect buffer Metal handle. */
914 MTLStorageBuf *mtlssbo = static_cast<MTLStorageBuf *>(unwrap(indirect_buf));
915 id<MTLBuffer> mtl_indirect_buf = mtlssbo->get_metal_buffer();
916 BLI_assert(mtl_indirect_buf != nil);
917 if (mtl_indirect_buf == nil) {
918 MTL_LOG_WARNING("Metal Indirect Draw Storage Buffer is nil.");
919
920 /* End of draw. */
921 this->unbind(rec);
922 return;
923 }
924
925 /* Indirect SSBO vertex fetch calls require the draw command in the buffer to be mutated at
926 * command encoding time. This takes place within the draw manager when a shader supporting
927 * SSBO Vertex-Fetch is used. */
928 if (active_shader_->get_uses_ssbo_vertex_fetch())
929 { /* Set depth stencil state (requires knowledge of primitive type). */
930 ctx->ensure_depth_stencil_state(active_shader_->get_ssbo_vertex_fetch_output_prim_type());
931
932 /* Issue draw call. */
933 [rec drawPrimitives:active_shader_->get_ssbo_vertex_fetch_output_prim_type()
934 indirectBuffer:mtl_indirect_buf
935 indirectBufferOffset:offset];
936 ctx->main_command_buffer.register_draw_counters(1);
937
938 /* End of draw. */
939 this->unbind(rec);
940 return;
941 }
942
943 /* Unsupported primitive type check. */
944 BLI_assert_msg(this->prim_type != GPU_PRIM_TRI_FAN,
945 "TriangleFan is not supported in Metal for Indirect draws.");
946
947 /* Fetch IndexBuffer and resolve primitive type. */
948 MTLIndexBuf *mtl_elem = static_cast<MTLIndexBuf *>(reinterpret_cast<IndexBuf *>(this->elem));
949 MTLPrimitiveType mtl_prim_type = gpu_prim_type_to_metal(this->prim_type);
950
951 if (mtl_needs_topology_emulation(this->prim_type)) {
952 BLI_assert_msg(false, "Metal Topology emulation unsupported for draw indirect.\n");
953
954 /* End of draw. */
955 this->unbind(rec);
956 return;
957 }
958
959 if (mtl_elem == NULL) {
960 /* Set depth stencil state (requires knowledge of primitive type). */
961 ctx->ensure_depth_stencil_state(mtl_prim_type);
962
963 /* Issue draw call. */
964 [rec drawPrimitives:mtl_prim_type indirectBuffer:mtl_indirect_buf indirectBufferOffset:offset];
965 ctx->main_command_buffer.register_draw_counters(1);
966 }
967 else {
968 /* Fetch index buffer. May return an index buffer of a differing format,
969 * if index buffer optimization is used. In these cases, final_prim_type and
970 * index_count get updated with the new properties. */
971 MTLIndexType index_type = MTLIndexBuf::gpu_index_type_to_metal(mtl_elem->index_type_);
972 GPUPrimType final_prim_type = this->prim_type;
973 uint index_count = 0;
974
975 /* Disable index optimization for indirect draws. */
976 mtl_elem->flag_can_optimize(false);
977
978 id<MTLBuffer> index_buffer = mtl_elem->get_index_buffer(final_prim_type, index_count);
979 mtl_prim_type = gpu_prim_type_to_metal(final_prim_type);
980 BLI_assert(index_buffer != nil);
981
982 if (index_buffer != nil) {
983
984 /* Set depth stencil state (requires knowledge of primitive type). */
985 ctx->ensure_depth_stencil_state(mtl_prim_type);
986
987 /* Issue draw call. */
988 [rec drawIndexedPrimitives:mtl_prim_type
989 indexType:index_type
990 indexBuffer:index_buffer
991 indexBufferOffset:0
992 indirectBuffer:mtl_indirect_buf
993 indirectBufferOffset:offset];
994 ctx->main_command_buffer.register_draw_counters(1);
995 }
996 else {
997 BLI_assert_msg(false, "Index buffer does not have backing Metal buffer");
998 }
999 }
1000
1001 /* End of draw. */
1002 this->unbind(rec);
1003}
1004
1007/* -------------------------------------------------------------------- */
1011id<MTLBuffer> MTLBatch::get_emulated_toplogy_buffer(GPUPrimType &in_out_prim_type,
1012 uint32_t &in_out_v_count)
1013{
1014
1015 BLI_assert(in_out_v_count > 0);
1016 /* Determine emulated primitive types. */
1017 GPUPrimType input_prim_type = in_out_prim_type;
1018 uint32_t v_count = in_out_v_count;
1019 GPUPrimType output_prim_type;
1020 switch (input_prim_type) {
1021 case GPU_PRIM_POINTS:
1022 case GPU_PRIM_LINES:
1023 case GPU_PRIM_TRIS:
1024 BLI_assert_msg(false, "Optimal primitive types should not reach here.");
1025 return nil;
1026 break;
1027 case GPU_PRIM_LINES_ADJ:
1028 case GPU_PRIM_TRIS_ADJ:
1029 BLI_assert_msg(false, "Adjacency primitive types should not reach here.");
1030 return nil;
1031 break;
1033 case GPU_PRIM_LINE_LOOP:
1035 output_prim_type = GPU_PRIM_LINES;
1036 break;
1037 case GPU_PRIM_TRI_STRIP:
1038 case GPU_PRIM_TRI_FAN:
1039 output_prim_type = GPU_PRIM_TRIS;
1040 break;
1041 default:
1042 BLI_assert_msg(false, "Invalid primitive type.");
1043 return nil;
1044 }
1045
1046 /* Check if topology buffer exists and is valid. */
1047 if (this->emulated_topology_buffer_ != nullptr &&
1048 (emulated_topology_type_ != input_prim_type || topology_buffer_input_v_count_ != v_count))
1049 {
1050
1051 /* Release existing topology buffer. */
1052 emulated_topology_buffer_->free();
1053 emulated_topology_buffer_ = nullptr;
1054 }
1055
1056 /* Generate new topology index buffer. */
1057 if (this->emulated_topology_buffer_ == nullptr) {
1058 /* Calculate IB len. */
1059 uint32_t output_prim_count = 0;
1060 switch (input_prim_type) {
1063 output_prim_count = v_count - 1;
1064 break;
1065 case GPU_PRIM_LINE_LOOP:
1066 output_prim_count = v_count;
1067 break;
1068 case GPU_PRIM_TRI_STRIP:
1069 case GPU_PRIM_TRI_FAN:
1070 output_prim_count = v_count - 2;
1071 break;
1072 default:
1073 BLI_assert_msg(false, "Cannot generate optimized topology buffer for other types.");
1074 break;
1075 }
1076 uint32_t output_IB_elems = output_prim_count * ((output_prim_type == GPU_PRIM_TRIS) ? 3 : 2);
1077
1078 /* Allocate buffer. */
1079 uint32_t buffer_bytes = output_IB_elems * 4;
1080 BLI_assert(buffer_bytes > 0);
1081 this->emulated_topology_buffer_ = MTLContext::get_global_memory_manager()->allocate(
1082 buffer_bytes, true);
1083
1084 /* Populate. */
1085 uint32_t *data = (uint32_t *)this->emulated_topology_buffer_->get_host_ptr();
1086 BLI_assert(data != nullptr);
1087
1088 /* TODO(Metal): Support inverse winding modes. */
1089 bool winding_clockwise = false;
1090 UNUSED_VARS(winding_clockwise);
1091
1092 switch (input_prim_type) {
1093 /* Line Loop. */
1094 case GPU_PRIM_LINE_LOOP: {
1095 int line = 0;
1096 for (line = 0; line < output_prim_count - 1; line++) {
1097 data[line * 2 + 0] = line + 0;
1098 data[line * 2 + 1] = line + 1;
1099 }
1100 /* Closing line. */
1101 data[line * 2 + 0] = line + 0;
1102 data[line * 2 + 1] = 0;
1103 } break;
1104
1105 /* Triangle Fan. */
1106 case GPU_PRIM_TRI_FAN: {
1107 for (int triangle = 0; triangle < output_prim_count; triangle++) {
1108 data[triangle * 3 + 0] = 0; /* Always 0 */
1109 data[triangle * 3 + 1] = triangle + 1;
1110 data[triangle * 3 + 2] = triangle + 2;
1111 }
1112 } break;
1113
1114 default:
1115 BLI_assert_msg(false, "Other primitive types do not require emulation.");
1116 return nil;
1117 }
1118
1119 /* Flush. */
1120 this->emulated_topology_buffer_->flush();
1121 /* Assign members relating to current cached IB. */
1122 topology_buffer_input_v_count_ = v_count;
1123 topology_buffer_output_v_count_ = output_IB_elems;
1124 emulated_topology_type_ = input_prim_type;
1125 }
1126
1127 /* Return. */
1128 in_out_v_count = topology_buffer_output_v_count_;
1129 in_out_prim_type = output_prim_type;
1130 return (emulated_topology_buffer_) ? emulated_topology_buffer_->get_metal_buffer() : nil;
1131}
1132
1135} // namespace blender::gpu
@ G_DEBUG_GPU
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:57
MINLINE int max_ii(int a, int b)
unsigned int uint
#define UNUSED_VARS(...)
#define GPU_BATCH_INST_VBO_MAX_LEN
Definition GPU_batch.hh:33
#define GPU_BATCH_VBO_MAX_LEN
Definition GPU_batch.hh:32
@ GPU_BATCH_INVALID
Definition GPU_batch.hh:39
@ GPU_BATCH_DIRTY
Definition GPU_batch.hh:58
GPUPrimType
@ GPU_PRIM_TRI_FAN
@ GPU_PRIM_LINE_LOOP
@ GPU_PRIM_LINE_STRIP_ADJ
@ GPU_PRIM_TRIS_ADJ
@ GPU_PRIM_LINES
@ GPU_PRIM_POINTS
@ GPU_PRIM_LINES_ADJ
@ GPU_PRIM_LINE_STRIP
@ GPU_PRIM_TRI_STRIP
@ GPU_PRIM_TRIS
int gpu_get_prim_count_from_type(uint vertex_len, GPUPrimType prim_type)
void GPU_shader_uniform_int_ex(GPUShader *shader, int location, int length, int array_size, const int *value)
BLI_INLINE const char * GPU_vertformat_attr_name_get(const GPUVertFormat *format, const GPUVertAttr *attr, uint n_idx)
GPUVertFetchMode
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT
GPUVertCompType
@ GPU_COMP_F32
struct GPUShader GPUShader
ATTR_WARN_UNUSED_RESULT const BMVert * v
GPUIndexBufType index_type_
void draw(int v_first, int v_count, int i_first, int i_count) override
Definition mtl_batch.mm:35
void draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset) override
Definition mtl_batch.mm:43
id< MTLRenderCommandEncoder > bind(uint v_count)
Definition mtl_batch.mm:411
void unbind(id< MTLRenderCommandEncoder > rec)
Definition mtl_batch.mm:596
gpu::MTLBuffer * allocate(uint64_t size, bool cpu_visible)
Definition mtl_memory.mm:96
void * get_host_ptr() const
id< MTLBuffer > get_metal_buffer() const
MTLRenderPassState & get_render_pass_state()
bool ensure_render_pipeline_state(MTLPrimitiveType prim_type)
id< MTLRenderCommandEncoder > ensure_begin_render_pass()
static MTLContext * get()
id< MTLBuffer > get_null_buffer()
MTLCommandBufferManager main_command_buffer
static MTLBufferPool * get_global_memory_manager()
static MTLIndexType gpu_index_type_to_metal(GPUIndexBufType type)
id< MTLBuffer > get_index_buffer(GPUPrimType &in_out_primitive_type, uint &in_out_v_count)
void bind_vertex_buffer(id< MTLBuffer > buffer, uint64_t buffer_offset, uint index)
BufferBindingCached cached_vertex_buffer_bindings[MTL_MAX_BUFFER_BINDINGS]
const char * get_name_at_offset(uint32_t offset) const
const MTLShaderInputAttribute & get_attribute(uint index) const
void ssbo_vertex_fetch_bind_attributes_begin()
void ssbo_vertex_fetch_bind_attribute(const MTLSSBOAttribute &ssbo_attr)
int get_ssbo_vertex_fetch_output_num_verts() const override
void uniform_int(int location, int comp_len, int array_size, const int *data) override
MTLPrimitiveType get_ssbo_vertex_fetch_output_prim_type()
static int ssbo_vertex_type_to_attr_type(MTLVertexFormat attribute_type)
void bind() override
MTLShaderInterface * get_interface()
void ssbo_vertex_fetch_bind_attributes_end(id< MTLRenderCommandEncoder > active_encoder)
bool get_uses_ssbo_vertex_fetch() const override
MTLRenderPipelineStateDescriptor & get_pipeline_descriptor()
Definition mtl_state.hh:59
const char *const name_get() const
#define printf
#define NULL
draw_view push_constant(Type::INT, "radiance_src") .push_constant(Type capture_info_buf storage_buf(1, Qualifier::READ, "ObjectBounds", "bounds_buf[]") .push_constant(Type draw_view int
static float verts[][3]
struct @620::@623 attr_id
format
descriptor
#define G(x, y, z)
#define GPU_VAO_STATIC_LEN
Definition mtl_batch.hh:26
#define MTL_MAX_VERTEX_INPUT_ATTRIBUTES
#define MTL_LOG_INFO(info,...)
Definition mtl_debug.hh:51
#define MTL_LOG_WARNING(info,...)
Definition mtl_debug.hh:44
#define MTL_LOG_ERROR(info,...)
Definition mtl_debug.hh:36
#define MTL_SSBO_VERTEX_FETCH_MAX_VBOS
#define GPU_SHADER_ATTR_TYPE_MAT4
#define MTL_SSBO_VERTEX_FETCH_IBO_INDEX
static Context * unwrap(GPUContext *ctx)
bool mtl_vertex_format_resize(MTLVertexFormat mtl_format, uint32_t components, MTLVertexFormat *r_convertedFormat)
static GPUContext * wrap(Context *ctx)
static MTLPrimitiveType gpu_prim_type_to_metal(GPUPrimType prim_type)
static bool mtl_vertex_count_fits_primitive_type(uint32_t vertex_count, MTLPrimitiveType prim_type)
static bool mtl_needs_topology_emulation(GPUPrimType prim_type)
bool mtl_convert_vertex_format(MTLVertexFormat shader_attrib_format, GPUVertCompType component_type, uint32_t component_length, GPUVertFetchMode fetch_mode, MTLVertexFormat *r_convertedFormat)
unsigned short uint16_t
Definition stdint.h:79
unsigned int uint32_t
Definition stdint.h:80
_W64 int intptr_t
Definition stdint.h:118
MTLSSBOAttribute ssbo_attributes[GPU_VERT_ATTR_MAX_LEN]
MTLVertexAttributeDescriptorPSO attributes[GPU_VERT_ATTR_MAX_LEN]
char * buffers[2]
uint8_t flag
Definition wm_window.cc:138