/* MTLStorageBuf constructor variants: each records which resource backs this
 * SSBO view. The default variants own their storage; the others wrap an
 * existing uniform, vertex, or index buffer, or a texture. */
storage_source_ = MTL_STORAGE_BUF_TYPE_DEFAULT;
/* ... */
storage_source_ = MTL_STORAGE_BUF_TYPE_DEFAULT;
/* ... */
storage_source_ = MTL_STORAGE_BUF_TYPE_UNIFORMBUF;
/* ... */
storage_source_ = MTL_STORAGE_BUF_TYPE_VERTBUF;
/* ... */
storage_source_ = MTL_STORAGE_BUF_TYPE_INDEXBUF;
/* ... */
storage_source_ = MTL_STORAGE_BUF_TYPE_TEXTURE;
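/* The wrapper variants allow an existing UBO/VBO/IBO or texture to be bound
 * where a shader expects an SSBO without copying; storage_source_ selects
 * which member (uniform_buffer_, vertex_buffer_, index_buffer_, texture_) is
 * consulted in get_metal_buffer() at the end of this listing. */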
/* ~MTLStorageBuf(): only a default-sourced buffer owns its backing
 * allocation (freeing elided); the write fence is released unconditionally. */
if (storage_source_ == MTL_STORAGE_BUF_TYPE_DEFAULT) {
  /* ... */
}
if (gpu_write_fence_ != nil) {
  [gpu_write_fence_ release];
  gpu_write_fence_ = nil;
}
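/* gpu_write_fence_ is an MTLSharedEvent created lazily in
 * async_flush_to_host() below; read() polls its signaledValue so the host
 * never touches staging memory before the GPU write has landed. */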
/* Guard reused on the allocation/update paths: wrapped resources do not go
 * through the default SSBO storage path. */
if (storage_source_ != MTL_STORAGE_BUF_TYPE_DEFAULT) {
  /* ... */
}
/* Upload path: write host data directly when the buffer is host-visible,
 * otherwise stage it and blit into the device-only allocation. Truncated
 * calls below are completed with assumed arguments. */
if (storage_source_ != MTL_STORAGE_BUF_TYPE_DEFAULT) {
  /* ... */
}
bool do_upload_data = (data != nullptr);
if (!device_only && do_upload_data) {
  /* Host-visible storage: data is written in place (elided). */
}
if (do_upload_data) {
  id<MTLBuffer> dst_buf = this->metal_buffer_->get_metal_buffer();
  id<MTLBlitCommandEncoder> blit_encoder =
      ctx->main_command_buffer.ensure_begin_blit_encoder();
  [blit_encoder copyFromBuffer:staging_buf_mtl
                  sourceOffset:0
                      toBuffer:dst_buf
             destinationOffset:0
                          size:size_in_bytes_];
}
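/* A plausible sketch of the elided staging setup, using the buffer-pool API
 * this file relies on (allocate(), get_host_ptr(), flush_range() and
 * get_metal_buffer() are pool methods; the exact call site is assumed): */
gpu::MTLBuffer *staging_buf = MTLContext::get_global_memory_manager()->allocate(
    size_in_bytes_, /*cpu_visible=*/true);
memcpy(staging_buf->get_host_ptr(), data, size_in_bytes_);
staging_buf->flush_range(0, size_in_bytes_);
id<MTLBuffer> staging_buf_mtl = staging_buf->get_metal_buffer();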
/* bind(int slot): reject out-of-range slots, flush pending data, then record
 * the binding. The enclosing call around the message is assumed; only the
 * format string survived in the original listing. */
fprintf(stderr,
        "Error: Trying to bind \"%s\" ssbo to slot %d which is above the reported limit of %d.\n",
        name_,
        slot,
        MTL_MAX_BUFFER_BINDINGS);
/* ... */
if (data_ != nullptr) {
  /* Data retained from creation is uploaded on first bind (elided). */
}
/* ... */
ctx_ssbo_bind_slot.ssbo = this;
ctx_ssbo_bind_slot.bound = true;
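/* Reconstructed shape of the per-context binding slot updated above; the
 * member names are visible here and in unbind(), but the struct layout
 * itself is an assumption: */
struct MTLStorageBufferBinding {
  bool bound = false;
  MTLStorageBuf *ssbo = nullptr;
};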
/* unbind(): clear this buffer's slot in the owning context. The reference
 * declaration is reconstructed; the original listing dropped the left-hand
 * side of the assignment. */
if (bound_ctx_ != nullptr && bind_slot_ > -1) {
  MTLStorageBufferBinding &ctx_ssbo_bind_slot =
      bound_ctx_->pipeline_state.ssbo_bindings[bind_slot_];
  if (ctx_ssbo_bind_slot.bound && ctx_ssbo_bind_slot.ssbo == this) {
    ctx_ssbo_bind_slot.bound = false;
    ctx_ssbo_bind_slot.ssbo = nullptr;
  }
}
/* ... */
bound_ctx_ = nullptr;
/* clear(uint32_t clear_value): a blit fill can only replicate a single byte,
 * so it is used only when all four bytes of the clear value are equal;
 * otherwise a compute shader writes the full 32-bit pattern. */
MTLContext *ctx = MTLContext::get(); /* Declaration assumed; the assert needs it. */
BLI_assert_msg(ctx, "Clears should always be performed while a valid context exists.");
/* ... */
uint clear_byte = clear_value & 0xFF;
bool clear_value_bytes_equal = (clear_byte == ((clear_value >> 8) & 0xFF)) &&
                               (clear_byte == ((clear_value >> 16) & 0xFF)) &&
                               (clear_byte == ((clear_value >> 24) & 0xFF));
if (clear_value_bytes_equal) {
  id<MTLBlitCommandEncoder> blit_encoder =
      ctx->main_command_buffer.ensure_begin_blit_encoder();
  /* ... fill elided (see sketch below). */
}
else {
  id<MTLComputeCommandEncoder> compute_encoder =
      ctx->main_command_buffer.ensure_begin_compute_encoder();
  /* ... PSO and buffer binding elided. One thread clears one 32-bit word. */
  [compute_encoder dispatchThreads:MTLSizeMake(size_in_bytes_ / sizeof(uint32_t), 1, 1)
            threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
}
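/* Sketch of the elided blit fast path; fillBuffer:range:value: takes a single
 * byte, which is exactly what the bytes-equal test above guarantees
 * (arguments are assumed): */
[blit_encoder fillBuffer:metal_buffer_->get_metal_buffer()
                   range:NSMakeRange(0, size_in_bytes_)
                   value:clear_byte];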
/* copy_sub(): blit copy from a vertex buffer. The early-out body and the
 * toBuffer:/size: arguments are reconstructed; the original truncated them. */
if (copy_size == 0) {
  return;
}
if (src->vbo_ == nullptr) { /* ... */ }
[blit_encoder copyFromBuffer:src_buf
                sourceOffset:src_offset
                    toBuffer:dst_buf
           destinationOffset:dst_offset
                        size:copy_size];
/* async_flush_to_host(): the assert condition is reconstructed from its
 * message. */
BLI_assert_msg(usage_ != GPU_USAGE_DEVICE_ONLY,
               "Storage buffers with usage GPU_USAGE_DEVICE_ONLY cannot have their data "
               "synchronized to the host.");
/* ... */
if (gpu_write_fence_ == nil) {
  gpu_write_fence_ = [ctx->device newSharedEvent];
}
/* ... */
id<MTLBuffer> storage_buf_mtl = this->metal_buffer_->get_metal_buffer();
if (storage_buf_mtl.storageMode == MTLStorageModeManaged) {
  /* ... */
  [blit_encoder synchronizeResource:storage_buf_mtl];
}
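/* A plausible sketch of the elided completion signal; encode_signal_event()
 * is a command-buffer method referenced by this file, but the increment
 * pattern is an assumption: */
host_read_signal_value_++;
ctx->main_command_buffer.encode_signal_event(gpu_write_fence_, host_read_signal_value_);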
/* read(void *data): blit the contents into a host-visible staging buffer,
 * wait for the GPU, then copy out. Truncated calls and the warning's
 * enclosing macro are completed with assumed arguments. */
if (data == nullptr) {
  return;
}
/* ... */
id<MTLBuffer> storage_buf_mtl = this->metal_buffer_->get_metal_buffer();
[blit_encoder copyFromBuffer:storage_buf_mtl
                sourceOffset:0
                    toBuffer:staging_buf_mtl
           destinationOffset:0
                        size:size_in_bytes_];
if (staging_buf_mtl.storageMode == MTLStorageModeManaged) {
  [blit_encoder synchronizeResource:staging_buf_mtl];
}
/* ... */
MTL_LOG_WARNING(
    "Device-only storage buffer being read. This will stall the GPU pipeline. Ensure this "
    "path is only used in testing.");
/* ... */
if (gpu_write_fence_ != nil) {
  /* Spin until the GPU signals the shared event; loop body assumed. */
  while (gpu_write_fence_.signaledValue < host_read_signal_value_) {
    BLI_time_sleep_ms(1);
  }
}
/* ... */
if (metal_buffer_->get_resource_options() & MTLResourceStorageModeManaged) {
  id<MTLBlitCommandEncoder> blit_encoder =
      ctx->main_command_buffer.ensure_begin_blit_encoder();
  [blit_encoder synchronizeResource:metal_buffer_->get_metal_buffer()];
}
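/* Note: with MTLStorageModeManaged resources, GPU writes only become visible
 * to the CPU after a blit synchronizeResource: and command-buffer completion;
 * both synchronize calls above exist for that reason. Shared-storage buffers
 * would not need this step. */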
/* get_metal_buffer(): resolve the wrapped resource into the MTLBuffer that
 * gets bound in place of an SSBO. Assert conditions are reconstructed from
 * their messages. */
switch (storage_source_) {
  case MTL_STORAGE_BUF_TYPE_DEFAULT: {
    BLI_assert_msg(metal_buffer_ != nullptr,
                   "Storage Buffer backing resource does not yet exist. Ensure the resource is "
                   "bound with data before the calling code requires its underlying MTLBuffer.");
    BLI_assert_msg(has_data_,
                   "Storage Buffer backing resource data has not yet been uploaded. Ensure the "
                   "resource is bound with data before the calling code requires its underlying "
                   "MTLBuffer.");
    /* ... */
  } break;
  case MTL_STORAGE_BUF_TYPE_UNIFORMBUF: {
    /* ... */
  } break;
  case MTL_STORAGE_BUF_TYPE_VERTBUF: {
    /* ... */
  } break;
  case MTL_STORAGE_BUF_TYPE_INDEXBUF: {
    /* ... */
  } break;
  case MTL_STORAGE_BUF_TYPE_TEXTURE: {
    id<MTLTexture> tex = texture_->get_metal_handle_base();
    /* ... */
    source_buffer = texture_->backing_buffer_;
  } break;
}
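/* For reference, a caller-side sketch (illustrative only; slot and offset are
 * not from this file):
 *   id<MTLBuffer> buf = ssbo->get_metal_buffer();
 *   ctx->main_command_buffer.get_compute_state().bind_compute_buffer(buf, 0, slot);
 */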