Blender V4.3
mtl_vertex_buffer.mm
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2022-2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
9#include "mtl_debug.hh"
10#include "mtl_storage_buffer.hh"
11
12namespace blender::gpu {
13
15
20
/* Allocate the host-side `data_` staging array for this vertex buffer.
 * NOTE(review): this doxygen listing dropped the function signature and the
 * branch condition preceding line 26 — presumably
 * `void MTLVertBuf::acquire_data()` with a device-only usage guard; confirm
 * against the original mtl_vertex_buffer.mm. */
22{
 23 /* Discard previous data, if any. */
/* First branch (condition line missing from this listing): no host-side copy
 * is kept. */
 26 data_ = nullptr;
 27 }
 28 else {
/* Host-visible path: allocate CPU staging memory sized to the full
 * allocation size of the buffer. */
 29 data_ = (uchar *)MEM_mallocN(sizeof(uchar) * this->size_alloc_get(), __func__);
 30 }
 31}
32
/* Resize the host-side `data_` staging array to the current allocation size.
 * NOTE(review): this doxygen listing dropped the function signature and the
 * branch condition preceding line 36 — presumably
 * `void MTLVertBuf::resize_data()` with a guard mirroring the allocation
 * function above; confirm against the original source. */
34{
/* First branch (condition line missing from this listing): no host-side copy
 * is kept. */
 36 data_ = nullptr;
 37 }
 38 else {
/* MEM_reallocN preserves existing contents up to the new size. */
 39 data_ = (uchar *)MEM_reallocN(data_, sizeof(uchar) * this->size_alloc_get());
 40 }
 41}
42
/* Release every resource owned by this vertex buffer: the Metal buffer, the
 * texture view created by bind_as_texture(), and the SSBO wrapper created by
 * bind_as_ssbo().
 * NOTE(review): this doxygen listing dropped the signature (presumably
 * `void MTLVertBuf::release_data()`) and line 53 (likely freeing the
 * host-side `data_` array); confirm against the original source. */
44{
 45 if (vbo_ != nullptr) {
/* Return the buffer to the memory manager rather than deleting it here. */
 46 vbo_->free();
 47 vbo_ = nullptr;
 48 is_wrapper_ = false;
 49 }
 50
/* Free the lazily-created buffer texture, if any (safe on nullptr). */
 51 GPU_TEXTURE_FREE_SAFE(buffer_texture_);
 52
 54
 55 if (ssbo_wrapper_) {
/* Wrapper is heap-allocated lazily in bind_as_ssbo() and owned here. */
 56 delete ssbo_wrapper_;
 57 ssbo_wrapper_ = nullptr;
 58 }
 59}
60
/* Duplicate this vertex buffer's contents into `dst_`. GPU-side contents are
 * copied buffer-to-buffer via a blit encoder; host-side CPU data, if present,
 * is duplicated with MEM_dupallocN.
 * NOTE(review): this doxygen listing dropped the signature (the override is
 * `duplicate_data(VertBuf *dst)` per the class declaration), the MTLContext
 * fetch before line 74, and the allocation call for `dst->vbo_` before
 * line 82; confirm against the original source. */
62{
 64 MTLVertBuf *src = this;
 65 MTLVertBuf *dst = static_cast<MTLVertBuf *>(dst_);
 66
 67 /* Ensure buffer has been initialized. */
 68 src->bind();
 69
 70 if (src->vbo_) {
 71
 72 /* Fetch active context. */
 74 BLI_assert(ctx);
 75
 76 /* Ensure destination does not have an active VBO. */
 77 BLI_assert(dst->vbo_ == nullptr);
 78
 79 /* Allocate VBO for destination vertbuf. */
 80 uint64_t length = src->vbo_->get_size();
/* Allocation call line missing from this listing; the visible second argument
 * makes the allocation CPU-visible unless the destination is device-only. */
 82 length, (dst->get_usage_type() != GPU_USAGE_DEVICE_ONLY));
 83 dst->alloc_size_ = length;
 84
 85 /* Fetch Metal buffer handles. */
 86 id<MTLBuffer> src_buffer = src->vbo_->get_metal_buffer();
 87 id<MTLBuffer> dest_buffer = dst->vbo_->get_metal_buffer();
 88
 89 /* Use blit encoder to copy data to duplicate buffer allocation. */
 90 id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
 91 if (G.debug & G_DEBUG_GPU) {
 92 [enc insertDebugSignpost:@"VertexBufferDuplicate"];
 93 }
 94 [enc copyFromBuffer:src_buffer
 95 sourceOffset:0
 96 toBuffer:dest_buffer
 97 destinationOffset:0
 98 size:length];
 99
 100 /* Flush results back to host buffer, if one exists. */
 101 if (dest_buffer.storageMode == MTLStorageModeManaged) {
 102 [enc synchronizeResource:dest_buffer];
 103 }
 104
 105 if (G.debug & G_DEBUG_GPU) {
 106 [enc insertDebugSignpost:@"VertexBufferDuplicateEnd"];
 107 }
 108
 109 /* Mark as in-use, as contents are updated via GPU command. */
 110 src->flag_used();
 111 }
 112
 113 /* Copy raw CPU data. */
 114 if (data_ != nullptr) {
 115 dst->data_ = (uchar *)MEM_dupallocN(src->data_);
 116 }
 117}
118
/* NOTE(review): signature dropped by this doxygen listing — given the body is
 * only `this->bind()`, this is presumably the `upload_data()` override, which
 * defers all allocation and upload work to bind(); confirm upstream. */
120{
 121 this->bind();
 122}
123
/* Ensure the Metal buffer backing this vertbuf exists, is correctly sized, and
 * holds the latest host data: (re)allocates `vbo_` when the required size
 * changed or in-flight contents would be overwritten, uploads dirty CPU data,
 * and blit-copies previous GPU contents across a reallocation when no new
 * data is provided.
 * NOTE(review): this doxygen listing dropped the signature (presumably
 * `void MTLVertBuf::bind()`) and several hyperlinked statement lines
 * (171, 200, 205, 216, 240, 244); the inserted comments below mark each gap —
 * confirm all of them against the original source. */
125{
 126 /* Determine allocation size. Set minimum allocation size to be
 127 * the maximal of a single attribute to avoid validation and
 128 * correctness errors. */
 129 uint64_t required_size_raw = sizeof(uchar) * this->size_used_get();
 130 uint64_t required_size = max_ulul(required_size_raw, 128);
 131
 132 if (required_size_raw == 0) {
 133 MTL_LOG_INFO("Vertex buffer required_size = 0");
 134 }
 135
 136 /* If the vertex buffer has already been allocated, but new data is ready,
 137 * or the usage size has changed, we release the existing buffer and
 138 * allocate a new buffer to ensure we do not overwrite in-use GPU resources.
 139 *
 140 * NOTE: We only need to free the existing allocation if contents have been
 141 * submitted to the GPU. Otherwise we can simply upload new data to the
 142 * existing buffer, if it will fit.
 143 *
 144 * NOTE: If a buffer is re-sized, but no new data is provided, the previous
 145 * contents are copied into the newly allocated buffer. */
 146 bool requires_reallocation = (vbo_ != nullptr) && (alloc_size_ != required_size);
 147 bool new_data_ready = (this->flag & GPU_VERTBUF_DATA_DIRTY) && this->data_;
 148
 149 gpu::MTLBuffer *prev_vbo = nullptr;
 150 GPUVertBufStatus prev_flag = this->flag;
 151
 152 if (vbo_ != nullptr) {
 153 if (requires_reallocation || (new_data_ready && contents_in_flight_)) {
 154 /* Track previous VBO to copy data from. */
 155 prev_vbo = vbo_;
 156
 157 /* Reset current allocation status. */
 158 vbo_ = nullptr;
 159 is_wrapper_ = false;
 160 alloc_size_ = 0;
 161
 162 /* Flag as requiring data upload. */
 163 if (requires_reallocation) {
 164 this->flag &= ~GPU_VERTBUF_DATA_UPLOADED;
 165 }
 166 }
 167 }
 168
 169 /* Create MTLBuffer of requested size. */
 170 if (vbo_ == nullptr) {
/* Allocation call line (171) missing from this listing; the visible second
 * argument makes the buffer CPU-visible unless usage is device-only. */
 172 required_size, (this->get_usage_type() != GPU_USAGE_DEVICE_ONLY));
 173 vbo_->set_label(@"Vertex Buffer");
 174 BLI_assert(vbo_ != nullptr);
 175 BLI_assert(vbo_->get_metal_buffer() != nil);
 176
 177 is_wrapper_ = false;
 178 alloc_size_ = required_size;
 179 contents_in_flight_ = false;
 180 }
 181
 182 /* Upload new data, if provided. */
 183 if (new_data_ready) {
 184
 185 /* Only upload data if usage size is greater than zero.
 186 * Do not upload data for device-only buffers. */
 187 if (required_size_raw > 0 && usage_ != GPU_USAGE_DEVICE_ONLY) {
 188
 189 /* Debug: Verify allocation is large enough. */
 190 BLI_assert(vbo_->get_size() >= required_size_raw);
 191
 192 /* Fetch mapped buffer host ptr and upload data. */
 193 void *dst_data = vbo_->get_host_ptr();
 194 memcpy((uint8_t *)dst_data, this->data_, required_size_raw);
 195 vbo_->flush_range(0, required_size_raw);
 196 }
 197
 198 /* If static usage, free host-side data. */
 199 if (usage_ == GPU_USAGE_STATIC) {
/* Body line (200) missing from this listing — presumably frees `data_`. */
 201 }
 202
 203 /* Flag data as having been uploaded. */
 204 this->flag &= ~GPU_VERTBUF_DATA_DIRTY;
/* Line 205 missing from this listing — presumably sets
 * GPU_VERTBUF_DATA_UPLOADED; confirm upstream. */
 206 }
 207 else if (requires_reallocation) {
 208
 209 /* If buffer has been re-sized, copy existing data if host
 210 * data had been previously uploaded. */
 211 BLI_assert(prev_vbo != nullptr);
 212
 213 if (prev_flag & GPU_VERTBUF_DATA_UPLOADED) {
 214
 215 /* Fetch active context. */
 217 BLI_assert(ctx);
 218
 219 id<MTLBuffer> copy_prev_buffer = prev_vbo->get_metal_buffer();
 220 id<MTLBuffer> copy_new_buffer = vbo_->get_metal_buffer();
 221 BLI_assert(copy_prev_buffer != nil);
 222 BLI_assert(copy_new_buffer != nil);
 223
 224 /* Ensure a blit command encoder is active for buffer copy operation. */
 225 id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
 226 [enc copyFromBuffer:copy_prev_buffer
 227 sourceOffset:0
 228 toBuffer:copy_new_buffer
 229 destinationOffset:0
 230 size:min_ulul([copy_new_buffer length], [copy_prev_buffer length])];
 231
 232 /* Flush newly copied data back to host-side buffer, if one exists.
 233 * Ensures data and cache coherency for managed MTLBuffers. */
 234 if (copy_new_buffer.storageMode == MTLStorageModeManaged) {
 235 [enc synchronizeResource:copy_new_buffer];
 236 }
 237
 238 /* For VBOs flagged as static, release host data as it will no longer be needed. */
 239 if (usage_ == GPU_USAGE_STATIC) {
/* Body line (240) missing from this listing — presumably frees `data_`. */
 241 }
 242
 243 /* Flag data as uploaded. */
/* Line 244 missing from this listing — presumably sets
 * GPU_VERTBUF_DATA_UPLOADED; confirm upstream. */
 245
 246 /* Flag as in-use, as contents have been updated via GPU commands. */
 247 this->flag_used();
 248 }
 249 }
 250
 251 /* Release previous buffer if re-allocated. */
 252 if (prev_vbo != nullptr) {
 253 prev_vbo->free();
 254 }
 255
 256 /* Ensure buffer has been created. */
 257 BLI_assert(vbo_ != nullptr);
 258}
259
260/* Update Sub currently only used by hair */
/* Upload `len` bytes from `data` into this buffer at byte offset `start`:
 * the data is staged through a temporary scratch-buffer range, then
 * blit-copied into the destination MTLBuffer on the GPU timeline.
 * NOTE(review): this doxygen listing dropped the MTLContext fetch before
 * line 265 and the scratch-buffer allocation call before line 275; confirm
 * against the original source. */
261void MTLVertBuf::update_sub(uint start, uint len, const void *data)
262{
 263 /* Fetch and verify active context. */
 265 BLI_assert(ctx);
 266 BLI_assert(ctx->device);
 267
 268 /* Ensure vertbuf has been created. */
 269 this->bind();
 270 BLI_assert(start + len <= alloc_size_);
 271
 272 /* Create temporary scratch buffer allocation for sub-range of data. */
 273 MTLTemporaryBuffer scratch_allocation =
/* Allocation call line (274) missing from this listing. */
 275 memcpy(scratch_allocation.data, data, len);
/* Mark the modified scratch range so managed-memory contents reach the GPU. */
 276 [scratch_allocation.metal_buffer
 277 didModifyRange:NSMakeRange(scratch_allocation.buffer_offset, len)];
 278 id<MTLBuffer> data_buffer = scratch_allocation.metal_buffer;
 279 uint64_t data_buffer_offset = scratch_allocation.buffer_offset;
 280
 281 BLI_assert(vbo_ != nullptr && data != nullptr);
 282 BLI_assert((start + len) <= vbo_->get_size());
 283
 284 /* Fetch destination buffer. */
 285 id<MTLBuffer> dst_buffer = vbo_->get_metal_buffer();
 286
 287 /* Ensure blit command encoder for copying data. */
 288 id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
 289 [enc copyFromBuffer:data_buffer
 290 sourceOffset:data_buffer_offset
 291 toBuffer:dst_buffer
 292 destinationOffset:start
 293 size:len];
 294
 295 /* Flush modified buffer back to host buffer, if one exists. */
 296 if (dst_buffer.storageMode == MTLStorageModeManaged) {
 297 [enc synchronizeResource:dst_buffer];
 298 }
 299}
300
/* Bind this vertex buffer to an SSBO slot by lazily wrapping it in an
 * MTLStorageBuf (size rounded up to a 16-byte multiple).
 * NOTE(review): this doxygen listing dropped the signature — the override is
 * `bind_as_ssbo(uint binding)` per the class declaration; confirm upstream.
 * NOTE(review): `alloc_size_` is 64-bit elsewhere in this file while
 * `ceil_to_multiple_u` takes `uint` — this narrows for buffers >4 GiB;
 * probably fine in practice, but worth confirming. */
302{
/* Mark contents in flight: subsequent uploads must not clobber GPU reads. */
 303 this->flag_used();
 304
 305 /* Ensure resource is initialized. */
 306 this->bind();
 307
 308 /* Create MTLStorageBuffer to wrap this resource and use conventional binding. */
 309 if (ssbo_wrapper_ == nullptr) {
 310 ssbo_wrapper_ = new MTLStorageBuf(this, ceil_to_multiple_u(alloc_size_, 16));
 311 }
 312 ssbo_wrapper_->bind(binding);
 313}
314
/* Bind this vertex buffer to a texture unit via a lazily-created buffer
 * texture; the texture is dropped and re-created if the underlying MTLBuffer
 * changed since the texture was built.
 * NOTE(review): this doxygen listing dropped the signature — the override is
 * `bind_as_texture(uint binding)` per the class declaration; confirm
 * upstream. */
316{
 317 /* Ensure allocations are ready, and data uploaded. */
 318 this->bind();
 319 BLI_assert(vbo_ != nullptr);
 320
 321 /* If vertex buffer updated, release existing texture and re-create. */
 322 id<MTLBuffer> buf = this->get_metal_buffer();
 323 if (buffer_texture_ != nullptr) {
 324 gpu::MTLTexture *mtl_buffer_tex = static_cast<gpu::MTLTexture *>(
 325 unwrap(this->buffer_texture_));
 326 id<MTLBuffer> tex_buf = mtl_buffer_tex->get_vertex_buffer();
/* Stale check: bind() may have reallocated vbo_, invalidating the texture. */
 327 if (tex_buf != buf) {
 328 GPU_TEXTURE_FREE_SAFE(buffer_texture_);
 329 buffer_texture_ = nullptr;
 330 }
 331 }
 332
 333 /* Create texture from vertex buffer. */
 334 if (buffer_texture_ == nullptr) {
 335 buffer_texture_ = GPU_texture_create_from_vertbuf("vertbuf_as_texture", this);
 336 }
 337
 338 /* Verify successful creation and bind. */
 339 BLI_assert(buffer_texture_ != nullptr);
 340 GPU_texture_bind(buffer_texture_, binding);
 341}
342
/* Read the buffer's full contents back into host memory at `data`
 * (`alloc_size_` bytes). Host-visible buffers are read directly after a
 * managed-memory synchronize + GPU_finish(); otherwise contents are staged
 * through a temporary CPU-visible buffer via a blit copy.
 * NOTE(review): this doxygen listing dropped the branch condition before
 * line 353 (presumably a host-visibility check on vbo_) and the temporary
 * staging-buffer allocation before line 370; confirm against the original
 * source. */
343void MTLVertBuf::read(void *data) const
344{
 345 /* Fetch active context. */
 347 BLI_assert(ctx);
 348
 349 BLI_assert(vbo_ != nullptr);
 350
 352
 353 /* Ensure data is flushed for host caches. */
 354 id<MTLBuffer> source_buffer = vbo_->get_metal_buffer();
 355 if (source_buffer.storageMode == MTLStorageModeManaged) {
 356 id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
 357 [enc synchronizeResource:source_buffer];
 358 }
 359
 360 /* Ensure GPU has finished operating on commands which may modify data. */
 361 GPU_finish();
 362
 363 /* Simple direct read. */
 364 void *host_ptr = vbo_->get_host_ptr();
 365 memcpy(data, host_ptr, alloc_size_);
 366 }
 367 else {
 368 /* Copy private data into temporary staging buffer. */
/* Staging-buffer allocation call line (369) missing from this listing; the
 * visible `true` argument presumably makes it CPU-visible. */
 370 true);
 371
 372 id<MTLBuffer> source_buffer = vbo_->get_metal_buffer();
 373 id<MTLBuffer> dest_buffer = dst_tmp_vbo_->get_metal_buffer();
 374 BLI_assert(source_buffer != nil);
 375 BLI_assert(dest_buffer != nil);
 376
 377 /* Ensure a blit command encoder is active for buffer copy operation. */
 378 id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
 379 [enc copyFromBuffer:source_buffer
 380 sourceOffset:0
 381 toBuffer:dest_buffer
 382 destinationOffset:0
/* NOTE(review): both min_ulul() arguments are [dest_buffer length] — the
 * parallel copy in bind() takes the min of destination and source lengths,
 * so [source_buffer length] was likely intended here; confirm upstream. */
 383 size:min_ulul([dest_buffer length], [dest_buffer length])];
 384
 385 /* Flush newly copied data back to host-side buffer, if one exists.
 386 * Ensures data and cache coherency for managed MTLBuffers. */
 387 if (dest_buffer.storageMode == MTLStorageModeManaged) {
 388 [enc synchronizeResource:dest_buffer];
 389 }
 390
 391 /* wait for GPU. */
 392 GPU_finish();
 393
 394 /* Simple direct read. */
 395 void *host_ptr = dst_tmp_vbo_->get_host_ptr();
 396 memcpy(data, host_ptr, alloc_size_);
 397 dst_tmp_vbo_->free();
 398 }
 399}
400
/* Adopt an externally-created MTLBuffer (passed as an integer handle) as this
 * vertbuf's backing storage, without allocating or uploading anything.
 * NOTE(review): this doxygen listing dropped the signature — the override is
 * `wrap_handle(uint64_t handle)` per the class declaration — and line 413
 * (presumably a status-flag update); confirm against the original source. */
402{
/* Wrapping is only valid on a vertbuf with no existing allocation. */
 403 BLI_assert(vbo_ == nullptr);
 404
 405 /* Attempt to cast to Metal buffer handle. */
 406 BLI_assert(handle != 0);
 407 id<MTLBuffer> buffer = reinterpret_cast<id<MTLBuffer>>((void *)handle);
 408
/* Mark as wrapper so this vertbuf knows it does not own the allocation path. */
 409 is_wrapper_ = true;
 410 vbo_ = new gpu::MTLBuffer(buffer);
 411
 412 /* We assume the data is already on the device, so no need to allocate or send it. */
 414}
415
/* Mark this buffer's contents as referenced by in-flight GPU work, so bind()
 * will reallocate rather than overwrite the buffer on the next data upload.
 * NOTE(review): signature dropped by this doxygen listing — presumably
 * `void MTLVertBuf::flag_used()`, matching the `flag_used()` call sites in
 * this file; confirm upstream. */
417{
 418 contents_in_flight_ = true;
 419}
420
421} // namespace blender::gpu
@ G_DEBUG_GPU
#define BLI_assert(a)
Definition BLI_assert.h:50
MINLINE uint ceil_to_multiple_u(uint a, uint b)
unsigned char uchar
unsigned int uint
void GPU_finish()
Definition gpu_state.cc:299
void GPU_texture_bind(GPUTexture *texture, int unit)
GPUTexture * GPU_texture_create_from_vertbuf(const char *name, blender::gpu::VertBuf *vertex_buf)
#define GPU_TEXTURE_FREE_SAFE(texture)
GPUVertBufStatus
@ GPU_VERTBUF_DATA_DIRTY
@ GPU_VERTBUF_DATA_UPLOADED
@ GPU_USAGE_STATIC
@ GPU_USAGE_DEVICE_ONLY
#define MEM_SAFE_FREE(v)
#define MEM_reallocN(vmemh, len)
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
SIMD_FORCE_INLINE btScalar length() const
Return the length of the vector.
Definition btVector3.h:257
gpu::MTLBuffer * allocate(uint64_t size, bool cpu_visible)
Definition mtl_memory.mm:96
uint64_t get_size() const
void * get_host_ptr() const
void flush_range(uint64_t offset, uint64_t length)
void set_label(NSString *str)
id< MTLBuffer > get_metal_buffer() const
id< MTLBlitCommandEncoder > ensure_begin_blit_encoder()
static MTLContext * get()
MTLCommandBufferManager main_command_buffer
MTLScratchBufferManager & get_scratchbuffer_manager()
static MTLBufferPool * get_global_memory_manager()
MTLTemporaryBuffer scratch_buffer_allocate_range_aligned(uint64_t alloc_size, uint alignment)
void bind(int slot) override
id< MTLBuffer > get_vertex_buffer() const
void duplicate_data(VertBuf *dst) override
void wrap_handle(uint64_t handle) override
void bind_as_ssbo(uint binding) override
void update_sub(uint start, uint len, const void *data) override
void read(void *data) const override
void bind_as_texture(uint binding) override
GPUUsageType get_usage_type() const
#define NULL
int len
void *(* MEM_mallocN)(size_t len, const char *str)
Definition mallocn.cc:44
void *(* MEM_dupallocN)(const void *vmemh)
Definition mallocn.cc:39
MINLINE unsigned long long min_ulul(unsigned long long a, unsigned long long b)
MINLINE unsigned long long max_ulul(unsigned long long a, unsigned long long b)
#define G(x, y, z)
#define MTL_LOG_INFO(info,...)
Definition mtl_debug.hh:51
static Context * unwrap(GPUContext *ctx)
unsigned char uint8_t
Definition stdint.h:78
unsigned __int64 uint64_t
Definition stdint.h:90