Blender V5.0
mtl_vertex_buffer.mm
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2022-2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
9#include "mtl_debug.hh"
10#include "mtl_storage_buffer.hh"
11
12namespace blender::gpu {
13
/* NOTE(review): Doxygen-extracted listing — the method signature (and the
 * hyperlinked calls on original lines 24-25/29) were dropped by the export.
 * This fragment clears `data_` on one branch and does something else
 * (stripped) on the other; presumably MTLVertBuf::acquire_data() discarding
 * any previous host allocation before acquiring a fresh one — TODO confirm
 * against the repository. */
15
20
22{
23 /* Discard previous data, if any. */
26 data_ = nullptr;
27 }
28 else {
30 }
31}
32
/* NOTE(review): body fragment — the signature (original line 33) and the
 * leading `if (...)` condition (line 35) were dropped by the export.
 * One branch clears `data_`; the other grows/shrinks the host allocation
 * with MEM_reallocN to `size_alloc_get()` bytes. Presumably
 * MTLVertBuf::resize_data() — TODO confirm against the repository. */
34{
36 data_ = nullptr;
37 }
38 else {
39 data_ = (uchar *)MEM_reallocN(data_, sizeof(uchar) * this->size_alloc_get());
40 }
41}
42
/* NOTE(review): body fragment — signature (original line 43) and line 53
 * were dropped by the export; line 53 likely freed the host-side `data_`
 * (cf. the MEM_SAFE_FREE macro in this file's symbol index) — TODO confirm.
 * Releases every GPU-side resource owned by this vertex buffer:
 *  - the Metal buffer `vbo_` (also clearing the external-wrapper flag),
 *  - the buffer-backed texture created by bind_as_texture(),
 *  - the MTLStorageBuf wrapper created by bind_as_ssbo(). */
44{
45 if (vbo_ != nullptr) {
46 vbo_->free();
47 vbo_ = nullptr;
48 is_wrapper_ = false;
49 }
50
51 GPU_TEXTURE_FREE_SAFE(buffer_texture_);
52
54
55 if (ssbo_wrapper_) {
56 delete ssbo_wrapper_;
57 ssbo_wrapper_ = nullptr;
58 }
59}
60
/* NOTE(review): signature (original line 61) dropped by the export.
 * Thin wrapper that simply forwards to bind(), where allocation and data
 * upload actually happen — presumably MTLVertBuf::upload_data(); TODO
 * confirm against the repository. */
62{
63 this->bind();
64}
65
/* Lazily allocate the Metal buffer and push pending host data to the GPU.
 *
 * NOTE(review): extracted listing — the signature (original line 66) and
 * several hyperlinked lines were dropped by the export: 106 (a flag update
 * inside `if (requires_reallocation)`), 113 (the head of the
 * `get_global_memory_manager()->allocate(...)` call whose arguments survive
 * on line 114), 142/182 (host-data frees for static usage), 146-147/186
 * (upload-status flag updates) and 158 (`MTLContext *ctx = MTLContext::get();`,
 * asserted on line 159). Kept byte-identical below; confirm the stripped
 * lines against the repository. */
67{
68 /* Determine allocation size. Set minimum allocation size to be
69 * the maximal of a single attribute to avoid validation and
70 * correctness errors. */
71 uint64_t required_size_raw = sizeof(uchar) * this->size_used_get();
72 uint64_t required_size = max_ulul(required_size_raw, 128);
73
74 if (required_size_raw == 0) {
75 MTL_LOG_DEBUG("Vertex buffer required_size = 0");
76 }
77
78 /* If the vertex buffer has already been allocated, but new data is ready,
79 * or the usage size has changed, we release the existing buffer and
80 * allocate a new buffer to ensure we do not overwrite in-use GPU resources.
81 *
82 * NOTE: We only need to free the existing allocation if contents have been
83 * submitted to the GPU. Otherwise we can simply upload new data to the
84 * existing buffer, if it will fit.
85 *
86 * NOTE: If a buffer is re-sized, but no new data is provided, the previous
87 * contents are copied into the newly allocated buffer. */
88 bool requires_reallocation = (vbo_ != nullptr) && (alloc_size_ != required_size);
89 bool new_data_ready = (this->flag & GPU_VERTBUF_DATA_DIRTY) && this->data_;
90
91 gpu::MTLBuffer *prev_vbo = nullptr;
92 GPUVertBufStatus prev_flag = this->flag;
93
94 if (vbo_ != nullptr) {
95 if (requires_reallocation || (new_data_ready && contents_in_flight_)) {
96 /* Track previous VBO to copy data from. */
97 prev_vbo = vbo_;
98
99 /* Reset current allocation status. */
100 vbo_ = nullptr;
101 is_wrapper_ = false;
102 alloc_size_ = 0;
103
104 /* Flag as requiring data upload. */
105 if (requires_reallocation) {
107 }
108 }
109 }
110
111 /* Create MTLBuffer of requested size. */
112 if (vbo_ == nullptr) {
/* NOTE(review): stripped line 113 is the `vbo_ = ...allocate(` call head;
 * the CPU-visibility argument below shows device-only buffers are allocated
 * without host mapping. */
114 required_size, (this->get_usage_type() != GPU_USAGE_DEVICE_ONLY));
115 vbo_->set_label(@"Vertex Buffer");
116 BLI_assert(vbo_ != nullptr);
117 BLI_assert(vbo_->get_metal_buffer() != nil);
118
119 is_wrapper_ = false;
120 alloc_size_ = required_size;
121 contents_in_flight_ = false;
122 }
123
124 /* Upload new data, if provided. */
125 if (new_data_ready) {
126
127 /* Only upload data if usage size is greater than zero.
128 * Do not upload data for device-only buffers. */
129 if (required_size_raw > 0 && usage_ != GPU_USAGE_DEVICE_ONLY) {
130
131 /* Debug: Verify allocation is large enough. */
132 BLI_assert(vbo_->get_size() >= required_size_raw);
133
134 /* Fetch mapped buffer host ptr and upload data. */
135 void *dst_data = vbo_->get_host_ptr();
136 memcpy((uint8_t *)dst_data, this->data_, required_size_raw);
137 vbo_->flush_range(0, required_size_raw);
138 }
139
140 /* If static usage, free host-side data. */
141 if (usage_ == GPU_USAGE_STATIC) {
143 }
144
145 /* Flag data as having been uploaded. */
148 }
149 else if (requires_reallocation) {
150
151 /* If buffer has been re-sized, copy existing data if host
152 * data had been previously uploaded. */
153 BLI_assert(prev_vbo != nullptr);
154
155 if (prev_flag & GPU_VERTBUF_DATA_UPLOADED) {
156
157 /* Fetch active context. */
159 BLI_assert(ctx);
160
161 id<MTLBuffer> copy_prev_buffer = prev_vbo->get_metal_buffer();
162 id<MTLBuffer> copy_new_buffer = vbo_->get_metal_buffer();
163 BLI_assert(copy_prev_buffer != nil);
164 BLI_assert(copy_new_buffer != nil);
165
166 /* Ensure a blit command encoder is active for buffer copy operation. */
167 id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
/* Copy length is clamped to the smaller of the two buffers, so growing and
 * shrinking resizes are both safe. */
168 [enc copyFromBuffer:copy_prev_buffer
169 sourceOffset:0
170 toBuffer:copy_new_buffer
171 destinationOffset:0
172 size:min_ulul([copy_new_buffer length], [copy_prev_buffer length])];
173
174 /* Flush newly copied data back to host-side buffer, if one exists.
175 * Ensures data and cache coherency for managed MTLBuffers. */
176 if (copy_new_buffer.storageMode == MTLStorageModeManaged) {
177 [enc synchronizeResource:copy_new_buffer];
178 }
179
180 /* For VBOs flagged as static, release host data as it will no longer be needed. */
181 if (usage_ == GPU_USAGE_STATIC) {
183 }
184
185 /* Flag data as uploaded. */
187
188 /* Flag as in-use, as contents have been updated via GPU commands. */
189 this->flag_used();
190 }
191 }
192
193 /* Release previous buffer if re-allocated. */
194 if (prev_vbo != nullptr) {
195 prev_vbo->free();
196 }
197
198 /* Ensure buffer has been created. */
199 BLI_assert(vbo_ != nullptr);
200}
201
/* Copy `len` bytes from `data` into this vertex buffer at byte offset
 * `start`. The data is first staged in a CPU-visible scratch allocation
 * and then copied GPU-side with a blit, so in-flight GPU reads of the
 * destination are not corrupted.
 *
 * NOTE(review): extracted listing — original line 206 (the
 * `MTLContext *ctx = MTLContext::get();` fetch asserted on line 207) and
 * line 216 (the scratch-buffer allocation call assigned to
 * `scratch_allocation`) were dropped by the export; confirm against the
 * repository. */
202/* Update Sub currently only used by hair */
203void MTLVertBuf::update_sub(uint start, uint len, const void *data)
204{
205 /* Fetch and verify active context. */
207 BLI_assert(ctx);
208 BLI_assert(ctx->device);
209
210 /* Ensure vertbuf has been created. */
211 this->bind();
212 BLI_assert(start + len <= alloc_size_);
213
214 /* Create temporary scratch buffer allocation for sub-range of data. */
215 MTLTemporaryBuffer scratch_allocation =
217 memcpy(scratch_allocation.data, data, len);
/* didModifyRange: notifies Metal of the CPU-side write so the modified
 * range of the (managed) scratch buffer is visible to the GPU. */
218 [scratch_allocation.metal_buffer
219 didModifyRange:NSMakeRange(scratch_allocation.buffer_offset, len)];
220 id<MTLBuffer> data_buffer = scratch_allocation.metal_buffer;
221 uint64_t data_buffer_offset = scratch_allocation.buffer_offset;
222
223 BLI_assert(vbo_ != nullptr && data != nullptr);
224 BLI_assert((start + len) <= vbo_->get_size());
225
226 /* Fetch destination buffer. */
227 id<MTLBuffer> dst_buffer = vbo_->get_metal_buffer();
228
229 /* Ensure blit command encoder for copying data. */
230 id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
231 [enc copyFromBuffer:data_buffer
232 sourceOffset:data_buffer_offset
233 toBuffer:dst_buffer
234 destinationOffset:start
235 size:len];
236
237 /* Flush modified buffer back to host buffer, if one exists. */
238 if (dst_buffer.storageMode == MTLStorageModeManaged) {
239 [enc synchronizeResource:dst_buffer];
240 }
241}
242
/* Bind this vertex buffer as a shader storage buffer at `binding`.
 *
 * NOTE(review): signature (original line 243) dropped by the export; the
 * file's symbol index lists `void bind_as_ssbo(uint binding) override`,
 * which matches this body. The wrapper size is rounded up with
 * ceil_to_multiple_u(alloc_size_, 16) — presumably to satisfy SSBO
 * alignment requirements; confirm. The wrapper is created once and
 * reused on subsequent binds. */
244{
245 this->flag_used();
246
247 /* Ensure resource is initialized. */
248 this->bind();
249
250 /* Create MTLStorageBuffer to wrap this resource and use conventional binding. */
251 if (ssbo_wrapper_ == nullptr) {
252 ssbo_wrapper_ = new MTLStorageBuf(this, ceil_to_multiple_u(alloc_size_, 16));
253 }
254 ssbo_wrapper_->bind(binding);
255}
256
/* Bind this vertex buffer as a buffer-backed texture at `binding`.
 *
 * NOTE(review): signature (original line 257) dropped by the export; the
 * file's symbol index lists `void bind_as_texture(uint binding) override`,
 * which matches this body. A cached texture is invalidated whenever the
 * underlying Metal buffer has been swapped (e.g. after a reallocation in
 * bind()), then lazily re-created from the vertex buffer. */
258{
259 /* Ensure allocations are ready, and data uploaded. */
260 this->bind();
261 BLI_assert(vbo_ != nullptr);
262
263 /* If vertex buffer updated, release existing texture and re-create. */
264 id<MTLBuffer> buf = this->get_metal_buffer();
265 if (buffer_texture_ != nullptr) {
266 gpu::MTLTexture *mtl_buffer_tex = static_cast<gpu::MTLTexture *>(this->buffer_texture_);
267 id<MTLBuffer> tex_buf = mtl_buffer_tex->get_vertex_buffer();
268 if (tex_buf != buf) {
269 GPU_TEXTURE_FREE_SAFE(buffer_texture_);
270 buffer_texture_ = nullptr;
271 }
272 }
273
274 /* Create texture from vertex buffer. */
275 if (buffer_texture_ == nullptr) {
276 buffer_texture_ = GPU_texture_create_from_vertbuf("vertbuf_as_texture", this);
277 }
278
279 /* Verify successful creation and bind. */
280 BLI_assert(buffer_texture_ != nullptr);
281 GPU_texture_bind(buffer_texture_, binding);
282}
283
/* Read the full buffer contents back into host memory `data`
 * (`alloc_size_` bytes).
 *
 * Two paths: host-visible buffers are synchronized (managed storage) and
 * memcpy'd directly; device-only buffers are first blit-copied into a
 * temporary CPU-visible staging buffer. Both paths call GPU_finish() so no
 * in-flight GPU work can still be writing the source.
 *
 * NOTE(review): extracted listing — original lines 287
 * (`MTLContext *ctx = MTLContext::get();`), 292 (the usage-type guard whose
 * `else` survives on line 308) and 310 (the staging-buffer
 * `get_global_memory_manager()->allocate(` call head, whose `true`
 * CPU-visible argument survives on line 311) were dropped by the export;
 * the surrounding structure is kept byte-identical. */
284void MTLVertBuf::read(void *data) const
285{
286 /* Fetch active context. */
288 BLI_assert(ctx);
289
290 BLI_assert(vbo_ != nullptr);
291
293
294 /* Ensure data is flushed for host caches. */
295 id<MTLBuffer> source_buffer = vbo_->get_metal_buffer();
296 if (source_buffer.storageMode == MTLStorageModeManaged) {
297 id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
298 [enc synchronizeResource:source_buffer];
299 }
300
301 /* Ensure GPU has finished operating on commands which may modify data. */
302 GPU_finish();
303
304 /* Simple direct read. */
305 void *host_ptr = vbo_->get_host_ptr();
306 memcpy(data, host_ptr, alloc_size_);
307 }
308 else {
309 /* Copy private data into temporary staging buffer. */
311 true);
312
313 id<MTLBuffer> source_buffer = vbo_->get_metal_buffer();
314 id<MTLBuffer> dest_buffer = dst_tmp_vbo_->get_metal_buffer();
315 BLI_assert(source_buffer != nil);
316 BLI_assert(dest_buffer != nil);
317
318 /* Ensure a blit command encoder is active for buffer copy operation. */
319 id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
320 [enc copyFromBuffer:source_buffer
321 sourceOffset:0
322 toBuffer:dest_buffer
323 destinationOffset:0
/* FIX(review): was `min_ulul([dest_buffer length], [dest_buffer length])` —
 * the destination length min'd against itself, so a destination larger than
 * the source would make the blit read past the source buffer. Clamp to the
 * smaller of source and destination, mirroring the resize-copy path
 * (original line 172). */
324 size:min_ulul([dest_buffer length], [source_buffer length])];
325
326 /* Flush newly copied data back to host-side buffer, if one exists.
327 * Ensures data and cache coherency for managed MTLBuffers. */
328 if (dest_buffer.storageMode == MTLStorageModeManaged) {
329 [enc synchronizeResource:dest_buffer];
330 }
331
332 /* wait for GPU. */
333 GPU_finish();
334
335 /* Simple direct read. */
336 void *host_ptr = dst_tmp_vbo_->get_host_ptr();
337 memcpy(data, host_ptr, alloc_size_);
338 dst_tmp_vbo_->free();
339 }
340}
341
/* Adopt an externally created Metal buffer as this vertex buffer's backing
 * storage. `handle` is the raw id<MTLBuffer> pointer cast to uint64_t.
 *
 * NOTE(review): signature (original line 342) dropped by the export; the
 * file's symbol index lists `void wrap_handle(uint64_t handle) override`,
 * which matches this body. Line 354 (likely the flag update implied by the
 * preceding comment) was also stripped — confirm. The wrapper flag ensures
 * other paths do not reallocate or upload over the external buffer. */
343{
344 BLI_assert(vbo_ == nullptr);
345
346 /* Attempt to cast to Metal buffer handle. */
347 BLI_assert(handle != 0);
348 id<MTLBuffer> buffer = reinterpret_cast<id<MTLBuffer>>((void *)handle);
349
350 is_wrapper_ = true;
351 vbo_ = new gpu::MTLBuffer(buffer);
352
353 /* We assume the data is already on the device, so no need to allocate or send it. */
355}
356
/* NOTE(review): signature (original line 357) dropped by the export.
 * Marks the buffer contents as submitted to in-flight GPU work, which
 * makes the next dirty-data bind reallocate instead of overwriting a
 * buffer the GPU may still be reading (see the
 * `new_data_ready && contents_in_flight_` check in the bind body above).
 * Presumably MTLVertBuf::flag_used(), called from the resize-copy path —
 * TODO confirm against the repository. */
358{
359 contents_in_flight_ = true;
360}
361
362} // namespace blender::gpu
#define BLI_assert(a)
Definition BLI_assert.h:46
MINLINE uint ceil_to_multiple_u(uint a, uint b)
unsigned char uchar
unsigned int uint
void GPU_finish()
Definition gpu_state.cc:310
#define GPU_TEXTURE_FREE_SAFE(texture)
blender::gpu::Texture * GPU_texture_create_from_vertbuf(const char *name, blender::gpu::VertBuf *vertex_buf)
void GPU_texture_bind(blender::gpu::Texture *texture, int unit)
GPUVertBufStatus
@ GPU_VERTBUF_DATA_DIRTY
@ GPU_VERTBUF_DATA_UPLOADED
@ GPU_USAGE_STATIC
@ GPU_USAGE_DEVICE_ONLY
#define MEM_SAFE_FREE(v)
#define MEM_reallocN(vmemh, len)
unsigned long long int uint64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
gpu::MTLBuffer * allocate(uint64_t size, bool cpu_visible)
void * get_host_ptr() const
void set_label(NSString *str)
id< MTLBuffer > get_metal_buffer() const
id< MTLBlitCommandEncoder > ensure_begin_blit_encoder()
static MTLContext * get()
MTLCommandBufferManager main_command_buffer
MTLScratchBufferManager & get_scratchbuffer_manager()
static MTLBufferPool * get_global_memory_manager()
MTLTemporaryBuffer scratch_buffer_allocate_range_aligned(uint64_t alloc_size, uint alignment)
id< MTLBuffer > get_vertex_buffer() const
void wrap_handle(uint64_t handle) override
void bind_as_ssbo(uint binding) override
void update_sub(uint start, uint len, const void *data) override
void read(void *data) const override
void bind_as_texture(uint binding) override
GPUUsageType get_usage_type() const
MutableSpan< T > data()
float length(VecOp< float, D >) RET
void * MEM_malloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:133
MINLINE unsigned long long min_ulul(unsigned long long a, unsigned long long b)
MINLINE unsigned long long max_ulul(unsigned long long a, unsigned long long b)
#define MTL_LOG_DEBUG(info,...)
Definition mtl_debug.hh:49
MTLBufferRange MTLTemporaryBuffer
uint len