Blender V4.3
draw_instance_data.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2016 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
18#include "draw_instance_data.hh"
19#include "draw_manager_c.hh"
20
21#include "DRW_engine.hh"
22#include "DRW_render.hh" /* For DRW_shgroup_get_instance_count() */
23
24#include "GPU_material.hh"
25
26#include "DNA_particle_types.h"
27
28#include "BKE_duplilist.hh"
29
30#include "RNA_access.hh"
31#include "RNA_path.hh"
32
33#include "BLI_bitmap.h"
34#include "BLI_memblock.h"
35#include "BLI_mempool.h"
36#include "BLI_utildefines.h"
37#include "MEM_guardedalloc.h"
38
41 bool used; /* If this data is used or not. */
42 size_t data_size; /* Size of one instance data. */
44};
45
56
64
67 blender::gpu::Batch *batch;
69 blender::gpu::Batch *instancer;
73 blender::gpu::Batch *geom;
74};
75
76static ListBase g_idatalists = {nullptr, nullptr};
77
/* Take a GPU handle reference on every vertex and instance buffer attached to \a batch,
 * so the VBOs outlive the batch while it is used for instancing.
 * NOTE(review): the loop bodies (original lines 81 and 84) are missing from this
 * extracted view; per the symbol index they presumably call
 * GPU_vertbuf_handle_ref_add() on each buffer — confirm against the full source. */
78static void instancing_batch_references_add(blender::gpu::Batch *batch)
79{
80 for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN && batch->verts[i]; i++) {
  /* Missing line 81: presumably GPU_vertbuf_handle_ref_add(batch->verts[i]) — TODO confirm. */
82 }
83 for (int i = 0; i < GPU_BATCH_INST_VBO_MAX_LEN && batch->inst[i]; i++) {
  /* Missing line 84: presumably GPU_vertbuf_handle_ref_add(batch->inst[i]) — TODO confirm. */
85 }
86}
87
/* Release the GPU handle references taken by instancing_batch_references_add() for
 * every vertex and instance buffer attached to \a batch.
 * NOTE(review): the loop bodies (original lines 91 and 94) are missing from this
 * extracted view; presumably they call GPU_vertbuf_handle_ref_remove() on each
 * buffer — confirm against the full source. */
88static void instancing_batch_references_remove(blender::gpu::Batch *batch)
89{
90 for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN && batch->verts[i]; i++) {
  /* Missing line 91: presumably GPU_vertbuf_handle_ref_remove(batch->verts[i]) — TODO confirm. */
92 }
93 for (int i = 0; i < GPU_BATCH_INST_VBO_MAX_LEN && batch->inst[i]; i++) {
  /* Missing line 94: presumably GPU_vertbuf_handle_ref_remove(batch->inst[i]) — TODO confirm. */
95 }
96}
97
98/* -------------------------------------------------------------------- */
/* Request a pooled temporary vertex buffer with the given \a format.
 * The handle caches the buffer; it is only re-created when the requested format
 * differs from the cached one. \a vert_len is stored by pointer so the caller can
 * fill in the final vertex count later (read in DRW_instance_buffer_finish()).
 * NOTE(review): the signature head (original lines 99-103) is missing from this
 * extracted view; per the symbol index it is
 * blender::gpu::VertBuf *DRW_temp_buffer_request(DRWInstanceDataList *idatalist,
 * GPUVertFormat *format, int *vert_len) — confirm against the full source. */
104 int *vert_len)
105{
106 BLI_assert(format != nullptr);
107 BLI_assert(vert_len != nullptr);
108
  /* Grab (or allocate) a recycled handle from the per-frame pool. */
109 DRWTempBufferHandle *handle = static_cast<DRWTempBufferHandle *>(
110 BLI_memblock_alloc(idatalist->pool_buffers));
111
  /* Format changed: drop the old buffer and build a fresh one. */
112 if (handle->format != format) {
113 handle->format = format;
114 GPU_VERTBUF_DISCARD_SAFE(handle->buf);
115
  /* Missing lines 116-118: presumably allocate `vert` via GPU_vertbuf_calloc() /
   * GPU_vertbuf_init_with_format_ex() / GPU_vertbuf_data_alloc() — TODO confirm. */
119
120 handle->buf = vert;
121 }
  /* Store the pointer, not the value: the count is finalized later. */
122 handle->vert_len = vert_len;
123 return handle->buf;
124}
125
/* Request a pooled temporary instancing batch combining \a geom with either an
 * instance vertex buffer \a buf or an instancer batch \a instancer (exactly one of
 * the two must be non-null). The cached batch is reused when still compatible with
 * the request; otherwise the arguments are saved on the handle and the batch is
 * (re)built later in DRW_instance_buffer_finish().
 * NOTE(review): the signature head (original lines 126-127) is missing from this
 * extracted view; per the symbol index it is
 * blender::gpu::Batch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
 * blender::gpu::VertBuf *buf, ...) — confirm against the full source. */
128 blender::gpu::Batch *instancer,
129 blender::gpu::Batch *geom)
130{
131 /* Do not call this with a batch that is already an instancing batch. */
132 BLI_assert(geom->inst[0] == nullptr);
133 /* Only call with one of them. */
134 BLI_assert((instancer != nullptr) != (buf != nullptr));
135
  /* Missing line 137: presumably BLI_memblock_alloc(idatalist->pool_instancing) — TODO confirm. */
136 DRWTempInstancingHandle *handle = static_cast<DRWTempInstancingHandle *>(
138 if (handle->batch == nullptr) {
139 handle->batch = GPU_batch_calloc();
140 }
141
  /* Compatibility check: instance source, primitive type, element buffer and every
   * vertex buffer must match the cached batch, and it must not be mid-rebuild.
   * NOTE(review): part of the condition (original line 144) is missing here. */
142 blender::gpu::Batch *batch = handle->batch;
143 bool instancer_compat = buf ? ((batch->inst[0] == buf) &&
145 ((batch->inst[0] == instancer->verts[0]) &&
146 (batch->inst[1] == instancer->verts[1]));
147 bool is_compatible = (batch->prim_type == geom->prim_type) && instancer_compat &&
148 (batch->flag & GPU_BATCH_BUILDING) == 0 && (batch->elem == geom->elem);
149 for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN && is_compatible; i++) {
150 if (batch->verts[i] != geom->verts[i]) {
151 is_compatible = false;
152 }
153 }
154
155 if (!is_compatible) {
  /* Missing lines 156-159: presumably release old references / clear the batch and
   * mark it GPU_BATCH_BUILDING before deferring the rebuild — TODO confirm. */
158 /* Save args and init later. */
160 handle->buf = buf;
161 handle->instancer = instancer;
162 handle->geom = geom;
163 }
164 return batch;
165}
166
/* Request a pooled temporary (non-instancing) batch drawing \a buf with \a prim_type.
 * The cached batch is reused when its first vertex buffer and primitive type match;
 * otherwise it is re-initialized in place.
 * NOTE(review): one parameter line (original line 168, presumably
 * blender::gpu::VertBuf *buf) and two statement lines (172: the
 * BLI_memblock_alloc(idatalist->pool_batching) call; 179/181: the rest of the
 * compatibility test and a buffer-uploaded check) are missing from this extracted
 * view — confirm against the full source. */
167blender::gpu::Batch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
169 GPUPrimType prim_type)
170{
171 blender::gpu::Batch **batch_ptr = static_cast<blender::gpu::Batch **>(
173 if (*batch_ptr == nullptr) {
174 *batch_ptr = GPU_batch_calloc();
175 }
176
177 blender::gpu::Batch *batch = *batch_ptr;
178 bool is_compatible = (batch->verts[0] == buf) && (batch->prim_type == prim_type) &&
180 if (!is_compatible) {
  /* Re-init the batch in place; the previous contents are discarded (line 181 missing). */
182 GPU_batch_init(batch, prim_type, buf, nullptr);
183 }
184 return batch;
185}
186
188{
189 handle->format = nullptr;
190 GPU_VERTBUF_DISCARD_SAFE(handle->buf);
191}
192
194{
196 GPU_BATCH_DISCARD_SAFE(handle->batch);
197}
198
199static void temp_batch_free(blender::gpu::Batch **batch)
200{
202}
203
/* Finalize all temporary buffers/batches requested this frame: shrink and upload the
 * vertex buffers, then build the deferred instancing batches saved by
 * DRW_temp_batch_instance_request().
 * NOTE(review): the signature line (original 204, per the symbol index
 * void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)) and the iterator
 * declaration (line 207, a BLI_memblock_iter) are missing from this extracted view. */
205{
206 /* Resize down buffers in use and send data to GPU. */
208 DRWTempBufferHandle *handle;
209 BLI_memblock_iternew(idatalist->pool_buffers, &iter);
210 while ((handle = static_cast<DRWTempBufferHandle *>(BLI_memblock_iterstep(&iter)))) {
211 if (handle->vert_len != nullptr) {
212 uint vert_len = *(handle->vert_len);
  /* Round up to the next DRW_BUFFER_VERTS_CHUNK multiple to limit reallocation churn. */
213 uint target_buf_size = ((vert_len / DRW_BUFFER_VERTS_CHUNK) + 1) * DRW_BUFFER_VERTS_CHUNK;
214 if (target_buf_size < GPU_vertbuf_get_vertex_alloc(handle->buf)) {
215 GPU_vertbuf_data_resize(*handle->buf, target_buf_size);
216 }
217 GPU_vertbuf_data_len_set(*handle->buf, vert_len);
218 if (vert_len > 0) {
219 GPU_vertbuf_use(handle->buf); /* Send data. */
220 }
221 }
222 }
223 /* Finish pending instancing batches. */
224 DRWTempInstancingHandle *handle_inst;
225 BLI_memblock_iternew(idatalist->pool_instancing, &iter);
226 while ((handle_inst = static_cast<DRWTempInstancingHandle *>(BLI_memblock_iterstep(&iter)))) {
227 blender::gpu::Batch *batch = handle_inst->batch;
  /* Only batches left in the BUILDING state were deferred and need assembly. */
228 if (batch && batch->flag == GPU_BATCH_BUILDING) {
229 blender::gpu::VertBuf *inst_buf = handle_inst->buf;
230 blender::gpu::Batch *inst_batch = handle_inst->instancer;
231 blender::gpu::Batch *geom = handle_inst->geom;
232 GPU_batch_copy(batch, geom);
233 if (inst_batch != nullptr) {
234 for (int i = 0; i < GPU_BATCH_INST_VBO_MAX_LEN && inst_batch->verts[i]; i++) {
235 GPU_batch_instbuf_add(batch, inst_batch->verts[i], false);
236 }
237 }
238 else {
239 GPU_batch_instbuf_add(batch, inst_buf, false);
240 }
241 /* Add reference to avoid comparing pointers (in DRW_temp_batch_request) that could
242 * potentially be the same. This will delay the freeing of the blender::gpu::VertBuf itself.
243 */
  /* Missing line 244: presumably instancing_batch_references_add(batch) — TODO confirm. */
245 }
246 }
247 /* Resize pools and free unused. */
  /* Missing lines 248-250: presumably BLI_memblock_clear() on the three pools with the
   * matching free callbacks — TODO confirm against the full source. */
251}
252
255/* -------------------------------------------------------------------- */
/* Allocate a new DRWInstanceData of \a attr_size floats per instance and append it to
 * the tail of the idatalist bucket for that size.
 * NOTE(review): the signature lines (original 256-259, per the symbol index
 * static DRWInstanceData *drw_instance_data_create(DRWInstanceDataList *idatalist,
 * uint attr_size)) are missing from this extracted view. */
260{
261 DRWInstanceData *idata = static_cast<DRWInstanceData *>(
262 MEM_callocN(sizeof(DRWInstanceData), "DRWInstanceData"));
263 idata->next = nullptr;
264 idata->used = true;
265 idata->data_size = attr_size;
  /* Element size is attr_size floats; chunks of 16 elements, allocated lazily. */
266 idata->mempool = BLI_mempool_create(sizeof(float) * idata->data_size, 0, 16, 0);
267
  /* Missing line 268: presumably a BLI_assert on attr_size bounds — TODO confirm. */
269
270 /* Push to linked list. */
271 if (idatalist->idata_head[attr_size - 1] == nullptr) {
272 idatalist->idata_head[attr_size - 1] = idata;
273 }
274 else {
275 idatalist->idata_tail[attr_size - 1]->next = idata;
276 }
277 idatalist->idata_tail[attr_size - 1] = idata;
278
279 return idata;
280}
281
283{
285}
286
288{
289 return BLI_mempool_alloc(idata->mempool);
290}
291
/* Return an instance-data block of \a attr_size floats per element, reusing an unused
 * one from the matching size bucket when possible, else creating a new one.
 * NOTE(review): the signature line (original 292, per the symbol index
 * DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist,
 * uint attr_size)) and an assert (line 294, presumably bounds-checking attr_size
 * against MAX_INSTANCE_DATA_SIZE) are missing from this extracted view. */
293{
295
296 DRWInstanceData *idata = idatalist->idata_head[attr_size - 1];
297
298 /* Search for an unused data chunk. */
299 for (; idata; idata = idata->next) {
300 if (idata->used == false) {
301 idata->used = true;
302 return idata;
303 }
304 }
305
  /* Nothing free: allocate a fresh block in this bucket. */
306 return drw_instance_data_create(idatalist, attr_size);
307}
308
311/* -------------------------------------------------------------------- */
316{
317 DRWInstanceDataList *idatalist = static_cast<DRWInstanceDataList *>(
318 MEM_callocN(sizeof(DRWInstanceDataList), "DRWInstanceDataList"));
319
320 idatalist->pool_batching = BLI_memblock_create(sizeof(blender::gpu::Batch *));
323
324 BLI_addtail(&g_idatalists, idatalist);
325
326 return idatalist;
327}
328
330{
331 DRWInstanceData *idata, *next_idata;
332
333 for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; i++) {
334 for (idata = idatalist->idata_head[i]; idata; idata = next_idata) {
335 next_idata = idata->next;
337 MEM_freeN(idata);
338 }
339 idatalist->idata_head[i] = nullptr;
340 idatalist->idata_tail[i] = nullptr;
341 }
342
346
347 BLI_remlink(&g_idatalists, idatalist);
348
349 MEM_freeN(idatalist);
350}
351
353{
354 DRWInstanceData *idata;
355
356 for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; i++) {
357 for (idata = idatalist->idata_head[i]; idata; idata = idata->next) {
358 idata->used = false;
359 }
360 }
361}
362
/* Free every DRWInstanceData whose `used` flag is still false (i.e. not requested since
 * the last DRW_instance_data_list_reset()), relinking each bucket's singly-linked list
 * and recomputing its tail pointer as it goes.
 * NOTE(review): the signature line (original 363, per the symbol index
 * void DRW_instance_data_list_free_unused(DRWInstanceDataList *idatalist)) is missing
 * from this extracted view. */
364{
365 DRWInstanceData *idata, *next_idata;
366
367 /* Remove unused data blocks and sanitize each list. */
368 for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; i++) {
369 idatalist->idata_tail[i] = nullptr;
370 for (idata = idatalist->idata_head[i]; idata; idata = next_idata) {
371 next_idata = idata->next;
372 if (idata->used == false) {
  /* Unlink: either advance the head or splice past via the current tail. */
373 if (idatalist->idata_head[i] == idata) {
374 idatalist->idata_head[i] = next_idata;
375 }
376 else {
377 /* idatalist->idata_tail[i] is guaranteed not to be null in this case. */
378 idatalist->idata_tail[i]->next = next_idata;
379 }
  /* Missing line 380: presumably BLI_mempool_destroy(idata->mempool) — TODO confirm. */
381 MEM_freeN(idata);
382 }
383 else {
  /* Keep this node; fix up the tail link then advance the tail. */
384 if (idatalist->idata_tail[i] != nullptr) {
385 idatalist->idata_tail[i]->next = idata;
386 }
387 idatalist->idata_tail[i] = idata;
388 }
389 }
390 }
391}
392
394{
395 DRWInstanceData *idata;
396
397 for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; i++) {
398 for (idata = idatalist->idata_head[i]; idata; idata = idata->next) {
400 }
401 }
402}
403
406/* -------------------------------------------------------------------- */
410#define CHUNK_LIST_STEP (1 << 4)
411
414 /* Memory buffers used to stage chunk data before transfer to UBOs. */
416 /* Uniform buffer objects with flushed data. */
417 GPUUniformBuf **chunk_ubos;
418 /* True if the relevant chunk contains data (distinct from simply being allocated). */
420
423};
424
426 uint item_size,
427 uint chunk_size)
428{
429 buffer->chunk_buffers = nullptr;
430 buffer->chunk_used = nullptr;
431 buffer->chunk_ubos = nullptr;
432 buffer->num_chunks = 0;
433 buffer->item_size = item_size;
434 buffer->chunk_size = chunk_size;
435 buffer->chunk_bytes = item_size * chunk_size;
436}
437
439{
440 DRWSparseUniformBuf *buf = static_cast<DRWSparseUniformBuf *>(
441 MEM_mallocN(sizeof(DRWSparseUniformBuf), __func__));
442 drw_sparse_uniform_buffer_init(buf, item_size, chunk_size);
443 return buf;
444}
445
447{
448 for (int i = 0; i < buffer->num_chunks; i++) {
449 if (BLI_BITMAP_TEST(buffer->chunk_used, i)) {
450 if (buffer->chunk_ubos[i] == nullptr) {
451 buffer->chunk_ubos[i] = GPU_uniformbuf_create(buffer->chunk_bytes);
452 }
453 GPU_uniformbuf_update(buffer->chunk_ubos[i], buffer->chunk_buffers[i]);
454 }
455 }
456}
457
/* Free chunks not marked used since the last clear (or all chunks when \a free_all),
 * shrink the chunk arrays to the highest surviving chunk (rounded up to
 * CHUNK_LIST_STEP), and reset the usage bitmap.
 * NOTE(review): the signature line (original 458, per the symbol index
 * void DRW_sparse_uniform_buffer_clear(DRWSparseUniformBuf *buffer, bool free_all))
 * is missing from this extracted view. */
459{
460 int max_used_chunk = 0;
461
462 for (int i = 0; i < buffer->num_chunks; i++) {
463 /* Delete buffers that were not used since the last clear call. */
464 if (free_all || !BLI_BITMAP_TEST(buffer->chunk_used, i)) {
465 MEM_SAFE_FREE(buffer->chunk_buffers[i]);
466
467 if (buffer->chunk_ubos[i]) {
  /* Missing line 468: presumably GPU_uniformbuf_free(buffer->chunk_ubos[i]) — TODO confirm. */
469 buffer->chunk_ubos[i] = nullptr;
470 }
471 }
472 else {
473 max_used_chunk = i + 1;
474 }
475 }
476
477 /* Shrink the chunk array if appropriate. */
478 const int old_num_chunks = buffer->num_chunks;
479
  /* Round the surviving count up to a CHUNK_LIST_STEP boundary (STEP is a power of two). */
480 buffer->num_chunks = (max_used_chunk + CHUNK_LIST_STEP - 1) & ~(CHUNK_LIST_STEP - 1);
481
482 if (buffer->num_chunks == 0) {
483 /* Ensure that an empty pool holds no memory allocations. */
  /* Missing line 484: presumably MEM_SAFE_FREE(buffer->chunk_buffers) — TODO confirm. */
485 MEM_SAFE_FREE(buffer->chunk_used);
486 MEM_SAFE_FREE(buffer->chunk_ubos);
487 return;
488 }
489
490 if (buffer->num_chunks != old_num_chunks) {
491 buffer->chunk_buffers = static_cast<char **>(
492 MEM_recallocN(buffer->chunk_buffers, buffer->num_chunks * sizeof(void *)));
493 buffer->chunk_ubos = static_cast<GPUUniformBuf **>(
494 MEM_recallocN(buffer->chunk_ubos, buffer->num_chunks * sizeof(void *)));
495 BLI_BITMAP_RESIZE(buffer->chunk_used, buffer->num_chunks);
496 }
497
  /* Start the next cycle with every chunk marked unused. */
498 BLI_bitmap_set_all(buffer->chunk_used, false, buffer->num_chunks);
499}
500
506
508{
509 return buffer->num_chunks == 0;
510}
511
512static GPUUniformBuf *drw_sparse_uniform_buffer_get_ubo(DRWSparseUniformBuf *buffer, int chunk)
513{
514 if (buffer && chunk < buffer->num_chunks && BLI_BITMAP_TEST(buffer->chunk_used, chunk)) {
515 return buffer->chunk_ubos[chunk];
516 }
517 return nullptr;
518}
519
520void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int location)
521{
522 GPUUniformBuf *ubo = drw_sparse_uniform_buffer_get_ubo(buffer, chunk);
523 if (ubo) {
524 GPU_uniformbuf_bind(ubo, location);
525 }
526}
527
529{
530 GPUUniformBuf *ubo = drw_sparse_uniform_buffer_get_ubo(buffer, chunk);
531 if (ubo) {
533 }
534}
535
/* Return a pointer to item \a item inside chunk \a chunk, growing the chunk arrays and
 * lazily allocating (or zero-resetting) the chunk's staging buffer as needed, and mark
 * the chunk used for this cycle.
 * NOTE(review): the signature line (original 536, per the symbol index
 * void *DRW_sparse_uniform_buffer_ensure_item(DRWSparseUniformBuf *buffer, int chunk,
 * int item)) is missing from this extracted view. */
537{
538 if (chunk >= buffer->num_chunks) {
  /* Grow the chunk arrays up to the next CHUNK_LIST_STEP boundary past `chunk`. */
539 buffer->num_chunks = (chunk + CHUNK_LIST_STEP) & ~(CHUNK_LIST_STEP - 1);
540 buffer->chunk_buffers = static_cast<char **>(
541 MEM_recallocN(buffer->chunk_buffers, buffer->num_chunks * sizeof(void *)));
542 buffer->chunk_ubos = static_cast<GPUUniformBuf **>(
543 MEM_recallocN(buffer->chunk_ubos, buffer->num_chunks * sizeof(void *)));
544 BLI_BITMAP_RESIZE(buffer->chunk_used, buffer->num_chunks);
545 }
546
547 char *chunk_buffer = buffer->chunk_buffers[chunk];
548
549 if (chunk_buffer == nullptr) {
  /* First touch of this chunk ever: allocate zeroed staging memory. */
550 buffer->chunk_buffers[chunk] = chunk_buffer = static_cast<char *>(
551 MEM_callocN(buffer->chunk_bytes, __func__));
552 }
553 else if (!BLI_BITMAP_TEST(buffer->chunk_used, chunk)) {
  /* First touch this cycle: wipe data left over from the previous cycle. */
554 memset(chunk_buffer, 0, buffer->chunk_bytes);
555 }
556
557 BLI_BITMAP_ENABLE(buffer->chunk_used, chunk);
558
559 return chunk_buffer + buffer->item_size * item;
560}
561
564/* -------------------------------------------------------------------- */
570 /* Attribute list (also used as hash table key) handled by this buffer. */
572 /* Sparse UBO buffer containing the attribute values. */
574 /* Last handle used to update the buffer, checked for avoiding redundant updates. */
576 /* Linked list pointer used for freeing the empty unneeded buffers. */
578};
579
/* Find or create the DRWUniformAttrBuf for \a key in \a table. On creation, the
 * buffer's embedded key copy doubles as the hash-table key, its sparse UBO is sized at
 * one float[4] per attribute, and last_handle is set to an invalid sentinel so the
 * first update always runs.
 * NOTE(review): the signature head (original line 580, per the symbol index
 * static DRWUniformAttrBuf *drw_uniform_attrs_pool_ensure(GHash *table, ...)) is
 * missing from this extracted view. */
581 const GPUUniformAttrList *key)
582{
583 void **pkey, **pval;
584
  /* ensure_p_ex returns false when the key was newly inserted — initialize then. */
585 if (!BLI_ghash_ensure_p_ex(table, key, &pkey, &pval)) {
586 DRWUniformAttrBuf *buffer = static_cast<DRWUniformAttrBuf *>(
587 MEM_callocN(sizeof(*buffer), __func__));
588
589 *pkey = &buffer->key;
590 *pval = buffer;
591
592 GPU_uniform_attr_list_copy(&buffer->key, key);
  /* Missing line 593: presumably drw_sparse_uniform_buffer_init( — TODO confirm. */
594 &buffer->ubos, key->count * sizeof(float[4]), DRW_RESOURCE_CHUNK_LEN);
595
  /* Sentinel: guarantees the first drw_uniform_attrs_pool_update() refreshes data. */
596 buffer->last_handle = (DRWResourceHandle)-1;
597 }
598
599 return (DRWUniformAttrBuf *)*pval;
600}
601
/* Resolve one uniform attribute to an RGBA value in \a r_data, consulting the dupli
 * source/parent only when the attribute was declared with instance (dupli) semantics.
 * NOTE(review): the signature head (original line 602, per the symbol index
 * static void drw_uniform_attribute_lookup(const GPUUniformAttr *attr, ...)) is
 * missing from this extracted view. */
603 const Object *ob,
604 const Object *dupli_parent,
605 const DupliObject *dupli_source,
606 float r_data[4])
607{
608 /* If requesting instance data, check the parent particle system and object. */
609 if (attr->use_dupli) {
610 BKE_object_dupli_find_rgba_attribute(ob, dupli_source, dupli_parent, attr->name, r_data);
611 }
612 else {
613 BKE_object_dupli_find_rgba_attribute(ob, nullptr, nullptr, attr->name, r_data);
614 }
615}
616
/* Refresh the per-object uniform-attribute values for \a handle in the pool keyed by
 * \a key. Skips work when the handle matches the last one written (redundant update).
 * NOTE(review): the signature head (original line 617, per the symbol index
 * void drw_uniform_attrs_pool_update(GHash *table, ...)) and the buffer lookup
 * (line 624, presumably `DRWUniformAttrBuf *buffer = drw_uniform_attrs_pool_ensure(
 * table, key);`) are missing from this extracted view — confirm against the source. */
618 const GPUUniformAttrList *key,
619 DRWResourceHandle *handle,
620 const Object *ob,
621 const Object *dupli_parent,
622 const DupliObject *dupli_source)
623{
625
626 if (buffer->last_handle != *handle) {
627 buffer->last_handle = *handle;
628
  /* The handle encodes the (chunk, item) slot inside the sparse UBO. */
629 int chunk = DRW_handle_chunk_get(handle);
630 int item = DRW_handle_id_get(handle);
631 float(*values)[4] = static_cast<float(*)[4]>(
632 DRW_sparse_uniform_buffer_ensure_item(&buffer->ubos, chunk, item));
633
  /* One float[4] per attribute, written in list order. */
634 LISTBASE_FOREACH (const GPUUniformAttr *, attr, &buffer->key.list) {
635 drw_uniform_attribute_lookup(attr, ob, dupli_parent, dupli_source, *values++);
636 }
637 }
638}
639
/* Build (or return the cached) UBO of view-layer attribute values for the current
 * draw context. The staging array is DRW_RESOURCE_CHUNK_LEN entries; element 0's
 * buffer_length field stores how many entries were actually filled.
 * NOTE(review): the signature line (original 640, per the symbol index
 * GPUUniformBuf *drw_ensure_layer_attribute_buffer()) is missing from this view. */
641{
642 DRWData *data = DST.vmempool;
643
  /* Fast path: already built this cycle. */
644 if (data->vlattrs_ubo_ready && data->vlattrs_ubo != nullptr) {
645 return data->vlattrs_ubo;
646 }
647
648 /* Allocate the buffer data. */
649 const int buf_size = DRW_RESOURCE_CHUNK_LEN;
650
651 if (data->vlattrs_buf == nullptr) {
652 data->vlattrs_buf = static_cast<LayerAttribute *>(
653 MEM_calloc_arrayN(buf_size, sizeof(LayerAttribute), "View Layer Attr Data"));
654 }
655
656 /* Look up attributes.
657 *
658 * Mirrors code in draw_resource.cc and cycles/blender/shader.cpp.
659 */
660 LayerAttribute *buffer = data->vlattrs_buf;
661 int count = 0;
662
663 LISTBASE_FOREACH (GPULayerAttr *, attr, &data->vlattrs_name_list) {
664 float value[4];
665
  /* Missing line 666: presumably `if (BKE_view_layer_find_rgba_attribute(` — TODO confirm. */
667 DST.draw_ctx.scene, DST.draw_ctx.view_layer, attr->name, value))
668 {
669 LayerAttribute *item = &buffer[count++];
670
671 memcpy(item->data, value, sizeof(item->data));
672 item->hash_code = attr->hash_code;
673
674 /* Check if the buffer is full just in case. */
675 if (count >= buf_size) {
676 break;
677 }
678 }
679 }
680
  /* Entry 0 doubles as the header carrying the filled-entry count. */
681 buffer[0].buffer_length = count;
682
683 /* Update or create the UBO object. */
684 if (data->vlattrs_ubo != nullptr) {
685 GPU_uniformbuf_update(data->vlattrs_ubo, buffer);
686 }
687 else {
  /* Always full-size so later cycles can update in place without reallocating. */
688 data->vlattrs_ubo = GPU_uniformbuf_create_ex(
689 sizeof(*buffer) * buf_size, buffer, "View Layer Attributes");
690 }
691
692 data->vlattrs_ubo_ready = true;
693
694 return data->vlattrs_ubo;
695}
696
698{
699 DRWUniformAttrBuf *buffer = static_cast<DRWUniformAttrBuf *>(BLI_ghash_lookup(table, key));
700 return buffer ? &buffer->ubos : nullptr;
701}
702
707
709{
710 GHASH_FOREACH_BEGIN (DRWUniformAttrBuf *, buffer, table) {
711 DRW_sparse_uniform_buffer_flush(&buffer->ubos);
712 }
714}
715
717{
718 DRWUniformAttrBuf *buffer = static_cast<DRWUniformAttrBuf *>(ptr);
719
722 MEM_freeN(buffer);
723}
724
/* Reset every attribute buffer in \a table for the next cycle: invalidate last_handle,
 * clear unused sparse-UBO chunks, then remove buffers that became entirely empty.
 * Removal is deferred to a second pass (threaded through next_empty) because removing
 * during GHASH_FOREACH iteration would invalidate it.
 * NOTE(review): the signature line (original 725, per the symbol index
 * void DRW_uniform_attrs_pool_clear_all(GHash *table)) and the GHASH_FOREACH_END()
 * line (738) are missing from this extracted view. */
726{
727 DRWUniformAttrBuf *remove_list = nullptr;
728
729 GHASH_FOREACH_BEGIN (DRWUniformAttrBuf *, buffer, table) {
  /* Force the next drw_uniform_attrs_pool_update() to rewrite this buffer. */
730 buffer->last_handle = (DRWResourceHandle)-1;
731 DRW_sparse_uniform_buffer_clear(&buffer->ubos, false);
732
733 if (DRW_sparse_uniform_buffer_is_empty(&buffer->ubos)) {
  /* Chain onto the intrusive deferred-removal list. */
734 buffer->next_empty = remove_list;
735 remove_list = buffer;
736 }
737 }
739
740 while (remove_list) {
741 DRWUniformAttrBuf *buffer = remove_list;
742 remove_list = buffer->next_empty;
743 BLI_ghash_remove(table, &buffer->key, nullptr, drw_uniform_attrs_pool_free_cb);
744 }
745}
746
751
bool BKE_object_dupli_find_rgba_attribute(const Object *ob, const DupliObject *dupli, const Object *dupli_parent, const char *name, float r_value[4])
bool BKE_view_layer_find_rgba_attribute(const Scene *scene, const ViewLayer *layer, const char *name, float r_value[4])
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_BITMAP_TEST(_bitmap, _index)
Definition BLI_bitmap.h:65
#define BLI_BITMAP_ENABLE(_bitmap, _index)
Definition BLI_bitmap.h:82
#define BLI_BITMAP_RESIZE(_bitmap, _num)
Definition BLI_bitmap.h:118
void BLI_bitmap_set_all(BLI_bitmap *bitmap, bool set, size_t bits)
Definition bitmap.c:18
unsigned int BLI_bitmap
Definition BLI_bitmap.h:17
#define GHASH_FOREACH_END()
Definition BLI_ghash.h:530
#define GHASH_FOREACH_BEGIN(type, var, what)
Definition BLI_ghash.h:524
bool BLI_ghash_remove(GHash *gh, const void *key, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
Definition BLI_ghash.c:787
void * BLI_ghash_lookup(const GHash *gh, const void *key) ATTR_WARN_UNUSED_RESULT
Definition BLI_ghash.c:731
void BLI_ghash_free(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
Definition BLI_ghash.c:860
bool BLI_ghash_ensure_p_ex(GHash *gh, const void *key, void ***r_key, void ***r_val) ATTR_WARN_UNUSED_RESULT
Definition BLI_ghash.c:768
#define LISTBASE_FOREACH(type, var, list)
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:110
void BLI_remlink(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:130
void(* MemblockValFreeFP)(void *val)
void BLI_memblock_destroy(BLI_memblock *mblk, MemblockValFreeFP free_callback) ATTR_NONNULL(1)
void * BLI_memblock_alloc(BLI_memblock *mblk) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
#define BLI_memblock_create(elem_size)
void BLI_memblock_iternew(BLI_memblock *mblk, BLI_memblock_iter *iter) ATTR_NONNULL()
void BLI_memblock_clear(BLI_memblock *mblk, MemblockValFreeFP free_callback) ATTR_NONNULL(1)
void * BLI_memblock_iterstep(BLI_memblock_iter *iter) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
void * BLI_mempool_alloc(BLI_mempool *pool) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL ATTR_NONNULL(1)
void void BLI_mempool_clear_ex(BLI_mempool *pool, int elem_num_reserve) ATTR_NONNULL(1)
BLI_mempool * BLI_mempool_create(unsigned int esize, unsigned int elem_num, unsigned int pchunk, unsigned int flag) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL
int BLI_mempool_len(const BLI_mempool *pool) ATTR_NONNULL(1)
void BLI_mempool_destroy(BLI_mempool *pool) ATTR_NONNULL(1)
unsigned int uint
#define GPU_BATCH_INST_VBO_MAX_LEN
Definition GPU_batch.hh:33
int GPU_batch_instbuf_add(blender::gpu::Batch *batch, blender::gpu::VertBuf *vertex_buf, bool own_vbo)
void GPU_batch_clear(blender::gpu::Batch *batch)
#define GPU_BATCH_VBO_MAX_LEN
Definition GPU_batch.hh:32
#define GPU_batch_init(batch, primitive_type, vertex_buf, index_buf)
Definition GPU_batch.hh:167
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition GPU_batch.hh:205
void GPU_batch_copy(blender::gpu::Batch *batch_dst, blender::gpu::Batch *batch_src)
@ GPU_BATCH_BUILDING
Definition GPU_batch.hh:56
void GPU_uniform_attr_list_copy(GPUUniformAttrList *dest, const GPUUniformAttrList *src)
GHash * GPU_uniform_attr_list_hash_new(const char *info)
void GPU_uniform_attr_list_free(GPUUniformAttrList *set)
GPUPrimType
GPUUniformBuf * GPU_uniformbuf_create_ex(size_t size, const void *data, const char *name)
void GPU_uniformbuf_unbind(GPUUniformBuf *ubo)
#define GPU_uniformbuf_create(size)
void GPU_uniformbuf_update(GPUUniformBuf *ubo, const void *data)
void GPU_uniformbuf_free(GPUUniformBuf *ubo)
void GPU_uniformbuf_bind(GPUUniformBuf *ubo, int slot)
void GPU_vertbuf_handle_ref_remove(blender::gpu::VertBuf *verts)
void GPU_vertbuf_use(blender::gpu::VertBuf *)
void GPU_vertbuf_data_resize(blender::gpu::VertBuf &verts, uint v_len)
void GPU_vertbuf_handle_ref_add(blender::gpu::VertBuf *verts)
void GPU_vertbuf_data_len_set(blender::gpu::VertBuf &verts, uint v_len)
@ GPU_VERTBUF_DATA_UPLOADED
GPUVertBufStatus GPU_vertbuf_get_status(const blender::gpu::VertBuf *verts)
#define GPU_VERTBUF_DISCARD_SAFE(verts)
blender::gpu::VertBuf * GPU_vertbuf_calloc()
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
void GPU_vertbuf_init_with_format_ex(blender::gpu::VertBuf &verts, const GPUVertFormat &format, GPUUsageType)
@ GPU_USAGE_DYNAMIC
uint GPU_vertbuf_get_vertex_alloc(const blender::gpu::VertBuf *verts)
Read Guarded memory(de)allocation.
#define MEM_recallocN(vmemh, len)
#define MEM_SAFE_FREE(v)
static ListBase g_idatalists
void DRW_instance_data_list_free(DRWInstanceDataList *idatalist)
DRWSparseUniformBuf * DRW_sparse_uniform_buffer_new(uint item_size, uint chunk_size)
#define CHUNK_LIST_STEP
static void temp_buffer_handle_free(DRWTempBufferHandle *handle)
blender::gpu::Batch * DRW_temp_batch_request(DRWInstanceDataList *idatalist, blender::gpu::VertBuf *buf, GPUPrimType prim_type)
static void temp_instancing_handle_free(DRWTempInstancingHandle *handle)
void DRW_instance_data_list_free_unused(DRWInstanceDataList *idatalist)
DRWInstanceDataList * DRW_instance_data_list_create()
bool DRW_sparse_uniform_buffer_is_empty(DRWSparseUniformBuf *buffer)
static void instancing_batch_references_remove(blender::gpu::Batch *batch)
static DRWUniformAttrBuf * drw_uniform_attrs_pool_ensure(GHash *table, const GPUUniformAttrList *key)
static GPUUniformBuf * drw_sparse_uniform_buffer_get_ubo(DRWSparseUniformBuf *buffer, int chunk)
static void instancing_batch_references_add(blender::gpu::Batch *batch)
static DRWInstanceData * drw_instance_data_create(DRWInstanceDataList *idatalist, uint attr_size)
GPUUniformBuf * drw_ensure_layer_attribute_buffer()
void DRW_uniform_attrs_pool_flush_all(GHash *table)
static void DRW_instance_data_free(DRWInstanceData *idata)
void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
void DRW_sparse_uniform_buffer_free(DRWSparseUniformBuf *buffer)
static void drw_sparse_uniform_buffer_init(DRWSparseUniformBuf *buffer, uint item_size, uint chunk_size)
void DRW_uniform_attrs_pool_clear_all(GHash *table)
static void temp_batch_free(blender::gpu::Batch **batch)
blender::gpu::VertBuf * DRW_temp_buffer_request(DRWInstanceDataList *idatalist, GPUVertFormat *format, int *vert_len)
DRWSparseUniformBuf * DRW_uniform_attrs_pool_find_ubo(GHash *table, const GPUUniformAttrList *key)
blender::gpu::Batch * DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist, blender::gpu::VertBuf *buf, blender::gpu::Batch *instancer, blender::gpu::Batch *geom)
static void drw_uniform_attrs_pool_free_cb(void *ptr)
void * DRW_instance_data_next(DRWInstanceData *idata)
void DRW_sparse_uniform_buffer_unbind(DRWSparseUniformBuf *buffer, int chunk)
void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int location)
GHash * DRW_uniform_attrs_pool_new()
void DRW_instance_data_list_reset(DRWInstanceDataList *idatalist)
void DRW_sparse_uniform_buffer_clear(DRWSparseUniformBuf *buffer, bool free_all)
void DRW_sparse_uniform_buffer_flush(DRWSparseUniformBuf *buffer)
DRWInstanceData * DRW_instance_data_request(DRWInstanceDataList *idatalist, uint attr_size)
void * DRW_sparse_uniform_buffer_ensure_item(DRWSparseUniformBuf *buffer, int chunk, int item)
void drw_uniform_attrs_pool_update(GHash *table, const GPUUniformAttrList *key, DRWResourceHandle *handle, const Object *ob, const Object *dupli_parent, const DupliObject *dupli_source)
void DRW_instance_data_list_resize(DRWInstanceDataList *idatalist)
void DRW_uniform_attrs_pool_free(GHash *table)
static void drw_uniform_attribute_lookup(const GPUUniformAttr *attr, const Object *ob, const Object *dupli_parent, const DupliObject *dupli_source, float r_data[4])
#define DRW_BUFFER_VERTS_CHUNK
#define MAX_INSTANCE_DATA_SIZE
DRWManager DST
BLI_INLINE uint32_t DRW_handle_chunk_get(const DRWResourceHandle *handle)
#define DRW_RESOURCE_CHUNK_LEN
BLI_INLINE uint32_t DRW_handle_id_get(const DRWResourceHandle *handle)
uint32_t DRWResourceHandle
draw_view in_light_buf[] float
Batch * GPU_batch_calloc()
Definition gpu_batch.cc:49
struct @620::@622 batch
static uint attr_size(const GPUVertAttr *a)
int count
format
void *(* MEM_mallocN)(size_t len, const char *str)
Definition mallocn.cc:44
void *(* MEM_calloc_arrayN)(size_t len, size_t size, const char *str)
Definition mallocn.cc:43
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
void *(* MEM_callocN)(size_t len, const char *str)
Definition mallocn.cc:42
ViewLayer * view_layer
GPUUniformBuf * vlattrs_ubo
BLI_memblock * pool_batching
DRWInstanceData * idata_tail[MAX_INSTANCE_DATA_SIZE]
DRWInstanceData * idata_head[MAX_INSTANCE_DATA_SIZE]
DRWInstanceDataList * next
BLI_memblock * pool_buffers
DRWInstanceDataList * prev
BLI_memblock * pool_instancing
BLI_mempool * mempool
DRWInstanceData * next
DRWContextState draw_ctx
DRWData * vmempool
GPUUniformBuf ** chunk_ubos
blender::gpu::VertBuf * buf
blender::gpu::Batch * instancer
blender::gpu::VertBuf * buf
blender::gpu::Batch * batch
blender::gpu::Batch * geom
DRWUniformAttrBuf * next_empty
DRWSparseUniformBuf ubos
GPUUniformAttrList key
DRWResourceHandle last_handle
char name[64]
PointerRNA * ptr
Definition wm_files.cc:4126