/* SPDX-FileCopyrightText: 2008 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup bli
 *
 * Simple, fast memory allocator for allocating many elements of the same size.
 *
 * Supports:
 *
 * - Freeing chunks.
 * - Iterating over elements
 *   (optionally when using the #BLI_MEMPOOL_ALLOW_ITER flag).
 */

#include <algorithm>
#include <cstdlib>
#include <cstring>

#include "atomic_ops.h"

#include "BLI_utildefines.h"

#include "BLI_asan.h"
#include "BLI_math_base.h"
#include "BLI_mempool.h"         /* own include */
#include "BLI_mempool_private.h" /* own include */

#ifdef WITH_ASAN
#  include "BLI_threads.h"
#endif

#include "MEM_guardedalloc.h"

#ifdef WITH_MEM_VALGRIND
#  include "valgrind/memcheck.h"
#endif

#include "BLI_strict_flags.h" /* IWYU pragma: keep. Keep last. */

#ifdef WITH_ASAN
#  define POISON_REDZONE_SIZE 32
#else
#  define POISON_REDZONE_SIZE 0
#endif

/* NOTE: copied from BLO_core_bhead.hh, don't use here because we're in BLI. */
/* NOTE: this is endianness-sensitive. */
#define MAKE_ID(a, b, c, d) (int(d) << 24 | int(c) << 16 | (b) << 8 | (a))
#define MAKE_ID_8(a, b, c, d, e, f, g, h) \
  (int64_t(h) << 56 | int64_t(g) << 48 | int64_t(f) << 40 | int64_t(e) << 32 | int64_t(d) << 24 | \
   int64_t(c) << 16 | int64_t(b) << 8 | (a))

/**
 * Important that this value is _not_ aligned with `sizeof(void *)`,
 * so having a pointer to 2/4/8... aligned memory is enough to ensure
 * the `freeword` will never be mistaken for a stored pointer.
 * To be safe, use a word that reads the same in both directions.
 */
#define FREEWORD \
  ((sizeof(void *) > sizeof(int32_t)) ? MAKE_ID_8('e', 'e', 'r', 'f', 'f', 'r', 'e', 'e') : \
                                        MAKE_ID('e', 'f', 'f', 'e'))

/**
 * The 'used' word just needs to be set to something besides FREEWORD.
 */
#define USEDWORD MAKE_ID('u', 's', 'e', 'd')

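/* For example `MAKE_ID('e', 'f', 'f', 'e')` evaluates to 0x65666665; the byte
 * sequence "effe" is a palindrome, so the in-memory pattern is identical on
 * little and big endian systems (the same holds for the 8-byte "eerffree"). */
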
/* Optimize pool size. */
#define USE_CHUNK_POW2

#ifndef NDEBUG
static bool mempool_debug_memset = false;
#endif

/**
 * A free element from #BLI_mempool.chunks. Data is cast to this type and stored in
 * #BLI_mempool.free as a single linked list, each item #BLI_mempool.esize large.
 *
 * Each element represents a block which #BLI_mempool_alloc may return.
 */
struct BLI_freenode {
  BLI_freenode *next;
  /** Used to identify this as a freed node. */
  intptr_t freeword;
};

/**
 * A chunk of memory in the mempool stored in
 * #BLI_mempool.chunks as a singly linked list.
 */
struct BLI_mempool_chunk {
  BLI_mempool_chunk *next;
};

/**
 * The mempool, stores and tracks memory \a chunks and elements within those chunks \a free.
 */
struct BLI_mempool {
#ifdef WITH_ASAN
  /** Serialize access to memory-pools when debugging with ASAN. */
  ThreadMutex mutex;
#endif
  /** Single linked list of allocated chunks. */
  BLI_mempool_chunk *chunks;
  /** Keep a pointer to the last chunk so new chunks can be appended,
   * needed so iteration walks chunks in allocation order. */
  BLI_mempool_chunk *chunk_tail;

  /** Element size in bytes (includes the red-zone when ASAN is enabled). */
  uint esize;
  /** Chunk size in bytes. */
  uint csize;
  /** Number of elements per chunk. */
  uint pchunk;
  uint flag;

  /** Head of the free element list, threaded through the chunk data. */
  BLI_freenode *free;
  /** Number of chunks to keep on #BLI_mempool_clear. */
  uint maxchunks;
  /** Number of elements currently in use. */
  uint totused;
};

#define MEMPOOL_ELEM_SIZE_MIN (sizeof(void *) * 2)

#define CHUNK_DATA(chunk) \
  ((BLI_freenode *)(CHECK_TYPE_INLINE(chunk, BLI_mempool_chunk *), (void *)((chunk) + 1)))

#define NODE_STEP_NEXT(node) ((BLI_freenode *)((char *)(node) + esize))
#define NODE_STEP_PREV(node) ((BLI_freenode *)((char *)(node) - esize))

/** Extra bytes implicitly used for every chunk alloc. */
#define CHUNK_OVERHEAD uint(MEM_SIZE_OVERHEAD + sizeof(BLI_mempool_chunk))

static void mempool_asan_unlock(BLI_mempool *pool)
{
#ifdef WITH_ASAN
  BLI_mutex_unlock(&pool->mutex);
#else
  UNUSED_VARS(pool);
#endif
}

static void mempool_asan_lock(BLI_mempool *pool)
{
#ifdef WITH_ASAN
  BLI_mutex_lock(&pool->mutex);
#else
  UNUSED_VARS(pool);
#endif
}

BLI_INLINE BLI_mempool_chunk *mempool_chunk_find(BLI_mempool_chunk *head, uint index)
{
  while (index-- && head) {
    head = head->next;
  }
  return head;
}

/**
 * \return the number of chunks to allocate based on how many elements are needed.
 *
 * \note for small pools 1 is a good default, the elements need to be initialized,
 * adding overhead on creation which is redundant if they aren't used.
 */
BLI_INLINE uint mempool_maxchunks(const uint elem_num, const uint pchunk)
{
  return (elem_num <= pchunk) ? 1 : ((elem_num / pchunk) + 1);
}

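/* For example `elem_num = 300` with `pchunk = 128` gives `(300 / 128) + 1 = 3`
 * chunks (384 element slots): the integer division intentionally over-reserves
 * rather than under-reserves. */
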
static BLI_mempool_chunk *mempool_chunk_alloc(const BLI_mempool *pool)
{
  return static_cast<BLI_mempool_chunk *>(
      MEM_mallocN(sizeof(BLI_mempool_chunk) + size_t(pool->csize), "mempool chunk"));
}

/**
 * Initialize a chunk and add it into \a pool->chunks.
 *
 * \param pool: The pool to add the chunk into.
 * \param mpchunk: The new uninitialized chunk (can be malloc'd).
 * \param last_tail: The last element of the previous chunk
 * (used when building free chunks initially).
 * \return The last chunk.
 */
static BLI_freenode *mempool_chunk_add(BLI_mempool *pool,
                                       BLI_mempool_chunk *mpchunk,
                                       BLI_freenode *last_tail)
{
  const uint esize = pool->esize;
  BLI_freenode *curnode = CHUNK_DATA(mpchunk);
  uint j;

  /* Append. */
  if (pool->chunk_tail) {
    pool->chunk_tail->next = mpchunk;
  }
  else {
    BLI_assert(pool->chunks == nullptr);
    pool->chunks = mpchunk;
  }

  mpchunk->next = nullptr;
  pool->chunk_tail = mpchunk;

  if (UNLIKELY(pool->free == nullptr)) {
    pool->free = curnode;
  }

  /* Loop through the allocated data, building the pointer structures. */
  j = pool->pchunk;
  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
    while (j--) {
      BLI_freenode *next;

      BLI_asan_unpoison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
      VALGRIND_MAKE_MEM_DEFINED(curnode, pool->esize - POISON_REDZONE_SIZE);
#endif
      curnode->next = next = NODE_STEP_NEXT(curnode);
      curnode->freeword = FREEWORD;

      BLI_asan_poison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
      VALGRIND_MAKE_MEM_UNDEFINED(curnode, pool->esize);
#endif
      curnode = next;
    }
  }
  else {
    while (j--) {
      BLI_freenode *next;

      BLI_asan_unpoison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
      VALGRIND_MAKE_MEM_DEFINED(curnode, pool->esize - POISON_REDZONE_SIZE);
#endif
      curnode->next = next = NODE_STEP_NEXT(curnode);
      BLI_asan_poison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
      VALGRIND_MAKE_MEM_UNDEFINED(curnode, pool->esize);
#endif
      curnode = next;
    }
  }

  /* Terminate the list (rewind one),
   * will be overwritten if `curnode` gets passed in again as `last_tail`. */

  if (POISON_REDZONE_SIZE > 0) {
    BLI_asan_unpoison(curnode, pool->esize);
    BLI_asan_poison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
    VALGRIND_MAKE_MEM_DEFINED(curnode, pool->esize - POISON_REDZONE_SIZE);
    VALGRIND_MAKE_MEM_UNDEFINED(curnode, pool->esize);
#endif
  }

  curnode = NODE_STEP_PREV(curnode);

  BLI_asan_unpoison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
  VALGRIND_MAKE_MEM_DEFINED(curnode, pool->esize - POISON_REDZONE_SIZE);
#endif

  curnode->next = nullptr;
  BLI_asan_poison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
  VALGRIND_MAKE_MEM_UNDEFINED(curnode, pool->esize);
#endif

  /* The final pointer in the previously allocated chunk is wrong: fix it up. */
  if (last_tail) {
    BLI_asan_unpoison(last_tail, pool->esize - POISON_REDZONE_SIZE);
#ifdef WITH_MEM_VALGRIND
    VALGRIND_MAKE_MEM_DEFINED(last_tail, pool->esize - POISON_REDZONE_SIZE);
#endif
    last_tail->next = CHUNK_DATA(mpchunk);
    BLI_asan_poison(last_tail, pool->esize);
#ifdef WITH_MEM_VALGRIND
    VALGRIND_MAKE_MEM_UNDEFINED(last_tail, pool->esize);
#endif
  }

  return curnode;
}
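
/* The resulting chunk layout, every slot threaded onto the free-list in address
 * order (a sketch, actual sizes depend on `esize` and `pchunk`):
 *
 *   [BLI_mempool_chunk][slot 0]->[slot 1]-> ... ->[slot pchunk-1]->(first slot of next chunk, or null)
 */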

static void mempool_chunk_free(BLI_mempool_chunk *mpchunk, BLI_mempool *pool)
{
#ifdef WITH_ASAN
  BLI_asan_unpoison(mpchunk, sizeof(BLI_mempool_chunk) + pool->esize * pool->csize);
#else
  UNUSED_VARS(pool);
#endif
#ifdef WITH_MEM_VALGRIND
  VALGRIND_MAKE_MEM_DEFINED(mpchunk, sizeof(BLI_mempool_chunk) + pool->esize * pool->csize);
#endif
  MEM_freeN(mpchunk);
}

static void mempool_chunk_free_all(BLI_mempool_chunk *mpchunk, BLI_mempool *pool)
{
  BLI_mempool_chunk *mpchunk_next;

  for (; mpchunk; mpchunk = mpchunk_next) {
    mpchunk_next = mpchunk->next;
    mempool_chunk_free(mpchunk, pool);
  }
}

BLI_mempool *BLI_mempool_create(uint esize, uint elem_num, uint pchunk, uint flag)
{
  BLI_mempool *pool;
  BLI_freenode *last_tail = nullptr;
  uint i, maxchunks;

  /* Allocate the pool structure. */
  pool = MEM_callocN<BLI_mempool>("memory pool");

#ifdef WITH_ASAN
  BLI_mutex_init(&pool->mutex);
#endif

  /* Set the element size. */
  esize = std::max(esize, uint(MEMPOOL_ELEM_SIZE_MIN));

  if (flag & BLI_MEMPOOL_ALLOW_ITER) {
    esize = std::max(esize, uint(sizeof(BLI_freenode)));
  }

  esize += POISON_REDZONE_SIZE;

  maxchunks = mempool_maxchunks(elem_num, pchunk);

  pool->chunks = nullptr;
  pool->chunk_tail = nullptr;
  pool->esize = esize;

  /* Optimize chunk size to powers of 2, accounting for slop-space. */
#ifdef USE_CHUNK_POW2
  {
    BLI_assert(power_of_2_max_u(pchunk * esize) > CHUNK_OVERHEAD);
    pchunk = (power_of_2_max_u(pchunk * esize) - CHUNK_OVERHEAD) / esize;
  }
#endif

  pool->csize = esize * pchunk;

  /* Ensure this is a power of 2, minus the rounding by element size. */
#if defined(USE_CHUNK_POW2) && !defined(NDEBUG)
  {
    uint final_size = uint(MEM_SIZE_OVERHEAD) + uint(sizeof(BLI_mempool_chunk)) + pool->csize;
    BLI_assert((uint(power_of_2_max_u(final_size)) - final_size) < pool->esize);
  }
#endif

  pool->pchunk = pchunk;
  pool->flag = flag;
  pool->free = nullptr; /* #mempool_chunk_add assigns. */
  pool->maxchunks = maxchunks;
  pool->totused = 0;

  if (elem_num) {
    /* Allocate the actual chunks. */
    for (i = 0; i < maxchunks; i++) {
      BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
      last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
    }
  }

#ifdef WITH_MEM_VALGRIND
  VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

  return pool;
}

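/* A minimal usage sketch (illustrative only, `MyElem` is a hypothetical element
 * type; `BLI_MEMPOOL_NOP` means no flags, use #BLI_MEMPOOL_ALLOW_ITER when
 * iteration over the pool is needed):
 *
 *   BLI_mempool *pool = BLI_mempool_create(sizeof(MyElem), 0, 512, BLI_MEMPOOL_NOP);
 *   MyElem *elem = static_cast<MyElem *>(BLI_mempool_alloc(pool));
 *   ...
 *   BLI_mempool_free(pool, elem);
 *   BLI_mempool_destroy(pool);
 */
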
void *BLI_mempool_alloc(BLI_mempool *pool)
{
  BLI_freenode *free_pop;

  if (UNLIKELY(pool->free == nullptr)) {
    /* Need to allocate a new chunk. */
    BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
    mempool_chunk_add(pool, mpchunk, nullptr);
  }

  free_pop = pool->free;

  BLI_asan_unpoison(free_pop, pool->esize - POISON_REDZONE_SIZE);
#ifdef WITH_MEM_VALGRIND
  VALGRIND_MEMPOOL_ALLOC(pool, free_pop, pool->esize - POISON_REDZONE_SIZE);
  /* Mark as defined, then undefine immediately before returning so:
   * - `free_pop->next` can be read without reading "undefined" memory.
   * - `freeword` can be set without causing the memory to be considered "defined".
   *
   * These could be handled on a more granular level - defining & undefining these
   * members explicitly - but that requires more involved calls,
   * adding overhead for no real benefit. */
  VALGRIND_MAKE_MEM_DEFINED(free_pop, pool->esize - POISON_REDZONE_SIZE);
#endif

  BLI_assert(pool->chunk_tail->next == nullptr);

  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
    free_pop->freeword = USEDWORD;
  }

  pool->free = free_pop->next;
  pool->totused++;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_MAKE_MEM_UNDEFINED(free_pop, pool->esize - POISON_REDZONE_SIZE);
#endif

  return (void *)free_pop;
}

void *BLI_mempool_calloc(BLI_mempool *pool)
{
  void *retval = BLI_mempool_alloc(pool);

  memset(retval, 0, size_t(pool->esize) - POISON_REDZONE_SIZE);

  return retval;
}

void BLI_mempool_free(BLI_mempool *pool, void *addr)
{
  BLI_freenode *newhead = static_cast<BLI_freenode *>(addr);

#ifndef NDEBUG
  {
    BLI_mempool_chunk *chunk;
    bool found = false;
    for (chunk = pool->chunks; chunk; chunk = chunk->next) {
      if (ARRAY_HAS_ITEM((char *)addr, (char *)CHUNK_DATA(chunk), pool->csize)) {
        found = true;
        break;
      }
    }
    if (!found) {
      BLI_assert_msg(0, "Attempt to free data which is not in pool.\n");
    }
  }

  /* Enable for debugging. */
  if (UNLIKELY(mempool_debug_memset)) {
    memset(addr, 255, pool->esize - POISON_REDZONE_SIZE);
  }
#endif

  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
#ifndef NDEBUG
    /* This will detect double free's. */
    BLI_assert(newhead->freeword != FREEWORD);
#endif
    newhead->freeword = FREEWORD;
  }

  newhead->next = pool->free;
  pool->free = newhead;

  BLI_asan_poison(newhead, pool->esize);

  pool->totused--;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_MEMPOOL_FREE(pool, addr);
#endif

  /* Nothing is in use; free all the chunks except the first. */
  if (UNLIKELY(pool->totused == 0) && (pool->chunks->next)) {
    const uint esize = pool->esize;
    BLI_freenode *curnode;
    uint j;
    BLI_mempool_chunk *first;

    first = pool->chunks;
    mempool_chunk_free_all(first->next, pool);
    first->next = nullptr;
    pool->chunk_tail = first;

    /* Temporary allocation so VALGRIND doesn't complain when setting freed blocks 'next'. */
#ifdef WITH_MEM_VALGRIND
    VALGRIND_MEMPOOL_ALLOC(pool, CHUNK_DATA(first), pool->csize);
#endif

    curnode = CHUNK_DATA(first);
    pool->free = curnode;

    j = pool->pchunk;
    while (j--) {
      BLI_asan_unpoison(curnode, pool->esize);
      BLI_freenode *next = curnode->next = NODE_STEP_NEXT(curnode);
      BLI_asan_poison(curnode, pool->esize);
      curnode = next;
    }

    BLI_asan_unpoison(curnode, pool->esize);
    BLI_freenode *prev = NODE_STEP_PREV(curnode);
    BLI_asan_poison(curnode, pool->esize);

    curnode = prev;

    BLI_asan_unpoison(curnode, pool->esize);
    curnode->next = nullptr; /* Terminate the list. */
    BLI_asan_poison(curnode, pool->esize);

#ifdef WITH_MEM_VALGRIND
    VALGRIND_MEMPOOL_FREE(pool, CHUNK_DATA(first));
#endif
  }
}

int BLI_mempool_len(const BLI_mempool *pool)
{
  int ret = int(pool->totused);

  return ret;
}

void *BLI_mempool_findelem(BLI_mempool *pool, uint index)
{
  mempool_asan_lock(pool);

  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  if (index < pool->totused) {
    /* We could have some faster mem chunk stepping code inline. */
    BLI_mempool_iter iter;
    void *elem;
    BLI_mempool_iternew(pool, &iter);
    for (elem = BLI_mempool_iterstep(&iter); index-- != 0; elem = BLI_mempool_iterstep(&iter)) {
      /* Pass. */
    }

    mempool_asan_unlock(pool);
    return elem;
  }

  mempool_asan_unlock(pool);
  return nullptr;
}

void BLI_mempool_as_array(BLI_mempool *pool, void *data)
{
  const uint esize = pool->esize - uint(POISON_REDZONE_SIZE);
  BLI_mempool_iter iter;
  const char *elem;
  char *p = static_cast<char *>(data);

  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  mempool_asan_lock(pool);
  BLI_mempool_iternew(pool, &iter);
  while ((elem = static_cast<const char *>(BLI_mempool_iterstep(&iter)))) {
    memcpy(p, elem, size_t(esize));
    p = reinterpret_cast<char *>(NODE_STEP_NEXT(p));
  }
  mempool_asan_unlock(pool);
}

void *BLI_mempool_as_arrayN(BLI_mempool *pool, const char *allocstr)
{
  char *data = static_cast<char *>(
      MEM_malloc_arrayN(size_t(pool->totused), pool->esize, allocstr));
  BLI_mempool_as_array(pool, data);
  return data;
}

void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter)
{
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  iter->pool = pool;
  iter->curchunk = pool->chunks;
  iter->curindex = 0;
}

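/* Typical iteration (a sketch; the pool must have been created with
 * #BLI_MEMPOOL_ALLOW_ITER so freed slots can be recognized by their `freeword`):
 *
 *   BLI_mempool_iter iter;
 *   BLI_mempool_iternew(pool, &iter);
 *   for (void *elem = BLI_mempool_iterstep(&iter); elem; elem = BLI_mempool_iterstep(&iter)) {
 *     ...
 *   }
 */
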
static void mempool_threadsafe_iternew(BLI_mempool *pool, BLI_mempool_threadsafe_iter *ts_iter)
{
  BLI_mempool_iternew(pool, &ts_iter->iter);
  ts_iter->curchunk_threaded_shared = nullptr;
}

ParallelMempoolTaskData *mempool_iter_threadsafe_create(BLI_mempool *pool, const size_t iter_num)
{
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  ParallelMempoolTaskData *iter_arr = MEM_malloc_arrayN<ParallelMempoolTaskData>(iter_num,
                                                                                 __func__);
  BLI_mempool_chunk **curchunk_threaded_shared = MEM_callocN<BLI_mempool_chunk *>(__func__);

  mempool_threadsafe_iternew(pool, &iter_arr->ts_iter);

  *curchunk_threaded_shared = iter_arr->ts_iter.iter.curchunk;
  iter_arr->ts_iter.curchunk_threaded_shared = curchunk_threaded_shared;
  for (size_t i = 1; i < iter_num; i++) {
    iter_arr[i].ts_iter = iter_arr[0].ts_iter;
    *curchunk_threaded_shared = iter_arr[i].ts_iter.iter.curchunk =
        ((*curchunk_threaded_shared) ? (*curchunk_threaded_shared)->next : nullptr);
  }

  return iter_arr;
}

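/* Each of the `iter_num` iterators above starts on its own chunk; when a worker
 * exhausts a chunk it claims the next one through `curchunk_threaded_shared`
 * using `atomic_cas_ptr`, so every chunk is handed to exactly one iterator. */
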
void mempool_iter_threadsafe_destroy(ParallelMempoolTaskData *iter_arr)
{
  BLI_assert(iter_arr->ts_iter.curchunk_threaded_shared != nullptr);

  MEM_freeN(iter_arr->ts_iter.curchunk_threaded_shared);
  MEM_freeN(iter_arr);
}

#if 0
/* Unoptimized, more readable. */

static void *bli_mempool_iternext(BLI_mempool_iter *iter)
{
  void *ret = nullptr;

  if (iter->curchunk == nullptr || !iter->pool->totused) {
    return ret;
  }

  ret = ((char *)CHUNK_DATA(iter->curchunk)) + (iter->pool->esize * iter->curindex);

  iter->curindex++;

  if (iter->curindex == iter->pool->pchunk) {
    iter->curindex = 0;
    iter->curchunk = iter->curchunk->next;
  }

  return ret;
}

void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
  BLI_freenode *ret;

  do {
    ret = static_cast<BLI_freenode *>(bli_mempool_iternext(iter));
  } while (ret && ret->freeword == FREEWORD);

  return ret;
}

#else /* Optimized version of code above. */

void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
  if (UNLIKELY(iter->curchunk == nullptr)) {
    return nullptr;
  }

  const uint esize = iter->pool->esize;
  BLI_freenode *curnode = POINTER_OFFSET(CHUNK_DATA(iter->curchunk), (esize * iter->curindex));
  BLI_freenode *ret;
  do {
    ret = curnode;

    BLI_asan_unpoison(ret, iter->pool->esize - POISON_REDZONE_SIZE);
#  ifdef WITH_MEM_VALGRIND
    VALGRIND_MAKE_MEM_DEFINED(ret, iter->pool->esize - POISON_REDZONE_SIZE);
#  endif

    if (++iter->curindex != iter->pool->pchunk) {
      curnode = POINTER_OFFSET(curnode, esize);
    }
    else {
      iter->curindex = 0;
      iter->curchunk = iter->curchunk->next;
      if (UNLIKELY(iter->curchunk == nullptr)) {
        BLI_asan_unpoison(ret, iter->pool->esize - POISON_REDZONE_SIZE);
#  ifdef WITH_MEM_VALGRIND
        VALGRIND_MAKE_MEM_DEFINED(ret, iter->pool->esize - POISON_REDZONE_SIZE);
#  endif
        void *ret2 = (ret->freeword == FREEWORD) ? nullptr : ret;

        if (ret->freeword == FREEWORD) {
          BLI_asan_poison(ret, iter->pool->esize);
#  ifdef WITH_MEM_VALGRIND
          VALGRIND_MAKE_MEM_UNDEFINED(ret, iter->pool->esize);
#  endif
        }

        return ret2;
      }
      curnode = CHUNK_DATA(iter->curchunk);
    }
  } while (ret->freeword == FREEWORD);

  return ret;
}

void *mempool_iter_threadsafe_step(BLI_mempool_threadsafe_iter *ts_iter)
{
  BLI_mempool_iter *iter = &ts_iter->iter;
  if (UNLIKELY(iter->curchunk == nullptr)) {
    return nullptr;
  }

  mempool_asan_lock(iter->pool);

  const uint esize = iter->pool->esize;
  BLI_freenode *curnode = POINTER_OFFSET(CHUNK_DATA(iter->curchunk), (esize * iter->curindex));
  BLI_freenode *ret;
  do {
    ret = curnode;

    BLI_asan_unpoison(ret, esize - POISON_REDZONE_SIZE);
#  ifdef WITH_MEM_VALGRIND
    VALGRIND_MAKE_MEM_DEFINED(ret, iter->pool->esize);
#  endif

    if (++iter->curindex != iter->pool->pchunk) {
      curnode = POINTER_OFFSET(curnode, esize);
    }
    else {
      iter->curindex = 0;

      /* Begin unique to the `threadsafe` version of this function. */
      for (iter->curchunk = *ts_iter->curchunk_threaded_shared;
           (iter->curchunk != nullptr) &&
           (atomic_cas_ptr((void **)ts_iter->curchunk_threaded_shared,
                           iter->curchunk,
                           iter->curchunk->next) != iter->curchunk);
           iter->curchunk = *ts_iter->curchunk_threaded_shared)
      {
        /* Pass. */
      }
      if (UNLIKELY(iter->curchunk == nullptr)) {
        if (ret->freeword == FREEWORD) {
          BLI_asan_poison(ret, esize);
#  ifdef WITH_MEM_VALGRIND
          VALGRIND_MAKE_MEM_UNDEFINED(ret, iter->pool->esize);
#  endif
          mempool_asan_unlock(iter->pool);
          return nullptr;
        }
        mempool_asan_unlock(iter->pool);
        return ret;
      }
      /* End `threadsafe` exception. */

      iter->curchunk = iter->curchunk->next;
      if (UNLIKELY(iter->curchunk == nullptr)) {
        if (ret->freeword == FREEWORD) {
          BLI_asan_poison(ret, iter->pool->esize);
#  ifdef WITH_MEM_VALGRIND
          VALGRIND_MAKE_MEM_UNDEFINED(ret, iter->pool->esize);
#  endif
          mempool_asan_unlock(iter->pool);
          return nullptr;
        }
        mempool_asan_unlock(iter->pool);
        return ret;
      }

      curnode = CHUNK_DATA(iter->curchunk);
    }

    if (ret->freeword == FREEWORD) {
      BLI_asan_poison(ret, iter->pool->esize);
#  ifdef WITH_MEM_VALGRIND
      VALGRIND_MAKE_MEM_UNDEFINED(ret, iter->pool->esize);
#  endif
    }
    else {
      break;
    }
  } while (true);

  mempool_asan_unlock(iter->pool);
  return ret;
}

#endif

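/* The `_threadsafe_` variants above are meant for parallel traversal (e.g. via
 * `BLI_task_parallel_mempool`): create one iterator per worker with
 * #mempool_iter_threadsafe_create, let each worker call
 * #mempool_iter_threadsafe_step until it returns null, then release the
 * iterators with #mempool_iter_threadsafe_destroy. */
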
void BLI_mempool_clear_ex(BLI_mempool *pool, const int elem_num_reserve)
{
  BLI_mempool_chunk *mpchunk;
  BLI_mempool_chunk *mpchunk_next;
  uint maxchunks;

  BLI_mempool_chunk *chunks_temp;
  BLI_freenode *last_tail = nullptr;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_DESTROY_MEMPOOL(pool);
  VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

  if (elem_num_reserve == -1) {
    maxchunks = pool->maxchunks;
  }
  else {
    maxchunks = mempool_maxchunks(uint(elem_num_reserve), pool->pchunk);
  }

  /* Free all after `pool->maxchunks`. */
  mpchunk = mempool_chunk_find(pool->chunks, maxchunks - 1);
  if (mpchunk && mpchunk->next) {
    /* Terminate. */
    mpchunk_next = mpchunk->next;
    mpchunk->next = nullptr;
    mpchunk = mpchunk_next;

    do {
      mpchunk_next = mpchunk->next;
      mempool_chunk_free(mpchunk, pool);
    } while ((mpchunk = mpchunk_next));
  }

  /* Re-initialize. */
  pool->free = nullptr;
  pool->totused = 0;
  chunks_temp = pool->chunks;
  pool->chunks = nullptr;
  pool->chunk_tail = nullptr;

  while ((mpchunk = chunks_temp)) {
    chunks_temp = mpchunk->next;
    last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
  }
}

void BLI_mempool_clear(BLI_mempool *pool)
{
  BLI_mempool_clear_ex(pool, -1);
}

void BLI_mempool_destroy(BLI_mempool *pool)
{
  mempool_chunk_free_all(pool->chunks, pool);

#ifdef WITH_MEM_VALGRIND
  VALGRIND_DESTROY_MEMPOOL(pool);
#endif

  MEM_freeN(pool);
}

#ifndef NDEBUG
void BLI_mempool_set_memory_debug()
{
  mempool_debug_memset = true;
}
#endif