/**
 * Allocate `_size` bytes of scratch memory: on the stack via `alloca` for
 * small requests (<= 8192 bytes), otherwise from the guarded heap allocator
 * (`MEM_mallocN`, tagged with the calling function's name).
 * Heap allocations must be released with the matching #MALLOCA_FREE.
 *
 * NOTE: `_size` is evaluated more than once — do not pass an expression
 * with side effects. The whole expansion is parenthesized so the macro can
 * be used safely inside larger expressions.
 */
#define MALLOCA(_size) (((_size) <= 8192) ? alloca(_size) : MEM_mallocN((_size), __func__))
25#define MALLOCA_FREE(_mem, _size) \
26 if (((_mem) != nullptr) && ((_size) > 8192)) { \
66 void *userdata_chunk_array =
nullptr;
67 const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk !=
nullptr);
71 if (use_userdata_chunk) {
73 settings->
func_init(userdata, userdata_chunk);
86 if (use_userdata_chunk) {
87 if (settings->
func_free !=
nullptr && userdata_chunk !=
nullptr) {
89 settings->
func_free(userdata, userdata_chunk);
104 const int tasks_num = threads_num + 2;
106 state.userdata = userdata;
109 if (use_userdata_chunk) {
110 userdata_chunk_array =
MALLOCA(userdata_chunk_size * tasks_num);
114 mempool,
size_t(tasks_num));
116 for (
int i = 0;
i < tasks_num;
i++) {
117 void *userdata_chunk_local =
nullptr;
118 if (use_userdata_chunk) {
119 userdata_chunk_local = (
char *)userdata_chunk_array + (userdata_chunk_size *
i);
120 memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
122 settings->
func_init(userdata, userdata_chunk_local);
135 if (use_userdata_chunk) {
137 for (
int i = 0;
i < tasks_num;
i++) {
140 userdata, userdata_chunk, mempool_iterator_data[
i].tls.userdata_chunk);
143 settings->
func_free(userdata, mempool_iterator_data[
i].tls.userdata_chunk);
147 MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * tasks_num);
ParallelMempoolTaskData * mempool_iter_threadsafe_create(BLI_mempool *pool, const size_t iter_num)
void mempool_iter_threadsafe_destroy(ParallelMempoolTaskData *iter_arr)
void * mempool_iter_threadsafe_step(BLI_mempool_threadsafe_iter *ts_iter)
void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter) ATTR_NONNULL()
void * BLI_mempool_iterstep(BLI_mempool_iter *iter) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
int BLI_mempool_len(const BLI_mempool *pool) ATTR_NONNULL(1)
int BLI_task_scheduler_num_threads(void)
void * BLI_task_pool_user_data(TaskPool *pool)
struct MempoolIterData MempoolIterData
void BLI_task_pool_work_and_wait(TaskPool *pool)
TaskPool * BLI_task_pool_create(void *userdata, eTaskPriority priority)
void(* TaskParallelMempoolFunc)(void *userdata, MempoolIterData *iter, const TaskParallelTLS *__restrict tls)
void BLI_task_pool_free(TaskPool *pool)
void BLI_task_pool_push(TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskFreeFunction freedata)
Read Guarded memory (de)allocation.
TaskParallelMempoolFunc func
TaskParallelReduceFunc func_reduce
TaskParallelFreeFunc func_free
TaskParallelInitFunc func_init
size_t userdata_chunk_size
#define MALLOCA_FREE(_mem, _size)
static void parallel_mempool_func(TaskPool *__restrict pool, void *taskdata)
void BLI_task_parallel_mempool(BLI_mempool *mempool, void *userdata, TaskParallelMempoolFunc func, const TaskParallelSettings *settings)