Blender V4.3
BLI_task_test.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: Apache-2.0 */
4
5#include "testing/testing.h"
6#include <atomic>
7#include <cstring>
8
9#include "atomic_ops.h"
10
11#include "MEM_guardedalloc.h"
12
13#include "BLI_utildefines.h"
14
15#include "BLI_listbase.h"
16#include "BLI_mempool.h"
17#include "BLI_task.h"
18#include "BLI_task.hh"
19
20#define ITEMS_NUM 10000
21
22/* *** Parallel iterations over range of integer values. *** */
23
24static void task_range_iter_func(void *userdata, int index, const TaskParallelTLS *__restrict tls)
25{
26 int *data = (int *)userdata;
27 data[index] = index;
28 *((int *)tls->userdata_chunk) += index;
29 // printf("%d, %d, %d\n", index, data[index], *((int *)tls->userdata_chunk));
30}
31
/* Reduce callback: folds one worker's partial sum (`userdata_chunk`)
 * into the joined result (`join_v`). */
static void task_range_iter_reduce_func(const void *__restrict /*userdata*/,
                                        void *__restrict join_v,
                                        void *__restrict userdata_chunk)
{
  *(int *)join_v += *(int *)userdata_chunk;
}
41
42TEST(task, RangeIter)
43{
44 int data[ITEMS_NUM] = {0};
45 int sum = 0;
46
48
49 TaskParallelSettings settings;
51 settings.min_iter_per_thread = 1;
52
53 settings.userdata_chunk = &sum;
54 settings.userdata_chunk_size = sizeof(sum);
55 settings.func_reduce = task_range_iter_reduce_func;
56
58
59 /* Those checks should ensure us all items of the listbase were processed once, and only once
60 * as expected. */
61
62 int expected_sum = 0;
63 for (int i = 0; i < ITEMS_NUM; i++) {
64 EXPECT_EQ(data[i], i);
65 expected_sum += i;
66 }
67 EXPECT_EQ(sum, expected_sum);
68
70}
71
72/* *** Parallel iterations over mempool items. *** */
73
74static void task_mempool_iter_func(void *userdata,
75 MempoolIterData *item,
76 const TaskParallelTLS *__restrict /*tls*/)
77{
78 int *data = (int *)item;
79 int *count = (int *)userdata;
80
81 EXPECT_TRUE(data != nullptr);
82
83 *data += 1;
85}
86
87TEST(task, MempoolIter)
88{
89 int *data[ITEMS_NUM];
92 sizeof(*data[0]), ITEMS_NUM, 32, BLI_MEMPOOL_ALLOW_ITER);
93
94 int i;
95
96 /* 'Randomly' add and remove some items from mempool, to create a non-homogeneous one. */
97 int items_num = 0;
98 for (i = 0; i < ITEMS_NUM; i++) {
99 data[i] = (int *)BLI_mempool_alloc(mempool);
100 *data[i] = i - 1;
101 items_num++;
102 }
103
104 for (i = 0; i < ITEMS_NUM; i += 3) {
105 BLI_mempool_free(mempool, data[i]);
106 data[i] = nullptr;
107 items_num--;
108 }
109
110 for (i = 0; i < ITEMS_NUM; i += 7) {
111 if (data[i] == nullptr) {
112 data[i] = (int *)BLI_mempool_alloc(mempool);
113 *data[i] = i - 1;
114 items_num++;
115 }
116 }
117
118 for (i = 0; i < ITEMS_NUM - 5; i += 23) {
119 for (int j = 0; j < 5; j++) {
120 if (data[i + j] != nullptr) {
121 BLI_mempool_free(mempool, data[i + j]);
122 data[i + j] = nullptr;
123 items_num--;
124 }
125 }
126 }
127
128 TaskParallelSettings settings;
130
131 BLI_task_parallel_mempool(mempool, &items_num, task_mempool_iter_func, &settings);
132
133 /* Those checks should ensure us all items of the mempool were processed once, and only once - as
134 * expected. */
135 EXPECT_EQ(items_num, 0);
136 for (i = 0; i < ITEMS_NUM; i++) {
137 if (data[i] != nullptr) {
138 EXPECT_EQ(*data[i], i);
139 }
140 }
141
142 BLI_mempool_destroy(mempool);
144}
145
146/* *** Parallel iterations over mempool items with TLS. *** */
147
149 ListBase *accumulate_items;
150};
151
152static void task_mempool_iter_tls_func(void * /*userdata*/,
153 MempoolIterData *item,
154 const TaskParallelTLS *__restrict tls)
155{
156 TaskMemPool_Chunk *task_data = (TaskMemPool_Chunk *)tls->userdata_chunk;
157 int *data = (int *)item;
158
159 EXPECT_TRUE(data != nullptr);
160 if (task_data->accumulate_items == nullptr) {
161 task_data->accumulate_items = MEM_cnew<ListBase>(__func__);
162 }
163
164 /* Flip to prove this has been touched. */
165 *data = -*data;
166
167 BLI_addtail(task_data->accumulate_items, BLI_genericNodeN(data));
168}
169
170static void task_mempool_iter_tls_reduce(const void *__restrict /*userdata*/,
171 void *__restrict chunk_join,
172 void *__restrict chunk)
173{
174 TaskMemPool_Chunk *join_chunk = (TaskMemPool_Chunk *)chunk_join;
175 TaskMemPool_Chunk *data_chunk = (TaskMemPool_Chunk *)chunk;
176
177 if (data_chunk->accumulate_items != nullptr) {
178 if (join_chunk->accumulate_items == nullptr) {
179 join_chunk->accumulate_items = MEM_cnew<ListBase>(__func__);
180 }
181 BLI_movelisttolist(join_chunk->accumulate_items, data_chunk->accumulate_items);
182 }
183}
184
185static void task_mempool_iter_tls_free(const void * /*userdata*/, void *__restrict userdata_chunk)
186{
187 TaskMemPool_Chunk *task_data = (TaskMemPool_Chunk *)userdata_chunk;
188 MEM_freeN(task_data->accumulate_items);
189}
190
191TEST(task, MempoolIterTLS)
192{
193 int *data[ITEMS_NUM];
196 sizeof(*data[0]), ITEMS_NUM, 32, BLI_MEMPOOL_ALLOW_ITER);
197
198 int i;
199
200 /* Add numbers negative `1..ITEMS_NUM` inclusive. */
201 for (i = 0; i < ITEMS_NUM; i++) {
202 data[i] = (int *)BLI_mempool_alloc(mempool);
203 *data[i] = -(i + 1);
204 }
205
206 TaskParallelSettings settings;
208
209 TaskMemPool_Chunk tls_data;
210 tls_data.accumulate_items = nullptr;
211
212 settings.userdata_chunk = &tls_data;
213 settings.userdata_chunk_size = sizeof(tls_data);
214
215 settings.func_free = task_mempool_iter_tls_free;
216 settings.func_reduce = task_mempool_iter_tls_reduce;
217
218 BLI_task_parallel_mempool(mempool, nullptr, task_mempool_iter_tls_func, &settings);
219
220 EXPECT_EQ(BLI_listbase_count(tls_data.accumulate_items), ITEMS_NUM);
221
222 /* Check that all elements are added into the list once. */
223 int number_accum = 0;
224 LISTBASE_FOREACH (LinkData *, link, tls_data.accumulate_items) {
225 int *data = (int *)link->data;
226 number_accum += *data;
227 }
228 EXPECT_EQ(number_accum, (ITEMS_NUM * (ITEMS_NUM + 1)) / 2);
229
230 BLI_freelistN(tls_data.accumulate_items);
231 MEM_freeN(tls_data.accumulate_items);
232
233 BLI_mempool_destroy(mempool);
235}
236
237TEST(task, ParallelInvoke)
238{
239 std::atomic<int> counter = 0;
240 blender::threading::parallel_invoke([&]() { counter++; },
241 [&]() { counter++; },
242 [&]() { counter++; },
243 [&]() { counter++; },
244 [&]() { counter++; },
245 [&]() { counter++; });
246 EXPECT_EQ(counter, 6);
247}
EXPECT_EQ(BLI_expr_pylike_eval(expr, nullptr, 0, &result), EXPR_PYLIKE_INVALID)
#define LISTBASE_FOREACH(type, var, list)
void void void BLI_movelisttolist(struct ListBase *dst, struct ListBase *src) ATTR_NONNULL(1
void void BLI_freelistN(struct ListBase *listbase) ATTR_NONNULL(1)
Definition listbase.cc:496
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:110
struct LinkData * BLI_genericNodeN(void *data)
Definition listbase.cc:909
int BLI_listbase_count(const struct ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
void * BLI_mempool_alloc(BLI_mempool *pool) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL ATTR_NONNULL(1)
void BLI_mempool_free(BLI_mempool *pool, void *addr) ATTR_NONNULL(1
BLI_mempool * BLI_mempool_create(unsigned int esize, unsigned int elem_num, unsigned int pchunk, unsigned int flag) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT ATTR_RETURNS_NONNULL
@ BLI_MEMPOOL_ALLOW_ITER
Definition BLI_mempool.h:95
void BLI_mempool_destroy(BLI_mempool *pool) ATTR_NONNULL(1)
struct MempoolIterData MempoolIterData
Definition BLI_task.h:209
void BLI_task_parallel_range(int start, int stop, void *userdata, TaskParallelRangeFunc func, const TaskParallelSettings *settings)
Definition task_range.cc:99
BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *settings)
Definition BLI_task.h:230
void BLI_task_parallel_mempool(struct BLI_mempool *mempool, void *userdata, TaskParallelMempoolFunc func, const TaskParallelSettings *settings)
BLI_INLINE void BLI_parallel_mempool_settings_defaults(TaskParallelSettings *settings)
Definition BLI_task.h:238
static void task_mempool_iter_func(void *userdata, MempoolIterData *item, const TaskParallelTLS *__restrict)
static void task_mempool_iter_tls_reduce(const void *__restrict, void *__restrict chunk_join, void *__restrict chunk)
static void task_range_iter_func(void *userdata, int index, const TaskParallelTLS *__restrict tls)
static void task_mempool_iter_tls_free(const void *, void *__restrict userdata_chunk)
static void task_range_iter_reduce_func(const void *__restrict, void *__restrict join_v, void *__restrict userdata_chunk)
#define ITEMS_NUM
static void task_mempool_iter_tls_func(void *, MempoolIterData *item, const TaskParallelTLS *__restrict tls)
TEST(task, RangeIter)
struct TaskMemPool_Chunk { ListBase *accumulate_items;} TaskMemPool_Chunk
void BLI_threadapi_init(void)
Definition threads.cc:114
void BLI_threadapi_exit(void)
Definition threads.cc:119
Read Guarded memory(de)allocation.
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
static T sum(const btAlignedObjectArray< T > &items)
int count
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
void parallel_invoke(Functions &&...functions)
Definition BLI_task.hh:199
unsigned int uint32_t
Definition stdint.h:80