Blender V5.0
threads.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2006 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
8
9#include <algorithm>
10#include <cerrno>
11#include <cstdio> /* For `printf`. */
12#include <cstdlib>
13#include <deque>
14
15#include "MEM_guardedalloc.h"
16
17#include "BLI_listbase.h"
18#include "BLI_threads.h"
19#include "BLI_time.h"
20#include "BLI_utildefines.h"
21
22/* for checking system threads - BLI_system_thread_count */
23#ifdef WIN32
24# include <sys/timeb.h>
25# include <windows.h>
26#elif defined(__APPLE__)
27# include <sys/sysctl.h>
28# include <sys/types.h>
29#else
30# include <sys/time.h>
31# include <unistd.h>
32#endif
33
34#ifdef WITH_TBB
35# include <tbb/spin_mutex.h>
36#endif
37
38#include "atomic_ops.h"
39
90static pthread_mutex_t _image_lock = PTHREAD_MUTEX_INITIALIZER;
91static pthread_mutex_t _image_draw_lock = PTHREAD_MUTEX_INITIALIZER;
92static pthread_mutex_t _viewer_lock = PTHREAD_MUTEX_INITIALIZER;
93static pthread_mutex_t _custom1_lock = PTHREAD_MUTEX_INITIALIZER;
94static pthread_mutex_t _nodes_lock = PTHREAD_MUTEX_INITIALIZER;
95static pthread_mutex_t _movieclip_lock = PTHREAD_MUTEX_INITIALIZER;
96static pthread_mutex_t _colormanage_lock = PTHREAD_MUTEX_INITIALIZER;
97static pthread_mutex_t _fftw_lock = PTHREAD_MUTEX_INITIALIZER;
98static pthread_mutex_t _view3d_lock = PTHREAD_MUTEX_INITIALIZER;
99static pthread_t mainid;
100static uint thread_levels = 0; /* threads can be invoked inside threads */
101static int threads_override_num = 0;
102
/* just a max for security reasons */
#define RE_MAX_THREAD BLENDER_MAX_THREADS

/* One pool slot: a joinable pthread plus the callback/data it runs.
 * NOTE(review): `next`/`prev` and `callerdata` members reconstructed from
 * the stripped lines (struct is stored in a ListBase). */
struct ThreadSlot {
  ThreadSlot *next, *prev;
  void *(*do_thread)(void *);
  void *callerdata;
  pthread_t pthread;
  int avail; /* Non-zero when the slot is free to be claimed. */
};
113
115{
116 mainid = pthread_self();
117}
118
120
121void BLI_threadpool_init(ListBase *threadbase, void *(*do_thread)(void *), int tot)
122{
123 int a;
124
125 if (threadbase != nullptr && tot > 0) {
126 BLI_listbase_clear(threadbase);
127
128 if (tot > RE_MAX_THREAD) {
129 tot = RE_MAX_THREAD;
130 }
131 else if (tot < 1) {
132 tot = 1;
133 }
134
135 for (a = 0; a < tot; a++) {
136 ThreadSlot *tslot = MEM_callocN<ThreadSlot>("threadslot");
137 BLI_addtail(threadbase, tslot);
138 tslot->do_thread = do_thread;
139 tslot->avail = 1;
140 }
141 }
142
144}
145
147{
148 int counter = 0;
149
150 LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
151 if (tslot->avail) {
152 counter++;
153 }
154 }
155
156 return counter;
157}
158
160{
161 int counter = 0;
162
163 LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
164 if (tslot->avail) {
165 return counter;
166 }
167 ++counter;
168 }
169
170 return 0;
171}
172
173static void *tslot_thread_start(void *tslot_p)
174{
175 ThreadSlot *tslot = (ThreadSlot *)tslot_p;
176 return tslot->do_thread(tslot->callerdata);
177}
178
180{
181 return pthread_equal(pthread_self(), mainid);
182}
183
184void BLI_threadpool_insert(ListBase *threadbase, void *callerdata)
185{
186 LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
187 if (tslot->avail) {
188 tslot->avail = 0;
189 tslot->callerdata = callerdata;
190 pthread_create(&tslot->pthread, nullptr, tslot_thread_start, tslot);
191 return;
192 }
193 }
194 printf("ERROR: could not insert thread slot\n");
195}
196
197void BLI_threadpool_remove(ListBase *threadbase, void *callerdata)
198{
199 LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
200 if (tslot->callerdata == callerdata) {
201 pthread_join(tslot->pthread, nullptr);
202 tslot->callerdata = nullptr;
203 tslot->avail = 1;
204 }
205 }
206}
207
208void BLI_threadpool_remove_index(ListBase *threadbase, int index)
209{
210 int counter = 0;
211
212 LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
213 if (counter == index && tslot->avail == 0) {
214 pthread_join(tslot->pthread, nullptr);
215 tslot->callerdata = nullptr;
216 tslot->avail = 1;
217 break;
218 }
219 ++counter;
220 }
221}
222
224{
225 LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
226 if (tslot->avail == 0) {
227 pthread_join(tslot->pthread, nullptr);
228 tslot->callerdata = nullptr;
229 tslot->avail = 1;
230 }
231 }
232}
233
235{
236
237 /* Only needed if there's actually some stuff to end
238 * this way we don't end up decrementing thread_levels on an empty `threadbase`. */
239 if (threadbase == nullptr || BLI_listbase_is_empty(threadbase)) {
240 return;
241 }
242
243 LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
244 if (tslot->avail == 0) {
245 pthread_join(tslot->pthread, nullptr);
246 }
247 }
248 BLI_freelistN(threadbase);
249}
250
/* System Information */
252
254{
255 static int t = -1;
256
257 if (threads_override_num != 0) {
259 }
260 if (LIKELY(t != -1)) {
261 return t;
262 }
263
264 {
265#ifdef WIN32
266 SYSTEM_INFO info;
267 GetSystemInfo(&info);
268 t = int(info.dwNumberOfProcessors);
269#else
270# ifdef __APPLE__
271 int mib[2];
272 size_t len;
273
274 mib[0] = CTL_HW;
275 mib[1] = HW_NCPU;
276 len = sizeof(t);
277 sysctl(mib, 2, &t, &len, nullptr, 0);
278# else
279 t = int(sysconf(_SC_NPROCESSORS_ONLN));
280# endif
281#endif
282 }
283
284 CLAMP(t, 1, RE_MAX_THREAD);
285
286 return t;
287}
288
293
298
/* Global Mutex Locks */
300
301static ThreadMutex *global_mutex_from_type(const int type)
302{
303 switch (type) {
304 case LOCK_IMAGE:
305 return &_image_lock;
306 case LOCK_DRAW_IMAGE:
307 return &_image_draw_lock;
308 case LOCK_VIEWER:
309 return &_viewer_lock;
310 case LOCK_CUSTOM1:
311 return &_custom1_lock;
312 case LOCK_NODES:
313 return &_nodes_lock;
314 case LOCK_MOVIECLIP:
315 return &_movieclip_lock;
316 case LOCK_COLORMANAGE:
317 return &_colormanage_lock;
318 case LOCK_FFTW:
319 return &_fftw_lock;
320 case LOCK_VIEW3D:
321 return &_view3d_lock;
322 default:
324 return nullptr;
325 }
326}
327
328void BLI_thread_lock(int type)
329{
330 pthread_mutex_lock(global_mutex_from_type(type));
331}
332
333void BLI_thread_unlock(int type)
334{
335 pthread_mutex_unlock(global_mutex_from_type(type));
336}
337
/* Mutex Locks */
339
341{
342 pthread_mutex_init(mutex, nullptr);
343}
344
346{
347 pthread_mutex_lock(mutex);
348}
349
351{
352 pthread_mutex_unlock(mutex);
353}
354
356{
357 return (pthread_mutex_trylock(mutex) == 0);
358}
359
361{
362 pthread_mutex_destroy(mutex);
363}
364
371
377
/* Spin Locks */
379
#ifdef WITH_TBB
/* SpinLock is an opaque buffer large enough to hold a tbb::spin_mutex;
 * verify size/alignment at compile time before reinterpreting. */
static tbb::spin_mutex *tbb_spin_mutex_cast(SpinLock *spin)
{
  static_assert(sizeof(SpinLock) >= sizeof(tbb::spin_mutex),
                "SpinLock must match tbb::spin_mutex");
  static_assert(alignof(SpinLock) % alignof(tbb::spin_mutex) == 0,
                "SpinLock must be aligned same as tbb::spin_mutex");
  return reinterpret_cast<tbb::spin_mutex *>(spin);
}
#endif
390
392{
393#ifdef WITH_TBB
394 tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin);
395 new (spin_mutex) tbb::spin_mutex();
396#elif defined(__APPLE__)
397 BLI_mutex_init(spin);
398#elif defined(_MSC_VER)
399 *spin = 0;
400#else
401 pthread_spin_init(spin, 0);
402#endif
403}
404
406{
407#ifdef WITH_TBB
408 tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin);
409 spin_mutex->lock();
410#elif defined(__APPLE__)
411 BLI_mutex_lock(spin);
412#elif defined(_MSC_VER)
413# if defined(_M_ARM64)
414 // InterlockedExchangeAcquire takes a long arg on MSVC ARM64
415 static_assert(sizeof(long) == sizeof(SpinLock));
416 while (InterlockedExchangeAcquire((volatile long *)spin, 1)) {
417# else
418 while (InterlockedExchangeAcquire(spin, 1)) {
419# endif
420 while (*spin) {
421 /* Spin-lock hint for processors with hyper-threading. */
422 YieldProcessor();
423 }
424 }
425#else
426 pthread_spin_lock(spin);
427#endif
428}
429
431{
432#ifdef WITH_TBB
433 tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin);
434 spin_mutex->unlock();
435#elif defined(__APPLE__)
436 BLI_mutex_unlock(spin);
437#elif defined(_MSC_VER)
438 _ReadWriteBarrier();
439 *spin = 0;
440#else
441 pthread_spin_unlock(spin);
442#endif
443}
444
446{
447#ifdef WITH_TBB
448 tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin);
449 spin_mutex->~spin_mutex();
450#elif defined(__APPLE__)
451 BLI_mutex_end(spin);
452#elif defined(_MSC_VER)
453 /* Nothing to do, spin is a simple integer type. */
454 UNUSED_VARS(spin);
455#else
456 pthread_spin_destroy(spin);
457#endif
458}
459
/* Read/Write Mutex Lock */
461
463{
464 pthread_rwlock_init(mutex, nullptr);
465}
466
468{
469 if (mode == THREAD_LOCK_READ) {
470 pthread_rwlock_rdlock(mutex);
471 }
472 else {
473 pthread_rwlock_wrlock(mutex);
474 }
475}
476
478{
479 pthread_rwlock_unlock(mutex);
480}
481
483{
484 pthread_rwlock_destroy(mutex);
485}
486
493
499
/* Ticket Mutex Lock */
501
503 pthread_cond_t cond;
504 pthread_mutex_t mutex;
506 pthread_t owner;
508};
509
511{
512 TicketMutex *ticket = MEM_callocN<TicketMutex>("TicketMutex");
513
514 pthread_cond_init(&ticket->cond, nullptr);
515 pthread_mutex_init(&ticket->mutex, nullptr);
516
517 return ticket;
518}
519
521{
522 pthread_mutex_destroy(&ticket->mutex);
523 pthread_cond_destroy(&ticket->cond);
524 MEM_freeN(ticket);
525}
526
527static bool ticket_mutex_lock(TicketMutex *ticket, const bool check_recursive)
528{
529 uint queue_me;
530
531 pthread_mutex_lock(&ticket->mutex);
532
533 /* Check for recursive locks, for debugging only. */
534 if (check_recursive && ticket->has_owner && pthread_equal(pthread_self(), ticket->owner)) {
535 pthread_mutex_unlock(&ticket->mutex);
536 return false;
537 }
538
539 queue_me = ticket->queue_tail++;
540
541 while (queue_me != ticket->queue_head) {
542 pthread_cond_wait(&ticket->cond, &ticket->mutex);
543 }
544
545 ticket->owner = pthread_self();
546 ticket->has_owner = true;
547
548 pthread_mutex_unlock(&ticket->mutex);
549 return true;
550}
551
553{
554 ticket_mutex_lock(ticket, false);
555}
556
558{
559 return ticket_mutex_lock(ticket, true);
560}
561
563{
564 pthread_mutex_lock(&ticket->mutex);
565 ticket->queue_head++;
566 ticket->has_owner = false;
567 pthread_cond_broadcast(&ticket->cond);
568 pthread_mutex_unlock(&ticket->mutex);
569}
570
/* ************************************************ */

/* Condition */
574
576{
577 pthread_cond_init(cond, nullptr);
578}
579
581{
582 pthread_cond_wait(cond, mutex);
583}
584
586{
587 pthread_cond_wait(cond, global_mutex_from_type(type));
588}
589
591{
592 pthread_cond_signal(cond);
593}
594
596{
597 pthread_cond_broadcast(cond);
598}
599
601{
602 pthread_cond_destroy(cond);
603}
604
/* ************************************************ */
606
/* One queued work item: the caller's opaque pointer plus a monotonically
 * increasing id used by BLI_thread_queue_cancel_work().
 * NOTE(review): ThreadQueueWork members and the ThreadQueue opening lines
 * reconstructed from the stripped lines (confirmed by usage below). */
struct ThreadQueueWork {
  void *work;
  uint64_t id;
};

/* Three-priority FIFO work queue protected by a single mutex. */
struct ThreadQueue {
  uint64_t current_id = 0;
  std::deque<ThreadQueueWork> queue_low_priority;
  std::deque<ThreadQueueWork> queue_normal_priority;
  std::deque<ThreadQueueWork> queue_high_priority;
  pthread_mutex_t mutex;
  pthread_cond_t push_cond;   /* Signaled when work is pushed or nowait is set. */
  pthread_cond_t finish_cond; /* Signaled when the queue runs empty. */
  volatile int nowait = 0;
  volatile int canceled = 0;
};
623
625{
626 ThreadQueue *queue = MEM_new<ThreadQueue>(__func__);
627
628 pthread_mutex_init(&queue->mutex, nullptr);
629 pthread_cond_init(&queue->push_cond, nullptr);
630 pthread_cond_init(&queue->finish_cond, nullptr);
631
632 return queue;
633}
634
636{
637 /* destroy everything, assumes no one is using queue anymore */
638 pthread_cond_destroy(&queue->finish_cond);
639 pthread_cond_destroy(&queue->push_cond);
640 pthread_mutex_destroy(&queue->mutex);
641
642 MEM_delete(queue);
643}
644
646{
647 BLI_assert(work);
648
649 pthread_mutex_lock(&queue->mutex);
650
651 ThreadQueueWork work_reference;
652 work_reference.work = work;
653 work_reference.id = ++queue->current_id;
654
655 switch (priority) {
657 queue->queue_low_priority.push_back(work_reference);
658 break;
660 queue->queue_normal_priority.push_back(work_reference);
661 break;
663 queue->queue_high_priority.push_back(work_reference);
664 break;
665 }
666
667 /* signal threads waiting to pop */
668 pthread_cond_signal(&queue->push_cond);
669 pthread_mutex_unlock(&queue->mutex);
670
671 return work_reference.id;
672}
673
676{
677 if (queue->queue_low_priority.empty() && queue->queue_normal_priority.empty() &&
678 queue->queue_high_priority.empty())
679 {
680 pthread_cond_signal(&queue->finish_cond);
681 }
682}
683
685{
686 pthread_mutex_lock(&queue->mutex);
687
688 bool found = false;
689 auto check = [&](const ThreadQueueWork &work) {
690 if (work.id == work_id) {
691 found = true;
692 return true;
693 }
694 return false;
695 };
696
697 auto cancel = [&](std::deque<ThreadQueueWork> &sub_queue) {
698 sub_queue.erase(std::remove_if(sub_queue.begin(), sub_queue.end(), check), sub_queue.end());
699 };
700
701 cancel(queue->queue_low_priority);
702 cancel(queue->queue_normal_priority);
703 cancel(queue->queue_high_priority);
704
705 if (found) {
706 check_finalization(queue);
707 }
708
709 pthread_mutex_unlock(&queue->mutex);
710}
711
713{
714 ThreadQueueWork work_reference = {0};
715
716 /* wait until there is work */
717 pthread_mutex_lock(&queue->mutex);
718 while (!queue->nowait && queue->queue_low_priority.empty() &&
719 queue->queue_normal_priority.empty() && queue->queue_high_priority.empty())
720 {
721 pthread_cond_wait(&queue->push_cond, &queue->mutex);
722 }
723
724 /* if we have something, pop it */
725 for (std::deque<ThreadQueueWork> *sub_queue :
727 {
728 if (sub_queue->empty()) {
729 continue;
730 }
731 work_reference = sub_queue->front();
732 sub_queue->pop_front();
733
734 /* Don't pop more than one work. */
735 break;
736 }
737
738 if (work_reference.work) {
739 check_finalization(queue);
740 }
741
742 pthread_mutex_unlock(&queue->mutex);
743
744 return work_reference.work;
745}
746
/* Fill `timeout` with the absolute time `ms` milliseconds from now, in the
 * form pthread_cond_timedwait() expects. */
static void wait_timeout(timespec *timeout, int ms)
{
  ldiv_t div_result;
  long sec, usec, x;

#ifdef WIN32
  {
    struct _timeb now;
    _ftime(&now);
    sec = now.time;
    usec = now.millitm * 1000; /* microsecond precision would be better */
  }
#else
  {
    timeval now;
    gettimeofday(&now, nullptr);
    sec = now.tv_sec;
    usec = now.tv_usec;
  }
#endif

  /* add current time + millisecond offset */
  div_result = ldiv(ms, 1000);
  timeout->tv_sec = sec + div_result.quot;

  x = usec + (div_result.rem * 1000);

  /* Carry overflowed microseconds into the seconds field. */
  if (x >= 1000000) {
    timeout->tv_sec++;
    x -= 1000000;
  }

  timeout->tv_nsec = x * 1000;
}
781
783{
784 double t;
785 ThreadQueueWork work_reference = {0};
786 timespec timeout;
787
789 wait_timeout(&timeout, ms);
790
791 /* wait until there is work */
792 pthread_mutex_lock(&queue->mutex);
793 while (!queue->nowait && queue->queue_low_priority.empty() &&
794 queue->queue_normal_priority.empty() && queue->queue_high_priority.empty())
795 {
796 if (pthread_cond_timedwait(&queue->push_cond, &queue->mutex, &timeout) == ETIMEDOUT) {
797 break;
798 }
799 if (BLI_time_now_seconds() - t >= ms * 0.001) {
800 break;
801 }
802 }
803
804 /* if we have something, pop it */
805 for (std::deque<ThreadQueueWork> *sub_queue :
807 {
808 if (sub_queue->empty()) {
809 continue;
810 }
811 work_reference = sub_queue->front();
812 sub_queue->pop_front();
813
814 /* Don't pop more than one work. */
815 break;
816 }
817
818 if (work_reference.work) {
819 check_finalization(queue);
820 }
821
822 pthread_mutex_unlock(&queue->mutex);
823
824 return work_reference.work;
825}
826
828{
829 int size;
830
831 pthread_mutex_lock(&queue->mutex);
832 size = queue->queue_low_priority.size() + queue->queue_normal_priority.size() +
833 queue->queue_high_priority.size();
834 pthread_mutex_unlock(&queue->mutex);
835
836 return size;
837}
838
840{
841 bool is_empty;
842
843 pthread_mutex_lock(&queue->mutex);
844 is_empty = queue->queue_low_priority.empty() && queue->queue_normal_priority.empty() &&
845 queue->queue_high_priority.empty();
846 pthread_mutex_unlock(&queue->mutex);
847
848 return is_empty;
849}
850
852{
853 pthread_mutex_lock(&queue->mutex);
854
855 queue->nowait = 1;
856
857 /* signal threads waiting to pop */
858 pthread_cond_broadcast(&queue->push_cond);
859 pthread_mutex_unlock(&queue->mutex);
860}
861
863{
864 /* wait for finish condition */
865 pthread_mutex_lock(&queue->mutex);
866
867 while (!queue->queue_low_priority.empty() || !queue->queue_normal_priority.empty() ||
868 !queue->queue_high_priority.empty())
869 {
870 pthread_cond_wait(&queue->finish_cond, &queue->mutex);
871 }
872
873 pthread_mutex_unlock(&queue->mutex);
874}
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert(a)
Definition BLI_assert.h:46
#define LISTBASE_FOREACH(type, var, list)
BLI_INLINE void BLI_listbase_clear(ListBase *lb)
BLI_INLINE bool BLI_listbase_is_empty(const ListBase *lb)
void void BLI_freelistN(ListBase *listbase) ATTR_NONNULL(1)
Definition listbase.cc:497
void BLI_addtail(ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:111
ATTR_WARN_UNUSED_RESULT const size_t num
unsigned int uint
ThreadQueueWorkPriority
@ BLI_THREAD_QUEUE_WORK_PRIORITY_LOW
@ BLI_THREAD_QUEUE_WORK_PRIORITY_HIGH
@ BLI_THREAD_QUEUE_WORK_PRIORITY_NORMAL
pthread_spinlock_t SpinLock
pthread_rwlock_t ThreadRWMutex
#define THREAD_LOCK_READ
pthread_cond_t ThreadCondition
@ LOCK_NODES
Definition BLI_threads.h:67
@ LOCK_VIEW3D
Definition BLI_threads.h:71
@ LOCK_DRAW_IMAGE
Definition BLI_threads.h:64
@ LOCK_COLORMANAGE
Definition BLI_threads.h:69
@ LOCK_MOVIECLIP
Definition BLI_threads.h:68
@ LOCK_CUSTOM1
Definition BLI_threads.h:66
@ LOCK_IMAGE
Definition BLI_threads.h:63
@ LOCK_VIEWER
Definition BLI_threads.h:65
@ LOCK_FFTW
Definition BLI_threads.h:70
pthread_mutex_t ThreadMutex
Definition BLI_threads.h:79
Platform independent time functions.
double BLI_time_now_seconds(void)
Definition time.cc:113
#define CLAMP(a, b, c)
#define UNUSED_VARS(...)
#define LIKELY(x)
Read Guarded memory(de)allocation.
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE unsigned int atomic_fetch_and_add_u(unsigned int *p, unsigned int x)
unsigned long long int uint64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
ThreadMutex mutex
#define long
#define printf(...)
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
uint64_t id
Definition threads.cc:609
std::deque< ThreadQueueWork > queue_low_priority
Definition threads.cc:614
volatile int nowait
Definition threads.cc:620
volatile int canceled
Definition threads.cc:621
std::deque< ThreadQueueWork > queue_high_priority
Definition threads.cc:616
uint64_t current_id
Definition threads.cc:613
pthread_cond_t push_cond
Definition threads.cc:618
pthread_cond_t finish_cond
Definition threads.cc:619
pthread_mutex_t mutex
Definition threads.cc:617
std::deque< ThreadQueueWork > queue_normal_priority
Definition threads.cc:615
void * callerdata
Definition threads.cc:109
void *(* do_thread)(void *)
Definition threads.cc:108
pthread_t pthread
Definition threads.cc:110
ThreadSlot * next
Definition threads.cc:107
ThreadSlot * prev
Definition threads.cc:107
bool has_owner
Definition threads.cc:507
pthread_cond_t cond
Definition threads.cc:503
pthread_t owner
Definition threads.cc:506
uint queue_head
Definition threads.cc:505
pthread_mutex_t mutex
Definition threads.cc:504
uint queue_tail
Definition threads.cc:505
void BLI_condition_notify_all(ThreadCondition *cond)
Definition threads.cc:595
void BLI_thread_queue_cancel_work(ThreadQueue *queue, uint64_t work_id)
Definition threads.cc:684
bool BLI_mutex_trylock(ThreadMutex *mutex)
Definition threads.cc:355
void BLI_rw_mutex_end(ThreadRWMutex *mutex)
Definition threads.cc:482
void BLI_threadapi_init()
Definition threads.cc:114
void * BLI_thread_queue_pop(ThreadQueue *queue)
Definition threads.cc:712
void BLI_thread_unlock(int type)
Definition threads.cc:333
void BLI_ticket_mutex_unlock(TicketMutex *ticket)
Definition threads.cc:562
ThreadRWMutex * BLI_rw_mutex_alloc()
Definition threads.cc:487
static pthread_mutex_t _image_lock
Definition threads.cc:90
void BLI_mutex_end(ThreadMutex *mutex)
Definition threads.cc:360
static void * tslot_thread_start(void *tslot_p)
Definition threads.cc:173
#define RE_MAX_THREAD
Definition threads.cc:104
void BLI_threadapi_exit()
Definition threads.cc:119
void BLI_mutex_free(ThreadMutex *mutex)
Definition threads.cc:372
static pthread_mutex_t _custom1_lock
Definition threads.cc:93
void BLI_threadpool_clear(ListBase *threadbase)
Definition threads.cc:223
void BLI_threadpool_init(ListBase *threadbase, void *(*do_thread)(void *), int tot)
Definition threads.cc:121
void BLI_thread_lock(int type)
Definition threads.cc:328
int BLI_thread_is_main()
Definition threads.cc:179
bool BLI_ticket_mutex_lock_check_recursive(TicketMutex *ticket)
Definition threads.cc:557
void BLI_threadpool_remove(ListBase *threadbase, void *callerdata)
Definition threads.cc:197
TicketMutex * BLI_ticket_mutex_alloc()
Definition threads.cc:510
static pthread_mutex_t _view3d_lock
Definition threads.cc:98
void BLI_condition_wait(ThreadCondition *cond, ThreadMutex *mutex)
Definition threads.cc:580
int BLI_system_num_threads_override_get()
Definition threads.cc:294
static int threads_override_num
Definition threads.cc:101
int BLI_threadpool_available_thread_index(ListBase *threadbase)
Definition threads.cc:159
static void wait_timeout(timespec *timeout, int ms)
Definition threads.cc:747
static bool ticket_mutex_lock(TicketMutex *ticket, const bool check_recursive)
Definition threads.cc:527
void BLI_mutex_init(ThreadMutex *mutex)
Definition threads.cc:340
static pthread_mutex_t _viewer_lock
Definition threads.cc:92
void BLI_system_num_threads_override_set(int num)
Definition threads.cc:289
void BLI_condition_end(ThreadCondition *cond)
Definition threads.cc:600
static ThreadMutex * global_mutex_from_type(const int type)
Definition threads.cc:301
void BLI_thread_queue_free(ThreadQueue *queue)
Definition threads.cc:635
static uint thread_levels
Definition threads.cc:100
ThreadQueue * BLI_thread_queue_init()
Definition threads.cc:624
static pthread_mutex_t _colormanage_lock
Definition threads.cc:96
static pthread_mutex_t _fftw_lock
Definition threads.cc:97
void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode)
Definition threads.cc:467
void BLI_ticket_mutex_lock(TicketMutex *ticket)
Definition threads.cc:552
void BLI_condition_notify_one(ThreadCondition *cond)
Definition threads.cc:590
bool BLI_thread_queue_is_empty(ThreadQueue *queue)
Definition threads.cc:839
void BLI_ticket_mutex_free(TicketMutex *ticket)
Definition threads.cc:520
void BLI_condition_wait_global_mutex(ThreadCondition *cond, const int type)
Definition threads.cc:585
uint64_t BLI_thread_queue_push(ThreadQueue *queue, void *work, ThreadQueueWorkPriority priority)
Definition threads.cc:645
static pthread_mutex_t _image_draw_lock
Definition threads.cc:91
static void check_finalization(ThreadQueue *queue)
Definition threads.cc:675
void BLI_threadpool_end(ListBase *threadbase)
Definition threads.cc:234
void BLI_condition_init(ThreadCondition *cond)
Definition threads.cc:575
static pthread_t mainid
Definition threads.cc:99
void BLI_mutex_lock(ThreadMutex *mutex)
Definition threads.cc:345
void BLI_thread_queue_nowait(ThreadQueue *queue)
Definition threads.cc:851
int BLI_system_thread_count()
Definition threads.cc:253
void BLI_thread_queue_wait_finish(ThreadQueue *queue)
Definition threads.cc:862
void BLI_mutex_unlock(ThreadMutex *mutex)
Definition threads.cc:350
void BLI_rw_mutex_init(ThreadRWMutex *mutex)
Definition threads.cc:462
static pthread_mutex_t _nodes_lock
Definition threads.cc:94
void BLI_threadpool_insert(ListBase *threadbase, void *callerdata)
Definition threads.cc:184
static pthread_mutex_t _movieclip_lock
Definition threads.cc:95
void BLI_spin_init(SpinLock *spin)
Definition threads.cc:391
void BLI_spin_unlock(SpinLock *spin)
Definition threads.cc:430
void BLI_threadpool_remove_index(ListBase *threadbase, int index)
Definition threads.cc:208
int BLI_available_threads(ListBase *threadbase)
Definition threads.cc:146
void BLI_spin_lock(SpinLock *spin)
Definition threads.cc:405
void BLI_rw_mutex_free(ThreadRWMutex *mutex)
Definition threads.cc:494
void BLI_rw_mutex_unlock(ThreadRWMutex *mutex)
Definition threads.cc:477
void BLI_spin_end(SpinLock *spin)
Definition threads.cc:445
ThreadMutex * BLI_mutex_alloc()
Definition threads.cc:365
void * BLI_thread_queue_pop_timeout(ThreadQueue *queue, int ms)
Definition threads.cc:782
int BLI_thread_queue_len(ThreadQueue *queue)
Definition threads.cc:827
uint len