Blender V4.3
atomic_ops_msvc.h
/*
 * Adopted from jemalloc with this license:
 *
 * Copyright (C) 2002-2013 Jason Evans <jasone@canonware.com>.
 * All rights reserved.
 * Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
 * Copyright (C) 2009-2013 Facebook, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright notice(s),
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice(s),
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __ATOMIC_OPS_MSVC_H__
#define __ATOMIC_OPS_MSVC_H__

#include "atomic_ops_utils.h"

#define NOGDI
#ifndef NOMINMAX
#  define NOMINMAX
#endif
#define WIN32_LEAN_AND_MEAN

#include <intrin.h>
#include <windows.h>

#if defined(__clang__)
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wincompatible-pointer-types"
#endif

/* TODO(sergey): On the x64 platform both reads and writes of a variable aligned to its type size
 * are atomic, so in theory it is possible to avoid the memory barrier and gain performance. The
 * downside is that this would impose an alignment requirement on the value being operated on. */
#define __atomic_impl_load_generic(v) (MemoryBarrier(), *(v))
#define __atomic_impl_store_generic(p, v) \
  do { \
    *(p) = (v); \
    MemoryBarrier(); \
  } while (0)
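
/* Editor's note: with the generic helpers above, a call such as atomic_load_uint64(v) below
 * expands to (MemoryBarrier(), *(v)): a full hardware fence followed by a plain aligned read.
 * Stores are symmetric, fencing after the write. */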

/* 64-bit operations. */
/* Unsigned */
ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
{
  return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x) + x;
}

ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
{
  return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x)) - x;
}

ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
{
  return InterlockedCompareExchange64((int64_t *)v, _new, old);
}
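
/* Editor's note: the Interlocked CAS intrinsics take (destination, exchange, comparand) and
 * return the value the destination held before the call, which is why `_new` precedes `old`
 * in the call above and in the 32-bit variants below. */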

ATOMIC_INLINE uint64_t atomic_load_uint64(const uint64_t *v)
{
  return __atomic_impl_load_generic(v);
}

ATOMIC_INLINE void atomic_store_uint64(uint64_t *p, uint64_t v)
{
  __atomic_impl_store_generic(p, v);
}

ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
{
  return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x);
}

ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
{
  return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x));
}

/* Signed */
ATOMIC_INLINE int64_t atomic_add_and_fetch_int64(int64_t *p, int64_t x)
{
  return InterlockedExchangeAdd64(p, x) + x;
}

ATOMIC_INLINE int64_t atomic_sub_and_fetch_int64(int64_t *p, int64_t x)
{
  return InterlockedExchangeAdd64(p, -x) - x;
}

ATOMIC_INLINE int64_t atomic_cas_int64(int64_t *v, int64_t old, int64_t _new)
{
  return InterlockedCompareExchange64(v, _new, old);
}

ATOMIC_INLINE int64_t atomic_load_int64(const int64_t *v)
{
  return __atomic_impl_load_generic(v);
}

ATOMIC_INLINE void atomic_store_int64(int64_t *p, int64_t v)
{
  __atomic_impl_store_generic(p, v);
}

ATOMIC_INLINE int64_t atomic_fetch_and_add_int64(int64_t *p, int64_t x)
{
  return InterlockedExchangeAdd64(p, x);
}

ATOMIC_INLINE int64_t atomic_fetch_and_sub_int64(int64_t *p, int64_t x)
{
  return InterlockedExchangeAdd64(p, -x);
}

/******************************************************************************/
/* 32-bit operations. */
/* Unsigned */
ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
{
  return InterlockedExchangeAdd(p, x) + x;
}

ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
{
  return InterlockedExchangeAdd(p, -((int32_t)x)) - x;
}

ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
{
  return InterlockedCompareExchange((long *)v, _new, old);
}

ATOMIC_INLINE uint32_t atomic_load_uint32(const uint32_t *v)
{
  return __atomic_impl_load_generic(v);
}

ATOMIC_INLINE void atomic_store_uint32(uint32_t *p, uint32_t v)
{
  __atomic_impl_store_generic(p, v);
}

ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x)
{
  return InterlockedExchangeAdd(p, x);
}

ATOMIC_INLINE uint32_t atomic_fetch_and_or_uint32(uint32_t *p, uint32_t x)
{
  return InterlockedOr((long *)p, x);
}

ATOMIC_INLINE uint32_t atomic_fetch_and_and_uint32(uint32_t *p, uint32_t x)
{
  return InterlockedAnd((long *)p, x);
}

/* Signed */
ATOMIC_INLINE int32_t atomic_add_and_fetch_int32(int32_t *p, int32_t x)
{
  return InterlockedExchangeAdd((long *)p, x) + x;
}

ATOMIC_INLINE int32_t atomic_sub_and_fetch_int32(int32_t *p, int32_t x)
{
  return InterlockedExchangeAdd((long *)p, -x) - x;
}

ATOMIC_INLINE int32_t atomic_cas_int32(int32_t *v, int32_t old, int32_t _new)
{
  return InterlockedCompareExchange((long *)v, _new, old);
}

ATOMIC_INLINE int32_t atomic_load_int32(const int32_t *v)
{
  return __atomic_impl_load_generic(v);
}

ATOMIC_INLINE void atomic_store_int32(int32_t *p, int32_t v)
{
  __atomic_impl_store_generic(p, v);
}

ATOMIC_INLINE int32_t atomic_fetch_and_add_int32(int32_t *p, int32_t x)
{
  return InterlockedExchangeAdd((long *)p, x);
}

ATOMIC_INLINE int32_t atomic_fetch_and_or_int32(int32_t *p, int32_t x)
{
  return InterlockedOr((long *)p, x);
}

ATOMIC_INLINE int32_t atomic_fetch_and_and_int32(int32_t *p, int32_t x)
{
  return InterlockedAnd((long *)p, x);
}

/******************************************************************************/
/* 16-bit operations. */

/* Signed */
ATOMIC_INLINE int16_t atomic_fetch_and_or_int16(int16_t *p, int16_t x)
{
  return InterlockedOr16((short *)p, x);
}

ATOMIC_INLINE int16_t atomic_fetch_and_and_int16(int16_t *p, int16_t x)
{
  return InterlockedAnd16((short *)p, x);
}

/******************************************************************************/
/* 8-bit operations. */

/* Unsigned */
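/* Editor's note: `#pragma intrinsic` below forces the compiler-intrinsic form of the 8-bit
 * interlocked operations; the preprocessor test then picks the plain `InterlockedAnd8` /
 * `InterlockedOr8` names on 64-bit configurations and the underscore-prefixed intrinsics
 * elsewhere. */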
#pragma intrinsic(_InterlockedAnd8)
ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b)
{
#if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
  return InterlockedAnd8((char *)p, (char)b);
#else
  return _InterlockedAnd8((char *)p, (char)b);
#endif
}

#pragma intrinsic(_InterlockedOr8)
ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b)
{
#if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
  return InterlockedOr8((char *)p, (char)b);
#else
  return _InterlockedOr8((char *)p, (char)b);
#endif
}

/* Signed */
#pragma intrinsic(_InterlockedAnd8)
ATOMIC_INLINE int8_t atomic_fetch_and_and_int8(int8_t *p, int8_t b)
{
#if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
  return InterlockedAnd8((char *)p, (char)b);
#else
  return _InterlockedAnd8((char *)p, (char)b);
#endif
}

#pragma intrinsic(_InterlockedOr8)
ATOMIC_INLINE int8_t atomic_fetch_and_or_int8(int8_t *p, int8_t b)
{
#if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
  return InterlockedOr8((char *)p, (char)b);
#else
  return _InterlockedOr8((char *)p, (char)b);
#endif
}

#undef __atomic_impl_load_generic
#undef __atomic_impl_store_generic

#if defined(__clang__)
#  pragma GCC diagnostic pop
#endif

#endif /* __ATOMIC_OPS_MSVC_H__ */
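
Usage sketch (editor's addition, not part of the header): portable code is expected to include
the umbrella header "atomic_ops.h", which selects this MSVC backend on Windows. The helper name
below is hypothetical, shown only to illustrate the compare-and-swap retry pattern the functions
above enable.

#include "atomic_ops.h"

/* Atomically raise *p to at least `value` using a CAS retry loop. */
static void atomic_fetch_max_uint32(uint32_t *p, uint32_t value)
{
  uint32_t prev = atomic_load_uint32(p);
  while (prev < value) {
    /* atomic_cas_uint32() returns the value *p held before the call: equal to `prev`
     * means our swap won; anything else means another thread intervened, so retry
     * against the freshly observed value. */
    const uint32_t observed = atomic_cas_uint32(p, prev, value);
    if (observed == prev) {
      break;
    }
    prev = observed;
  }
}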