/*
 * Per-architecture implementations of PAPI's low-level locks.
 *
 * Two families of implementations are selected at compile time:
 *   - USE_PTHREAD_MUTEXES: delegate to pthread mutexes stored in
 *     _papi_hwd_lock_data[] (declared elsewhere as pthread_mutex_t[]).
 *   - otherwise: _papi_hwd_lock_data[] is an array of volatile
 *     unsigned int spin words holding MUTEX_OPEN/MUTEX_CLOSED,
 *     manipulated with architecture-specific atomic instructions.
 *
 * NOTE(review): this section was recovered from a whitespace-mangled
 * copy; asm bodies that were missing from the copy (PowerPC
 * lwarx/stwcx., MIPS ll/sc) were restored from upstream PAPI sources —
 * verify against project history.
 */

#if defined(USE_PTHREAD_MUTEXES)

/* pthread-based locking: one mutex per lock slot. */
#define _papi_hwd_lock(lck)                          \
do                                                   \
{                                                    \
   pthread_mutex_lock (&_papi_hwd_lock_data[lck]);   \
} while(0)

#define _papi_hwd_unlock(lck)                        \
do                                                   \
{                                                    \
   pthread_mutex_unlock(&_papi_hwd_lock_data[lck]);  \
} while(0)

#else

/* Spin-word states used by every non-pthread implementation. */
#define MUTEX_OPEN   0
#define MUTEX_CLOSED 1

#if defined(__ia64__)

#ifdef __INTEL_COMPILER
/* icc intrinsics: compare-and-swap with acquire semantics to take the
 * lock, plain exchange to release it. */
#define _papi_hwd_lock(lck) { while(_InterlockedCompareExchange_acq(&_papi_hwd_lock_data[lck],MUTEX_CLOSED,MUTEX_OPEN) != MUTEX_OPEN) { ; } }
#define _papi_hwd_unlock(lck) { _InterlockedExchange((volatile int *)&_papi_hwd_lock_data[lck], MUTEX_OPEN); }
#else
/* gcc on ia64: cmpxchg4.acq compares against ar.ccv, so the expected
 * value (MUTEX_OPEN) must be loaded into ar.ccv before each attempt. */
#define _papi_hwd_lock(lck)                                            \
{                                                                      \
   int res = 0;                                                        \
   do {                                                                \
      __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "r"(MUTEX_OPEN));     \
      __asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv" : "=r"(res) : "r"(&_papi_hwd_lock_data[lck]), "r"(MUTEX_CLOSED) : "memory"); \
   } while (res != MUTEX_OPEN);                                        \
}
/* st4.rel: 4-byte store with release semantics re-opens the lock. */
#define _papi_hwd_unlock(lck) { __asm__ __volatile__ ("st4.rel [%0]=%1" : : "r"(&_papi_hwd_lock_data[lck]), "r"(MUTEX_OPEN) : "memory"); }
#endif

#elif defined(__i386__)||defined(__x86_64__)

/* x86: spin on lock-prefixed cmpxchg.  eax carries the expected value
 * (MUTEX_OPEN); on success cmpxchg stores MUTEX_CLOSED and the old
 * value read back into eax equals MUTEX_OPEN. */
#define _papi_hwd_lock(lck)                   \
do                                            \
{                                             \
   unsigned int res = 0;                      \
   do {                                       \
      __asm__ __volatile__ ("lock ; " "cmpxchg %1,%2" : "=a"(res) : "q"(MUTEX_CLOSED), "m"(_papi_hwd_lock_data[lck]), "0"(MUTEX_OPEN) : "memory"); \
   } while(res != (unsigned int)MUTEX_OPEN);  \
} while(0)

/* xchg with a memory operand is implicitly locked on x86. */
#define _papi_hwd_unlock(lck)                 \
do                                            \
{                                             \
   unsigned int res = 0;                      \
   __asm__ __volatile__ ("xchg %0,%1" : "=r"(res) : "m"(_papi_hwd_lock_data[lck]), "0"(MUTEX_OPEN) : "memory"); \
} while(0)

#elif defined(__powerpc__)

/*
 * Atomically exchange *p with val and return the previous value,
 * using a lwarx/stwcx. reservation loop (adapted from the kernel's
 * asm-ppc/system.h, whose versions are only visible with __KERNEL__).
 */
static __inline__ unsigned long
papi_xchg_u32( volatile void *p, unsigned long val )
{
	unsigned long prev;

	__asm__ __volatile__( "\n\
1:	lwarx	%0,0,%2 \n\
	stwcx.	%3,0,%2 \n\
	bne-	1b \n\
	isync":
						  "=&r"( prev ), "=m"( *( volatile unsigned long * ) p )
						  :"r"( p ), "r"( val ),
						  "m"( *( volatile unsigned long * ) p )
						  :"cc", "memory" );

	return prev;
}

/* Spin until the word we swapped out was MUTEX_OPEN, i.e. we were the
 * ones who closed it. */
#define _papi_hwd_lock(lck)                                            \
{                                                                      \
   unsigned int retval;                                                \
   do {                                                                \
      retval = papi_xchg_u32(&_papi_hwd_lock_data[lck],MUTEX_CLOSED);  \
   } while(retval != (unsigned int)MUTEX_OPEN);                        \
}

#define _papi_hwd_unlock(lck)                                          \
{                                                                      \
   unsigned int retval;                                                \
   do {                                                                \
      retval = papi_xchg_u32(&_papi_hwd_lock_data[lck],MUTEX_OPEN);    \
   } while(retval != (unsigned int)MUTEX_CLOSED);                      \
}

#elif defined(__sparc__)

/* sparc: ldstub atomically sets the lock byte to 0xff and returns the
 * old value in %g2; the busy-wait loop lives in a cold .subsection. */
static inline void
__raw_spin_lock( volatile unsigned int *lock )
{
	__asm__ __volatile__( "\n1:\n\t" "ldstub	[%0], %%g2\n\t"
						  "orcc	%%g2, 0x0, %%g0\n\t"
						  "bne,a	2f\n\t"
						  " ldub	[%0], %%g2\n\t"
						  ".subsection	2\n" "2:\n\t"
						  "orcc	%%g2, 0x0, %%g0\n\t"
						  "bne,a	2b\n\t"
						  " ldub	[%0], %%g2\n\t"
						  "b,a	1b\n\t"
						  ".previous\n":	/* no outputs */
						  :"r"( lock )
						  :"g2", "memory", "cc" );
}

/* Release: plain byte store of %g0 (zero) to the lock byte. */
static inline void
__raw_spin_unlock( volatile unsigned int *lock )
{
	__asm__ __volatile__( "stb %%g0, [%0]"::"r"( lock ):"memory" );
}

#define _papi_hwd_lock(lck) __raw_spin_lock(&_papi_hwd_lock_data[lck]);
#define _papi_hwd_unlock(lck) __raw_spin_unlock(&_papi_hwd_lock_data[lck])

#elif defined(__arm__)

#warning "WARNING! Verify mutexes work on ARM!"

#if 0
/* Older swpb-based test-and-set kept for reference (disabled):
 * evaluates to 1 when the lock was acquired. */
#define MUTEX_SET(tsl) ({ \
	int ret; \
	asm volatile ( \
		"swpb %0, %1, [%2]\n\t" \
		"eor %0, %0, #1\n\t" \
		: "=&r" (ret) \
		: "r" (1), "r" (tsl) \
	); \
	ret; \
})

#define _papi_hwd_lock(lck) MUTEX_SET(lck)
#define _papi_hwd_unlock(lck) (*(volatile int *)(lck) = 0)
#endif

/* Spin with swp until the previously stored value reads 0 (unlocked).
 * NOTE(review): swp is deprecated/unavailable on ARMv6+ cores — verify
 * on the target CPU. */
static inline int
__arm_papi_spin_lock( volatile unsigned int *lock )
{
	unsigned int val;

	do {
		asm volatile ( "swp %0, %1, [%2]"
					   : "=r" (val)
					   : "0" (1), "r" (lock)
					   : "memory" );
	} while (val != 0);

	return 0;
}

/* rmb() (defined elsewhere in PAPI) brackets the spin-word accesses. */
#define _papi_hwd_lock(lck) { rmb(); __arm_papi_spin_lock(&_papi_hwd_lock_data[lck]); rmb(); }
#define _papi_hwd_unlock(lck) { rmb(); _papi_hwd_lock_data[lck] = 0; rmb(); }

#elif defined(__mips__)

/* mips: ll/sc reservation loop; sync provides the memory barrier. */
static inline void __raw_spin_lock( volatile unsigned int *lock )
{
	unsigned int tmp;

	__asm__ __volatile__(
	"	.set	noreorder	# __raw_spin_lock	\n"
	"1:	ll	%1, %2					\n"
	"	bnez	%1, 1b					\n"
	"	 li	%1, 1					\n"
	"	sc	%1, %0					\n"
	"	beqzl	%1, 1b					\n"
	"	 nop						\n"
	"	sync						\n"
	"	.set	reorder					\n"
	: "=m" (*lock), "=&r" (tmp)
	: "m" (*lock)
	: "memory" );
}

static inline void __raw_spin_unlock( volatile unsigned int *lock )
{
	__asm__ __volatile__(
	"	.set	noreorder	# __raw_spin_unlock	\n"
	"	sync						\n"
	"	sw	$0, %0					\n"
	"	.set	reorder					\n"
	: "=m" (*lock)
	:
	: "memory" );
}

#define _papi_hwd_lock(lck) __raw_spin_lock(&_papi_hwd_lock_data[lck]);
#define _papi_hwd_unlock(lck) __raw_spin_unlock(&_papi_hwd_lock_data[lck])

#elif defined(__GNUC__)

/* Generic fallback for architectures without hand-written asm above:
 * GCC/Clang __atomic builtins give acquire/release spin semantics on
 * the same volatile unsigned int spin words, so new targets (e.g.
 * aarch64) get working locks instead of hitting the #error below. */
#define _papi_hwd_lock(lck)                                            \
do {                                                                   \
   while (__atomic_exchange_n(&_papi_hwd_lock_data[lck],               \
                              MUTEX_CLOSED, __ATOMIC_ACQUIRE)          \
          != MUTEX_OPEN) { ; }                                         \
} while(0)

#define _papi_hwd_unlock(lck)                                          \
do {                                                                   \
   __atomic_store_n(&_papi_hwd_lock_data[lck], MUTEX_OPEN,             \
                    __ATOMIC_RELEASE);                                 \
} while(0)

#else
#error "_papi_hwd_lock/unlock undefined!"
#endif

#endif
volatile unsigned int _papi_hwd_lock_data[PAPI_MAX_LOCK]