#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_generic_H)
#error Do not #include this internal file directly; use public TBB headers instead.
#endif

#define __TBB_machine_gcc_generic_H
#include <stdint.h>

#define __TBB_WORDSIZE __SIZEOF_POINTER__
#if __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN
#define __TBB_64BIT_ATOMICS 0
#endif
#if __ANDROID__ && __TBB_generic_arch
#define __TBB_CPU_CTL_ENV_PRESENT 0
#endif
#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__)
#define __TBB_ENDIANNESS __TBB_ENDIAN_BIG
#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__)
#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE
#elif defined(__BYTE_ORDER__)
#define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED
#else
#define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT
#endif
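// GCC 4.7 introduced the __atomic builtins; older compilers fall back to the legacy __sync builtins.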
#if __TBB_GCC_VERSION < 40700
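// With no knowledge of the target architecture, every consistency helper maps to
// __sync_synchronize(), a full memory barrier; conservative, but correct.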
#define __TBB_acquire_consistency_helper() __sync_synchronize()
#define __TBB_release_consistency_helper() __sync_synchronize()
#define __TBB_full_memory_fence()          __sync_synchronize()
#define __TBB_control_consistency_helper() __sync_synchronize()
#define __TBB_MACHINE_DEFINE_ATOMICS(S,T)                                                     \
inline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) {                \
    return __sync_val_compare_and_swap(reinterpret_cast<volatile T *>(ptr),comparand,value);  \
}                                                                                             \
inline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) {                           \
    return __sync_fetch_and_add(reinterpret_cast<volatile T *>(ptr),value);                   \
}
#define __TBB_USE_GENERIC_FETCH_STORE 1
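// __sync_lock_test_and_set is not guaranteed to implement a full exchange on every target,
// so the generic fetch-and-store implementation from tbb_machine.h is requested instead.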
#else
// GCC 4.7 and later: use the __atomic builtins.

#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory")
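// The acquire and release fence intrinsics may not act as compiler fences on their own,
// so a compiler fence is placed on both sides of each one.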
#define __TBB_acquire_consistency_helper() __TBB_compiler_fence(); __atomic_thread_fence(__ATOMIC_ACQUIRE); __TBB_compiler_fence()
#define __TBB_release_consistency_helper() __TBB_compiler_fence(); __atomic_thread_fence(__ATOMIC_RELEASE); __TBB_compiler_fence()
#define __TBB_full_memory_fence()          __atomic_thread_fence(__ATOMIC_SEQ_CST)
#define __TBB_control_consistency_helper() __TBB_acquire_consistency_helper()
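// __atomic_compare_exchange_n writes the observed value back into 'comparand' on failure
// (and leaves it equal to the expected value on success), so returning 'comparand' yields
// the value seen at *ptr, which is exactly the cmpswp contract.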
#define __TBB_MACHINE_DEFINE_ATOMICS(S,T)                                                     \
inline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) {                \
    (void)__atomic_compare_exchange_n(reinterpret_cast<volatile T *>(ptr), &comparand, value, \
                                      false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);             \
    return comparand;                                                                          \
}                                                                                             \
inline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) {                           \
    return __atomic_fetch_add(reinterpret_cast<volatile T *>(ptr), value, __ATOMIC_SEQ_CST);  \
}                                                                                             \
inline T __TBB_machine_fetchstore##S( volatile void *ptr, T value ) {                         \
    return __atomic_exchange_n(reinterpret_cast<volatile T *>(ptr), value, __ATOMIC_SEQ_CST); \
}
#endif // __TBB_GCC_VERSION < 40700

__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t)
__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t)
__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t)
__TBB_MACHINE_DEFINE_ATOMICS(8,int64_t)

#undef __TBB_MACHINE_DEFINE_ATOMICS

typedef unsigned char __TBB_Flag;
typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag;
#if __TBB_GCC_VERSION < 40700
#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1
#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1
#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1
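// Without the __atomic builtins, the generic fence-based load/store templates from
// tbb_machine.h are used for acquire/release, relaxed, and sequentially consistent accesses.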
static inline void __TBB_machine_or( volatile void *ptr, uintptr_t addend ) {
    __sync_fetch_and_or(reinterpret_cast<volatile uintptr_t *>(ptr),addend);
}
static inline void __TBB_machine_and( volatile void *ptr, uintptr_t addend ) {
    __sync_fetch_and_and(reinterpret_cast<volatile uintptr_t *>(ptr),addend);
}
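// Per GCC documentation, __sync_lock_test_and_set is an acquire barrier and
// __sync_lock_release is a release barrier, which is sufficient for a byte spin lock.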
inline bool __TBB_machine_try_lock_byte( __TBB_atomic_flag &flag ) {
    return __sync_lock_test_and_set(&flag,1)==0;
}
inline void __TBB_machine_unlock_byte( __TBB_atomic_flag &flag ) {
    __sync_lock_release(&flag);
}

#else
static inline void __TBB_machine_or( volatile void *ptr, uintptr_t addend ) {
    __atomic_fetch_or(reinterpret_cast<volatile uintptr_t *>(ptr),addend,__ATOMIC_SEQ_CST);
}
static inline void __TBB_machine_and( volatile void *ptr, uintptr_t addend ) {
    __atomic_fetch_and(reinterpret_cast<volatile uintptr_t *>(ptr),addend,__ATOMIC_SEQ_CST);
}
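// __atomic_test_and_set returns the previous flag state; the lock is acquired only
// when the flag was previously clear.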
inline bool __TBB_machine_try_lock_byte( __TBB_atomic_flag &flag ) {
    return !__atomic_test_and_set(&flag,__ATOMIC_ACQUIRE);
}
inline void __TBB_machine_unlock_byte( __TBB_atomic_flag &flag ) {
    __atomic_clear(&flag,__ATOMIC_RELEASE);
}
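// GCC atomic load/store intrinsics might not imply a compiler fence. The wrappers below add
// one after loads and before stores with acquire/release semantics, and on both sides of
// sequentially consistent accesses.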
namespace tbb { namespace internal {

template <typename T, int MemOrder>
inline T __TBB_machine_atomic_load( const volatile T& location ) {
    if (MemOrder == __ATOMIC_SEQ_CST) __TBB_compiler_fence();
    T value = __atomic_load_n(&location, MemOrder);
    if (MemOrder != __ATOMIC_RELAXED) __TBB_compiler_fence();
    return value;
}
template <typename T, int MemOrder>
inline void __TBB_machine_atomic_store( volatile T& location, T value ) {
    if (MemOrder != __ATOMIC_RELAXED) __TBB_compiler_fence();
    __atomic_store_n(&location, value, MemOrder);
    if (MemOrder == __ATOMIC_SEQ_CST) __TBB_compiler_fence();
}
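// Load/store templates with the access semantics expected by tbb_machine.h:
// acquire/release, relaxed, and sequentially consistent.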
template <typename T, size_t S>
struct machine_load_store {
    static T load_with_acquire ( const volatile T& location ) {
        return __TBB_machine_atomic_load<T, __ATOMIC_ACQUIRE>(location);
    }
    static void store_with_release ( volatile T &location, T value ) {
        __TBB_machine_atomic_store<T, __ATOMIC_RELEASE>(location, value);
    }
};
template <typename T, size_t S>
struct machine_load_store_relaxed {
    static inline T load ( const volatile T& location ) {
        return __TBB_machine_atomic_load<T, __ATOMIC_RELAXED>(location);
    }
    static inline void store ( volatile T& location, T value ) {
        __TBB_machine_atomic_store<T, __ATOMIC_RELAXED>(location, value);
    }
};
template <typename T, size_t S>
struct machine_load_store_seq_cst {
    static T load ( const volatile T& location ) {
        return __TBB_machine_atomic_load<T, __ATOMIC_SEQ_CST>(location);
    }
    static void store ( volatile T &location, T value ) {
        __TBB_machine_atomic_store<T, __ATOMIC_SEQ_CST>(location, value);
    }
};

}} // namespace tbb::internal
#endif // __TBB_GCC_VERSION < 40700
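// Map the portable TBB machine interface onto the helpers defined above.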
#define __TBB_AtomicOR(P,V)  __TBB_machine_or(P,V)
#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V)

#define __TBB_TryLockByte __TBB_machine_try_lock_byte
#define __TBB_UnlockByte  __TBB_machine_unlock_byte
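// __builtin_clz* count leading zero bits; the overloads let __TBB_machine_lg pick the
// builtin that matches the operand width.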
namespace tbb { namespace internal { namespace gcc_builtins {
    inline int clz( unsigned int x ) { return __builtin_clz(x); }
    inline int clz( unsigned long int x ) { return __builtin_clzl(x); }
    inline int clz( unsigned long long int x ) { return __builtin_clzll(x); }
}}}
// The logarithm is the index of the most significant non-zero bit.
static inline intptr_t __TBB_machine_lg( uintptr_t x ) {
    return sizeof(x)*8 - tbb::internal::gcc_builtins::clz(x) - 1;
}

#define __TBB_Log2(V) __TBB_machine_lg(V)
#if __TBB_WORDSIZE==4
#define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1
#endif
#if __TBB_x86_32 || __TBB_x86_64