7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
8 #define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
// This struct is not part of the public API of this module; clients may not
// use it.
// NOTE(review): the struct body around the `#if !defined(__SSE2__)` guard was
// lost in extraction; reconstructed from the upstream Chromium/V8 atomicops
// header -- confirm against the original file.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
#if !defined(__SSE2__)
  bool has_sse2;             // Processor has SSE2.
#endif
};
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;

// Compiler-only barrier: prevents the compiler from reordering memory
// accesses across this point, but emits no machine instructions.
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
34 __asm__ __volatile__(
"lock; cmpxchgl %1,%2"
36 :
"q" (new_value),
"m" (*ptr),
"0" (old_value)
43 __asm__ __volatile__(
"xchgl %1,%0"
45 :
"m" (*ptr),
"0" (new_value)
53 __asm__ __volatile__(
"lock; xaddl %0,%1"
54 :
"+r" (temp),
"+m" (*ptr)
57 return temp + increment;
63 __asm__ __volatile__(
"lock; xaddl %0,%1"
64 :
"+r" (temp),
"+m" (*ptr)
68 __asm__ __volatile__(
"lfence" : : :
"memory");
70 return temp + increment;
78 __asm__ __volatile__(
"lfence" : : :
"memory");
97 #if defined(__x86_64__) || defined(__SSE2__)
102 __asm__ __volatile__(
"mfence" : : :
"memory");
114 __asm__ __volatile__(
"mfence" : : :
"memory");
124 __asm__ __volatile__(
"mfence" : : :
"memory");
158 #if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
164 Atomic64 new_value) {
166 __asm__ __volatile__(
"lock; cmpxchgq %1,%2"
168 :
"q" (new_value),
"m" (*ptr),
"0" (old_value)
174 Atomic64 new_value) {
175 __asm__ __volatile__(
"xchgq %1,%0"
177 :
"m" (*ptr),
"0" (new_value)
183 Atomic64 increment) {
184 Atomic64 temp = increment;
185 __asm__ __volatile__(
"lock; xaddq %0,%1"
186 :
"+r" (temp),
"+m" (*ptr)
189 return temp + increment;
193 Atomic64 increment) {
194 Atomic64 temp = increment;
195 __asm__ __volatile__(
"lock; xaddq %0,%1"
196 :
"+r" (temp),
"+m" (*ptr)
200 __asm__ __volatile__(
"lfence" : : :
"memory");
202 return temp + increment;
209 inline void Acquire_Store(
volatile Atomic64* ptr, Atomic64 value) {
214 inline void Release_Store(
volatile Atomic64* ptr, Atomic64 value) {
239 inline Atomic64
Acquire_Load(
volatile const Atomic64* ptr) {
240 Atomic64 value = *ptr;
247 inline Atomic64
Release_Load(
volatile const Atomic64* ptr) {
254 Atomic64 new_value) {
257 __asm__ __volatile__(
"lfence" : : :
"memory");
264 Atomic64 new_value) {
// The barrier macro is internal to this header; remove it so it does not
// leak to includers.
// NOTE(review): the extracted text re-defined ATOMICOPS_COMPILER_BARRIER()
// with an EMPTY expansion right after the #undef -- that would silently turn
// any later barrier use into a no-op. The upstream header only #undefs the
// macro; the stray redefinition is removed here.
#undef ATOMICOPS_COMPILER_BARRIER
// ---------------------------------------------------------------------------
// Extraction index (auto-generated symbol list; not part of the original
// header). Public interface declared by this file:
//   Atomic32 Release_Load(volatile const Atomic32 *ptr);
//   Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value);
//   void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value);
//   Atomic32 Acquire_Load(volatile const Atomic32 *ptr);
//   struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
//   Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr, Atomic32 increment);
//   void Release_Store(volatile Atomic32 *ptr, Atomic32 value);
//   Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr, Atomic32 increment);
//   void NoBarrier_Store(volatile Atomic8 *ptr, Atomic8 value);
//   Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr, Atomic32 new_value);
//   Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value);
//   Atomic8 NoBarrier_Load(volatile const Atomic8 *ptr);
//   Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value);
// NOTE(review): the following line appears to have been pulled in from a
// different file's banner comment (V8 debugger support header), not from
// this atomics header:
//   "Debugger support for the V8 JavaScript engine."