#ifndef V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_

// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.

namespace v8 {
namespace base {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

typedef char __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64;  // NOLINT

#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
#else
typedef char __tsan_atomic128;
#define __TSAN_HAS_INT128 0
#endif

typedef enum {
  __tsan_memory_order_relaxed,
  __tsan_memory_order_consume,
  __tsan_memory_order_acquire,
  __tsan_memory_order_release,
  __tsan_memory_order_acq_rel,
  __tsan_memory_order_seq_cst,
} __tsan_memory_order;
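
// Illustrative check, not part of the original header: the enumerators
// above are declared in the same order as C++11 std::memory_order, so
// they carry the same integer values and can be forwarded directly to
// the instrumented runtime. This static_assert is an assumption-checking
// sketch rather than shipped code.
static_assert(__tsan_memory_order_relaxed == 0 &&
              __tsan_memory_order_consume == 1 &&
              __tsan_memory_order_acquire == 2 &&
              __tsan_memory_order_release == 3 &&
              __tsan_memory_order_acq_rel == 4 &&
              __tsan_memory_order_seq_cst == 5,
              "enumerators must match the C++11 memory order values");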
// 64-bit versions of the wrapper operations. Each delegates to the
// corresponding __tsan_atomic64_* primitive (declared below) with an
// explicit memory order.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}
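
// Usage sketch, not part of the header itself: a writer publishes a
// value with Release_Store and a reader observes it with Acquire_Load,
// which is exactly the release/acquire pairing TSan verifies. The
// globals and helper names here are hypothetical.
static Atomic64 g_payload = 0;
static Atomic64 g_ready = 0;

inline void Publish(Atomic64 data) {
  // Plain write first; Release_Store orders it before the flag write.
  g_payload = data;
  Release_Store(&g_ready, 1);
}

inline bool TryConsume(Atomic64* out) {
  if (Acquire_Load(&g_ready) == 0) return false;
  // The acquire load above establishes happens-before with Publish(),
  // so this plain read is race-free.
  *out = g_payload;
  return true;
}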
// Annotated atomic primitives supplied by the ThreadSanitizer runtime,
// grouped by operand width.
extern "C" {

__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a, __tsan_memory_order mo);
void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a, __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a, __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a, __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a, __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a, __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a, __tsan_atomic8 v, __tsan_memory_order mo);
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a, __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a, __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);

__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a, __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a, __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a, __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a, __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a, __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a, __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a, __tsan_atomic16 v, __tsan_memory_order mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a, __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a, __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);

__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a, __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a, __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a, __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a, __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a, __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a, __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a, __tsan_atomic32 v, __tsan_memory_order mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a, __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a, __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);

__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a, __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a, __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a, __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a, __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a, __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a, __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a, __tsan_atomic64 v, __tsan_memory_order mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a, __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a, __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);

__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a, __tsan_memory_order mo);
void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a, __tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a, __tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a, __tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a, __tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a, __tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a, __tsan_atomic128 v, __tsan_memory_order mo);
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a, __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a, __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v, __tsan_memory_order mo, __tsan_memory_order fail_mo);

void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);

}  // extern "C"

#endif  // TSAN_INTERFACE_ATOMIC_H

// V8's Atomic8/Atomic32 wrapper API. The full header defines these
// inline, following the same pattern as the Atomic64 versions above.
void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value);
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value);
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
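
// Illustrative sketch of how one of the prototypes above is implemented,
// inferred from the Atomic64 definitions earlier in this file rather
// than copied from the full header.
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  // fetch_add returns the previous value; adding the increment yields
  // the new value, matching the AtomicIncrement contract.
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}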
}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_