atomicops_internals_tsan.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.


// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.

#ifndef V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_

namespace v8 {
namespace base {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
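// The declarations below mirror ThreadSanitizer's public atomic interface
// (tsan_interface_atomic.h in compiler-rt), so that every atomic operation
// in an instrumented build is routed through, and observed by, the TSan
// runtime.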

extern "C" {
typedef char __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64;  // NOLINT

#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
#else
typedef char __tsan_atomic128;
#define __TSAN_HAS_INT128 0
#endif
typedef enum {
  __tsan_memory_order_relaxed,
  __tsan_memory_order_consume,
  __tsan_memory_order_acquire,
  __tsan_memory_order_release,
  __tsan_memory_order_acq_rel,
  __tsan_memory_order_seq_cst,
} __tsan_memory_order;

__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
    __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
    __tsan_memory_order mo);

void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
    __tsan_memory_order mo);
void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
    __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
    volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
    volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
    volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
    volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
    volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);

void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
}  // extern "C"

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H

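// The operations below implement V8's atomicops interface on top of the
// __tsan_* functions. Each wrapper's name encodes its barrier semantics,
// which are translated into an explicit __tsan_memory_order argument.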
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

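// C11-style memory orders have no "acquire store" or "release load", so
// Acquire_Store and Release_Load are emulated with a relaxed access plus an
// explicit sequentially consistent fence.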
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

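// The Atomic64 operations below mirror the Atomic32 versions above, with an
// identical mapping of barrier names to __tsan_memory_order values.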
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
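
Usage illustration: client code is expected to include base/atomicops.h, which dispatches to this implementation in ThreadSanitizer builds. Below is a minimal sketch of a spinlock built on the operations this header defines; the SpinLock class is hypothetical and not part of V8.

// Hypothetical example; illustrates the atomicops API only.
#include "base/atomicops.h"

class SpinLock {
 public:
  void Lock() {
    // Acquire_CompareAndSwap returns the value previously stored at ptr;
    // a return value of 0 means the 0 -> 1 transition succeeded and the
    // lock is now held.
    while (v8::base::Acquire_CompareAndSwap(&lock_, 0, 1) != 0) {
      // Spin until the current holder calls Unlock().
    }
  }

  void Unlock() {
    // Release semantics publish every write made inside the critical
    // section before the lock word is cleared.
    v8::base::Release_Store(&lock_, 0);
  }

 private:
  v8::base::Atomic32 lock_ = 0;
};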