//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
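// Roughly speaking, compiler instrumentation redirects atomic operations in
// user code to the __tsan_atomic* entry points defined below instead of
// emitting native atomics. Illustrative sketch (memory order constants come
// from the public tsan_interface_atomic.h header):
//   // atomic_load_explicit(&x, memory_order_acquire) on a 32-bit location
//   // becomes a call along the lines of:
//   a32 v = __tsan_atomic32_load(&x, __tsan_memory_order_acquire);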

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

// These should match declarations from the public tsan_interface_atomic.h
// header.
typedef unsigned char      a8;
typedef unsigned short     a16;  // NOLINT
typedef unsigned int       a32;
typedef unsigned long long a64;  // NOLINT
#if !defined(TSAN_GO) && (defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302))
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
#else
# define __TSAN_HAS_INT128 0
#endif

#ifndef TSAN_GO
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

// Part of ABI, do not change.
// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
typedef enum {
  mo_relaxed,
  mo_consume,
  mo_acquire,
  mo_release,
  mo_acq_rel,
  mo_seq_cst
} morder;

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

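// func_nand emulates an atomic fetch-and-nand with a CAS loop: re-read the
// current value and try to install ~(cmp & op) over it until the CAS
// succeeds, then return the previous value (matching the other func_* helpers).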
template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under a tsan-internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(TSAN_GO)
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

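// SizeLog maps sizeof(T) to the shadow-access size constant that is passed to
// MemoryReadAtomic/MemoryWriteAtomic below, e.g. SizeLog<a32>() == kSizeLog4.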
template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // which leads to false negatives only in very obscure cases.
}

#ifndef TSAN_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return (atomic_uint8_t*)a;
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return (atomic_uint16_t*)a;
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return (atomic_uint32_t*)a;
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return (atomic_uint64_t*)a;
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(TSAN_GO)
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = NoTsanAtomicLoad(a, mo);
  s->mtx.ReadUnlock();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(TSAN_GO)
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
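  // Acquire/consume only read s->clock (AcquireImpl), so a read lock on the
  // SyncVar is enough for them; the other non-relaxed orders update s->clock
  // and therefore take the write lock.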
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

#ifndef TSAN_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#ifndef TSAN_GO

// C/C++

#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
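
// For reference, SCOPED_ATOMIC(Load, a, mo) inside __tsan_atomic32_load below
// expands to roughly:
//   const uptr callpc = (uptr)__builtin_return_address(0);
//   uptr pc = StackTrace::GetCurrentPc();
//   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//   ThreadState *const thr = cur_thread();
//   if (thr->ignore_interceptors)
//     return NoTsanAtomicLoad(a, mo);
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, callpc, a, mo, __func__);
//   return AtomicLoad(thr, pc, a, mo);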

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
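  // 'a' is a dummy address: SCOPED_ATOMIC references 'a' (for sizeof(*a) and
  // the ScopedAtomic argument), but a fence has no memory location, so a null
  // char pointer is supplied purely to let the macro compile.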
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"

#else  // #ifndef TSAN_GO

// Go

#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

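// In the Go entry points below, 'a' points to the caller's argument block:
// the first word holds the address of the atomic variable, and the operands
// and result slots follow at the offsets decoded in each function (e.g.
// atomic32_load reads the address from a and stores the result at a+8).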
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #ifndef TSAN_GO