//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only in platform-specific
//     files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

#ifndef SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
static const uptr kAllocatorSpace = 0;
static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kAllocatorRegionSizeLog = 20;
static const uptr kAllocatorNumRegions =
    kAllocatorSize >> kAllocatorRegionSizeLog;
typedef TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12,
    MapUnmapCallback> ByteMap;
typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0,
    CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap,
    MapUnmapCallback> PrimaryAllocator;
#else
typedef SizeClassAllocator64<Mapping::kHeapMemBeg,
    Mapping::kHeapMemEnd - Mapping::kHeapMemBeg, 0,
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   unused          : -
//   history_size    : 3
//   epoch           : kClkBits
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  ALWAYS_INLINE
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() {
    SetHistorySize(0);
  }

  ALWAYS_INLINE
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // The trace consists of 2^(hs + 1) parts of kTracePartSize events each,
    // so the trace position is the epoch modulo the total trace size
    // (e.g. when hs == 0, the trace consists of 2 parts).
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};
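
// Usage sketch (illustrative): FastState packs a thread id and its current
// epoch into one word, e.g.
//   FastState fs(/*tid=*/1, /*epoch=*/10);
//   fs.IncrementEpoch();   // epoch() == 11, tid() == 1
//   fs.SetIgnoreBit();     // tid() is still 1, GetIgnoreBit() is now true
// ThreadState::fast_state below holds this word for the current thread.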

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
//   epoch           : kClkBits
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static ALWAYS_INLINE
  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }

  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1 > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible), we write
  // shadow values with the tid/epoch of the free and with the freed bit set.
  // During memory access processing the freed bit is treated as the msb of
  // the tid, so any access races with a shadow value that has the freed bit
  // set (it looks like a write from a thread we have never synchronized
  // with). This allows us to detect accesses to freed memory without
  // additional overhead in memory access processing, while still being able
  // to restore the tid/epoch of the free.
  void MarkAsFreed() {
     x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

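  // The is_read and is_atomic bits are adjacent, so ((x_ >> kReadShift) & 3)
  // forms a 2-bit access-type code: 0 = non-atomic write, 1 = non-atomic read,
  // 2 = atomic write, 3 = atomic read. Larger codes are "weaker" accesses,
  // which is what the comparisons in the helpers below rely on.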
  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift   = 5 + kClkBits;
  static const u64 kReadBit     = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit   = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
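
// Sketch of how a shadow value is typically composed for an access
// (illustrative only; see MemoryAccess() declared below for the real path):
//   Shadow cur(thr->fast_state);                       // tid + current epoch
//   cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);  // offset and size
//   cur.SetWrite(kAccessIsWrite);
//   cur.SetAtomic(kIsAtomic);
// The resulting 64-bit value is then compared against the shadow cells
// already stored for the 8-byte granule containing addr.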

struct ThreadSignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
  // if we are processing a write to X from the same thread at epoch=200,
  // we do nothing, because both writes happen in the same 'synch epoch'.
  // That is, if another memory access does not race with the former write,
  // it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by the epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  // Go does not support ignores.
#ifndef SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // C/C++ uses a fixed-size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  MutexSet mset;
  ThreadClock clock;
#ifndef SANITIZER_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#endif
#if TSAN_COLLECT_STATS
  u64 stat[StatCnt];
#endif
  const int tid;
  const int unique_id;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_inited;
  bool is_dead;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;

#if SANITIZER_DEBUG && !SANITIZER_GO
  InternalDeadlockDetector internal_deadlock_detector;
#endif
  DDPhysicalThread *dd_pt;
  DDLogicalThread *dd_lt;

  atomic_uintptr_t in_signal_handler;
  ThreadSignalContext *signal_ctx;

  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;

#ifndef SANITIZER_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

#ifndef SANITIZER_GO
#if SANITIZER_MAC
ThreadState *cur_thread();
void cur_thread_finalize();
#else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
INLINE void cur_thread_finalize() { }
#endif  // SANITIZER_MAC
#endif  // SANITIZER_GO

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
  u32 creation_stack_id;
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped with an older epoch,
  // the event is from a dead thread that shared the tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
  bool after_multithreaded_fork;

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  ClockAlloc clock_alloc;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

extern Context *ctx;  // The one and only global runtime context.

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#ifndef SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#ifndef SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
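
// Usage sketch (illustrative): interceptor processing is disabled for the
// current thread for the lifetime of the scope guard, e.g.
//   {
//     ScopedIgnoreInterceptors ignore;
//     // libc calls made here are not processed by the TSan interceptors
//   }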

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                       const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  ReportDesc *rep_;
  // The symbolizer makes lots of intercepted calls. If we tried to process
  // them, it would at best cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};
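
// Typical report construction (illustrative sketch; see ReportRace() and the
// other reporting paths in the .cc files for the real sequences):
//   ScopedReport rep(ReportTypeRace);
//   rep.AddMemoryAccess(addr, shadow, stack, &mset);
//   rep.AddThread(tctx);
//   rep.AddLocation(addr, size);
//   OutputReport(thr, rep);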

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset);

template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
}
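
// Usage sketch (illustrative): capture the current shadow stack, optionally
// topped with the current pc:
//   VarSizeStackTrace trace;
//   ObtainCurrentStack(thr, pc, &trace);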

#if TSAN_COLLECT_STATS
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
#endif

void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] += n;
#endif
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] = n;
#endif
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                                      uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                           uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                            uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
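
// Usage sketch (illustrative): interceptors and instrumentation report plain
// accesses through these wrappers, e.g. an 8-byte non-atomic load at addr:
//   MemoryRead(thr, pc, addr, kSizeLog8);
// and a 1-byte non-atomic store:
//   MemoryWrite(thr, pc, addr, kSizeLog1);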

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
               bool try_lock = false);
int  MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD

void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of the happens-before relation, it draws a HB edge from all threads
// (at the point where they happen to execute right now) to the current thread.
// We use it to handle Go finalizers: the finalizer goroutine executes
// AcquireGlobal right before executing the finalizers. This provides a coarse,
// but simple approximation of the actually required synchronization.
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
// The caller may not create a stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif
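
// HACKY_CALL(foo) jumps to an out-of-line assembly thunk named foo_thunk
// (expected to be provided by the platform assembly file), which preserves
// the registers around a call to foo(); e.g. TraceAddEvent() below uses
// HACKY_CALL(__tsan_trace_switch) on the trace-part switch slow path.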

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                        EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, 61), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef SANITIZER_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
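  // An event is a single 64-bit value: the low 61 bits hold the pc/addr and
  // the top 3 bits hold the EventType.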
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}

#ifndef SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif

}  // namespace __tsan

#endif  // TSAN_RTL_H