//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only in platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

#if !SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#else
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
Allocator *allocator();
#endif
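
// Illustrative note (not part of the original header): user heap allocations
// go through the combined allocator declared above. A minimal sketch of how a
// malloc interceptor might use it, assuming the user_alloc/user_free entry
// points from tsan_mman.h:
//
//   void *p = user_alloc(thr, pc, size);  // allocates via allocator(), using
//                                         // the per-Processor AllocatorCache
//                                         // (Processor::alloc_cache below)
//   user_free(thr, pc, p);                // returns the block and marks the
//                                         // range as freed in shadow memory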

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   unused          : -
//   history_size    : 3
//   epoch           : kClkBits
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  ALWAYS_INLINE
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() {
    SetHistorySize(0);
  }

  ALWAYS_INLINE
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};
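
// Illustrative sketch (not part of the original header): how the FastState
// bits behave for a concrete value. The exact constants (kTidBits, kClkBits,
// kTracePartSizeBits) come from other TSan headers; the arithmetic below only
// relies on the layout and methods defined above.
//
//   FastState fs(/*tid=*/5, /*epoch=*/0);
//   fs.IncrementEpoch();      // epoch() == 1, tid() is still 5
//   fs.SetIgnoreBit();        // sets the sign bit, so GetIgnoreBit() == true
//   fs.SetHistorySize(2);     // GetTracePos() now masks the epoch with
//                             // (1 << (kTracePartSizeBits + 2 + 1)) - 1,
//                             // i.e. the trace position wraps after
//                             // 2^(hs+1) == 8 trace parts.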

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
//   epoch           : kClkBits
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static ALWAYS_INLINE
  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }

  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write
  // shadow values with the tid/epoch of the free and the freed bit set.
  // During memory access processing the freed bit is treated as the MSB of
  // the tid, so any access races with a shadow value that has the freed bit
  // set (it is as if the write came from a thread we have never synchronized
  // with). This allows us to detect accesses to freed memory without
  // additional overhead in memory access processing, and at the same time to
  // restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift   = 5 + kClkBits;
  static const u64 kReadBit     = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit   = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
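
// Illustrative sketch (not part of the original header): how a shadow value
// for a single access is typically assembled from the setters above. An
// 8-byte shadow cell describes an access of 1/2/4/8 bytes starting at offset
// addr0 within the corresponding application-memory cell.
//
//   Shadow cur(fs);                        // fs: the thread's FastState
//   cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
//   cur.SetWrite(kAccessIsWrite);
//   cur.SetAtomic(kIsAtomic);
//   // Two shadow values can then be compared with TidsAreEqual(),
//   // Addr0AndSizeAreEqual() and TwoRangesIntersect() to decide whether
//   // they describe potentially racing accesses to overlapping bytes.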

struct ThreadSignalContext;

struct JmpBuf {
  uptr sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};

// A Processor represents a physical thread, or a P for Go.
// It is used to store internal resources like the allocator cache, and does
// not participate in race-detection logic (it is invisible to the end user).
// In C++ it is tied to an OS thread just like ThreadState, however ideally
// it should be tied to a CPU (this way we will have fewer allocator caches).
// In Go it is tied to a P, so there are significantly fewer Processors than
// ThreadStates (which are tied to Gs).
// A ThreadState must be wired with a Processor to handle events.
struct Processor {
  ThreadState *thr; // currently wired thread, or nullptr
#if !SANITIZER_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
#endif
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;
  DDPhysicalThread *dd_pt;
};
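
// Illustrative sketch (not part of the original header): the wiring contract
// between ThreadState and Processor, using the ProcCreate/ProcWire/ProcUnwire/
// ProcDestroy functions declared later in this file. A thread must have a
// Processor wired before it can process events:
//
//   Processor *proc = ProcCreate();
//   ProcWire(proc, thr);       // thr->proc() now returns proc
//   // ... process events on thr ...
//   ProcUnwire(proc, thr);
//   ProcDestroy(proc);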

#if !SANITIZER_GO
// ScopedGlobalProcessor temporarily sets up a global processor for the current
// thread, if it does not have one. Intended for interceptors that can run at
// the very end of a thread, when we have already destroyed the thread's
// processor.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
};
#endif

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
  // if we are processing a write to X from the same thread at epoch=200,
  // we do nothing, because both writes happen in the same 'synch epoch'.
  // That is, if another memory access does not race with the former write,
  // it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by the epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // Technically `current` should be a separate THREADLOCAL variable;
  // but it is placed here in order to share a cache line with the previous
  // fields.
  ThreadState* current;
  // This is a slow-path flag. On the fast path, fast_state.GetIgnoreBit() is
  // read. We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  int suppress_reports;
  // Go does not support ignores.
#if !SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // C/C++ uses a fixed-size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  MutexSet mset;
  ThreadClock clock;
#if !SANITIZER_GO
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#endif
#if TSAN_COLLECT_STATS
  u64 stat[StatCnt];
#endif
  const int tid;
  const int unique_id;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_inited;
  bool is_dead;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;

#if SANITIZER_DEBUG && !SANITIZER_GO
  InternalDeadlockDetector internal_deadlock_detector;
#endif
  DDLogicalThread *dd_lt;

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc1;
#if !SANITIZER_GO
  Processor *proc() { return proc1; }
#else
  Processor *proc();
#endif

  atomic_uintptr_t in_signal_handler;
  ThreadSignalContext *signal_ctx;

#if !SANITIZER_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of the runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  const ReportDesc *current_report;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

#if !SANITIZER_GO
#if SANITIZER_MAC || SANITIZER_ANDROID
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
INLINE void cur_thread_init() { }
#else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
INLINE void cur_thread_init() {
  ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
  if (UNLIKELY(!thr->current))
    thr->current = thr;
}
INLINE void set_cur_thread(ThreadState *thr) {
  reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
INLINE void cur_thread_finalize() { }
#endif  // SANITIZER_MAC || SANITIZER_ANDROID
#endif  // SANITIZER_GO
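
// Note (added for clarity, not part of the original header): cur_thread()
// deliberately returns the `current` field of the TLS placeholder rather than
// the placeholder itself. set_cur_thread() can redirect `current` to a
// different ThreadState, which is what lets the fiber support declared below
// (FiberCreate/FiberSwitch) change the ThreadState that interceptors observe
// without touching the TLS slot. A minimal sketch:
//
//   ThreadState *fiber = FiberCreate(cur_thread(), pc, /*flags=*/0);
//   FiberSwitch(cur_thread(), pc, fiber, /*flags=*/0);
//   // From this point cur_thread() on this OS thread returns `fiber`.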

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
  u32 creation_stack_id;
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped with an older epoch,
  // the event is from a dead thread that shared its tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
#if !SANITIZER_GO
  bool after_multithreaded_fork;
#endif

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  ClockAlloc clock_alloc;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

extern Context *ctx;  // The one and the only global runtime context.

ALWAYS_INLINE Flags *flags() {
  return &ctx->flags;
}

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};

const char *GetObjectTypeFromTag(uptr tag);
const char *GetReportHeaderFromTag(uptr tag);
uptr TagFromShadowStackFrame(uptr pc);

class ScopedReportBase {
 public:
  void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
                       const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 protected:
  ScopedReportBase(ReportType typ, uptr tag);
  ~ScopedReportBase();

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReportBase(const ScopedReportBase &) = delete;
  void operator=(const ScopedReportBase &) = delete;
};

class ScopedReport : public ScopedReportBase {
 public:
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ~ScopedReport();

 private:
  ScopedErrorReportLock lock_;
};

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag = nullptr);

// The stack could look like:
//   <start> | <main> | <foo> | tag | <bar>
// This will extract the tag and keep:
//   <start> | <main> | <foo> | <bar>
template<typename StackTraceTy>
void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
  if (stack->size < 2) return;
  uptr possible_tag_pc = stack->trace[stack->size - 2];
  uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
  if (possible_tag == kExternalTagNone) return;
  stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
  stack->size -= 1;
  if (tag) *tag = possible_tag;
}

template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
                        uptr *tag = nullptr) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
  ExtractTagFromStack(stack, tag);
}
#define GET_STACK_TRACE_FATAL(thr, pc) \
  VarSizeStackTrace stack; \
  ObtainCurrentStack(thr, pc, &stack); \
  stack.ReverseOrder();

#if TSAN_COLLECT_STATS
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
#endif

void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] += n;
#endif
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] = n;
#endif
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind

void Initialize(ThreadState *thr);
void MaybeSpawnBackgroundThread();
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                                      uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                           uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                            uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
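
// Illustrative sketch (not part of the original header): this is roughly how
// instrumented accesses reach the helpers above. For example, a callback for
// a 4-byte plain read would boil down to:
//
//   void OnRead4(uptr pc, uptr addr) {   // hypothetical wrapper name
//     MemoryRead(cur_thread(), pc, addr, kSizeLog4);
//   }
//
// and an 8-byte atomic store maps to MemoryWriteAtomic(..., kSizeLog8).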

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
                 ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);
void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid);

Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);

// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
    int rec = 1);
int  MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
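
// Illustrative sketch (not part of the original header): how a lock
// interceptor would typically drive the mutex events above, assuming a
// pthread_mutex_lock-style interceptor:
//
//   MutexPreLock(thr, pc, (uptr)m);      // before the real lock call
//   int res = REAL(pthread_mutex_lock)(m);
//   if (res == 0)
//     MutexPostLock(thr, pc, (uptr)m);   // after the lock is acquired
//   ...
//   MutexUnlock(thr, pc, (uptr)m);       // before the real unlock call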

void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of the happens-before relation, it draws a HB edge from all threads
// (at the point where they happen to be executing right now) to the current
// thread. We use it to handle Go finalizers. Namely, the finalizer goroutine
// executes AcquireGlobal right before executing finalizers. This provides a
// coarse, but simple approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
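
// Illustrative sketch (not part of the original header): Release/Acquire are
// the building blocks for modelling user-level synchronization. For example,
// an interceptor for a semaphore-like primitive would roughly do:
//
//   // in a sem_post-style interceptor, before the real post:
//   Release(thr, pc, (uptr)sem);
//   // in a sem_wait-style interceptor, after the real wait returns:
//   Acquire(thr, pc, (uptr)sem);
//
// which draws a happens-before edge from the post to the wait on that address.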

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif
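
// Usage note (added for clarity, not part of the original header): within
// this header the user of HACKY_CALL is TraceAddEvent below, which invokes
// HACKY_CALL(__tsan_trace_switch) on the slow path when the trace position
// crosses a trace-part boundary.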

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                        EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#if !SANITIZER_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << kEventPCBits);
  *evp = ev;
}
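
// Illustrative sketch (not part of the original header): an Event packs the
// event type into the top bits and the PC (or other payload) into the low
// kEventPCBits bits, so a consumer replaying the trace can decode it as:
//
//   EventType typ = (EventType)(ev >> kEventPCBits);
//   uptr pc       = (uptr)(ev & ((1ull << kEventPCBits) - 1));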

#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif

ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);

// These need to match __tsan_switch_to_fiber_* flags defined in
// tsan_interface.h. See documentation there as well.
enum FiberSwitchFlags {
  FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
};
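
// Illustrative sketch (not part of the original header): a user-level
// scheduler that switches fibers very frequently can pass
// FiberSwitchFlagNoSync (matching __tsan_switch_to_fiber_no_sync in the
// public interface) to skip the synchronization a switch normally implies:
//
//   FiberSwitch(cur_thread(), pc, next_fiber, FiberSwitchFlagNoSync);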

}  // namespace __tsan

#endif  // TSAN_RTL_H