//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !SANITIZER_GO
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
    SANITIZER_CACHE_LINE_SIZE);
#endif
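// The global TSan context; placement-constructed into ctx_placeholder in
// Initialize().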
static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
#include <dlfcn.h>
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
#if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
#endif
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
#if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
#endif
}
#endif

static ThreadContextBase *CreateThreadContext(Tid tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Release the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace)) {
    ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
    uptr unused = hdr + sizeof(Trace) - hdr_end;
    if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
      Report("ThreadSanitizer: failed to mprotect [0x%zx-0x%zx)\n", hdr_end,
             hdr_end + unused);
      CHECK("unable to mprotect" && 0);
    }
  }
  return New<ThreadContext>(tid);
}

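// Maximum number of finished thread contexts cached by the thread registry
// before they are recycled for new threads.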
#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry(CreateThreadContext, kMaxTid, kThreadQuarantineSize,
                      kMaxTidReuse),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      racy_addresses(),
      fired_suppressions_mtx(MutexTypeFired),
      clock_alloc(LINKER_INITIALIZED, "clock allocator") {
  fired_suppressions.reserve(8);
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
                         unsigned reuse_count, uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
    : fast_state(tid, epoch)
      // Do not touch these, rely on zero initialization,
      // they may be accessed before the ctor.
      // , ignore_reads_and_writes()
      // , ignore_interceptors()
      ,
      clock(tid, reuse_count)
#if !SANITIZER_GO
      ,
      jmp_bufs()
#endif
      ,
      tid(tid),
      unique_id(unique_id),
      stk_addr(stk_addr),
      stk_size(stk_size),
      tls_addr(tls_addr),
      tls_size(tls_size)
#if !SANITIZER_GO
      ,
      last_sleep_clock(tid)
#endif
{
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kShadowStackSize;
#else
  // Set up the dynamic shadow stack.
  const int kInitStackSize = 8;
  shadow_stack = (uptr *)Alloc(kInitStackSize * sizeof(uptr));
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
#endif
}

#if !SANITIZER_GO
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

void InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return;
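  // "stdout" and "stderr" map to the raw file descriptors 1 and 2;
  // anything else is treated as a file name and suffixed with the pid.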
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.append("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return;
    }
  }
  MemoryProfiler(0);
  MaybeSpawnBackgroundThread();
}

static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
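      // Flush when RSS has grown past the midpoint between the last observed
      // RSS and the limit, i.e. when rss - last_rss > limit - rss.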
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

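// Releases the shadow memory for the application range [addr, addr+size)
// back to the OS.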
void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}

#if !SANITIZER_GO
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0) return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  ctx->metamap.ResetRange(thr->proc(), addr, size);
}
#endif

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                               "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at 0x%zx-0x%zx(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedSuperNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (0x%zx/0x%zx)\n",
           addr, size);
    Die();
  }
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void CheckUnwind() {
  // Interceptors are likely to check-fail as well; in any case there is
  // no point in processing them since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}

bool is_initialized;

void Initialize(ThreadState *thr) {
  // Thread-safe because this runs before any other threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Set up the correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  InitializeMemoryProfiler();
#endif

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
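  // The relaxed load is only a fast path; the exchange guarantees that the
  // background thread is started at most once.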
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1)
    DumpProcessMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  ctx->thread_registry.Lock();
  ctx->report_mtx.Lock();
  ScopedErrorReportLock::Lock();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  thr->ignore_interceptors++;
}

void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  ScopedErrorReportLock::Unlock();
  ctx->report_mtx.Unlock();
  ctx->thread_registry.Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc,
                    bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  ScopedErrorReportLock::Unlock();
  ctx->report_mtx.Unlock();
  ctx->thread_registry.Unlock();

  uptr nthread = 0;
  ctx->thread_registry.GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
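// In Go builds the shadow stack is heap-allocated and grown on demand:
// double its capacity, preserving the existing frames.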
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

StackID CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
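  // Temporarily push pc so that it becomes the top frame of the collected
  // stack; it is popped again after StackDepotPut.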
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

namespace v3 {

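// Called when TraceAcquire's fast-path check fails: either we are merely in
// the middle of the current part (pad with NopEvents and continue), or the
// part is genuinely full, in which case a new part is allocated and published,
// and the current stack and mutex set are replayed into it so that the part
// can be decoded on its own.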
NOINLINE
void TraceSwitchPart(ThreadState *thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("TraceSwitchPart part=%p pos=%p\n", part, pos);
  if (part) {
    // We can get here when we still have space in the current trace part.
    // The fast-path check in TraceAcquire has false positives in the middle of
    // the part. Check if we are indeed at the end of the current part or not,
    // and fill any gaps with NopEvent's.
    Event *end = &part->events[TracePart::kSize];
    DCHECK_GE(pos, &part->events[0]);
    DCHECK_LE(pos, end);
    if (pos + 1 < end) {
      if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
          TracePart::kAlignment)
        *pos++ = NopEvent;
      *pos++ = NopEvent;
      DCHECK_LE(pos + 2, end);
      atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
      // Ensure the trace is set up so that the next TraceAcquire
      // won't detect the end of the trace part.
      Event *ev;
      CHECK(TraceAcquire(thr, &ev));
      return;
    }
    // We are indeed at the end.
    for (; pos < end; pos++) *pos = NopEvent;
  }
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    CHECK(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
    return;
  }
#endif
  part = new (MmapOrDie(sizeof(TracePart), "TracePart")) TracePart();
  part->trace = trace;
  thr->trace_prev_pc = 0;
  {
    Lock lock(&trace->mtx);
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set in the beginning of the trace.
  TraceTime(thr);
  for (uptr *pos = &thr->shadow_stack[0]; pos < thr->shadow_stack_pos; pos++)
    CHECK(TryTraceFunc(thr, *pos));
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                   d.addr, d.stack_id);
  }
}

}  // namespace v3

void TraceSwitch(ThreadState *thr) {
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork)
    return;
#endif
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(Tid tid) { return (Trace *)GetThreadTraceHeader(tid); }

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

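// Total number of events in a thread trace:
// 2^(kTracePartSizeBits + history_size + 1).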
uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

}  // namespace __tsan

#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry, "ThreadRegistry", {}},
    {MutexTypeTrace, "Trace", {MutexLeaf}},
    {MutexTypeReport, "Report", {MutexTypeSyncVar}},
    {MutexTypeSyncVar, "SyncVar", {}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }
}  // namespace __sanitizer
#endif