1//===-- tsan_interceptors_posix.cpp ---------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11// FIXME: move as many interceptors as possible into
12// sanitizer_common/sanitizer_common_interceptors.inc
13//===----------------------------------------------------------------------===//
14
15#include "sanitizer_common/sanitizer_atomic.h"
16#include "sanitizer_common/sanitizer_errno.h"
17#include "sanitizer_common/sanitizer_glibc_version.h"
18#include "sanitizer_common/sanitizer_libc.h"
19#include "sanitizer_common/sanitizer_linux.h"
20#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
21#include "sanitizer_common/sanitizer_platform_limits_posix.h"
22#include "sanitizer_common/sanitizer_placement_new.h"
23#include "sanitizer_common/sanitizer_posix.h"
24#include "sanitizer_common/sanitizer_stacktrace.h"
25#include "sanitizer_common/sanitizer_tls_get_addr.h"
26#include "interception/interception.h"
27#include "tsan_interceptors.h"
28#include "tsan_interface.h"
29#include "tsan_platform.h"
30#include "tsan_suppressions.h"
31#include "tsan_rtl.h"
32#include "tsan_mman.h"
33#include "tsan_fd.h"
34
35#include <stdarg.h>
36
37using namespace __tsan;
38
39DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size)
40DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size)
41
42#if SANITIZER_FREEBSD || SANITIZER_APPLE
43#define stdout __stdoutp
44#define stderr __stderrp
45#endif
46
47#if SANITIZER_NETBSD
48#define dirfd(dirp) (*(int *)(dirp))
49#define fileno_unlocked(fp)              \
50  (((__sanitizer_FILE *)fp)->_file == -1 \
51       ? -1                              \
52       : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))
53
54#define stdout ((__sanitizer_FILE*)&__sF[1])
55#define stderr ((__sanitizer_FILE*)&__sF[2])
56
57#define nanosleep __nanosleep50
58#define vfork __vfork14
59#endif
60
61#ifdef __mips__
62const int kSigCount = 129;
63#else
64const int kSigCount = 65;
65#endif
66
67#ifdef __mips__
68struct ucontext_t {
69  u64 opaque[768 / sizeof(u64) + 1];
70};
71#else
72struct ucontext_t {
73  // The size is determined by looking at sizeof of real ucontext_t on linux.
74  u64 opaque[936 / sizeof(u64) + 1];
75};
76#endif
77
78#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
79    defined(__s390x__)
80#define PTHREAD_ABI_BASE  "GLIBC_2.3.2"
81#elif defined(__aarch64__) || SANITIZER_PPC64V2
82#define PTHREAD_ABI_BASE  "GLIBC_2.17"
83#elif SANITIZER_LOONGARCH64
84#define PTHREAD_ABI_BASE  "GLIBC_2.36"
85#elif SANITIZER_RISCV64
86#  define PTHREAD_ABI_BASE "GLIBC_2.27"
87#endif
88
89extern "C" int pthread_attr_init(void *attr);
90extern "C" int pthread_attr_destroy(void *attr);
91DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
92extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
93extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
94                              void (*child)(void));
95extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
96extern "C" int pthread_setspecific(unsigned key, const void *v);
97DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
98DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
99DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
100DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
101extern "C" int pthread_equal(void *t1, void *t2);
102extern "C" void *pthread_self();
103extern "C" void _exit(int status);
104#if !SANITIZER_NETBSD
105extern "C" int fileno_unlocked(void *stream);
106extern "C" int dirfd(void *dirp);
107#endif
108#if SANITIZER_NETBSD
109extern __sanitizer_FILE __sF[];
110#else
111extern __sanitizer_FILE *stdout, *stderr;
112#endif
113#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
114const int PTHREAD_MUTEX_RECURSIVE = 1;
115const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
116#else
117const int PTHREAD_MUTEX_RECURSIVE = 2;
118const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
119#endif
120#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
121const int EPOLL_CTL_ADD = 1;
122#endif
123const int SIGILL = 4;
124const int SIGTRAP = 5;
125const int SIGABRT = 6;
126const int SIGFPE = 8;
127const int SIGSEGV = 11;
128const int SIGPIPE = 13;
129const int SIGTERM = 15;
130#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
131const int SIGBUS = 10;
132const int SIGSYS = 12;
133#else
134const int SIGBUS = 7;
135const int SIGSYS = 31;
136#endif
137#if SANITIZER_HAS_SIGINFO
138const int SI_TIMER = -2;
139#endif
140void *const MAP_FAILED = (void*)-1;
141#if SANITIZER_NETBSD
142const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
143#elif !SANITIZER_APPLE
144const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
145#endif
146const int MAP_FIXED = 0x10;
147typedef long long_t;
148typedef __sanitizer::u16 mode_t;
149
150// From /usr/include/unistd.h
151# define F_ULOCK 0      /* Unlock a previously locked region.  */
152# define F_LOCK  1      /* Lock a region for exclusive use.  */
153# define F_TLOCK 2      /* Test and lock a region for exclusive use.  */
154# define F_TEST  3      /* Test a region for other processes locks.  */
155
156#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
157const int SA_SIGINFO = 0x40;
158const int SIG_SETMASK = 3;
159#elif defined(__mips__)
160const int SA_SIGINFO = 8;
161const int SIG_SETMASK = 3;
162#else
163const int SA_SIGINFO = 4;
164const int SIG_SETMASK = 2;
165#endif
166
167namespace __tsan {
168struct SignalDesc {
169  bool armed;
170  __sanitizer_siginfo siginfo;
171  ucontext_t ctx;
172};
173
174struct ThreadSignalContext {
175  int int_signal_send;
176  SignalDesc pending_signals[kSigCount];
177  // emptyset and oldset are too big for stack.
178  __sanitizer_sigset_t emptyset;
179  __sanitizer_sigset_t oldset;
180};
181
182void EnterBlockingFunc(ThreadState *thr) {
183  for (;;) {
184    // The order is important to not delay a signal infinitely if it's
185    // delivered right before we set in_blocking_func. Note: we can't call
186    // ProcessPendingSignals when in_blocking_func is set, or we can handle
187    // a signal synchronously when we are already handling a signal.
188    atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
189    if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
190      break;
191    atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
192    ProcessPendingSignals(thr);
193  }
194}
195
196// The sole reason tsan wraps atexit callbacks is to establish synchronization
197// between callback setup and callback execution.
198struct AtExitCtx {
199  void (*f)();
200  void *arg;
201  uptr pc;
202};
203
204// InterceptorContext holds all global data required for interceptors.
205// It's explicitly constructed in InitializeInterceptors with placement new
206// and is never destroyed. This allows usage of members with non-trivial
207// constructors and destructors.
208struct InterceptorContext {
209  // The object is 64-byte aligned, because we want hot data to be located
210  // in a single cache line if possible (it's accessed in every interceptor).
211  ALIGNED(64) LibIgnore libignore;
212  __sanitizer_sigaction sigactions[kSigCount];
213#if !SANITIZER_APPLE && !SANITIZER_NETBSD
214  unsigned finalize_key;
215#endif
216
217  Mutex atexit_mu;
218  Vector<struct AtExitCtx *> AtExitStack;
219
220  InterceptorContext() : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit), AtExitStack() {}
221};
222
223static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
224InterceptorContext *interceptor_ctx() {
225  return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
226}
227
228LibIgnore *libignore() {
229  return &interceptor_ctx()->libignore;
230}
231
232void InitializeLibIgnore() {
233  const SuppressionContext &supp = *Suppressions();
234  const uptr n = supp.SuppressionCount();
235  for (uptr i = 0; i < n; i++) {
236    const Suppression *s = supp.SuppressionAt(i);
237    if (0 == internal_strcmp(s->type, kSuppressionLib))
238      libignore()->AddIgnoredLibrary(s->templ);
239  }
240  if (flags()->ignore_noninstrumented_modules)
241    libignore()->IgnoreNoninstrumentedModules(true);
242  libignore()->OnLibraryLoaded(0);
243}
244
245// The following two hooks can be used by for cooperative scheduling when
246// locking.
247#ifdef TSAN_EXTERNAL_HOOKS
248void OnPotentiallyBlockingRegionBegin();
249void OnPotentiallyBlockingRegionEnd();
250#else
251SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
252SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
253#endif
254
255}  // namespace __tsan
256
257static ThreadSignalContext *SigCtx(ThreadState *thr) {
258  // This function may be called reentrantly if it is interrupted by a signal
259  // handler. Use CAS to handle the race.
260  uptr ctx = atomic_load(&thr->signal_ctx, memory_order_relaxed);
261  if (ctx == 0 && !thr->is_dead) {
262    uptr pctx =
263        (uptr)MmapOrDie(sizeof(ThreadSignalContext), "ThreadSignalContext");
264    MemoryResetRange(thr, (uptr)&SigCtx, pctx, sizeof(ThreadSignalContext));
265    if (atomic_compare_exchange_strong(&thr->signal_ctx, &ctx, pctx,
266                                       memory_order_relaxed)) {
267      ctx = pctx;
268    } else {
269      UnmapOrDie((ThreadSignalContext *)pctx, sizeof(ThreadSignalContext));
270    }
271  }
272  return (ThreadSignalContext *)ctx;
273}
274
275ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
276                                     uptr pc)
277    : thr_(thr) {
278  LazyInitialize(thr);
279  if (UNLIKELY(atomic_load(&thr->in_blocking_func, memory_order_relaxed))) {
280    // pthread_join is marked as blocking, but it's also known to call other
281    // intercepted functions (mmap, free). If we don't reset in_blocking_func
282    // we can get deadlocks and memory corruptions if we deliver a synchronous
283    // signal inside of an mmap/free interceptor.
284    // So reset it and restore it back in the destructor.
285    // See https://github.com/google/sanitizers/issues/1540
286    atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
287    in_blocking_func_ = true;
288  }
289  if (!thr_->is_inited) return;
290  if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
291  DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
292  ignoring_ =
293      !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
294                                libignore()->IsIgnored(pc, &in_ignored_lib_));
295  EnableIgnores();
296}
297
298ScopedInterceptor::~ScopedInterceptor() {
299  if (!thr_->is_inited) return;
300  DisableIgnores();
301  if (UNLIKELY(in_blocking_func_))
302    EnterBlockingFunc(thr_);
303  if (!thr_->ignore_interceptors) {
304    ProcessPendingSignals(thr_);
305    FuncExit(thr_);
306    CheckedMutex::CheckNoLocks();
307  }
308}
309
310NOINLINE
311void ScopedInterceptor::EnableIgnoresImpl() {
312  ThreadIgnoreBegin(thr_, 0);
313  if (flags()->ignore_noninstrumented_modules)
314    thr_->suppress_reports++;
315  if (in_ignored_lib_) {
316    DCHECK(!thr_->in_ignored_lib);
317    thr_->in_ignored_lib = true;
318  }
319}
320
321NOINLINE
322void ScopedInterceptor::DisableIgnoresImpl() {
323  ThreadIgnoreEnd(thr_);
324  if (flags()->ignore_noninstrumented_modules)
325    thr_->suppress_reports--;
326  if (in_ignored_lib_) {
327    DCHECK(thr_->in_ignored_lib);
328    thr_->in_ignored_lib = false;
329  }
330}
331
332#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
333#if SANITIZER_FREEBSD || SANITIZER_NETBSD
334#  define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
335#else
336#  define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
337#endif
338#if SANITIZER_FREEBSD
339#  define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
340    INTERCEPT_FUNCTION(_pthread_##func)
341#else
342#  define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func)
343#endif
344#if SANITIZER_NETBSD
345#  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
346    INTERCEPT_FUNCTION(__libc_##func)
347#  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
348    INTERCEPT_FUNCTION(__libc_thr_##func)
349#else
350#  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
351#  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
352#endif
353
354#define READ_STRING_OF_LEN(thr, pc, s, len, n)                 \
355  MemoryAccessRange((thr), (pc), (uptr)(s),                         \
356    common_flags()->strict_string_checks ? (len) + 1 : (n), false)
357
358#define READ_STRING(thr, pc, s, n)                             \
359    READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
360
361#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
362
363struct BlockingCall {
364  explicit BlockingCall(ThreadState *thr)
365      : thr(thr) {
366    EnterBlockingFunc(thr);
367    // When we are in a "blocking call", we process signals asynchronously
368    // (right when they arrive). In this context we do not expect to be
369    // executing any user/runtime code. The known interceptor sequence when
370    // this is not true is: pthread_join -> munmap(stack). It's fine
371    // to ignore munmap in this case -- we handle stack shadow separately.
372    thr->ignore_interceptors++;
373  }
374
375  ~BlockingCall() {
376    thr->ignore_interceptors--;
377    atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
378  }
379
380  ThreadState *thr;
381};
382
383TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
384  SCOPED_TSAN_INTERCEPTOR(sleep, sec);
385  unsigned res = BLOCK_REAL(sleep)(sec);
386  AfterSleep(thr, pc);
387  return res;
388}
389
390TSAN_INTERCEPTOR(int, usleep, long_t usec) {
391  SCOPED_TSAN_INTERCEPTOR(usleep, usec);
392  int res = BLOCK_REAL(usleep)(usec);
393  AfterSleep(thr, pc);
394  return res;
395}
396
397TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
398  SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
399  int res = BLOCK_REAL(nanosleep)(req, rem);
400  AfterSleep(thr, pc);
401  return res;
402}
403
404TSAN_INTERCEPTOR(int, pause, int fake) {
405  SCOPED_TSAN_INTERCEPTOR(pause, fake);
406  return BLOCK_REAL(pause)(fake);
407}
408
409// Note: we specifically call the function in such strange way
410// with "installed_at" because in reports it will appear between
411// callback frames and the frame that installed the callback.
412static void at_exit_callback_installed_at() {
413  AtExitCtx *ctx;
414  {
415    // Ensure thread-safety.
416    Lock l(&interceptor_ctx()->atexit_mu);
417
418    // Pop AtExitCtx from the top of the stack of callback functions
419    uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
420    ctx = interceptor_ctx()->AtExitStack[element];
421    interceptor_ctx()->AtExitStack.PopBack();
422  }
423
424  ThreadState *thr = cur_thread();
425  Acquire(thr, ctx->pc, (uptr)ctx);
426  FuncEntry(thr, ctx->pc);
427  ((void(*)())ctx->f)();
428  FuncExit(thr);
429  Free(ctx);
430}
431
432static void cxa_at_exit_callback_installed_at(void *arg) {
433  ThreadState *thr = cur_thread();
434  AtExitCtx *ctx = (AtExitCtx*)arg;
435  Acquire(thr, ctx->pc, (uptr)arg);
436  FuncEntry(thr, ctx->pc);
437  ((void(*)(void *arg))ctx->f)(ctx->arg);
438  FuncExit(thr);
439  Free(ctx);
440}
441
442static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
443      void *arg, void *dso);
444
445#if !SANITIZER_ANDROID
446TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
447  if (in_symbolizer())
448    return 0;
449  // We want to setup the atexit callback even if we are in ignored lib
450  // or after fork.
451  SCOPED_INTERCEPTOR_RAW(atexit, f);
452  return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0);
453}
454#endif
455
456TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
457  if (in_symbolizer())
458    return 0;
459  SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
460  return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso);
461}
462
463static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
464      void *arg, void *dso) {
465  auto *ctx = New<AtExitCtx>();
466  ctx->f = f;
467  ctx->arg = arg;
468  ctx->pc = pc;
469  Release(thr, pc, (uptr)ctx);
470  // Memory allocation in __cxa_atexit will race with free during exit,
471  // because we do not see synchronization around atexit callback list.
472  ThreadIgnoreBegin(thr, pc);
473  int res;
474  if (!dso) {
475    // NetBSD does not preserve the 2nd argument if dso is equal to 0
476    // Store ctx in a local stack-like structure
477
478    // Ensure thread-safety.
479    Lock l(&interceptor_ctx()->atexit_mu);
480    // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
481    // due to atexit_mu held on exit from the calloc interceptor.
482    ScopedIgnoreInterceptors ignore;
483
484    res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
485                             0, 0);
486    // Push AtExitCtx on the top of the stack of callback functions
487    if (!res) {
488      interceptor_ctx()->AtExitStack.PushBack(ctx);
489    }
490  } else {
491    res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
492  }
493  ThreadIgnoreEnd(thr);
494  return res;
495}
496
497#if !SANITIZER_APPLE && !SANITIZER_NETBSD
498static void on_exit_callback_installed_at(int status, void *arg) {
499  ThreadState *thr = cur_thread();
500  AtExitCtx *ctx = (AtExitCtx*)arg;
501  Acquire(thr, ctx->pc, (uptr)arg);
502  FuncEntry(thr, ctx->pc);
503  ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
504  FuncExit(thr);
505  Free(ctx);
506}
507
508TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
509  if (in_symbolizer())
510    return 0;
511  SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
512  auto *ctx = New<AtExitCtx>();
513  ctx->f = (void(*)())f;
514  ctx->arg = arg;
515  ctx->pc = GET_CALLER_PC();
516  Release(thr, pc, (uptr)ctx);
517  // Memory allocation in __cxa_atexit will race with free during exit,
518  // because we do not see synchronization around atexit callback list.
519  ThreadIgnoreBegin(thr, pc);
520  int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
521  ThreadIgnoreEnd(thr);
522  return res;
523}
524#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
525#else
526#define TSAN_MAYBE_INTERCEPT_ON_EXIT
527#endif
528
529// Cleanup old bufs.
530static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
531  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
532    JmpBuf *buf = &thr->jmp_bufs[i];
533    if (buf->sp <= sp) {
534      uptr sz = thr->jmp_bufs.Size();
535      internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
536      thr->jmp_bufs.PopBack();
537      i--;
538    }
539  }
540}
541
542static void SetJmp(ThreadState *thr, uptr sp) {
543  if (!thr->is_inited)  // called from libc guts during bootstrap
544    return;
545  // Cleanup old bufs.
546  JmpBufGarbageCollect(thr, sp);
547  // Remember the buf.
548  JmpBuf *buf = thr->jmp_bufs.PushBack();
549  buf->sp = sp;
550  buf->shadow_stack_pos = thr->shadow_stack_pos;
551  ThreadSignalContext *sctx = SigCtx(thr);
552  buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
553  buf->in_blocking_func = atomic_load(&thr->in_blocking_func, memory_order_relaxed);
554  buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
555      memory_order_relaxed);
556}
557
558static void LongJmp(ThreadState *thr, uptr *env) {
559  uptr sp = ExtractLongJmpSp(env);
560  // Find the saved buf with matching sp.
561  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
562    JmpBuf *buf = &thr->jmp_bufs[i];
563    if (buf->sp == sp) {
564      CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
565      // Unwind the stack.
566      while (thr->shadow_stack_pos > buf->shadow_stack_pos)
567        FuncExit(thr);
568      ThreadSignalContext *sctx = SigCtx(thr);
569      if (sctx)
570        sctx->int_signal_send = buf->int_signal_send;
571      atomic_store(&thr->in_blocking_func, buf->in_blocking_func,
572          memory_order_relaxed);
573      atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
574          memory_order_relaxed);
575      JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
576      return;
577    }
578  }
579  Printf("ThreadSanitizer: can't find longjmp buf\n");
580  CHECK(0);
581}
582
583// FIXME: put everything below into a common extern "C" block?
584extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
585
586#if SANITIZER_APPLE
587TSAN_INTERCEPTOR(int, setjmp, void *env);
588TSAN_INTERCEPTOR(int, _setjmp, void *env);
589TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
590#else  // SANITIZER_APPLE
591
592#if SANITIZER_NETBSD
593#define setjmp_symname __setjmp14
594#define sigsetjmp_symname __sigsetjmp14
595#else
596#define setjmp_symname setjmp
597#define sigsetjmp_symname sigsetjmp
598#endif
599
600DEFINE_REAL(int, setjmp_symname, void *env)
601DEFINE_REAL(int, _setjmp, void *env)
602DEFINE_REAL(int, sigsetjmp_symname, void *env)
603#if !SANITIZER_NETBSD
604DEFINE_REAL(int, __sigsetjmp, void *env)
605#endif
606
607// The real interceptor for setjmp is special, and implemented in pure asm. We
608// just need to initialize the REAL functions so that they can be used in asm.
609static void InitializeSetjmpInterceptors() {
610  // We can not use TSAN_INTERCEPT to get setjmp addr, because it does &setjmp and
611  // setjmp is not present in some versions of libc.
612  using __interception::InterceptFunction;
613  InterceptFunction(SANITIZER_STRINGIFY(setjmp_symname), (uptr*)&REAL(setjmp_symname), 0, 0);
614  InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
615  InterceptFunction(SANITIZER_STRINGIFY(sigsetjmp_symname), (uptr*)&REAL(sigsetjmp_symname), 0,
616                    0);
617#if !SANITIZER_NETBSD
618  InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
619#endif
620}
621#endif  // SANITIZER_APPLE
622
623#if SANITIZER_NETBSD
624#define longjmp_symname __longjmp14
625#define siglongjmp_symname __siglongjmp14
626#else
627#define longjmp_symname longjmp
628#define siglongjmp_symname siglongjmp
629#endif
630
631TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
632  // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
633  // bad things will happen. We will jump over ScopedInterceptor dtor and can
634  // leave thr->in_ignored_lib set.
635  {
636    SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
637  }
638  LongJmp(cur_thread(), env);
639  REAL(longjmp_symname)(env, val);
640}
641
642TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
643  {
644    SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
645  }
646  LongJmp(cur_thread(), env);
647  REAL(siglongjmp_symname)(env, val);
648}
649
650#if SANITIZER_NETBSD
651TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
652  {
653    SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
654  }
655  LongJmp(cur_thread(), env);
656  REAL(_longjmp)(env, val);
657}
658#endif
659
660#if !SANITIZER_APPLE
661TSAN_INTERCEPTOR(void*, malloc, uptr size) {
662  if (in_symbolizer())
663    return InternalAlloc(size);
664  void *p = 0;
665  {
666    SCOPED_INTERCEPTOR_RAW(malloc, size);
667    p = user_alloc(thr, pc, size);
668  }
669  invoke_malloc_hook(p, size);
670  return p;
671}
672
673// In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
674// __libc_memalign so that (1) we can detect races (2) free will not be called
675// on libc internally allocated blocks.
676TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
677  SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
678  return user_memalign(thr, pc, align, sz);
679}
680
681TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
682  if (in_symbolizer())
683    return InternalCalloc(size, n);
684  void *p = 0;
685  {
686    SCOPED_INTERCEPTOR_RAW(calloc, size, n);
687    p = user_calloc(thr, pc, size, n);
688  }
689  invoke_malloc_hook(p, n * size);
690  return p;
691}
692
693TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
694  if (in_symbolizer())
695    return InternalRealloc(p, size);
696  if (p)
697    invoke_free_hook(p);
698  {
699    SCOPED_INTERCEPTOR_RAW(realloc, p, size);
700    p = user_realloc(thr, pc, p, size);
701  }
702  invoke_malloc_hook(p, size);
703  return p;
704}
705
706TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) {
707  if (in_symbolizer())
708    return InternalReallocArray(p, size, n);
709  if (p)
710    invoke_free_hook(p);
711  {
712    SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n);
713    p = user_reallocarray(thr, pc, p, size, n);
714  }
715  invoke_malloc_hook(p, size);
716  return p;
717}
718
719TSAN_INTERCEPTOR(void, free, void *p) {
720  if (p == 0)
721    return;
722  if (in_symbolizer())
723    return InternalFree(p);
724  invoke_free_hook(p);
725  SCOPED_INTERCEPTOR_RAW(free, p);
726  user_free(thr, pc, p);
727}
728
729TSAN_INTERCEPTOR(void, cfree, void *p) {
730  if (p == 0)
731    return;
732  if (in_symbolizer())
733    return InternalFree(p);
734  invoke_free_hook(p);
735  SCOPED_INTERCEPTOR_RAW(cfree, p);
736  user_free(thr, pc, p);
737}
738
739TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
740  SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
741  return user_alloc_usable_size(p);
742}
743#endif
744
745TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
746  SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
747  uptr srclen = internal_strlen(src);
748  MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
749  MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
750  return REAL(strcpy)(dst, src);
751}
752
753TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
754  SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
755  uptr srclen = internal_strnlen(src, n);
756  MemoryAccessRange(thr, pc, (uptr)dst, n, true);
757  MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
758  return REAL(strncpy)(dst, src, n);
759}
760
761TSAN_INTERCEPTOR(char*, strdup, const char *str) {
762  SCOPED_TSAN_INTERCEPTOR(strdup, str);
763  // strdup will call malloc, so no instrumentation is required here.
764  return REAL(strdup)(str);
765}
766
767// Zero out addr if it points into shadow memory and was provided as a hint
768// only, i.e., MAP_FIXED is not set.
769static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
770  if (*addr) {
771    if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
772      if (flags & MAP_FIXED) {
773        errno = errno_EINVAL;
774        return false;
775      } else {
776        *addr = 0;
777      }
778    }
779  }
780  return true;
781}
782
783template <class Mmap>
784static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
785                              void *addr, SIZE_T sz, int prot, int flags,
786                              int fd, OFF64_T off) {
787  if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
788  void *res = real_mmap(addr, sz, prot, flags, fd, off);
789  if (res != MAP_FAILED) {
790    if (!IsAppMem((uptr)res) || !IsAppMem((uptr)res + sz - 1)) {
791      Report("ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
792             addr, (void*)sz, res);
793      Die();
794    }
795    if (fd > 0) FdAccess(thr, pc, fd);
796    MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
797  }
798  return res;
799}
800
801template <class Munmap>
802static int munmap_interceptor(ThreadState *thr, uptr pc, Munmap real_munmap,
803                                void *addr, SIZE_T sz) {
804  UnmapShadow(thr, (uptr)addr, sz);
805  int res = real_munmap(addr, sz);
806  return res;
807}
808
809#if SANITIZER_LINUX
810TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
811  SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
812  return user_memalign(thr, pc, align, sz);
813}
814#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
815#else
816#define TSAN_MAYBE_INTERCEPT_MEMALIGN
817#endif
818
819#if !SANITIZER_APPLE
820TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
821  if (in_symbolizer())
822    return InternalAlloc(sz, nullptr, align);
823  SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
824  return user_aligned_alloc(thr, pc, align, sz);
825}
826
827TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
828  if (in_symbolizer())
829    return InternalAlloc(sz, nullptr, GetPageSizeCached());
830  SCOPED_INTERCEPTOR_RAW(valloc, sz);
831  return user_valloc(thr, pc, sz);
832}
833#endif
834
835#if SANITIZER_LINUX
836TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
837  if (in_symbolizer()) {
838    uptr PageSize = GetPageSizeCached();
839    sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
840    return InternalAlloc(sz, nullptr, PageSize);
841  }
842  SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
843  return user_pvalloc(thr, pc, sz);
844}
845#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
846#else
847#define TSAN_MAYBE_INTERCEPT_PVALLOC
848#endif
849
850#if !SANITIZER_APPLE
851TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
852  if (in_symbolizer()) {
853    void *p = InternalAlloc(sz, nullptr, align);
854    if (!p)
855      return errno_ENOMEM;
856    *memptr = p;
857    return 0;
858  }
859  SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
860  return user_posix_memalign(thr, pc, memptr, align, sz);
861}
862#endif
863
864// Both __cxa_guard_acquire and pthread_once 0-initialize
865// the object initially. pthread_once does not have any
866// other ABI requirements. __cxa_guard_acquire assumes
867// that any non-0 value in the first byte means that
868// initialization is completed. Contents of the remaining
869// bytes are up to us.
870constexpr u32 kGuardInit = 0;
871constexpr u32 kGuardDone = 1;
872constexpr u32 kGuardRunning = 1 << 16;
873constexpr u32 kGuardWaiter = 1 << 17;
874
875static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
876                         bool blocking_hooks = true) {
877  if (blocking_hooks)
878    OnPotentiallyBlockingRegionBegin();
879  auto on_exit = at_scope_exit([blocking_hooks] {
880    if (blocking_hooks)
881      OnPotentiallyBlockingRegionEnd();
882  });
883
884  for (;;) {
885    u32 cmp = atomic_load(g, memory_order_acquire);
886    if (cmp == kGuardInit) {
887      if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
888                                         memory_order_relaxed))
889        return 1;
890    } else if (cmp == kGuardDone) {
891      if (!thr->in_ignored_lib)
892        Acquire(thr, pc, (uptr)g);
893      return 0;
894    } else {
895      if ((cmp & kGuardWaiter) ||
896          atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
897                                         memory_order_relaxed))
898        FutexWait(g, cmp | kGuardWaiter);
899    }
900  }
901}
902
903static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
904                          u32 v) {
905  if (!thr->in_ignored_lib)
906    Release(thr, pc, (uptr)g);
907  u32 old = atomic_exchange(g, v, memory_order_release);
908  if (old & kGuardWaiter)
909    FutexWake(g, 1 << 30);
910}
911
912// __cxa_guard_acquire and friends need to be intercepted in a special way -
913// regular interceptors will break statically-linked libstdc++. Linux
914// interceptors are especially defined as weak functions (so that they don't
915// cause link errors when user defines them as well). So they silently
916// auto-disable themselves when such symbol is already present in the binary. If
917// we link libstdc++ statically, it will bring own __cxa_guard_acquire which
918// will silently replace our interceptor.  That's why on Linux we simply export
919// these interceptors with INTERFACE_ATTRIBUTE.
920// On OS X, we don't support statically linking, so we just use a regular
921// interceptor.
922#if SANITIZER_APPLE
923#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
924#else
925#define STDCXX_INTERCEPTOR(rettype, name, ...) \
926  extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
927#endif
928
929// Used in thread-safe function static initialization.
930STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
931  SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
932  return guard_acquire(thr, pc, g);
933}
934
935STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
936  SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
937  guard_release(thr, pc, g, kGuardDone);
938}
939
940STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
941  SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
942  guard_release(thr, pc, g, kGuardInit);
943}
944
945namespace __tsan {
946void DestroyThreadState() {
947  ThreadState *thr = cur_thread();
948  Processor *proc = thr->proc();
949  ThreadFinish(thr);
950  ProcUnwire(proc, thr);
951  ProcDestroy(proc);
952  DTLS_Destroy();
953  cur_thread_finalize();
954}
955
956void PlatformCleanUpThreadState(ThreadState *thr) {
957  ThreadSignalContext *sctx = (ThreadSignalContext *)atomic_load(
958      &thr->signal_ctx, memory_order_relaxed);
959  if (sctx) {
960    atomic_store(&thr->signal_ctx, 0, memory_order_relaxed);
961    UnmapOrDie(sctx, sizeof(*sctx));
962  }
963}
964}  // namespace __tsan
965
966#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
967static void thread_finalize(void *v) {
968  uptr iter = (uptr)v;
969  if (iter > 1) {
970    if (pthread_setspecific(interceptor_ctx()->finalize_key,
971        (void*)(iter - 1))) {
972      Printf("ThreadSanitizer: failed to set thread key\n");
973      Die();
974    }
975    return;
976  }
977  DestroyThreadState();
978}
979#endif
980
981
982struct ThreadParam {
983  void* (*callback)(void *arg);
984  void *param;
985  Tid tid;
986  Semaphore created;
987  Semaphore started;
988};
989
990extern "C" void *__tsan_thread_start_func(void *arg) {
991  ThreadParam *p = (ThreadParam*)arg;
992  void* (*callback)(void *arg) = p->callback;
993  void *param = p->param;
994  {
995    ThreadState *thr = cur_thread_init();
996    // Thread-local state is not initialized yet.
997    ScopedIgnoreInterceptors ignore;
998#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
999    ThreadIgnoreBegin(thr, 0);
1000    if (pthread_setspecific(interceptor_ctx()->finalize_key,
1001                            (void *)GetPthreadDestructorIterations())) {
1002      Printf("ThreadSanitizer: failed to set thread key\n");
1003      Die();
1004    }
1005    ThreadIgnoreEnd(thr);
1006#endif
1007    p->created.Wait();
1008    Processor *proc = ProcCreate();
1009    ProcWire(proc, thr);
1010    ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
1011    p->started.Post();
1012  }
1013  void *res = callback(param);
1014  // Prevent the callback from being tail called,
1015  // it mixes up stack traces.
1016  volatile int foo = 42;
1017  foo++;
1018  return res;
1019}
1020
1021TSAN_INTERCEPTOR(int, pthread_create,
1022    void *th, void *attr, void *(*callback)(void*), void * param) {
1023  SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
1024
1025  MaybeSpawnBackgroundThread();
1026
1027  if (ctx->after_multithreaded_fork) {
1028    if (flags()->die_after_fork) {
1029      Report("ThreadSanitizer: starting new threads after multi-threaded "
1030          "fork is not supported. Dying (set die_after_fork=0 to override)\n");
1031      Die();
1032    } else {
1033      VPrintf(1,
1034              "ThreadSanitizer: starting new threads after multi-threaded "
1035              "fork is not supported (pid %lu). Continuing because of "
1036              "die_after_fork=0, but you are on your own\n",
1037              internal_getpid());
1038    }
1039  }
1040  __sanitizer_pthread_attr_t myattr;
1041  if (attr == 0) {
1042    pthread_attr_init(&myattr);
1043    attr = &myattr;
1044  }
1045  int detached = 0;
1046  REAL(pthread_attr_getdetachstate)(attr, &detached);
1047  AdjustStackSize(attr);
1048
1049  ThreadParam p;
1050  p.callback = callback;
1051  p.param = param;
1052  p.tid = kMainTid;
1053  int res = -1;
1054  {
1055    // Otherwise we see false positives in pthread stack manipulation.
1056    ScopedIgnoreInterceptors ignore;
1057    ThreadIgnoreBegin(thr, pc);
1058    res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
1059    ThreadIgnoreEnd(thr);
1060  }
1061  if (res == 0) {
1062    p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
1063    CHECK_NE(p.tid, kMainTid);
1064    // Synchronization on p.tid serves two purposes:
1065    // 1. ThreadCreate must finish before the new thread starts.
1066    //    Otherwise the new thread can call pthread_detach, but the pthread_t
1067    //    identifier is not yet registered in ThreadRegistry by ThreadCreate.
1068    // 2. ThreadStart must finish before this thread continues.
1069    //    Otherwise, this thread can call pthread_detach and reset thr->sync
1070    //    before the new thread got a chance to acquire from it in ThreadStart.
1071    p.created.Post();
1072    p.started.Wait();
1073  }
1074  if (attr == &myattr)
1075    pthread_attr_destroy(&myattr);
1076  return res;
1077}
1078
1079TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
1080  SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
1081  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1082  ThreadIgnoreBegin(thr, pc);
1083  int res = BLOCK_REAL(pthread_join)(th, ret);
1084  ThreadIgnoreEnd(thr);
1085  if (res == 0) {
1086    ThreadJoin(thr, pc, tid);
1087  }
1088  return res;
1089}
1090
1091DEFINE_REAL_PTHREAD_FUNCTIONS
1092
1093TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
1094  SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
1095  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1096  int res = REAL(pthread_detach)(th);
1097  if (res == 0) {
1098    ThreadDetach(thr, pc, tid);
1099  }
1100  return res;
1101}
1102
1103TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
1104  {
1105    SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
1106#if !SANITIZER_APPLE && !SANITIZER_ANDROID
1107    CHECK_EQ(thr, &cur_thread_placeholder);
1108#endif
1109  }
1110  REAL(pthread_exit)(retval);
1111}
1112
1113#if SANITIZER_LINUX
1114TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
1115  SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
1116  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1117  ThreadIgnoreBegin(thr, pc);
1118  int res = REAL(pthread_tryjoin_np)(th, ret);
1119  ThreadIgnoreEnd(thr);
1120  if (res == 0)
1121    ThreadJoin(thr, pc, tid);
1122  else
1123    ThreadNotJoined(thr, pc, tid, (uptr)th);
1124  return res;
1125}
1126
1127TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
1128                 const struct timespec *abstime) {
1129  SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
1130  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1131  ThreadIgnoreBegin(thr, pc);
1132  int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
1133  ThreadIgnoreEnd(thr);
1134  if (res == 0)
1135    ThreadJoin(thr, pc, tid);
1136  else
1137    ThreadNotJoined(thr, pc, tid, (uptr)th);
1138  return res;
1139}
1140#endif
1141
1142// Problem:
1143// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
1144// pthread_cond_t has different size in the different versions.
1145// If call new REAL functions for old pthread_cond_t, they will corrupt memory
1146// after pthread_cond_t (old cond is smaller).
1147// If we call old REAL functions for new pthread_cond_t, we will lose  some
1148// functionality (e.g. old functions do not support waiting against
1149// CLOCK_REALTIME).
1150// Proper handling would require to have 2 versions of interceptors as well.
1151// But this is messy, in particular requires linker scripts when sanitizer
1152// runtime is linked into a shared library.
1153// Instead we assume we don't have dynamic libraries built against old
1154// pthread (2.2.5 is dated by 2002). And provide legacy_pthread_cond flag
1155// that allows to work with old libraries (but this mode does not support
1156// some features, e.g. pthread_condattr_getpshared).
1157static void *init_cond(void *c, bool force = false) {
1158  // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
1159  // So we allocate additional memory on the side large enough to hold
1160  // any pthread_cond_t object. Always call new REAL functions, but pass
1161  // the aux object to them.
1162  // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
1163  // first word of pthread_cond_t to zero.
1164  // It's all relevant only for linux.
1165  if (!common_flags()->legacy_pthread_cond)
1166    return c;
1167  atomic_uintptr_t *p = (atomic_uintptr_t*)c;
1168  uptr cond = atomic_load(p, memory_order_acquire);
1169  if (!force && cond != 0)
1170    return (void*)cond;
1171  void *newcond = WRAP(malloc)(pthread_cond_t_sz);
1172  internal_memset(newcond, 0, pthread_cond_t_sz);
1173  if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
1174      memory_order_acq_rel))
1175    return newcond;
1176  WRAP(free)(newcond);
1177  return (void*)cond;
1178}
1179
1180namespace {
1181
1182template <class Fn>
1183struct CondMutexUnlockCtx {
1184  ScopedInterceptor *si;
1185  ThreadState *thr;
1186  uptr pc;
1187  void *m;
1188  void *c;
1189  const Fn &fn;
1190
1191  int Cancel() const { return fn(); }
1192  void Unlock() const;
1193};
1194
1195template <class Fn>
1196void CondMutexUnlockCtx<Fn>::Unlock() const {
1197  // pthread_cond_wait interceptor has enabled async signal delivery
1198  // (see BlockingCall below). Disable async signals since we are running
1199  // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
1200  // since the thread is cancelled, so we have to manually execute them
1201  // (the thread still can run some user code due to pthread_cleanup_push).
1202  CHECK_EQ(atomic_load(&thr->in_blocking_func, memory_order_relaxed), 1);
1203  atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
1204  MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1205  // Undo BlockingCall ctor effects.
1206  thr->ignore_interceptors--;
1207  si->~ScopedInterceptor();
1208}
1209}  // namespace
1210
1211INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
1212  void *cond = init_cond(c, true);
1213  SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
1214  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1215  return REAL(pthread_cond_init)(cond, a);
1216}
1217
1218template <class Fn>
1219int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
1220              void *c, void *m) {
1221  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1222  MutexUnlock(thr, pc, (uptr)m);
1223  int res = 0;
1224  // This ensures that we handle mutex lock even in case of pthread_cancel.
1225  // See test/tsan/cond_cancel.cpp.
1226  {
1227    // Enable signal delivery while the thread is blocked.
1228    BlockingCall bc(thr);
1229    CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
1230    res = call_pthread_cancel_with_cleanup(
1231        [](void *arg) -> int {
1232          return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
1233        },
1234        [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
1235        &arg);
1236  }
1237  if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
1238  MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1239  return res;
1240}
1241
1242INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1243  void *cond = init_cond(c);
1244  SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
1245  return cond_wait(
1246      thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
1247      m);
1248}
1249
1250INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1251  void *cond = init_cond(c);
1252  SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
1253  return cond_wait(
1254      thr, pc, &si,
1255      [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
1256      m);
1257}
1258
1259#if SANITIZER_LINUX
1260INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
1261            __sanitizer_clockid_t clock, void *abstime) {
1262  void *cond = init_cond(c);
1263  SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
1264  return cond_wait(
1265      thr, pc, &si,
1266      [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
1267      cond, m);
1268}
1269#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
1270#else
1271#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
1272#endif
1273
1274#if SANITIZER_APPLE
1275INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1276            void *reltime) {
1277  void *cond = init_cond(c);
1278  SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1279  return cond_wait(
1280      thr, pc, &si,
1281      [=]() {
1282        return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
1283      },
1284      cond, m);
1285}
1286#endif
1287
1288INTERCEPTOR(int, pthread_cond_signal, void *c) {
1289  void *cond = init_cond(c);
1290  SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1291  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1292  return REAL(pthread_cond_signal)(cond);
1293}
1294
1295INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1296  void *cond = init_cond(c);
1297  SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1298  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1299  return REAL(pthread_cond_broadcast)(cond);
1300}
1301
1302INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1303  void *cond = init_cond(c);
1304  SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1305  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1306  int res = REAL(pthread_cond_destroy)(cond);
1307  if (common_flags()->legacy_pthread_cond) {
1308    // Free our aux cond and zero the pointer to not leave dangling pointers.
1309    WRAP(free)(cond);
1310    atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
1311  }
1312  return res;
1313}
1314
1315TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1316  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1317  int res = REAL(pthread_mutex_init)(m, a);
1318  if (res == 0) {
1319    u32 flagz = 0;
1320    if (a) {
1321      int type = 0;
1322      if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1323        if (type == PTHREAD_MUTEX_RECURSIVE ||
1324            type == PTHREAD_MUTEX_RECURSIVE_NP)
1325          flagz |= MutexFlagWriteReentrant;
1326    }
1327    MutexCreate(thr, pc, (uptr)m, flagz);
1328  }
1329  return res;
1330}
1331
1332TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1333  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1334  int res = REAL(pthread_mutex_destroy)(m);
1335  if (res == 0 || res == errno_EBUSY) {
1336    MutexDestroy(thr, pc, (uptr)m);
1337  }
1338  return res;
1339}
1340
1341TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
1342  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
1343  MutexPreLock(thr, pc, (uptr)m);
1344  int res = REAL(pthread_mutex_lock)(m);
1345  if (res == errno_EOWNERDEAD)
1346    MutexRepair(thr, pc, (uptr)m);
1347  if (res == 0 || res == errno_EOWNERDEAD)
1348    MutexPostLock(thr, pc, (uptr)m);
1349  if (res == errno_EINVAL)
1350    MutexInvalidAccess(thr, pc, (uptr)m);
1351  return res;
1352}
1353
1354TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1355  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1356  int res = REAL(pthread_mutex_trylock)(m);
1357  if (res == errno_EOWNERDEAD)
1358    MutexRepair(thr, pc, (uptr)m);
1359  if (res == 0 || res == errno_EOWNERDEAD)
1360    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1361  return res;
1362}
1363
1364#if !SANITIZER_APPLE
1365TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1366  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1367  int res = REAL(pthread_mutex_timedlock)(m, abstime);
1368  if (res == 0) {
1369    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1370  }
1371  return res;
1372}
1373#endif
1374
1375TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
1376  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
1377  MutexUnlock(thr, pc, (uptr)m);
1378  int res = REAL(pthread_mutex_unlock)(m);
1379  if (res == errno_EINVAL)
1380    MutexInvalidAccess(thr, pc, (uptr)m);
1381  return res;
1382}
1383
1384#if SANITIZER_LINUX
1385TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
1386                 __sanitizer_clockid_t clock, void *abstime) {
1387  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_clocklock, m, clock, abstime);
1388  MutexPreLock(thr, pc, (uptr)m);
1389  int res = REAL(pthread_mutex_clocklock)(m, clock, abstime);
1390  if (res == errno_EOWNERDEAD)
1391    MutexRepair(thr, pc, (uptr)m);
1392  if (res == 0 || res == errno_EOWNERDEAD)
1393    MutexPostLock(thr, pc, (uptr)m);
1394  if (res == errno_EINVAL)
1395    MutexInvalidAccess(thr, pc, (uptr)m);
1396  return res;
1397}
1398#endif
1399
1400#if SANITIZER_GLIBC
1401#  if !__GLIBC_PREREQ(2, 34)
1402// glibc 2.34 applies a non-default version for the two functions. They are no
1403// longer expected to be intercepted by programs.
1404TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
1405  SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
1406  MutexPreLock(thr, pc, (uptr)m);
1407  int res = REAL(__pthread_mutex_lock)(m);
1408  if (res == errno_EOWNERDEAD)
1409    MutexRepair(thr, pc, (uptr)m);
1410  if (res == 0 || res == errno_EOWNERDEAD)
1411    MutexPostLock(thr, pc, (uptr)m);
1412  if (res == errno_EINVAL)
1413    MutexInvalidAccess(thr, pc, (uptr)m);
1414  return res;
1415}
1416
1417TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
1418  SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_unlock, m);
1419  MutexUnlock(thr, pc, (uptr)m);
1420  int res = REAL(__pthread_mutex_unlock)(m);
1421  if (res == errno_EINVAL)
1422    MutexInvalidAccess(thr, pc, (uptr)m);
1423  return res;
1424}
1425#  endif
1426#endif
1427
1428#if !SANITIZER_APPLE
1429TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1430  SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1431  int res = REAL(pthread_spin_init)(m, pshared);
1432  if (res == 0) {
1433    MutexCreate(thr, pc, (uptr)m);
1434  }
1435  return res;
1436}
1437
1438TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1439  SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1440  int res = REAL(pthread_spin_destroy)(m);
1441  if (res == 0) {
1442    MutexDestroy(thr, pc, (uptr)m);
1443  }
1444  return res;
1445}
1446
1447TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1448  SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1449  MutexPreLock(thr, pc, (uptr)m);
1450  int res = REAL(pthread_spin_lock)(m);
1451  if (res == 0) {
1452    MutexPostLock(thr, pc, (uptr)m);
1453  }
1454  return res;
1455}
1456
1457TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1458  SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1459  int res = REAL(pthread_spin_trylock)(m);
1460  if (res == 0) {
1461    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1462  }
1463  return res;
1464}
1465
1466TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1467  SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1468  MutexUnlock(thr, pc, (uptr)m);
1469  int res = REAL(pthread_spin_unlock)(m);
1470  return res;
1471}
1472#endif
1473
1474TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1475  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1476  int res = REAL(pthread_rwlock_init)(m, a);
1477  if (res == 0) {
1478    MutexCreate(thr, pc, (uptr)m);
1479  }
1480  return res;
1481}
1482
1483TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1484  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1485  int res = REAL(pthread_rwlock_destroy)(m);
1486  if (res == 0) {
1487    MutexDestroy(thr, pc, (uptr)m);
1488  }
1489  return res;
1490}
1491
1492TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1493  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1494  MutexPreReadLock(thr, pc, (uptr)m);
1495  int res = REAL(pthread_rwlock_rdlock)(m);
1496  if (res == 0) {
1497    MutexPostReadLock(thr, pc, (uptr)m);
1498  }
1499  return res;
1500}
1501
1502TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1503  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1504  int res = REAL(pthread_rwlock_tryrdlock)(m);
1505  if (res == 0) {
1506    MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
1507  }
1508  return res;
1509}
1510
1511#if !SANITIZER_APPLE
1512TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1513  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1514  int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1515  if (res == 0) {
1516    MutexPostReadLock(thr, pc, (uptr)m);
1517  }
1518  return res;
1519}
1520#endif
1521
1522TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1523  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1524  MutexPreLock(thr, pc, (uptr)m);
1525  int res = REAL(pthread_rwlock_wrlock)(m);
1526  if (res == 0) {
1527    MutexPostLock(thr, pc, (uptr)m);
1528  }
1529  return res;
1530}
1531
1532TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1533  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1534  int res = REAL(pthread_rwlock_trywrlock)(m);
1535  if (res == 0) {
1536    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1537  }
1538  return res;
1539}
1540
1541#if !SANITIZER_APPLE
1542TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1543  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1544  int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1545  if (res == 0) {
1546    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1547  }
1548  return res;
1549}
1550#endif
1551
1552TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1553  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1554  MutexReadOrWriteUnlock(thr, pc, (uptr)m);
1555  int res = REAL(pthread_rwlock_unlock)(m);
1556  return res;
1557}
1558
1559#if !SANITIZER_APPLE
1560TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1561  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1562  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1563  int res = REAL(pthread_barrier_init)(b, a, count);
1564  return res;
1565}
1566
1567TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1568  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1569  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1570  int res = REAL(pthread_barrier_destroy)(b);
1571  return res;
1572}
1573
1574TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1575  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
1576  Release(thr, pc, (uptr)b);
1577  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1578  int res = REAL(pthread_barrier_wait)(b);
1579  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1580  if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1581    Acquire(thr, pc, (uptr)b);
1582  }
1583  return res;
1584}
1585#endif
1586
1587TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1588  SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1589  if (o == 0 || f == 0)
1590    return errno_EINVAL;
1591  atomic_uint32_t *a;
1592
1593  if (SANITIZER_APPLE)
1594    a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
1595  else if (SANITIZER_NETBSD)
1596    a = static_cast<atomic_uint32_t*>
1597          ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
1598  else
1599    a = static_cast<atomic_uint32_t*>(o);
1600
1601  // Mac OS X appears to use pthread_once() where calling BlockingRegion hooks
1602  // result in crashes due to too little stack space.
1603  if (guard_acquire(thr, pc, a, !SANITIZER_APPLE)) {
1604    (*f)();
1605    guard_release(thr, pc, a, kGuardDone);
1606  }
1607  return 0;
1608}
1609
1610#if SANITIZER_GLIBC
1611TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1612  SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1613  if (fd > 0)
1614    FdAccess(thr, pc, fd);
1615  return REAL(__fxstat)(version, fd, buf);
1616}
1617
1618TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1619  SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1620  if (fd > 0)
1621    FdAccess(thr, pc, fd);
1622  return REAL(__fxstat64)(version, fd, buf);
1623}
1624#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat); TSAN_INTERCEPT(__fxstat64)
1625#else
1626#define TSAN_MAYBE_INTERCEPT___FXSTAT
1627#endif
1628
1629#if !SANITIZER_GLIBC || __GLIBC_PREREQ(2, 33)
1630TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
1631  SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1632  if (fd > 0)
1633    FdAccess(thr, pc, fd);
1634  return REAL(fstat)(fd, buf);
1635}
1636#  define TSAN_MAYBE_INTERCEPT_FSTAT TSAN_INTERCEPT(fstat)
1637#else
1638#  define TSAN_MAYBE_INTERCEPT_FSTAT
1639#endif
1640
1641#if __GLIBC_PREREQ(2, 33)
1642TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1643  SCOPED_TSAN_INTERCEPTOR(fstat64, fd, buf);
1644  if (fd > 0)
1645    FdAccess(thr, pc, fd);
1646  return REAL(fstat64)(fd, buf);
1647}
1648#  define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1649#else
1650#  define TSAN_MAYBE_INTERCEPT_FSTAT64
1651#endif
1652
1653TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
1654  va_list ap;
1655  va_start(ap, oflag);
1656  mode_t mode = va_arg(ap, int);
1657  va_end(ap);
1658  SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
1659  READ_STRING(thr, pc, name, 0);
1660  int fd = REAL(open)(name, oflag, mode);
1661  if (fd >= 0)
1662    FdFileCreate(thr, pc, fd);
1663  return fd;
1664}
1665
1666#if SANITIZER_LINUX
1667TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
1668  va_list ap;
1669  va_start(ap, oflag);
1670  mode_t mode = va_arg(ap, int);
1671  va_end(ap);
1672  SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
1673  READ_STRING(thr, pc, name, 0);
1674  int fd = REAL(open64)(name, oflag, mode);
1675  if (fd >= 0)
1676    FdFileCreate(thr, pc, fd);
1677  return fd;
1678}
1679#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1680#else
1681#define TSAN_MAYBE_INTERCEPT_OPEN64
1682#endif
1683
1684TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1685  SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1686  READ_STRING(thr, pc, name, 0);
1687  int fd = REAL(creat)(name, mode);
1688  if (fd >= 0)
1689    FdFileCreate(thr, pc, fd);
1690  return fd;
1691}
1692
1693#if SANITIZER_LINUX
1694TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1695  SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1696  READ_STRING(thr, pc, name, 0);
1697  int fd = REAL(creat64)(name, mode);
1698  if (fd >= 0)
1699    FdFileCreate(thr, pc, fd);
1700  return fd;
1701}
1702#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1703#else
1704#define TSAN_MAYBE_INTERCEPT_CREAT64
1705#endif
1706
1707TSAN_INTERCEPTOR(int, dup, int oldfd) {
1708  SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1709  int newfd = REAL(dup)(oldfd);
1710  if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1711    FdDup(thr, pc, oldfd, newfd, true);
1712  return newfd;
1713}
1714
1715TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1716  SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1717  int newfd2 = REAL(dup2)(oldfd, newfd);
1718  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1719    FdDup(thr, pc, oldfd, newfd2, false);
1720  return newfd2;
1721}
1722
1723#if !SANITIZER_APPLE
1724TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1725  SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1726  int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1727  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1728    FdDup(thr, pc, oldfd, newfd2, false);
1729  return newfd2;
1730}
1731#endif
1732
1733#if SANITIZER_LINUX
1734TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1735  SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1736  int fd = REAL(eventfd)(initval, flags);
1737  if (fd >= 0)
1738    FdEventCreate(thr, pc, fd);
1739  return fd;
1740}
1741#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1742#else
1743#define TSAN_MAYBE_INTERCEPT_EVENTFD
1744#endif
1745
1746#if SANITIZER_LINUX
1747TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1748  SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
1749  FdClose(thr, pc, fd);
1750  fd = REAL(signalfd)(fd, mask, flags);
1751  if (!MustIgnoreInterceptor(thr))
1752    FdSignalCreate(thr, pc, fd);
1753  return fd;
1754}
1755#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1756#else
1757#define TSAN_MAYBE_INTERCEPT_SIGNALFD
1758#endif
1759
1760#if SANITIZER_LINUX
1761TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1762  SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1763  int fd = REAL(inotify_init)(fake);
1764  if (fd >= 0)
1765    FdInotifyCreate(thr, pc, fd);
1766  return fd;
1767}
1768#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1769#else
1770#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1771#endif
1772
1773#if SANITIZER_LINUX
1774TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1775  SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1776  int fd = REAL(inotify_init1)(flags);
1777  if (fd >= 0)
1778    FdInotifyCreate(thr, pc, fd);
1779  return fd;
1780}
1781#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1782#else
1783#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1784#endif
1785
1786TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1787  SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1788  int fd = REAL(socket)(domain, type, protocol);
1789  if (fd >= 0)
1790    FdSocketCreate(thr, pc, fd);
1791  return fd;
1792}
1793
1794TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1795  SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1796  int res = REAL(socketpair)(domain, type, protocol, fd);
1797  if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1798    FdPipeCreate(thr, pc, fd[0], fd[1]);
1799  return res;
1800}
1801
1802TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1803  SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1804  FdSocketConnecting(thr, pc, fd);
1805  int res = REAL(connect)(fd, addr, addrlen);
1806  if (res == 0 && fd >= 0)
1807    FdSocketConnect(thr, pc, fd);
1808  return res;
1809}
1810
1811TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1812  SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1813  int res = REAL(bind)(fd, addr, addrlen);
1814  if (fd > 0 && res == 0)
1815    FdAccess(thr, pc, fd);
1816  return res;
1817}
1818
1819TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1820  SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1821  int res = REAL(listen)(fd, backlog);
1822  if (fd > 0 && res == 0)
1823    FdAccess(thr, pc, fd);
1824  return res;
1825}
1826
1827TSAN_INTERCEPTOR(int, close, int fd) {
1828  SCOPED_INTERCEPTOR_RAW(close, fd);
1829  if (!in_symbolizer())
1830    FdClose(thr, pc, fd);
1831  return REAL(close)(fd);
1832}
1833
1834#if SANITIZER_LINUX
1835TSAN_INTERCEPTOR(int, __close, int fd) {
1836  SCOPED_INTERCEPTOR_RAW(__close, fd);
1837  FdClose(thr, pc, fd);
1838  return REAL(__close)(fd);
1839}
1840#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1841#else
1842#define TSAN_MAYBE_INTERCEPT___CLOSE
1843#endif
1844
1845// glibc guts
1846#if SANITIZER_LINUX && !SANITIZER_ANDROID
1847TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1848  SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr);
1849  int fds[64];
1850  int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1851  for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]);
1852  REAL(__res_iclose)(state, free_addr);
1853}
1854#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1855#else
1856#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1857#endif
1858
1859TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1860  SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1861  int res = REAL(pipe)(pipefd);
1862  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1863    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1864  return res;
1865}
1866
1867#if !SANITIZER_APPLE
1868TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1869  SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1870  int res = REAL(pipe2)(pipefd, flags);
1871  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1872    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1873  return res;
1874}
1875#endif
1876
1877TSAN_INTERCEPTOR(int, unlink, char *path) {
1878  SCOPED_TSAN_INTERCEPTOR(unlink, path);
1879  Release(thr, pc, File2addr(path));
1880  int res = REAL(unlink)(path);
1881  return res;
1882}
1883
1884TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1885  SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1886  void *res = REAL(tmpfile)(fake);
1887  if (res) {
1888    int fd = fileno_unlocked(res);
1889    if (fd >= 0)
1890      FdFileCreate(thr, pc, fd);
1891  }
1892  return res;
1893}
1894
1895#if SANITIZER_LINUX
1896TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1897  SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1898  void *res = REAL(tmpfile64)(fake);
1899  if (res) {
1900    int fd = fileno_unlocked(res);
1901    if (fd >= 0)
1902      FdFileCreate(thr, pc, fd);
1903  }
1904  return res;
1905}
1906#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1907#else
1908#define TSAN_MAYBE_INTERCEPT_TMPFILE64
1909#endif
1910
1911static void FlushStreams() {
1912  // Flushing all the streams here may freeze the process if a child thread is
1913  // performing file stream operations at the same time.
1914  REAL(fflush)(stdout);
1915  REAL(fflush)(stderr);
1916}
1917
1918TSAN_INTERCEPTOR(void, abort, int fake) {
1919  SCOPED_TSAN_INTERCEPTOR(abort, fake);
1920  FlushStreams();
1921  REAL(abort)(fake);
1922}
1923
1924TSAN_INTERCEPTOR(int, rmdir, char *path) {
1925  SCOPED_TSAN_INTERCEPTOR(rmdir, path);
1926  Release(thr, pc, Dir2addr(path));
1927  int res = REAL(rmdir)(path);
1928  return res;
1929}
1930
1931TSAN_INTERCEPTOR(int, closedir, void *dirp) {
1932  SCOPED_INTERCEPTOR_RAW(closedir, dirp);
1933  if (dirp) {
1934    int fd = dirfd(dirp);
1935    FdClose(thr, pc, fd);
1936  }
1937  return REAL(closedir)(dirp);
1938}
1939
1940#if SANITIZER_LINUX
1941TSAN_INTERCEPTOR(int, epoll_create, int size) {
1942  SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
1943  int fd = REAL(epoll_create)(size);
1944  if (fd >= 0)
1945    FdPollCreate(thr, pc, fd);
1946  return fd;
1947}
1948
1949TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
1950  SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
1951  int fd = REAL(epoll_create1)(flags);
1952  if (fd >= 0)
1953    FdPollCreate(thr, pc, fd);
1954  return fd;
1955}
1956
1957TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
1958  SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
1959  if (epfd >= 0)
1960    FdAccess(thr, pc, epfd);
1961  if (epfd >= 0 && fd >= 0)
1962    FdAccess(thr, pc, fd);
1963  if (op == EPOLL_CTL_ADD && epfd >= 0) {
1964    FdPollAdd(thr, pc, epfd, fd);
1965    FdRelease(thr, pc, epfd);
1966  }
1967  int res = REAL(epoll_ctl)(epfd, op, fd, ev);
1968  return res;
1969}
1970
1971TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
1972  SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
1973  if (epfd >= 0)
1974    FdAccess(thr, pc, epfd);
1975  int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
1976  if (res > 0 && epfd >= 0)
1977    FdAcquire(thr, pc, epfd);
1978  return res;
1979}
1980
1981TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
1982                 void *sigmask) {
1983  SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
1984  if (epfd >= 0)
1985    FdAccess(thr, pc, epfd);
1986  int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
1987  if (res > 0 && epfd >= 0)
1988    FdAcquire(thr, pc, epfd);
1989  return res;
1990}
1991
1992TSAN_INTERCEPTOR(int, epoll_pwait2, int epfd, void *ev, int cnt, void *timeout,
1993                 void *sigmask) {
1994  SCOPED_INTERCEPTOR_RAW(epoll_pwait2, epfd, ev, cnt, timeout, sigmask);
1995  // This function is new and may not be present in libc and/or kernel.
1996  // Since we effectively add it to libc (as will be probed by the program
1997  // using dlsym or a weak function pointer) we need to handle the case
1998  // when it's not present in the actual libc.
1999  if (!REAL(epoll_pwait2)) {
2000    errno = errno_ENOSYS;
2001    return -1;
2002  }
2003  if (MustIgnoreInterceptor(thr))
2004    REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2005  if (epfd >= 0)
2006    FdAccess(thr, pc, epfd);
2007  int res = BLOCK_REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2008  if (res > 0 && epfd >= 0)
2009    FdAcquire(thr, pc, epfd);
2010  return res;
2011}
2012
2013#  define TSAN_MAYBE_INTERCEPT_EPOLL \
2014    TSAN_INTERCEPT(epoll_create);    \
2015    TSAN_INTERCEPT(epoll_create1);   \
2016    TSAN_INTERCEPT(epoll_ctl);       \
2017    TSAN_INTERCEPT(epoll_wait);      \
2018    TSAN_INTERCEPT(epoll_pwait);     \
2019    TSAN_INTERCEPT(epoll_pwait2)
2020#else
2021#define TSAN_MAYBE_INTERCEPT_EPOLL
2022#endif
2023
2024// The following functions are intercepted merely to process pending signals.
2025// If program blocks signal X, we must deliver the signal before the function
2026// returns. Similarly, if program unblocks a signal (or returns from sigsuspend)
2027// it's better to deliver the signal straight away.
2028TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
2029  SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
2030  return REAL(sigsuspend)(mask);
2031}
2032
2033TSAN_INTERCEPTOR(int, sigblock, int mask) {
2034  SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
2035  return REAL(sigblock)(mask);
2036}
2037
2038TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
2039  SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
2040  return REAL(sigsetmask)(mask);
2041}
2042
2043TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
2044    __sanitizer_sigset_t *oldset) {
2045  SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
2046  return REAL(pthread_sigmask)(how, set, oldset);
2047}
2048
2049namespace __tsan {
2050
2051static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
2052  VarSizeStackTrace stack;
2053  // StackTrace::GetNestInstructionPc(pc) is used because return address is
2054  // expected, OutputReport() will undo this.
2055  ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
2056  ThreadRegistryLock l(&ctx->thread_registry);
2057  ScopedReport rep(ReportTypeErrnoInSignal);
2058  rep.SetSigNum(sig);
2059  if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
2060    rep.AddStack(stack, true);
2061    OutputReport(thr, rep);
2062  }
2063}
2064
2065static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
2066                                  int sig, __sanitizer_siginfo *info,
2067                                  void *uctx) {
2068  CHECK(thr->slot);
2069  __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2070  if (acquire)
2071    Acquire(thr, 0, (uptr)&sigactions[sig]);
2072  // Signals are generally asynchronous, so if we receive a signals when
2073  // ignores are enabled we should disable ignores. This is critical for sync
2074  // and interceptors, because otherwise we can miss synchronization and report
2075  // false races.
2076  int ignore_reads_and_writes = thr->ignore_reads_and_writes;
2077  int ignore_interceptors = thr->ignore_interceptors;
2078  int ignore_sync = thr->ignore_sync;
2079  // For symbolizer we only process SIGSEGVs synchronously
2080  // (bug in symbolizer or in tsan). But we want to reset
2081  // in_symbolizer to fail gracefully. Symbolizer and user code
2082  // use different memory allocators, so if we don't reset
2083  // in_symbolizer we can get memory allocated with one being
2084  // feed with another, which can cause more crashes.
2085  int in_symbolizer = thr->in_symbolizer;
2086  if (!ctx->after_multithreaded_fork) {
2087    thr->ignore_reads_and_writes = 0;
2088    thr->fast_state.ClearIgnoreBit();
2089    thr->ignore_interceptors = 0;
2090    thr->ignore_sync = 0;
2091    thr->in_symbolizer = 0;
2092  }
2093  // Ensure that the handler does not spoil errno.
2094  const int saved_errno = errno;
2095  errno = 99;
2096  // This code races with sigaction. Be careful to not read sa_sigaction twice.
2097  // Also need to remember pc for reporting before the call,
2098  // because the handler can reset it.
2099  volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
2100                         ? (uptr)sigactions[sig].sigaction
2101                         : (uptr)sigactions[sig].handler;
2102  if (pc != sig_dfl && pc != sig_ign) {
2103    // The callback can be either sa_handler or sa_sigaction.
2104    // They have different signatures, but we assume that passing
2105    // additional arguments to sa_handler works and is harmless.
2106    ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
2107  }
2108  if (!ctx->after_multithreaded_fork) {
2109    thr->ignore_reads_and_writes = ignore_reads_and_writes;
2110    if (ignore_reads_and_writes)
2111      thr->fast_state.SetIgnoreBit();
2112    thr->ignore_interceptors = ignore_interceptors;
2113    thr->ignore_sync = ignore_sync;
2114    thr->in_symbolizer = in_symbolizer;
2115  }
2116  // We do not detect errno spoiling for SIGTERM,
2117  // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
2118  // tsan reports false positive in such case.
2119  // It's difficult to properly detect this situation (reraise),
2120  // because in async signal processing case (when handler is called directly
2121  // from rtl_generic_sighandler) we have not yet received the reraised
2122  // signal; and it looks too fragile to intercept all ways to reraise a signal.
2123  if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
2124      errno != 99)
2125    ReportErrnoSpoiling(thr, pc, sig);
2126  errno = saved_errno;
2127}
2128
2129void ProcessPendingSignalsImpl(ThreadState *thr) {
2130  atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
2131  ThreadSignalContext *sctx = SigCtx(thr);
2132  if (sctx == 0)
2133    return;
2134  atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2135  internal_sigfillset(&sctx->emptyset);
2136  int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
2137  CHECK_EQ(res, 0);
2138  for (int sig = 0; sig < kSigCount; sig++) {
2139    SignalDesc *signal = &sctx->pending_signals[sig];
2140    if (signal->armed) {
2141      signal->armed = false;
2142      CallUserSignalHandler(thr, false, true, sig, &signal->siginfo,
2143                            &signal->ctx);
2144    }
2145  }
2146  res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
2147  CHECK_EQ(res, 0);
2148  atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2149}
2150
2151}  // namespace __tsan
2152
2153static bool is_sync_signal(ThreadSignalContext *sctx, int sig,
2154                           __sanitizer_siginfo *info) {
2155  // If we are sending signal to ourselves, we must process it now.
2156  if (sctx && sig == sctx->int_signal_send)
2157    return true;
2158#if SANITIZER_HAS_SIGINFO
2159  // POSIX timers can be configured to send any kind of signal; however, it
2160  // doesn't make any sense to consider a timer signal as synchronous!
2161  if (info->si_code == SI_TIMER)
2162    return false;
2163#endif
2164  return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
2165         sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS;
2166}
2167
2168void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
2169  ThreadState *thr = cur_thread_init();
2170  ThreadSignalContext *sctx = SigCtx(thr);
2171  if (sig < 0 || sig >= kSigCount) {
2172    VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
2173    return;
2174  }
2175  // Don't mess with synchronous signals.
2176  const bool sync = is_sync_signal(sctx, sig, info);
2177  if (sync ||
2178      // If we are in blocking function, we can safely process it now
2179      // (but check if we are in a recursive interceptor,
2180      // i.e. pthread_join()->munmap()).
2181      atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2182    atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2183    if (atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2184      atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
2185      CallUserSignalHandler(thr, sync, true, sig, info, ctx);
2186      atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
2187    } else {
2188      // Be very conservative with when we do acquire in this case.
2189      // It's unsafe to do acquire in async handlers, because ThreadState
2190      // can be in inconsistent state.
2191      // SIGSYS looks relatively safe -- it's synchronous and can actually
2192      // need some global state.
2193      bool acq = (sig == SIGSYS);
2194      CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
2195    }
2196    atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2197    return;
2198  }
2199
2200  if (sctx == 0)
2201    return;
2202  SignalDesc *signal = &sctx->pending_signals[sig];
2203  if (signal->armed == false) {
2204    signal->armed = true;
2205    internal_memcpy(&signal->siginfo, info, sizeof(*info));
2206    internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
2207    atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
2208  }
2209}
2210
2211TSAN_INTERCEPTOR(int, raise, int sig) {
2212  SCOPED_TSAN_INTERCEPTOR(raise, sig);
2213  ThreadSignalContext *sctx = SigCtx(thr);
2214  CHECK_NE(sctx, 0);
2215  int prev = sctx->int_signal_send;
2216  sctx->int_signal_send = sig;
2217  int res = REAL(raise)(sig);
2218  CHECK_EQ(sctx->int_signal_send, sig);
2219  sctx->int_signal_send = prev;
2220  return res;
2221}
2222
2223TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
2224  SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
2225  ThreadSignalContext *sctx = SigCtx(thr);
2226  CHECK_NE(sctx, 0);
2227  int prev = sctx->int_signal_send;
2228  if (pid == (int)internal_getpid()) {
2229    sctx->int_signal_send = sig;
2230  }
2231  int res = REAL(kill)(pid, sig);
2232  if (pid == (int)internal_getpid()) {
2233    CHECK_EQ(sctx->int_signal_send, sig);
2234    sctx->int_signal_send = prev;
2235  }
2236  return res;
2237}
2238
2239TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2240  SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2241  ThreadSignalContext *sctx = SigCtx(thr);
2242  CHECK_NE(sctx, 0);
2243  int prev = sctx->int_signal_send;
2244  bool self = pthread_equal(tid, pthread_self());
2245  if (self)
2246    sctx->int_signal_send = sig;
2247  int res = REAL(pthread_kill)(tid, sig);
2248  if (self) {
2249    CHECK_EQ(sctx->int_signal_send, sig);
2250    sctx->int_signal_send = prev;
2251  }
2252  return res;
2253}
2254
2255TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2256  SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2257  // It's intercepted merely to process pending signals.
2258  return REAL(gettimeofday)(tv, tz);
2259}
2260
2261TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2262    void *hints, void *rv) {
2263  SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
2264  // We miss atomic synchronization in getaddrinfo,
2265  // and can report false race between malloc and free
2266  // inside of getaddrinfo. So ignore memory accesses.
2267  ThreadIgnoreBegin(thr, pc);
2268  int res = REAL(getaddrinfo)(node, service, hints, rv);
2269  ThreadIgnoreEnd(thr);
2270  return res;
2271}
2272
2273TSAN_INTERCEPTOR(int, fork, int fake) {
2274  if (in_symbolizer())
2275    return REAL(fork)(fake);
2276  SCOPED_INTERCEPTOR_RAW(fork, fake);
2277  return REAL(fork)(fake);
2278}
2279
2280void atfork_prepare() {
2281  if (in_symbolizer())
2282    return;
2283  ThreadState *thr = cur_thread();
2284  const uptr pc = StackTrace::GetCurrentPc();
2285  ForkBefore(thr, pc);
2286}
2287
2288void atfork_parent() {
2289  if (in_symbolizer())
2290    return;
2291  ThreadState *thr = cur_thread();
2292  const uptr pc = StackTrace::GetCurrentPc();
2293  ForkParentAfter(thr, pc);
2294}
2295
2296void atfork_child() {
2297  if (in_symbolizer())
2298    return;
2299  ThreadState *thr = cur_thread();
2300  const uptr pc = StackTrace::GetCurrentPc();
2301  ForkChildAfter(thr, pc, true);
2302  FdOnFork(thr, pc);
2303}
2304
2305#if !SANITIZER_IOS
2306TSAN_INTERCEPTOR(int, vfork, int fake) {
2307  // Some programs (e.g. openjdk) call close for all file descriptors
2308  // in the child process. Under tsan it leads to false positives, because
2309  // address space is shared, so the parent process also thinks that
2310  // the descriptors are closed (while they are actually not).
2311  // This leads to false positives due to missed synchronization.
2312  // Strictly saying this is undefined behavior, because vfork child is not
2313  // allowed to call any functions other than exec/exit. But this is what
2314  // openjdk does, so we want to handle it.
2315  // We could disable interceptors in the child process. But it's not possible
2316  // to simply intercept and wrap vfork, because vfork child is not allowed
2317  // to return from the function that calls vfork, and that's exactly what
2318  // we would do. So this would require some assembly trickery as well.
2319  // Instead we simply turn vfork into fork.
2320  return WRAP(fork)(fake);
2321}
2322#endif
2323
2324#if SANITIZER_LINUX
2325TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
2326                 void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
2327  SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
2328                         child_tid);
2329  struct Arg {
2330    int (*fn)(void *);
2331    void *arg;
2332  };
2333  auto wrapper = +[](void *p) -> int {
2334    auto *thr = cur_thread();
2335    uptr pc = GET_CURRENT_PC();
2336    // Start the background thread for fork, but not for clone.
2337    // For fork we did this always and it's known to work (or user code has
2338    // adopted). But if we do this for the new clone interceptor some code
2339    // (sandbox2) fails. So model we used to do for years and don't start the
2340    // background thread after clone.
2341    ForkChildAfter(thr, pc, false);
2342    FdOnFork(thr, pc);
2343    auto *arg = static_cast<Arg *>(p);
2344    return arg->fn(arg->arg);
2345  };
2346  ForkBefore(thr, pc);
2347  Arg arg_wrapper = {fn, arg};
2348  int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
2349                        child_tid);
2350  ForkParentAfter(thr, pc);
2351  return pid;
2352}
2353#endif
2354
2355#if !SANITIZER_APPLE && !SANITIZER_ANDROID
2356typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2357                                    void *data);
2358struct dl_iterate_phdr_data {
2359  ThreadState *thr;
2360  uptr pc;
2361  dl_iterate_phdr_cb_t cb;
2362  void *data;
2363};
2364
2365static bool IsAppNotRodata(uptr addr) {
2366  return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata;
2367}
2368
2369static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2370                              void *data) {
2371  dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2372  // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
2373  // accessible in dl_iterate_phdr callback. But we don't see synchronization
2374  // inside of dynamic linker, so we "unpoison" it here in order to not
2375  // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough
2376  // because some libc functions call __libc_dlopen.
2377  if (info && IsAppNotRodata((uptr)info->dlpi_name))
2378    MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2379                     internal_strlen(info->dlpi_name));
2380  int res = cbdata->cb(info, size, cbdata->data);
2381  // Perform the check one more time in case info->dlpi_name was overwritten
2382  // by user callback.
2383  if (info && IsAppNotRodata((uptr)info->dlpi_name))
2384    MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2385                     internal_strlen(info->dlpi_name));
2386  return res;
2387}
2388
2389TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2390  SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2391  dl_iterate_phdr_data cbdata;
2392  cbdata.thr = thr;
2393  cbdata.pc = pc;
2394  cbdata.cb = cb;
2395  cbdata.data = data;
2396  int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2397  return res;
2398}
2399#endif
2400
2401static int OnExit(ThreadState *thr) {
2402  int status = Finalize(thr);
2403  FlushStreams();
2404  return status;
2405}
2406
2407#if !SANITIZER_APPLE
2408static void HandleRecvmsg(ThreadState *thr, uptr pc,
2409    __sanitizer_msghdr *msg) {
2410  int fds[64];
2411  int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2412  for (int i = 0; i < cnt; i++)
2413    FdEventCreate(thr, pc, fds[i]);
2414}
2415#endif
2416
2417#include "sanitizer_common/sanitizer_platform_interceptors.h"
2418// Causes interceptor recursion (getaddrinfo() and fopen())
2419#undef SANITIZER_INTERCEPT_GETADDRINFO
2420// We define our own.
2421#if SANITIZER_INTERCEPT_TLS_GET_ADDR
2422#define NEED_TLS_GET_ADDR
2423#endif
2424#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2425#define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
2426#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
2427
2428#define COMMON_INTERCEPT_FUNCTION_VER(name, ver)                          \
2429  INTERCEPT_FUNCTION_VER(name, ver)
2430#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
2431  (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
2432
2433#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2434  SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__);              \
2435  TsanInterceptorContext _ctx = {thr, pc};                \
2436  ctx = (void *)&_ctx;                                    \
2437  (void)ctx;
2438
2439#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2440  if (path)                                           \
2441    Acquire(thr, pc, File2addr(path));                \
2442  if (file) {                                         \
2443    int fd = fileno_unlocked(file);                   \
2444    if (fd >= 0) FdFileCreate(thr, pc, fd);           \
2445  }
2446
2447#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2448  if (file) {                                    \
2449    int fd = fileno_unlocked(file);              \
2450    FdClose(thr, pc, fd);                        \
2451  }
2452
2453#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
2454  ({                                              \
2455    CheckNoDeepBind(filename, flag);              \
2456    ThreadIgnoreBegin(thr, 0);                    \
2457    void *res = REAL(dlopen)(filename, flag);     \
2458    ThreadIgnoreEnd(thr);                         \
2459    res;                                          \
2460  })
2461
2462// Ignore interceptors in OnLibraryLoaded()/Unloaded().  These hooks use code
2463// (ListOfModules::init, MemoryMappingLayout::DumpListOfModules) that make
2464// intercepted calls, which can cause deadlockes with ReportRace() which also
2465// uses this code.
2466#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2467  ({                                                        \
2468    ScopedIgnoreInterceptors ignore_interceptors;           \
2469    libignore()->OnLibraryLoaded(filename);                 \
2470  })
2471
2472#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()     \
2473  ({                                              \
2474    ScopedIgnoreInterceptors ignore_interceptors; \
2475    libignore()->OnLibraryUnloaded();             \
2476  })
2477
2478#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2479  Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2480
2481#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2482  Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2483
2484#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2485  Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2486
2487#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2488  FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2489
2490#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2491  FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2492
2493#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2494  FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2495
2496#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2497  FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2498
2499#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2500  ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2501
2502#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name)         \
2503  if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \
2504    COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name);                     \
2505  else                                                                 \
2506    __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
2507
2508#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2509
2510#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2511  OnExit(((TsanInterceptorContext *) ctx)->thr)
2512
2513#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd,  \
2514                                     off)                                   \
2515  do {                                                                      \
2516    return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
2517                            off);                                           \
2518  } while (false)
2519
2520#define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz)           \
2521  do {                                                          \
2522    return munmap_interceptor(thr, pc, REAL(munmap), addr, sz); \
2523  } while (false)
2524
2525#if !SANITIZER_APPLE
2526#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2527  HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2528      ((TsanInterceptorContext *)ctx)->pc, msg)
2529#endif
2530
2531#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end)                           \
2532  if (TsanThread *t = GetCurrentThread()) {                                    \
2533    *begin = t->tls_begin();                                                   \
2534    *end = t->tls_end();                                                       \
2535  } else {                                                                     \
2536    *begin = *end = 0;                                                         \
2537  }
2538
2539#define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2540  SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2541
2542#define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2543  SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2544
2545#include "sanitizer_common/sanitizer_common_interceptors.inc"
2546
2547static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2548                          __sanitizer_sigaction *old);
2549static __sanitizer_sighandler_ptr signal_impl(int sig,
2550                                              __sanitizer_sighandler_ptr h);
2551
2552#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
2553  { return sigaction_impl(signo, act, oldact); }
2554
2555#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
2556  { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
2557
2558#define SIGNAL_INTERCEPTOR_ENTER() LazyInitialize(cur_thread_init())
2559
2560#include "sanitizer_common/sanitizer_signal_interceptors.inc"
2561
2562int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2563                   __sanitizer_sigaction *old) {
2564  // Note: if we call REAL(sigaction) directly for any reason without proxying
2565  // the signal handler through sighandler, very bad things will happen.
2566  // The handler will run synchronously and corrupt tsan per-thread state.
2567  SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
2568  if (sig <= 0 || sig >= kSigCount) {
2569    errno = errno_EINVAL;
2570    return -1;
2571  }
2572  __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2573  __sanitizer_sigaction old_stored;
2574  if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
2575  __sanitizer_sigaction newact;
2576  if (act) {
2577    // Copy act into sigactions[sig].
2578    // Can't use struct copy, because compiler can emit call to memcpy.
2579    // Can't use internal_memcpy, because it copies byte-by-byte,
2580    // and signal handler reads the handler concurrently. It can read
2581    // some bytes from old value and some bytes from new value.
2582    // Use volatile to prevent insertion of memcpy.
2583    sigactions[sig].handler =
2584        *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
2585    sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
2586    internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
2587                    sizeof(sigactions[sig].sa_mask));
2588#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
2589    sigactions[sig].sa_restorer = act->sa_restorer;
2590#endif
2591    internal_memcpy(&newact, act, sizeof(newact));
2592    internal_sigfillset(&newact.sa_mask);
2593    if ((act->sa_flags & SA_SIGINFO) ||
2594        ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
2595      newact.sa_flags |= SA_SIGINFO;
2596      newact.sigaction = sighandler;
2597    }
2598    ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
2599    act = &newact;
2600  }
2601  int res = REAL(sigaction)(sig, act, old);
2602  if (res == 0 && old && old->sigaction == sighandler)
2603    internal_memcpy(old, &old_stored, sizeof(*old));
2604  return res;
2605}
2606
2607static __sanitizer_sighandler_ptr signal_impl(int sig,
2608                                              __sanitizer_sighandler_ptr h) {
2609  __sanitizer_sigaction act;
2610  act.handler = h;
2611  internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
2612  act.sa_flags = 0;
2613  __sanitizer_sigaction old;
2614  int res = sigaction_symname(sig, &act, &old);
2615  if (res) return (__sanitizer_sighandler_ptr)sig_err;
2616  return old.handler;
2617}
2618
2619#define TSAN_SYSCALL()             \
2620  ThreadState *thr = cur_thread(); \
2621  if (thr->ignore_interceptors)    \
2622    return;                        \
2623  ScopedSyscall scoped_syscall(thr)
2624
2625struct ScopedSyscall {
2626  ThreadState *thr;
2627
2628  explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
2629
2630  ~ScopedSyscall() {
2631    ProcessPendingSignals(thr);
2632  }
2633};
2634
2635#if !SANITIZER_FREEBSD && !SANITIZER_APPLE
2636static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2637  TSAN_SYSCALL();
2638  MemoryAccessRange(thr, pc, p, s, write);
2639}
2640
2641static USED void syscall_acquire(uptr pc, uptr addr) {
2642  TSAN_SYSCALL();
2643  Acquire(thr, pc, addr);
2644  DPrintf("syscall_acquire(0x%zx))\n", addr);
2645}
2646
2647static USED void syscall_release(uptr pc, uptr addr) {
2648  TSAN_SYSCALL();
2649  DPrintf("syscall_release(0x%zx)\n", addr);
2650  Release(thr, pc, addr);
2651}
2652
2653static void syscall_fd_close(uptr pc, int fd) {
2654  auto *thr = cur_thread();
2655  FdClose(thr, pc, fd);
2656}
2657
2658static USED void syscall_fd_acquire(uptr pc, int fd) {
2659  TSAN_SYSCALL();
2660  FdAcquire(thr, pc, fd);
2661  DPrintf("syscall_fd_acquire(%d)\n", fd);
2662}
2663
2664static USED void syscall_fd_release(uptr pc, int fd) {
2665  TSAN_SYSCALL();
2666  DPrintf("syscall_fd_release(%d)\n", fd);
2667  FdRelease(thr, pc, fd);
2668}
2669
2670static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }
2671
2672static void syscall_post_fork(uptr pc, int pid) {
2673  ThreadState *thr = cur_thread();
2674  if (pid == 0) {
2675    // child
2676    ForkChildAfter(thr, pc, true);
2677    FdOnFork(thr, pc);
2678  } else if (pid > 0) {
2679    // parent
2680    ForkParentAfter(thr, pc);
2681  } else {
2682    // error
2683    ForkParentAfter(thr, pc);
2684  }
2685}
2686#endif
2687
2688#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2689  syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2690
2691#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2692  syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2693
2694#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2695  do {                                       \
2696    (void)(p);                               \
2697    (void)(s);                               \
2698  } while (false)
2699
2700#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2701  do {                                        \
2702    (void)(p);                                \
2703    (void)(s);                                \
2704  } while (false)
2705
2706#define COMMON_SYSCALL_ACQUIRE(addr) \
2707    syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2708
2709#define COMMON_SYSCALL_RELEASE(addr) \
2710    syscall_release(GET_CALLER_PC(), (uptr)(addr))
2711
2712#define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2713
2714#define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2715
2716#define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2717
2718#define COMMON_SYSCALL_PRE_FORK() \
2719  syscall_pre_fork(GET_CALLER_PC())
2720
2721#define COMMON_SYSCALL_POST_FORK(res) \
2722  syscall_post_fork(GET_CALLER_PC(), res)
2723
2724#include "sanitizer_common/sanitizer_common_syscalls.inc"
2725#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
2726
2727#ifdef NEED_TLS_GET_ADDR
2728
2729static void handle_tls_addr(void *arg, void *res) {
2730  ThreadState *thr = cur_thread();
2731  if (!thr)
2732    return;
2733  DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
2734                                        thr->tls_addr + thr->tls_size);
2735  if (!dtv)
2736    return;
2737  // New DTLS block has been allocated.
2738  MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2739}
2740
2741#if !SANITIZER_S390
2742// Define own interceptor instead of sanitizer_common's for three reasons:
2743// 1. It must not process pending signals.
2744//    Signal handlers may contain MOVDQA instruction (see below).
2745// 2. It must be as simple as possible to not contain MOVDQA.
2746// 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which
2747//    is empty for tsan (meant only for msan).
2748// Note: __tls_get_addr can be called with mis-aligned stack due to:
2749// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
2750// So the interceptor must work with mis-aligned stack, in particular, does not
2751// execute MOVDQA with stack addresses.
2752TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2753  void *res = REAL(__tls_get_addr)(arg);
2754  handle_tls_addr(arg, res);
2755  return res;
2756}
2757#else // SANITIZER_S390
2758TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
2759  uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
2760  char *tp = static_cast<char *>(__builtin_thread_pointer());
2761  handle_tls_addr(arg, res + tp);
2762  return res;
2763}
2764#endif
2765#endif
2766
2767#if SANITIZER_NETBSD
2768TSAN_INTERCEPTOR(void, _lwp_exit) {
2769  SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
2770  DestroyThreadState();
2771  REAL(_lwp_exit)();
2772}
2773#define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
2774#else
2775#define TSAN_MAYBE_INTERCEPT__LWP_EXIT
2776#endif
2777
2778#if SANITIZER_FREEBSD
2779TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
2780  SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
2781  DestroyThreadState();
2782  REAL(thr_exit(state));
2783}
2784#define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
2785#else
2786#define TSAN_MAYBE_INTERCEPT_THR_EXIT
2787#endif
2788
2789TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a)
2790TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c)
2791TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c)
2792TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c)
2793TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m)
2794TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a)
2795TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m)
2796TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m)
2797TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m)
2798TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_unlock, void *m)
2799TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a)
2800TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l)
2801TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l)
2802TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l)
2803TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l)
2804TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l)
2805TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l)
2806TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)())
2807TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o)
2808
2809TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
2810TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
2811TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
2812TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
2813TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
2814TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
2815TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
2816TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_lock, void *m)
2817TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
2818TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_unlock, void *m)
2819TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
2820TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
2821TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
2822TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
2823TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
2824TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
2825TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
2826TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
2827TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
2828  void *c)
2829
2830namespace __tsan {
2831
2832static void finalize(void *arg) {
2833  ThreadState *thr = cur_thread();
2834  int status = Finalize(thr);
2835  // Make sure the output is not lost.
2836  FlushStreams();
2837  if (status)
2838    Die();
2839}
2840
2841#if !SANITIZER_APPLE && !SANITIZER_ANDROID
2842static void unreachable() {
2843  Report("FATAL: ThreadSanitizer: unreachable called\n");
2844  Die();
2845}
2846#endif
2847
2848// Define default implementation since interception of libdispatch  is optional.
2849SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
2850
2851void InitializeInterceptors() {
2852#if !SANITIZER_APPLE
2853  // We need to setup it early, because functions like dlsym() can call it.
2854  REAL(memset) = internal_memset;
2855  REAL(memcpy) = internal_memcpy;
2856#endif
2857
2858  new(interceptor_ctx()) InterceptorContext();
2859
2860  InitializeCommonInterceptors();
2861  InitializeSignalInterceptors();
2862  InitializeLibdispatchInterceptors();
2863
2864#if !SANITIZER_APPLE
2865  InitializeSetjmpInterceptors();
2866#endif
2867
2868  TSAN_INTERCEPT(longjmp_symname);
2869  TSAN_INTERCEPT(siglongjmp_symname);
2870#if SANITIZER_NETBSD
2871  TSAN_INTERCEPT(_longjmp);
2872#endif
2873
2874  TSAN_INTERCEPT(malloc);
2875  TSAN_INTERCEPT(__libc_memalign);
2876  TSAN_INTERCEPT(calloc);
2877  TSAN_INTERCEPT(realloc);
2878  TSAN_INTERCEPT(reallocarray);
2879  TSAN_INTERCEPT(free);
2880  TSAN_INTERCEPT(cfree);
2881  TSAN_INTERCEPT(munmap);
2882  TSAN_MAYBE_INTERCEPT_MEMALIGN;
2883  TSAN_INTERCEPT(valloc);
2884  TSAN_MAYBE_INTERCEPT_PVALLOC;
2885  TSAN_INTERCEPT(posix_memalign);
2886
2887  TSAN_INTERCEPT(strcpy);
2888  TSAN_INTERCEPT(strncpy);
2889  TSAN_INTERCEPT(strdup);
2890
2891  TSAN_INTERCEPT(pthread_create);
2892  TSAN_INTERCEPT(pthread_join);
2893  TSAN_INTERCEPT(pthread_detach);
2894  TSAN_INTERCEPT(pthread_exit);
2895  #if SANITIZER_LINUX
2896  TSAN_INTERCEPT(pthread_tryjoin_np);
2897  TSAN_INTERCEPT(pthread_timedjoin_np);
2898  #endif
2899
2900  TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
2901  TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
2902  TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
2903  TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
2904  TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
2905  TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
2906
2907  TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;
2908
2909  TSAN_INTERCEPT(pthread_mutex_init);
2910  TSAN_INTERCEPT(pthread_mutex_destroy);
2911  TSAN_INTERCEPT(pthread_mutex_lock);
2912  TSAN_INTERCEPT(pthread_mutex_trylock);
2913  TSAN_INTERCEPT(pthread_mutex_timedlock);
2914  TSAN_INTERCEPT(pthread_mutex_unlock);
2915#if SANITIZER_LINUX
2916  TSAN_INTERCEPT(pthread_mutex_clocklock);
2917#endif
2918#if SANITIZER_GLIBC
2919#  if !__GLIBC_PREREQ(2, 34)
2920  TSAN_INTERCEPT(__pthread_mutex_lock);
2921  TSAN_INTERCEPT(__pthread_mutex_unlock);
2922#  endif
2923#endif
2924
2925  TSAN_INTERCEPT(pthread_spin_init);
2926  TSAN_INTERCEPT(pthread_spin_destroy);
2927  TSAN_INTERCEPT(pthread_spin_lock);
2928  TSAN_INTERCEPT(pthread_spin_trylock);
2929  TSAN_INTERCEPT(pthread_spin_unlock);
2930
2931  TSAN_INTERCEPT(pthread_rwlock_init);
2932  TSAN_INTERCEPT(pthread_rwlock_destroy);
2933  TSAN_INTERCEPT(pthread_rwlock_rdlock);
2934  TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
2935  TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
2936  TSAN_INTERCEPT(pthread_rwlock_wrlock);
2937  TSAN_INTERCEPT(pthread_rwlock_trywrlock);
2938  TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
2939  TSAN_INTERCEPT(pthread_rwlock_unlock);
2940
2941  TSAN_INTERCEPT(pthread_barrier_init);
2942  TSAN_INTERCEPT(pthread_barrier_destroy);
2943  TSAN_INTERCEPT(pthread_barrier_wait);
2944
2945  TSAN_INTERCEPT(pthread_once);
2946
2947  TSAN_MAYBE_INTERCEPT___FXSTAT;
2948  TSAN_MAYBE_INTERCEPT_FSTAT;
2949  TSAN_MAYBE_INTERCEPT_FSTAT64;
2950  TSAN_INTERCEPT(open);
2951  TSAN_MAYBE_INTERCEPT_OPEN64;
2952  TSAN_INTERCEPT(creat);
2953  TSAN_MAYBE_INTERCEPT_CREAT64;
2954  TSAN_INTERCEPT(dup);
2955  TSAN_INTERCEPT(dup2);
2956  TSAN_INTERCEPT(dup3);
2957  TSAN_MAYBE_INTERCEPT_EVENTFD;
2958  TSAN_MAYBE_INTERCEPT_SIGNALFD;
2959  TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
2960  TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
2961  TSAN_INTERCEPT(socket);
2962  TSAN_INTERCEPT(socketpair);
2963  TSAN_INTERCEPT(connect);
2964  TSAN_INTERCEPT(bind);
2965  TSAN_INTERCEPT(listen);
2966  TSAN_MAYBE_INTERCEPT_EPOLL;
2967  TSAN_INTERCEPT(close);
2968  TSAN_MAYBE_INTERCEPT___CLOSE;
2969  TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
2970  TSAN_INTERCEPT(pipe);
2971  TSAN_INTERCEPT(pipe2);
2972
2973  TSAN_INTERCEPT(unlink);
2974  TSAN_INTERCEPT(tmpfile);
2975  TSAN_MAYBE_INTERCEPT_TMPFILE64;
2976  TSAN_INTERCEPT(abort);
2977  TSAN_INTERCEPT(rmdir);
2978  TSAN_INTERCEPT(closedir);
2979
2980  TSAN_INTERCEPT(sigsuspend);
2981  TSAN_INTERCEPT(sigblock);
2982  TSAN_INTERCEPT(sigsetmask);
2983  TSAN_INTERCEPT(pthread_sigmask);
2984  TSAN_INTERCEPT(raise);
2985  TSAN_INTERCEPT(kill);
2986  TSAN_INTERCEPT(pthread_kill);
2987  TSAN_INTERCEPT(sleep);
2988  TSAN_INTERCEPT(usleep);
2989  TSAN_INTERCEPT(nanosleep);
2990  TSAN_INTERCEPT(pause);
2991  TSAN_INTERCEPT(gettimeofday);
2992  TSAN_INTERCEPT(getaddrinfo);
2993
2994  TSAN_INTERCEPT(fork);
2995  TSAN_INTERCEPT(vfork);
2996#if SANITIZER_LINUX
2997  TSAN_INTERCEPT(clone);
2998#endif
2999#if !SANITIZER_ANDROID
3000  TSAN_INTERCEPT(dl_iterate_phdr);
3001#endif
3002  TSAN_MAYBE_INTERCEPT_ON_EXIT;
3003  TSAN_INTERCEPT(__cxa_atexit);
3004  TSAN_INTERCEPT(_exit);
3005
3006#ifdef NEED_TLS_GET_ADDR
3007#if !SANITIZER_S390
3008  TSAN_INTERCEPT(__tls_get_addr);
3009#else
3010  TSAN_INTERCEPT(__tls_get_addr_internal);
3011  TSAN_INTERCEPT(__tls_get_offset);
3012#endif
3013#endif
3014
3015  TSAN_MAYBE_INTERCEPT__LWP_EXIT;
3016  TSAN_MAYBE_INTERCEPT_THR_EXIT;
3017
3018#if !SANITIZER_APPLE && !SANITIZER_ANDROID
3019  // Need to setup it, because interceptors check that the function is resolved.
3020  // But atexit is emitted directly into the module, so can't be resolved.
3021  REAL(atexit) = (int(*)(void(*)()))unreachable;
3022#endif
3023
3024  if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
3025    Printf("ThreadSanitizer: failed to setup atexit callback\n");
3026    Die();
3027  }
3028  if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) {
3029    Printf("ThreadSanitizer: failed to setup atfork callbacks\n");
3030    Die();
3031  }
3032
3033#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
3034  if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
3035    Printf("ThreadSanitizer: failed to create thread key\n");
3036    Die();
3037  }
3038#endif
3039
3040  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init);
3041  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy);
3042  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal);
3043  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast);
3044  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait);
3045  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init);
3046  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy);
3047  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock);
3048  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock);
3049  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock);
3050  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init);
3051  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy);
3052  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock);
3053  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock);
3054  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock);
3055  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock);
3056  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock);
3057  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once);
3058  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask);
3059
3060  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
3061  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
3062  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
3063  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
3064  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
3065  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
3066  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
3067  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_lock);
3068  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
3069  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_unlock);
3070  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
3071  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
3072  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
3073  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
3074  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
3075  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
3076  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
3077  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
3078  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);
3079
3080  FdInit();
3081}
3082
3083}  // namespace __tsan
3084
3085// Invisible barrier for tests.
3086// There were several unsuccessful iterations for this functionality:
3087// 1. Initially it was implemented in user code using
3088//    REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
3089//    MacOS. Futexes are linux-specific for this matter.
3090// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
3091//    "as-if synchronized via sleep" messages in reports which failed some
3092//    output tests.
3093// 3. Then we switched to atomics+sched_yield. But this produced tons of tsan-
3094//    visible events, which lead to "failed to restore stack trace" failures.
3095// Note that no_sanitize_thread attribute does not turn off atomic interception
3096// so attaching it to the function defined in user code does not help.
3097// That's why we now have what we have.
3098constexpr u32 kBarrierThreadBits = 10;
3099constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
3100
3101extern "C" {
3102
3103SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
3104    atomic_uint32_t *barrier, u32 num_threads) {
3105  if (num_threads >= kBarrierThreads) {
3106    Printf("barrier_init: count is too large (%d)\n", num_threads);
3107    Die();
3108  }
3109  // kBarrierThreadBits lsb is thread count,
3110  // the remaining are count of entered threads.
3111  atomic_store(barrier, num_threads, memory_order_relaxed);
3112}
3113
3114static u32 barrier_epoch(u32 value) {
3115  return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
3116}
3117
3118SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
3119    atomic_uint32_t *barrier) {
3120  u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
3121  u32 old_epoch = barrier_epoch(old);
3122  if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
3123    FutexWake(barrier, (1 << 30));
3124    return;
3125  }
3126  for (;;) {
3127    u32 cur = atomic_load(barrier, memory_order_relaxed);
3128    if (barrier_epoch(cur) != old_epoch)
3129      return;
3130    FutexWait(barrier, cur);
3131  }
3132}
3133
3134}  // extern "C"
3135