//===-- tsan_platform_mac.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Mac-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_APPLE

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_ptrauth.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"

#include <limits.h>
#include <mach/mach.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <sched.h>

namespace __tsan {

#if !SANITIZER_GO
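// The main thread's ThreadState lives in static storage so that cur_thread()
// can return it even before TLS and libpthread are usable.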
static char main_thread_state[sizeof(ThreadState)] ALIGNED(
    SANITIZER_CACHE_LINE_SIZE);
static ThreadState *dead_thread_state;
static pthread_key_t thread_state_key;

// We rely on the following documented, but Darwin-specific behavior to keep the
// reference to the ThreadState object alive in TLS:
// pthread_key_create man page:
//   If, after all the destructors have been called for all non-NULL values with
//   associated destructors, there are still some non-NULL values with
//   associated destructors, then the process is repeated.  If, after at least
//   [PTHREAD_DESTRUCTOR_ITERATIONS] iterations of destructor calls for
//   outstanding non-NULL values, there are still some non-NULL values with
//   associated destructors, the implementation stops calling destructors.
static_assert(PTHREAD_DESTRUCTOR_ITERATIONS == 4, "Small number of iterations");
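// TLS destructor: re-install the value so the slot stays non-NULL. Per the
// behavior documented above, the destructor is then run again, which keeps the
// ThreadState reachable while other TLS destructors (which may call
// intercepted functions) run.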
static void ThreadStateDestructor(void *thr) {
  int res = pthread_setspecific(thread_state_key, thr);
  CHECK_EQ(res, 0);
}

static void InitializeThreadStateStorage() {
  int res;
  CHECK_EQ(thread_state_key, 0);
  res = pthread_key_create(&thread_state_key, ThreadStateDestructor);
  CHECK_EQ(res, 0);
  res = pthread_setspecific(thread_state_key, main_thread_state);
  CHECK_EQ(res, 0);

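  // Create an immutable placeholder state that cur_thread_finalize() installs
  // for threads whose real ThreadState has already been destroyed.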
  auto dts = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
  dts->fast_state.SetIgnoreBit();
  dts->ignore_interceptors = 1;
  dts->is_dead = true;
  const_cast<Tid &>(dts->tid) = kInvalidTid;
  res = internal_mprotect(dts, sizeof(ThreadState), PROT_READ);  // immutable
  CHECK_EQ(res, 0);
  dead_thread_state = dts;
}

ThreadState *cur_thread() {
  // Some interceptors get called before libpthread has been initialized, and
  // in these cases we must avoid calling any pthread APIs.
  if (UNLIKELY(!thread_state_key)) {
    return (ThreadState *)main_thread_state;
  }

  // We only reach this line after InitializeThreadStateStorage() ran, i.e.,
  // after TSan (and therefore libpthread) has been initialized.
  ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
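  // An empty slot means this thread has not set up its ThreadState yet (e.g. a
  // freshly created thread); allocate and install one lazily.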
  if (UNLIKELY(!thr)) {
    thr = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
    int res = pthread_setspecific(thread_state_key, thr);
    CHECK_EQ(res, 0);
  }
  return thr;
}

void set_cur_thread(ThreadState *thr) {
  int res = pthread_setspecific(thread_state_key, thr);
  CHECK_EQ(res, 0);
}

void cur_thread_finalize() {
  ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
  CHECK(thr);
  if (thr == (ThreadState *)main_thread_state) {
    // Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
    // exit the main thread. Let's keep the main thread's ThreadState.
    return;
  }
  // Intercepted functions can still get called after cur_thread_finalize()
  // (called from DestroyThreadState()), so put a fake thread state for "dead"
  // threads.  An alternative solution would be to release the ThreadState
  // object from THREAD_DESTROY (which is delivered later and on the parent
  // thread) instead of THREAD_TERMINATE.
  int res = pthread_setspecific(thread_state_key, dead_thread_state);
  CHECK_EQ(res, 0);
  UnmapOrDie(thr, sizeof(ThreadState));
}
#endif

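// Sum the resident and dirty memory (in bytes) of the address range
// [start, end) by walking the task's VM regions with vm_region_64().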
static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
  vm_address_t address = start;
  vm_address_t end_address = end;
  uptr resident_pages = 0;
  uptr dirty_pages = 0;
  while (address < end_address) {
    vm_size_t vm_region_size;
    mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
    vm_region_extended_info_data_t vm_region_info;
    mach_port_t object_name;
    kern_return_t ret = vm_region_64(
        mach_task_self(), &address, &vm_region_size, VM_REGION_EXTENDED_INFO,
        (vm_region_info_t)&vm_region_info, &count, &object_name);
    if (ret != KERN_SUCCESS) break;

    resident_pages += vm_region_info.pages_resident;
    dirty_pages += vm_region_info.pages_dirtied;

    address += vm_region_size;
  }
  *res = resident_pages * GetPageSizeCached();
  *dirty = dirty_pages * GetPageSizeCached();
}

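// Format a human-readable memory usage report covering the shadow, metadata
// and application ranges, plus stack depot and thread statistics.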
void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
  uptr shadow_res, shadow_dirty;
  uptr meta_res, meta_dirty;
  RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
  RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);

#  if !SANITIZER_GO
  uptr low_res, low_dirty;
  uptr high_res, high_dirty;
  uptr heap_res, heap_dirty;
  RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &low_res, &low_dirty);
  RegionMemUsage(HiAppMemBeg(), HiAppMemEnd(), &high_res, &high_dirty);
  RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
#  else  // !SANITIZER_GO
  uptr app_res, app_dirty;
  RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &app_res, &app_dirty);
#  endif

  StackDepotStats stacks = StackDepotGetStats();
  uptr nthread, nlive;
  ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
  internal_snprintf(
      buf, buf_size,
      "shadow   (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
      "meta     (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#  if !SANITIZER_GO
      "low app  (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
      "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
      "heap     (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#  else  // !SANITIZER_GO
      "app      (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#  endif
      "stacks: %zd unique IDs, %zd kB allocated\n"
      "threads: %zd total, %zd live\n"
      "------------------------------\n",
      ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
      MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
#  if !SANITIZER_GO
      LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
      HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
      HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
#  else  // !SANITIZER_GO
      LoAppMemBeg(), LoAppMemEnd(), app_res / 1024, app_dirty / 1024,
#  endif
      stacks.n_uniq_ids, stacks.allocated / 1024, nthread, nlive);
}

#  if !SANITIZER_GO
void InitializeShadowMemoryPlatform() { }

// Register GCD worker threads, which are created without an observable call to
// pthread_create().
static void ThreadCreateCallback(uptr thread, bool gcd_worker) {
  if (gcd_worker) {
    ThreadState *thr = cur_thread();
    Processor *proc = ProcCreate();
    ProcWire(proc, thr);
    ThreadState *parent_thread_state = nullptr;  // No parent.
    Tid tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
    CHECK_NE(tid, kMainTid);
    ThreadStart(thr, tid, GetTid(), ThreadType::Worker);
  }
}

// Destroy thread state for *all* threads.
static void ThreadTerminateCallback(uptr thread) {
  ThreadState *thr = cur_thread();
  if (thr->tctx) {
    DestroyThreadState();
  }
}
#  endif

void InitializePlatformEarly() {
#  if !SANITIZER_GO && SANITIZER_IOS
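  // The fixed iOS shadow layout assumes a particular maximum VM address; if
  // the kernel reports a different limit, the mapping would be wrong, so fail
  // loudly instead.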
  uptr max_vm = GetMaxUserVirtualAddress() + 1;
  if (max_vm != HiAppMemEnd()) {
    Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
           (void *)max_vm, (void *)HiAppMemEnd());
    Die();
  }
#  endif
}

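// XOR key that libsystem uses to mangle the stack pointer stored in a jmp_buf;
// captured in InitializePlatform() and undone in ExtractLongJmpSp().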
static uptr longjmp_xor_key = 0;

void InitializePlatform() {
  DisableCoreDumperIfNecessary();
#if !SANITIZER_GO
  CheckAndProtect();

  InitializeThreadStateStorage();

  ThreadEventCallbacks callbacks = {
      .create = ThreadCreateCallback,
      .terminate = ThreadTerminateCallback,
  };
  InstallPthreadIntrospectionHook(callbacks);
#endif

  if (GetMacosAlignedVersion() >= MacosVersion(10, 14)) {
    // Libsystem currently uses a process-global key; this might change.
    const unsigned kTLSLongjmpXorKeySlot = 0x7;
    longjmp_xor_key = (uptr)pthread_getspecific(kTLSLongjmpXorKeySlot);
  }
}

#ifdef __aarch64__
# define LONG_JMP_SP_ENV_SLOT \
    ((GetMacosAlignedVersion() >= MacosVersion(10, 14)) ? 12 : 13)
#else
# define LONG_JMP_SP_ENV_SLOT 2
#endif

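// Recover the stack pointer that setjmp() stored in the jmp_buf: read the
// mangled slot, undo the XOR mangling and, on targets with pointer
// authentication, authenticate the resulting pointer.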
uptr ExtractLongJmpSp(uptr *env) {
  uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
  uptr sp = mangled_sp ^ longjmp_xor_key;
  sp = (uptr)ptrauth_auth_data((void *)sp, ptrauth_key_asdb,
                               ptrauth_string_discriminator("sp"));
  return sp;
}

#if !SANITIZER_GO
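// Dummy function whose address serves as the PC for the synthetic TLS
// initialization write performed by ImitateTlsWrite() below.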
extern "C" void __tsan_tls_initialization() {}

void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
  const uptr pc = StackTrace::GetNextInstructionPc(
      reinterpret_cast<uptr>(__tsan_tls_initialization));
  // Unlike on Linux, we only store a pointer to the ThreadState object in TLS;
  // just mark the entire range as written to.
  MemoryRangeImitateWrite(thr, pc, tls_addr, tls_size);
}
#endif

#if !SANITIZER_GO
// Note: this function runs with async signals enabled,
// so it must not touch any TSan state.
int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
                                     void (*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are a hardcore mess of macros. We can't intercept
  // them or call them without including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(arg);
  pthread_cleanup_pop(0);
  return res;
}
#endif

}  // namespace __tsan

#endif  // SANITIZER_APPLE