//===-- tsan_platform_mac.cc ----------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Mac-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"

#include <mach/mach.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <sched.h>

namespace __tsan {

#if !SANITIZER_GO
// Returns the pointer stored at *dst, lazily allocating a `size`-byte
// anonymous mapping on first use, in a way that is safe to call from a signal
// handler. If two contexts (e.g. a thread and a signal handler interrupting
// it) race on the first allocation, the CAS below ensures exactly one mapping
// wins; the loser unmaps its own mapping and returns the winner's pointer.
static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
  atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
  void *val = (void *)atomic_load_relaxed(a);
  atomic_signal_fence(memory_order_acquire);  // Turns the previous load into
                                              // acquire wrt signals.
  if (UNLIKELY(val == nullptr)) {
    val = (void *)internal_mmap(nullptr, size, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANON, -1, 0);
    CHECK(val);
    void *cmp = nullptr;
    if (!atomic_compare_exchange_strong(a, (uintptr_t *)&cmp, (uintptr_t)val,
                                        memory_order_acq_rel)) {
      // Another context won the race; use its mapping instead of ours.
      internal_munmap(val, size);
      val = cmp;
    }
  }
  return val;
}

// On OS X, accessing TLVs via __thread or manually by using pthread_key_* is
// problematic, because there are several places where interceptors are called
// when TLVs are not accessible (early process startup, thread cleanup, ...).
// The following provides a "poor man's TLV" implementation, where we use the
// shadow memory of the pointer returned by pthread_self() to store a pointer to
// the ThreadState object. The main thread's ThreadState is stored separately
// in a static variable, because we need to access it even before the
// shadow memory is set up.
static uptr main_thread_identity = 0;
ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];

// Returns the per-thread slot address derived from pthread_self(), or nullptr
// when the calling thread is the main thread (which uses the static storage
// above instead).
ThreadState **cur_thread_location() {
  ThreadState **thread_identity = (ThreadState **)pthread_self();
  return ((uptr)thread_identity == main_thread_identity) ? nullptr
                                                         : thread_identity;
}

// Returns the calling thread's ThreadState, allocating it on first use.
ThreadState *cur_thread() {
  ThreadState **thr_state_loc = cur_thread_location();
  // main_thread_identity == 0 means InitializePlatform() has not run yet
  // (early process startup); fall back to the static main-thread state.
  if (thr_state_loc == nullptr || main_thread_identity == 0) {
    return (ThreadState *)&main_thread_state;
  }
  // The ThreadState pointer lives in the shadow memory of the pthread_self()
  // address ("poor man's TLV", see comment above).
  ThreadState **fake_tls = (ThreadState **)MemToShadow((uptr)thr_state_loc);
  ThreadState *thr = (ThreadState *)SignalSafeGetOrAllocate(
      (uptr *)fake_tls, sizeof(ThreadState));
  return thr;
}

// TODO(kuba.brecka): This is not async-signal-safe. In particular, we call
// munmap first and then clear `fake_tls`; if we receive a signal in between,
// handler will try to access the unmapped ThreadState.
// Releases the calling thread's lazily-allocated ThreadState at thread exit.
void cur_thread_finalize() {
  ThreadState **thr_state_loc = cur_thread_location();
  if (thr_state_loc == nullptr) {
    // Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
    // exit the main thread. Let's keep the main thread's ThreadState.
    return;
  }
  ThreadState **fake_tls = (ThreadState **)MemToShadow((uptr)thr_state_loc);
  internal_munmap(*fake_tls, sizeof(ThreadState));
  *fake_tls = nullptr;
}
#endif

// No-op on Mac; shadow memory is not flushed on this platform.
void FlushShadowMemory() {
}

// Computes resident and dirty byte counts for the address range
// [start, end) by walking the task's VM regions with vm_region_64 and
// summing per-region resident/dirtied page counts.
// NOTE(review): a region straddling `end` contributes all of its pages, not
// just the portion inside the range — the loop does not clamp to `end`.
static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
  vm_address_t address = start;
  vm_address_t end_address = end;
  uptr resident_pages = 0;
  uptr dirty_pages = 0;
  while (address < end_address) {
    vm_size_t vm_region_size;
    mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
    vm_region_extended_info_data_t vm_region_info;
    mach_port_t object_name;
    kern_return_t ret = vm_region_64(
        mach_task_self(), &address, &vm_region_size, VM_REGION_EXTENDED_INFO,
        (vm_region_info_t)&vm_region_info, &count, &object_name);
    // Stop at the first failure (typically: no further regions in the range).
    if (ret != KERN_SUCCESS) break;

    resident_pages += vm_region_info.pages_resident;
    dirty_pages += vm_region_info.pages_dirtied;

    // vm_region_64 updates `address` to the region's start; advance past it.
    address += vm_region_size;
  }
  *res = resident_pages * GetPageSizeCached();
  *dirty = dirty_pages * GetPageSizeCached();
}

// Formats a human-readable memory-usage report (shadow/meta/trace/app/heap
// ranges, stack depot stats, thread counts) into buf[0..buf_size).
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
  uptr shadow_res, shadow_dirty;
  uptr meta_res, meta_dirty;
  uptr trace_res, trace_dirty;
  RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
  RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);
  RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty);

#if !SANITIZER_GO
  uptr low_res, low_dirty;
  uptr high_res, high_dirty;
  uptr heap_res, heap_dirty;
  RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &low_res, &low_dirty);
  RegionMemUsage(HiAppMemBeg(), HiAppMemEnd(), &high_res, &high_dirty);
  RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
#else  // !SANITIZER_GO
  uptr app_res, app_dirty;
  RegionMemUsage(AppMemBeg(), AppMemEnd(), &app_res, &app_dirty);
#endif

  StackDepotStats *stacks = StackDepotGetStats();
  // The format string is assembled from adjacent literals; the #if arms must
  // mirror the argument list below.
  internal_snprintf(buf, buf_size,
    "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
    "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
    "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#if !SANITIZER_GO
    "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
    "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
    "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#else  // !SANITIZER_GO
    "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#endif
    "stacks: %zd unique IDs, %zd kB allocated\n"
    "threads: %zd total, %zd live\n"
    "------------------------------\n",
    ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
    MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
    TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
#if !SANITIZER_GO
    LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
    HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
    HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
#else  // !SANITIZER_GO
    AppMemBeg(), AppMemEnd(), app_res / 1024, app_dirty / 1024,
#endif
    stacks->n_uniq_ids, stacks->allocated / 1024,
    nthread, nlive);
}

#if !SANITIZER_GO
// Nothing extra to do on Mac after shadow memory is mapped.
void InitializeShadowMemoryPlatform() { }

// On OS X, GCD worker threads are created without a call to pthread_create. We
// need to properly register these threads with ThreadCreate and ThreadStart.
// These threads don't have a parent thread, as they are created "spuriously".
// We're using a libpthread API that notifies us about a newly created thread.
// The `thread == pthread_self()` check indicates this is actually a worker
// thread. If it's just a regular thread, this hook is called on the parent
// thread.
typedef void (*pthread_introspection_hook_t)(unsigned int event,
                                             pthread_t thread, void *addr,
                                             size_t size);
// Private libpthread SPI; returns the previously installed hook (may be null).
extern "C" pthread_introspection_hook_t pthread_introspection_hook_install(
    pthread_introspection_hook_t hook);
// Event codes from the libpthread introspection API.
static const uptr PTHREAD_INTROSPECTION_THREAD_CREATE = 1;
static const uptr PTHREAD_INTROSPECTION_THREAD_TERMINATE = 3;
static pthread_introspection_hook_t prev_pthread_introspection_hook;
static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
                                          void *addr, size_t size) {
  if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
    if (thread == pthread_self()) {
      // The current thread is a newly created GCD worker thread.
      ThreadState *thr = cur_thread();
      Processor *proc = ProcCreate();
      ProcWire(proc, thr);
      ThreadState *parent_thread_state = nullptr;  // No parent.
      int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
      CHECK_NE(tid, 0);
      ThreadStart(thr, tid, GetTid(), /*workerthread*/ true);
    }
  } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
    if (thread == pthread_self()) {
      ThreadState *thr = cur_thread();
      // Only tear down state for threads we actually registered (tctx set).
      if (thr->tctx) {
        DestroyThreadState();
      }
    }
  }

  // Chain to whatever hook was installed before ours.
  if (prev_pthread_introspection_hook != nullptr)
    prev_pthread_introspection_hook(event, thread, addr, size);
}
#endif

// Sanity-checks the platform before the runtime is set up. On arm64 the TSan
// shadow mapping is only valid for the expected VM address-space size.
void InitializePlatformEarly() {
#if defined(__aarch64__)
  uptr max_vm = GetMaxUserVirtualAddress() + 1;
  if (max_vm != Mapping::kHiAppMemEnd) {
    Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
           max_vm, Mapping::kHiAppMemEnd);
    Die();
  }
#endif
}

// pthread TLS slot read below on macOS >= Mojave; presumably holds libc's
// per-process setjmp pointer-mangling XOR key — TODO confirm against the
// libpthread/Libc sources for the targeted OS versions.
static const uptr kPthreadSetjmpXorKeySlot = 0x7;
extern "C" uptr __tsan_darwin_setjmp_xor_key = 0;

// One-time platform setup: records the main thread identity and installs the
// thread-introspection hook used to register GCD worker threads.
void InitializePlatform() {
  DisableCoreDumperIfNecessary();
#if !SANITIZER_GO
  CheckAndProtect();

  // Must run exactly once, on the main thread; cur_thread() relies on this.
  CHECK_EQ(main_thread_identity, 0);
  main_thread_identity = (uptr)pthread_self();

  prev_pthread_introspection_hook =
      pthread_introspection_hook_install(&my_pthread_introspection_hook);
#endif

  if (GetMacosVersion() >= MACOS_VERSION_MOJAVE) {
    __tsan_darwin_setjmp_xor_key =
        (uptr)pthread_getspecific(kPthreadSetjmpXorKeySlot);
  }
}

#if !SANITIZER_GO
// Imitates a write to the whole TLS range of the current thread, except for
// the one word where the ThreadState pointer itself is kept (see the
// "poor man's TLV" scheme above) — touching it would race with the runtime.
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
  // The pointer to the ThreadState object is stored in the shadow memory
  // of the tls.
  uptr tls_end = tls_addr + tls_size;
  ThreadState **thr_state_loc = cur_thread_location();
  if (thr_state_loc == nullptr) {
    // Main thread: its ThreadState is in static storage, not in TLS, so the
    // whole range can be imitated.
    MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, tls_size);
  } else {
    uptr thr_state_start = (uptr)thr_state_loc;
    uptr thr_state_end = thr_state_start + sizeof(uptr);
    // The ThreadState-pointer word must lie inside the TLS range.
    CHECK_GE(thr_state_start, tls_addr);
    CHECK_LE(thr_state_start, tls_addr + tls_size);
    CHECK_GE(thr_state_end, tls_addr);
    CHECK_LE(thr_state_end, tls_addr + tls_size);
    // Imitate writes to the two sub-ranges around the excluded word.
    MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr,
                            thr_state_start - tls_addr);
    MemoryRangeImitateWrite(thr, /*pc=*/2, thr_state_end,
                            tls_end - thr_state_end);
  }
}
#endif

#if !SANITIZER_GO
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
    void *abstime), void *c, void *m, void *abstime,
    void(*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are hardcore macros mess.
  // We can't intercept nor call them w/o including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(c, m, abstime);
  pthread_cleanup_pop(0);
  return res;
}
#endif

}  // namespace __tsan

#endif  // SANITIZER_MAC