1//=-- lsan_common_mac.cpp -------------------------------------------------===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8// 9// This file is a part of LeakSanitizer. 10// Implementation of common leak checking functionality. Darwin-specific code. 11// 12//===----------------------------------------------------------------------===// 13 14#include "sanitizer_common/sanitizer_platform.h" 15#include "sanitizer_common/sanitizer_libc.h" 16#include "lsan_common.h" 17 18#if CAN_SANITIZE_LEAKS && SANITIZER_APPLE 19 20# include <mach/mach.h> 21# include <mach/vm_statistics.h> 22# include <pthread.h> 23 24# include "lsan_allocator.h" 25# include "sanitizer_common/sanitizer_allocator_internal.h" 26namespace __lsan { 27 28enum class SeenRegion { 29 None = 0, 30 AllocOnce = 1 << 0, 31 LibDispatch = 1 << 1, 32 Foundation = 1 << 2, 33 All = AllocOnce | LibDispatch | Foundation 34}; 35 36inline SeenRegion operator|(SeenRegion left, SeenRegion right) { 37 return static_cast<SeenRegion>(static_cast<int>(left) | 38 static_cast<int>(right)); 39} 40 41inline SeenRegion &operator|=(SeenRegion &left, const SeenRegion &right) { 42 left = left | right; 43 return left; 44} 45 46struct RegionScanState { 47 SeenRegion seen_regions = SeenRegion::None; 48 bool in_libdispatch = false; 49}; 50 51typedef struct { 52 int disable_counter; 53 u32 current_thread_id; 54 AllocatorCache cache; 55} thread_local_data_t; 56 57static pthread_key_t key; 58static pthread_once_t key_once = PTHREAD_ONCE_INIT; 59 60// The main thread destructor requires the current thread id, 61// so we can't destroy it until it's been used and reset to invalid tid 62void restore_tid_data(void *ptr) { 63 thread_local_data_t *data = (thread_local_data_t *)ptr; 64 if 
(data->current_thread_id != kInvalidTid) 65 pthread_setspecific(key, data); 66} 67 68static void make_tls_key() { 69 CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0); 70} 71 72static thread_local_data_t *get_tls_val(bool alloc) { 73 pthread_once(&key_once, make_tls_key); 74 75 thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key); 76 if (ptr == NULL && alloc) { 77 ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr)); 78 ptr->disable_counter = 0; 79 ptr->current_thread_id = kInvalidTid; 80 ptr->cache = AllocatorCache(); 81 pthread_setspecific(key, ptr); 82 } 83 84 return ptr; 85} 86 87bool DisabledInThisThread() { 88 thread_local_data_t *data = get_tls_val(false); 89 return data ? data->disable_counter > 0 : false; 90} 91 92void DisableInThisThread() { ++get_tls_val(true)->disable_counter; } 93 94void EnableInThisThread() { 95 int *disable_counter = &get_tls_val(true)->disable_counter; 96 if (*disable_counter == 0) { 97 DisableCounterUnderflow(); 98 } 99 --*disable_counter; 100} 101 102u32 GetCurrentThread() { 103 thread_local_data_t *data = get_tls_val(false); 104 return data ? data->current_thread_id : kInvalidTid; 105} 106 107void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; } 108 109AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; } 110 111LoadedModule *GetLinker() { return nullptr; } 112 113// Required on Linux for initialization of TLS behavior, but should not be 114// required on Darwin. 115void InitializePlatformSpecificModules() {} 116 117// Sections which can't contain contain global pointers. This list errs on the 118// side of caution to avoid false positives, at the expense of performance. 
119// 120// Other potentially safe sections include: 121// __all_image_info, __crash_info, __const, __got, __interpose, __objc_msg_break 122// 123// Sections which definitely cannot be included here are: 124// __objc_data, __objc_const, __data, __bss, __common, __thread_data, 125// __thread_bss, __thread_vars, __objc_opt_rw, __objc_opt_ptrs 126static const char *kSkippedSecNames[] = { 127 "__cfstring", "__la_symbol_ptr", "__mod_init_func", 128 "__mod_term_func", "__nl_symbol_ptr", "__objc_classlist", 129 "__objc_classrefs", "__objc_imageinfo", "__objc_nlclslist", 130 "__objc_protolist", "__objc_selrefs", "__objc_superrefs"}; 131 132// Scans global variables for heap pointers. 133void ProcessGlobalRegions(Frontier *frontier) { 134 for (auto name : kSkippedSecNames) 135 CHECK(internal_strnlen(name, kMaxSegName + 1) <= kMaxSegName); 136 137 MemoryMappingLayout memory_mapping(false); 138 InternalMmapVector<LoadedModule> modules; 139 modules.reserve(128); 140 memory_mapping.DumpListOfModules(&modules); 141 for (uptr i = 0; i < modules.size(); ++i) { 142 // Even when global scanning is disabled, we still need to scan 143 // system libraries for stashed pointers 144 if (!flags()->use_globals && modules[i].instrumented()) continue; 145 146 for (const __sanitizer::LoadedModule::AddressRange &range : 147 modules[i].ranges()) { 148 // Sections storing global variables are writable and non-executable 149 if (range.executable || !range.writable) continue; 150 151 for (auto name : kSkippedSecNames) { 152 if (!internal_strcmp(range.name, name)) continue; 153 } 154 155 ScanGlobalRange(range.beg, range.end, frontier); 156 } 157 } 158} 159 160void ProcessPlatformSpecificAllocations(Frontier *frontier) { 161 vm_address_t address = 0; 162 kern_return_t err = KERN_SUCCESS; 163 164 InternalMmapVectorNoCtor<RootRegion> const *root_regions = GetRootRegions(); 165 166 RegionScanState scan_state; 167 while (err == KERN_SUCCESS) { 168 vm_size_t size = 0; 169 unsigned depth = 1; 170 struct 
    vm_region_submap_info_64 info;
    mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
    // Advances `address` to the next region at or above the current one and
    // fills in its size and submap info; loop ends on the first non-success.
    err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
                               (vm_region_info_t)&info, &count);

    uptr end_address = address + size;
    if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) {
      // libxpc stashes some pointers in the Kernel Alloc Once page,
      // make sure not to report those as leaks.
      scan_state.seen_regions |= SeenRegion::AllocOnce;
      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
                           kReachable);
    } else if (info.user_tag == VM_MEMORY_FOUNDATION) {
      // Objective-C block trampolines use the Foundation region.
      scan_state.seen_regions |= SeenRegion::Foundation;
      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
                           kReachable);
    } else if (info.user_tag == VM_MEMORY_LIBDISPATCH) {
      // Dispatch continuations use the libdispatch region. Empirically, there
      // can be more than one region with this tag, so we'll optimistically
      // assume that they're contiguous. Otherwise, we would need to scan every
      // region to ensure we find them all.
      scan_state.in_libdispatch = true;
      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
                           kReachable);
    } else if (scan_state.in_libdispatch) {
      // First region after a run of libdispatch regions: the run is over, so
      // mark LibDispatch as fully seen.
      scan_state.seen_regions |= SeenRegion::LibDispatch;
      scan_state.in_libdispatch = false;
    }

    // Recursing over the full memory map is very slow, break out
    // early if we don't need the full iteration.
    if (scan_state.seen_regions == SeenRegion::All &&
        !(flags()->use_root_regions && root_regions->size() > 0)) {
      break;
    }

    // This additional root region scan is required on Darwin in order to
    // detect root regions contained within mmap'd memory regions, because
    // the Darwin implementation of sanitizer_procmaps traverses images
    // as loaded by dyld, and not the complete set of all memory regions.
    //
    // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
    // behavior as sanitizer_procmaps_linux and traverses all memory regions
    if (flags()->use_root_regions) {
      for (uptr i = 0; i < root_regions->size(); i++) {
        ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
                       info.protection & kProtectionRead);
      }
    }

    address = end_address;
  }
}

// On darwin, we can intercept _exit gracefully, and return a failing exit code
// if required at that point. Calling Die() here is undefined behavior and
// causes rare race conditions.
void HandleLeaks() {}

// Acquires the allocator/thread-registry locks, then suspends all other
// threads and runs `callback` on the stopped world.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam *argument) {
  ScopedStopTheWorldLock lock;
  StopTheWorld(callback, argument);
}

}  // namespace __lsan

#endif  // CAN_SANITIZE_LEAKS && SANITIZER_APPLE