//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// Default allocator names.
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";

// ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
#  if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
#  endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
#  include <stdlib.h>
#  define __libc_malloc malloc
#  if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
#  endif
#  define __libc_realloc realloc
#  define __libc_free free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // __aligned_malloc, but the allocated blocks can't be passed to free,
  // they need to be passed to __aligned_free. The InternalAlloc interface
  // does not account for such a requirement. Alignment does not seem to be
  // used anywhere in the runtime, so just call __libc_malloc for now.
  DCHECK_EQ(alignment, 0);
  return __libc_malloc(size);
#endif
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}

InternalAllocator *internal_allocator() {
  return 0;
}

#else  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;
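
// Returns the singleton InternalAllocator, lazily constructed in the
// placeholder buffer above. The fast path is a single acquire-load of the
// "initialized" flag; initialization itself is serialized by
// internal_alloc_init_mu and published with a release-store (classic
// double-checked locking).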
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment);
  }
  return internal_allocator()->Allocate(cache, size, alignment);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
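
// Each block returned by InternalAlloc is preceded by a u64 header holding
// this magic value. InternalRealloc and InternalFree check the header to
// catch corruption and pointers that did not come from InternalAlloc.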
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
  SetAllocatorOutOfMemory();
  Report("FATAL: %s: internal allocator is out of memory trying to allocate "
         "0x%zx bytes\n", SanitizerToolName, requested_size);
  Die();
}

void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  if (size + sizeof(u64) < size)
    return nullptr;
  void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size + sizeof(u64));
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  if (!addr)
    return InternalAlloc(size, cache);
  if (size + sizeof(u64) < size)
    return nullptr;
  addr = (char*)addr - sizeof(u64);
  size = size + sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  void *p = RawInternalRealloc(addr, size, cache);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return (char*)p + sizeof(u64);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
           "cannot be represented in type size_t\n", SanitizerToolName, count,
           size);
    Die();
  }
  void *p = InternalAlloc(count * size, cache);
  if (LIKELY(p))
    internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (!addr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}

// LowLevelAllocator: a simple bump allocator. It carves allocations out of
// regions obtained with MmapOrDie and never frees; when a request does not
// fit, the unused tail of the current region is abandoned.
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;

void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, low_level_alloc_min_alignment);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ = (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateMinAlignment(uptr alignment) {
  CHECK(IsPowerOfTwo(alignment));
  low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

// Support for handling allocator OOM and other errors.

static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

bool IsAllocatorOutOfMemory() {
  return atomic_load_relaxed(&allocator_out_of_memory);
}

void SetAllocatorOutOfMemory() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
}

bool AllocatorMayReturnNull() {
  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
  atomic_store(&allocator_may_return_null, may_return_null,
               memory_order_relaxed);
}

void PrintHintAllocatorCannotReturnNull() {
  Report("HINT: if you don't care about these errors you may set "
         "allocator_may_return_null=1\n");
}

}  // namespace __sanitizer