//===-- sanitizer_allocator.cc --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside the run-times themselves.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
# define LIBC_MALLOC __libc_malloc
# define LIBC_FREE __libc_free
# else
# include <stdlib.h>
# define LIBC_MALLOC malloc
# define LIBC_FREE free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  (void)cache;
  return LIBC_MALLOC(size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  LIBC_FREE(ptr);
}

InternalAllocator *internal_allocator() {
  return 0;
}

#else  // SANITIZER_GO

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  // Double-checked initialization: the common case is a single acquire-load;
  // the spin mutex is taken only until the allocator has been initialized.
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init();
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  if (cache == 0) {
    // No thread-local cache supplied: fall back to the shared cache, which
    // must be protected by its own mutex.
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
                                          false);
  }
  return internal_allocator()->Allocate(cache, size, 8, false);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO

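// Each block returned by InternalAlloc() is prefixed by a u64 header holding
// kBlockMagic; InternalFree() checks and clears it, so stray pointers and
// double frees of internal memory trip the CHECK_EQ below.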
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
  if (size + sizeof(u64) < size)
    return 0;  // Overflow when adding room for the header.
  void *p = RawInternalAlloc(size + sizeof(u64), cache);
  if (p == 0)
    return 0;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (addr == 0)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}

// LowLevelAllocator
static LowLevelAllocateCallback low_level_alloc_callback;

void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    // Not enough room in the current chunk: map a fresh one. The unused tail
    // of the previous chunk is abandoned; this allocator never unmaps.
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __FUNCTION__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

// Returns true if n * size would overflow uptr, i.e. if a calloc(n, size)
// style request cannot possibly be satisfied.
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
  if (!size) return false;
  uptr max = (uptr)-1L;
  return (max / size) < n;
}

void *AllocatorReturnNull() {
  if (common_flags()->allocator_may_return_null)
    return 0;
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  return 0;
}

}  // namespace __sanitizer
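
// Usage sketch (illustrative only, not part of this file): a calloc-style
// wrapper would pair CallocShouldReturnNullDueToOverflow() with
// InternalAlloc(). The name InternalCalloc and the use of the sanitizer_libc
// internal_memset() helper are assumptions, not code from this revision:
//
//   void *InternalCalloc(uptr count, uptr size,
//                        InternalAllocatorCache *cache) {
//     if (CallocShouldReturnNullDueToOverflow(size, count))
//       return AllocatorReturnNull();
//     void *p = InternalAlloc(count * size, cache);
//     if (p)
//       internal_memset(p, 0, count * size);
//     return p;
//   }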