//===-- sanitizer_allocator.cc --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// Default allocator names.
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";

// ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
#  if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
#  endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
#  include <stdlib.h>
#  define __libc_malloc malloc
#  if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
#  endif
#  define __libc_realloc realloc
#  define __libc_free free
# endif

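// In this mode the Raw* helpers below forward directly to libc; the
// InternalAllocatorCache argument is accepted only for interface
// compatibility and is ignored.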
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // _aligned_malloc, but blocks allocated that way can't be passed to free();
  // they must be passed to _aligned_free, and the InternalAlloc interface does
  // not account for such a requirement. Alignment does not seem to be used
  // anywhere in the runtime, so just call __libc_malloc for now.
  DCHECK_EQ(alignment, 0);
  return __libc_malloc(size);
#endif
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}

// There is no InternalAllocator instance in libc-backed mode.
InternalAllocator *internal_allocator() {
  return nullptr;
}

#else  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

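// Returns the lazily initialized internal allocator. Initialization uses
// double-checked locking: a fast acquire load on the common path and a
// spin-mutex-guarded Init() on first use.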
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

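// In the default mode the Raw* helpers go through the InternalAllocator.
// Callers may pass a per-thread cache; when cache is null, a global cache
// guarded by internal_allocator_cache_mu is used instead.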
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment);
  }
  return internal_allocator()->Allocate(cache, size, alignment);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

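// Every block returned by InternalAlloc is prefixed with an 8-byte header
// holding kBlockMagic; InternalRealloc and InternalFree check the magic to
// catch pointers that did not come from InternalAlloc.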
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
  SetAllocatorOutOfMemory();
  Report("FATAL: %s: internal allocator is out of memory trying to allocate "
         "0x%zx bytes\n", SanitizerToolName, requested_size);
  Die();
}

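// Allocates size bytes (plus the magic header) from the internal allocator.
// Returns nullptr only when the requested size overflows; a genuine
// out-of-memory condition is fatal. Typical usage:
//   void *p = InternalAlloc(n);
//   ...
//   InternalFree(p);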
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  if (size + sizeof(u64) < size)
    return nullptr;
  void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size + sizeof(u64));
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  if (!addr)
    return InternalAlloc(size, cache);
  if (size + sizeof(u64) < size)
    return nullptr;
  addr = (char*)addr - sizeof(u64);
  size = size + sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  void *p = RawInternalRealloc(addr, size, cache);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return (char*)p + sizeof(u64);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
           "cannot be represented in type size_t\n", SanitizerToolName, count,
           size);
    Die();
  }
  void *p = InternalAlloc(count * size, cache);
  if (LIKELY(p))
    internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (!addr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}

// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;

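// Bump-pointer allocator: allocations are carved out of mmap'ed chunks of at
// least one page. Memory is never unmapped, and any unused tail of the
// current chunk is abandoned when a new chunk has to be mapped.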
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, low_level_alloc_min_alignment);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ = (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateMinAlignment(uptr alignment) {
  CHECK(IsPowerOfTwo(alignment));
  low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

// Support for allocator OOM and other error handling.

static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

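// The out-of-memory flag is process-wide and sticky: SetAllocatorOutOfMemory()
// sets it and it is never cleared.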
bool IsAllocatorOutOfMemory() {
  return atomic_load_relaxed(&allocator_out_of_memory);
}

void SetAllocatorOutOfMemory() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
}

bool AllocatorMayReturnNull() {
  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
  atomic_store(&allocator_may_return_null, may_return_null,
               memory_order_relaxed);
}

void PrintHintAllocatorCannotReturnNull() {
  Report("HINT: if you don't care about these errors you may set "
         "allocator_may_return_null=1\n");
}

} // namespace __sanitizer