//===-- msan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"

namespace __msan {

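// Per-chunk metadata stored by the allocator next to each allocation; it
// records the size the caller originally requested (which may be smaller than
// the size the chunk was rounded up to).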
struct Metadata {
  uptr requested_size;
};

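// Hooks invoked by the underlying allocator when it maps or unmaps memory.
// On unmap, the shadow (and, with origin tracking enabled, origin) pages
// mirroring the freed range are released back to the OS.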
struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};

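// Per-architecture configuration of the primary allocator. x86-64, PowerPC64
// and s390x use SizeClassAllocator64 with a fixed address range; mips64 and
// AArch64 use SizeClassAllocator32 instead.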
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || \
    (SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING))
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__s390x__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x440000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

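// Effective per-allocation size limit; set in MsanAllocatorInit() from the
// max_allocation_size_mb flag and capped at kMaxAllowedMallocSize.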
static uptr max_malloc_size;

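// Initializes the global allocator state from the common sanitizer flags.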
void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

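// Returns the allocator cache embedded in the given thread's malloc storage.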
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

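// Flushes this thread's allocator cache back into the global allocator.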
void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

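// Common allocation path. Enforces the size limit, allocates from the
// per-thread cache (or the mutex-protected fallback cache when no MsanThread
// is registered), records the requested size in the chunk metadata, and then
// either zeroes the memory (for calloc) or poisons it, attaching a heap
// allocation origin when origin tracking is enabled.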
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (size > max_malloc_size) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  MSAN_MALLOC_HOOK(allocated, size);
  return allocated;
}

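// Common deallocation path: optionally re-poisons the freed memory (tagging it
// with a deallocation origin) and returns the chunk via the per-thread or
// fallback cache.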
void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  MSAN_FREE_HOOK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

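// If the existing chunk is already large enough, the reallocation is done in
// place: only the recorded size and the poisoning of the newly exposed tail
// are updated. Otherwise a new chunk is allocated, the old contents are copied
// over with CopyMemory(), and the old chunk is freed.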
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                     uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

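// calloc() backend: rejects nmemb * size overflow, then allocates
// zero-initialized (and unpoisoned) memory.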
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}

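// Returns the requested size of a live chunk, or 0 if p is null or does not
// point to the beginning of a chunk owned by this allocator.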
static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

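// Entry points used by the MemorySanitizer interceptors. They follow the
// usual libc semantics and set errno on failure.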
void *msan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}

void *msan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

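// posix_memalign() reports failure through its return value rather than errno,
// hence the errno_EINVAL / errno_ENOMEM codes returned below.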
int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

} // namespace __msan

using namespace __msan;

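// Implementation of the public __sanitizer_* allocator introspection
// interface.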
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }