//===-- msan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"

namespace __msan {

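// Per-chunk allocator metadata: the size the user originally requested.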
struct Metadata {
  uptr requested_size;
};

struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};

#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || \
    (SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING))
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

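// One global allocator instance. The fallback cache, guarded by
// fallback_mutex, serves threads that do not have an MsanThread yet.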
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

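// Core allocation path: obtains memory from the per-thread cache (or the
// global fallback cache), records the requested size in the chunk metadata,
// and either zeroes the memory or poisons it as uninitialized, attaching an
// allocation origin when origin tracking is enabled.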
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (size > max_malloc_size) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  MSAN_MALLOC_HOOK(allocated, size);
  return allocated;
}

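// Frees |p|. When poison_in_free is set, the chunk is re-poisoned (with a
// deallocation origin when origin tracking is on) so that later reads of the
// freed memory are reported.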
void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  MSAN_FREE_HOOK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

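// If the new size still fits into the block actually backing |old_p|, the
// request is served in place and any newly exposed bytes are poisoned as
// uninitialized. Otherwise a new chunk is allocated, Min(old, new) bytes are
// copied along with their shadow and origin, and the old chunk is freed.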
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                     uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}

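// Returns the user-requested size of a live chunk, or 0 if |p| is null or
// does not point to the beginning of a chunk owned by this allocator.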
static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

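// Interceptor-facing entry points. They implement the libc-style contracts
// and set errno on failure (via SetErrnoOnNull or explicitly below).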
void *msan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}

void *msan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

} // namespace __msan

using namespace __msan;

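// Public __sanitizer_* allocator introspection interface.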
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }