//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

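// Tags used when the current thread is not available and therefore no
// per-thread random tag can be generated.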
static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

enum RightAlignMode {
  kRightAlignNever,
  kRightAlignSometimes,
  kRightAlignAlways
};

// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];

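// A chunk is considered live if its metadata records a non-zero allocation
// stack id and a non-zero requested size.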
bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->alloc_context_id &&
         metadata_->get_requested_size();
}

// Aligns the 'addr' right to the granule boundary.
static uptr AlignRight(uptr addr, uptr requested_size) {
  uptr tail_size = requested_size % kShadowAlignment;
  if (!tail_size) return addr;
  return addr + kShadowAlignment - tail_size;
}

uptr HwasanChunkView::Beg() const {
  if (metadata_ && metadata_->right_aligned)
    return AlignRight(block_, metadata_->get_requested_size());
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->get_requested_size();
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->alloc_context_id;
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

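// Used only in aliasing mode: the alias region starts at a fixed offset from
// the dynamic shadow base, and the CHECKs below verify that it lies entirely
// within the same taggable region as the shadow base.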
uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;

  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}

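// Initializes the allocator: applies allocator_may_return_null and the
// allocator tagging flag, initializes the underlying allocator, and seeds
// tail_magic with random tags.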
void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
                 GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}

void HwasanAllocatorLock() { allocator.ForceLock(); }

void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }

void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}

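// Rounds the requested size up to a whole number of tagging granules
// (kShadowAlignment bytes); a zero-sized request still occupies one granule.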
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

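// Allocates a chunk of at least orig_size bytes, records the allocation stack
// in the metadata, optionally zeroes or pattern-fills the memory, writes the
// tail magic into the last (partial) granule, and tags both the memory and the
// returned pointer when tagging is enabled.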
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->set_requested_size(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
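  // If the requested size is not a multiple of the granule, fill the tail
  // (the bytes between orig_size and the rounded-up size) with a known random
  // pattern so that HwasanDeallocate can detect writes past the end.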
  if (size != orig_size) {
    u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
    uptr tail_length = size - orig_size;
    internal_memcpy(tail, tail_magic, tail_length - 1);
    // Short granule is excluded from magic tail, so we explicitly untag.
    tail[tail_length - 1] = 0;
  }

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true malloc needs to
  // retag to 0.
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      (flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
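      // Short granule: if the allocation does not end on a granule boundary,
      // the shadow of the last granule stores the number of valid bytes and
      // the real tag is kept in the granule's last byte.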
      if (full_granule_size != tag_size) {
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}

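// Returns true if the pointer's tag matches the tag stored in shadow for the
// address it points to (taking short granules into account). Pointers outside
// the taggable region always match.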
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

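// Reports an invalid free and returns true if the pointer is not a valid
// application address or its tag does not match the memory tag.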
210
211static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
212                             void *tagged_ptr) {
213  // This function can return true if halt_on_error is false.
214  if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
215      !PointerAndMemoryTagsMatch(tagged_ptr)) {
216    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
217    return true;
218  }
219  return false;
220}
221
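// Frees a chunk: validates the pointer and its tag, checks the tail magic for
// overwrites, optionally pattern-fills the memory and retags it so that later
// use-after-free is detected, and records the deallocation in the thread's
// heap allocations ring buffer.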
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  HWASAN_FREE_HOOK(tagged_ptr);

  bool in_taggable_region =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));
  void *untagged_ptr = in_taggable_region ? UntagPtr(tagged_ptr) : tagged_ptr;

  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
    return;

  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  if (!meta) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }
  uptr orig_size = meta->get_requested_size();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->alloc_context_id;

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
        reinterpret_cast<uptr>(tail_beg) + tail_size));
    if (tail_size &&
        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
         (in_taggable_region && pointer_tag != short_granule_memtag)))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  meta->set_requested_size(0);
  meta->alloc_context_id = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UaF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

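// realloc is implemented as allocate + copy + free: a new chunk (with a fresh
// tag, when tagging is enabled) is allocated, min(new_size, old requested
// size) bytes are copied over, and the old chunk is freed.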
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  void *untagged_ptr_old =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr_old))
          ? UntagPtr(tagged_ptr_old)
          : tagged_ptr_old;
  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
    return nullptr;
  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(
        UntagPtr(tagged_ptr_new), untagged_ptr_old,
        Min(new_size, static_cast<uptr>(meta->get_requested_size())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

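// Returns a HwasanChunkView for the chunk containing 'address', or an empty
// view if the address is not owned by the allocator.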
HwasanChunkView FindHeapChunkByAddress(uptr address) {
  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
    return HwasanChunkView();
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

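// Returns the requested size of the chunk 'tagged_ptr' points to, or 0 if the
// pointer does not point at the beginning of a chunk.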
static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (b->right_aligned) {
    if (beg != reinterpret_cast<void *>(RoundDownTo(
                   reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
      return 0;
  } else {
    if (beg != untagged_ptr) return 0;
  }
  return b->get_requested_size();
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

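// Free and unmapped byte statistics are not tracked; the next two entry points
// return placeholder values.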
uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
