//===-- asan_allocator.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"

namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static AsanAllocator &get_allocator();

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size, we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // First 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // Second 8 bytes.
  // This field is used for small sizes. For large sizes, it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size : 29;
  // align < 8 -> 0
  // else      -> log2(min(align, 512)) - 2
  u32 user_requested_alignment_log : 3;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
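// The state is kept in the first byte of ChunkHeader (chunk_state is declared
// first) so that state transitions can be performed atomically on that byte;
// see Recycle() and AtomicallySetQuarantineFlagIfAllocated() below.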
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               get_allocator().GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return get_allocator().GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};

struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of the deallocated chunk, confusing the GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

 private:
  AllocatorCache* const cache_;
  BufferedStackTrace* const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
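  // I.e. 3 GB on 32-bit targets and 1 TB on 64-bit targets.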

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  atomic_uint8_t rss_limit_exceeded;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
  }

  bool RssLimitExceeded() {
    return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
  }

  void SetRssLimitExceeded(bool limit_exceeded) {
    atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
  }

  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
    uptr beg = ac->Beg();
    uptr end = ac->Beg() + ac->UsedSize(true);
    uptr chunk_end = chunk + allocated_size;
    if (chunk < beg && beg < end && end <= chunk_end &&
        ac->chunk_state == CHUNK_ALLOCATED) {
      // Looks like a valid AsanChunk in use, poison redzones only.
      PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
      uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
      FastPoisonShadowPartialRightRedzone(
          end_aligned_down, end - end_aligned_down,
          chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
    } else {
      // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
      // In either case, poison everything.
      PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
    }
  }

  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocations' redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }

  // -------------------- Helper methods. -------------------------
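  // ComputeRZLog picks a redzone size proportional to the allocation size.
  // For example, a 100-byte request falls into the "<= 512 - 64" bucket and
  // gets rz_log == 2 (64-byte redzones), which is then clamped to the range
  // configured via the redzone/max_redzone flags.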
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
  }

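  // The requested alignment is stored in 3 bits: alignments below 8 map to 0,
  // larger ones (capped at 512) map to log2(alignment) - 2. E.g. an alignment
  // of 32 is encoded as 3 and decoded back as 1 << (3 + 2) == 32.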
  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    // Prefer an allocated chunk over a freed chunk, and a freed chunk over
    // an available chunk.
    if (left_chunk->chunk_state != right_chunk->chunk_state) {
      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    if (RssLimitExceeded()) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
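    // If a non-default alignment is requested, reserve extra bytes so that
    // user_beg (computed below) can be rounded up to 'alignment' and still
    // leave 'size' bytes before alloc_end.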
    if (alignment > min_alignment)
      needed_size += alignment;
    bool using_primary_allocator = true;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
      needed_size += rz_size;
      using_primary_allocator = false;
    }
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize,
                                 stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_redzone = alloc_beg + rz_size;
    uptr user_beg = beg_plus_redzone;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->rz_log = rz_log;
    u32 alloc_tid = t ? t->tid() : 0;
    m->alloc_tid = alloc_tid;
    CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    m->free_tid = kInvalidTid;
    m->from_memalign = user_beg != beg_plus_redzone;
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    }
    if (using_primary_allocator) {
      CHECK(size);
      m->user_requested_size = size;
      CHECK(allocator.FromPrimary(allocated));
    } else {
      CHECK(!allocator.FromPrimary(allocated));
      m->user_requested_size = SizeClassMap::kMaxSize;
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
      meta[0] = size;
      meta[1] = chunk_beg;
    }
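    // For secondary allocations the real size lives in meta[0] (read back by
    // UsedSize()) and meta[1] lets GetAsanChunk() locate the chunk header.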
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }

  // Set the quarantine flag if the chunk is allocated; issue an ASan error
  // report on available and quarantined chunks. Return true on success,
  // false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                   BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    return true;
  }

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    CHECK_GE(m->alloc_tid, 0);
    if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
      CHECK_EQ(m->free_tid, kInvalidTid);
    AsanThread *t = GetCurrentThread();
    m->free_tid = t ? t->tid() : 0;
    m->free_context_id = StackDepotPut(*stack);

    Flags &fl = *flags();
    if (fl.max_free_fill_size > 0) {
      // We have to skip the chunk header; it contains free_context_id.
      uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
      if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
        uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
        size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
        REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
      }
    }

    // Poison the region.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    ASAN_FREE_HOOK(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    QuarantineChunk(m, ptr, stack);
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = m->chunk_state;
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator, there is no need to
    // clear it, as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
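  // Three cases are handled: secondary allocations keep the header address in
  // meta[1]; primary allocations whose left redzone is larger than the header
  // start with kAllocBegMagic followed by the header address; otherwise the
  // header starts right at alloc_beg.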
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg) return nullptr;
    if (!allocator.FromPrimary(alloc_beg)) {
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
      AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
      return m;
    }
    uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
    if (alloc_magic[0] == kAllocBegMagic)
      return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
    return reinterpret_cast<AsanChunk *>(alloc_beg);
  }

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (m->chunk_state != CHUNK_ALLOCATED) return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    if (!m1) return AsanChunkView(m1);
    sptr offset = 0;
    if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk to the left.
      // Search a bit to the left to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() const {
  return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }

StackTrace AsanChunkView::GetAllocStack() const {
  return GetStackTraceFromId(GetAllocStackId());
}

StackTrace AsanChunkView::GetFreeStack() const {
  return GetStackTraceFromId(GetFreeStackId());
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on realloc to 0.
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  instance.ForceLock();
}

void asan_mz_force_unlock() {
  instance.ForceUnlock();
}

void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
  instance.SetRssLimitExceeded(limit_exceeded);
}

} // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

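// Note: 'chunk' here is the user-visible begin (as returned by GetUserBegin),
// so the AsanChunk header sits kChunkHeaderSize bytes before it.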
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementations of the malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}
#endif
