//===-- asan_allocator.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"

namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
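// The rz_log <-> redzone size mapping implemented above:
//   rz_log:  0   1   2    3    4    5     6     7
//   bytes:  16  32  64  128  256  512  1024  2048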

static AsanAllocator &get_allocator();

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size, we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // First 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // Second 8 bytes.
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size : 29;
  // align < 8 -> 0
  // else      -> log2(min(align, 512)) - 2
  u32 user_requested_alignment_log : 3;
  u32 alloc_context_id;
};
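// Packing: the first 8 bytes hold chunk_state(8) + alloc_tid(24) and
// free_tid(24) + from_memalign(1) + alloc_type(2) + rz_log(3) + lsan_tag(2);
// the second 8 bytes hold user_requested_size(29) +
// user_requested_alignment_log(3) + alloc_context_id(32). Each group sums to
// 64 bits, which is what the COMPILER_CHECKs below rely on.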

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               get_allocator().GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return get_allocator().GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};
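// For a non-memalign chunk the block returned by the underlying allocator
// starts exactly RZLog2Size(rz_log) bytes before Beg(), i.e. AllocBeg() is the
// start of the left redzone. For memalign'ed chunks the header may sit further
// inside the block, so AllocBeg() must ask the allocator for the block begin.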

struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of a deallocated chunk, confusing the GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

 private:
  AllocatorCache* const cache_;
  BufferedStackTrace* const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  atomic_uint8_t rss_limit_exceeded;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
  }

  bool RssLimitExceeded() {
    return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
  }

  void SetRssLimitExceeded(bool limit_exceeded) {
    atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
  }

  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
    uptr beg = ac->Beg();
    uptr end = ac->Beg() + ac->UsedSize(true);
    uptr chunk_end = chunk + allocated_size;
    if (chunk < beg && beg < end && end <= chunk_end &&
        ac->chunk_state == CHUNK_ALLOCATED) {
      // Looks like a valid AsanChunk in use, poison redzones only.
      PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
      uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
      FastPoisonShadowPartialRightRedzone(
          end_aligned_down, end - end_aligned_down,
          chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
    } else {
      // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
      // In either case, poison everything.
      PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
    }
  }

  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocations' redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }

  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
  }
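  // ComputeRZLog example: a 100-byte request falls into the "<= 512 - 64"
  // bucket above, so rz_log = 2 (a 64-byte redzone); the final clamp only
  // changes this if the redzone/max_redzone flags narrow the allowed range.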

  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }
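  // The 3-bit encoding round-trips as 8 -> 1, 16 -> 2, ..., 512 -> 7, and
  // anything below 8 (including the common "no special alignment" case) is
  // stored as 0. Alignments above 512 are capped, so they decode back as 512.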

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    // Prefer an allocated chunk over a freed chunk, and a freed chunk over
    // an available chunk.
    if (left_chunk->chunk_state != right_chunk->chunk_state) {
      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    if (RssLimitExceeded()) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool using_primary_allocator = true;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
      needed_size += rz_size;
      using_primary_allocator = false;
    }
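    // At this point needed_size = left redzone + rounded user size, plus an
    // extra alignment's worth of slack for over-aligned requests and an
    // explicit right redzone for secondary-allocator blocks. For primary
    // blocks the right redzone is simply the tail of the size-class slot.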
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize,
                                 stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_redzone = alloc_beg + rz_size;
    uptr user_beg = beg_plus_redzone;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
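    // Worked example (assuming 8-byte SHADOW_GRANULARITY and redzone flags
    // that permit a 64-byte redzone): malloc(100) yields rz_size = 64,
    // rounded_size = 104 and needed_size = 168; user_beg = alloc_beg + 64,
    // the ChunkHeader occupies [user_beg - 16, user_beg), and the slack after
    // user_beg + 100 in the size-class block serves as the right redzone.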
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->rz_log = rz_log;
    u32 alloc_tid = t ? t->tid() : 0;
    m->alloc_tid = alloc_tid;
    CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    m->free_tid = kInvalidTid;
    m->from_memalign = user_beg != beg_plus_redzone;
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    }
    if (using_primary_allocator) {
      CHECK(size);
      m->user_requested_size = size;
      CHECK(allocator.FromPrimary(allocated));
    } else {
      CHECK(!allocator.FromPrimary(allocated));
      m->user_requested_size = SizeClassMap::kMaxSize;
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
      meta[0] = size;
      meta[1] = chunk_beg;
    }
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }

  // Set the quarantine flag if the chunk is allocated; issue an ASan error
  // report on available and quarantined chunks. Return true on success,
  // false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                   BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid a race on double-free.
    if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk into quarantine on an invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    return true;
  }
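  // Exactly one caller can win the CHUNK_ALLOCATED -> CHUNK_QUARANTINE
  // transition above, so concurrent frees of the same pointer are reported
  // as a double-free by the losers instead of corrupting the quarantine.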

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    CHECK_GE(m->alloc_tid, 0);
    if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
      CHECK_EQ(m->free_tid, kInvalidTid);
    AsanThread *t = GetCurrentThread();
    m->free_tid = t ? t->tid() : 0;
    m->free_context_id = StackDepotPut(*stack);

    Flags &fl = *flags();
    if (fl.max_free_fill_size > 0) {
      // We have to skip the chunk header, it contains free_context_id.
      uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
      if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
        uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
        size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
        REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
      }
    }

    // Poison the region.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    ASAN_FREE_HOOK(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine the given chunk if we failed to set the
    // CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    QuarantineChunk(m, ptr, stack);
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = m->chunk_state;
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report the racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator, there is no need to
    // clear it as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg) return nullptr;
    if (!allocator.FromPrimary(alloc_beg)) {
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
      AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
      return m;
    }
    uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
    if (alloc_magic[0] == kAllocBegMagic)
      return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
    return reinterpret_cast<AsanChunk *>(alloc_beg);
  }
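  // GetAsanChunk handles three cases: secondary-allocator blocks store the
  // chunk address in their out-of-line metadata (meta[1]); primary blocks
  // whose ChunkHeader is not at the block start carry the kAllocBegMagic /
  // chunk-address pair written by Allocate(); otherwise the ChunkHeader sits
  // right at the block begin.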

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (m->chunk_state != CHUNK_ALLOCATED) return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    if (!m1) return AsanChunkView(m1);
    sptr offset = 0;
    if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk to the left.
      // Search a bit to the left to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() const {
  return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }

StackTrace AsanChunkView::GetAllocStack() const {
  return GetStackTraceFromId(GetAllocStackId());
}

StackTrace AsanChunkView::GetFreeStack() const {
  return GetStackTraceFromId(GetFreeStackId());
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on realloc to 0.
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  instance.ForceLock();
}

void asan_mz_force_unlock() {
  instance.ForceUnlock();
}

void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
  instance.SetRssLimitExceeded(limit_exceeded);
}

} // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}
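// LSan passes user-begin addresses as "chunk" here (see GetUserBegin above),
// so stepping back by kChunkHeaderSize lands metadata_ on the AsanChunk header.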

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// The ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}
#endif