lsan_allocator.cc revision 1.1.1.2
//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {

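// Per-chunk bookkeeping stored in the allocator's metadata region. The
// |allocated| byte must stay first: RegisterAllocation() and
// RegisterDeallocation() toggle it with a relaxed atomic store on the first
// byte of the metadata.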
struct ChunkMetadata {
  u8 allocated : 8;  // Must be first.
  ChunkTag tag : 2;
#ifdef _LP64
  uptr requested_size : 54;
#else
  uptr requested_size : 30;
#endif
  u32 stack_trace_id;
};

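// Primary allocator configuration. MIPS64 and AArch64 use the 32-bit-style
// SizeClassAllocator32 (presumably because of their smaller usable virtual
// address space); other targets use SizeClassAllocator64 with a fixed region
// at kAllocatorSpace.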
#if defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
    sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
    PrimaryAllocator;
#else
#ifdef _LP64
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
#else
static const uptr kMaxAllowedMallocSize = 8UL << 20;
static const uptr kAllocatorSpace = 0x60000000UL;
static const uptr kAllocatorSize = 0x40000000ULL; // 1G.
#endif
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

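// A single global allocator is shared by all threads; each thread owns a
// THREADLOCAL AllocatorCache, which AllocatorThreadFinish() drains back into
// the global allocator when the thread exits.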
static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

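// Records a newly allocated chunk: fills in the tag, stack trace id and
// requested size, then flips |allocated| to 1 with a relaxed atomic store.
// PointsIntoChunk() and LsanMetadata::allocated() read this flag to decide
// whether a chunk is live.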
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

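// Central allocation path. Zero-sized requests are bumped to one byte,
// oversized requests fail with nullptr, and zeroing is done here rather than
// by the allocator; secondary (mmap-backed) chunks are assumed to be zeroed
// already, so only primary chunks are memset.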
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return nullptr;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  return p;
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

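// Reallocation drops the old chunk's registration up front and re-registers
// whatever chunk the allocator returns. Note that an oversized request frees
// the old chunk and returns nullptr rather than leaving it intact.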
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return nullptr;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

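// Reports the address range occupied by this thread's allocator cache.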
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

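// Returns the size originally requested by the user, not the (possibly
// larger) usable size of the underlying chunk.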
uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

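// If |p| points into the user region of a live chunk, returns the chunk's
// start address; otherwise returns 0. The last check accepts the "new T[0]"
// pattern recognized by IsSpecialCaseOfOperatorNew0(), where the user pointer
// ends up just past the end of the chunk.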
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

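// LsanMetadata is the accessor type lsan_common uses to inspect and update a
// chunk's state; here it is a thin wrapper over the ChunkMetadata stored by
// the allocator.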
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

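// Marks the chunk containing |p| as kIgnored so the leak checker will not
// report it, returning a status that says whether |p| was a valid pointer
// into a live chunk.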
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan

using namespace __lsan;

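// C-linkage allocator introspection entry points (declared in the included
// sanitizer_allocator_interface.h). Only allocated and mapped byte totals are
// tracked; the remaining statistics are stubs.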
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}
} // extern "C"
