//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {

static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize  =  0x40000000000ULL;  // 4T.

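// Chunk metadata layout: `allocated` must occupy the first byte by itself,
// because RegisterAllocation() and RegisterDeallocation() below publish and
// retract a chunk by storing to that byte through an atomic_uint8_t pointer.
// With the usual bitfield packing, tag and requested_size fill the remaining
// 56 bits of the first 64-bit word, followed by the 32-bit stack trace id.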
struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

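// Allocator composition: small requests are served from size-class free lists
// by the primary allocator, which owns the fixed address range
// [kAllocatorSpace, kAllocatorSpace + kAllocatorSize) and keeps
// sizeof(ChunkMetadata) bytes of per-chunk metadata; requests the primary
// cannot handle fall through to the mmap-based secondary allocator. Each
// thread has its own AllocatorCache so the fast path is contention-free.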
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.Init();
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

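// A chunk only becomes visible to the leak scanner once its metadata is fully
// written: the store to the `allocated` byte happens after tag,
// stack_trace_id and requested_size are filled in. Relaxed ordering is
// sufficient because scanning runs with the world stopped.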
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

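// Usage sketch (hypothetical caller; the real entry points are the malloc
// family interceptors, and the macro/argument values below are illustrative):
//   GET_STACK_TRACE_MALLOC;  // assumed to produce a StackTrace named `stack`
//   return Allocate(stack, size, /*alignment=*/8, /*cleared=*/false);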
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow). Secondary
  // chunks come straight from mmap and are already zero-filled, so only
  // primary chunks need an explicit memset.
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  return p;
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

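// The old chunk is deregistered before allocator.Reallocate() runs because
// the call may free or move it; on success, the resulting chunk is registered
// afresh with the new size and stack trace.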
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

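// Returns the originally requested size, not the (possibly larger) size class
// the allocator actually carved out, so the usable size reported to the user
// matches the request exactly.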
uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

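// Resolves a (possibly interior) pointer to the start of the live chunk it
// points into, or 0 if there is none. Illustrative example: for a live chunk
// at 0x1000 with requested_size == 16, every address in [0x1000, 0x1010)
// resolves to 0x1000, while 0x1010 itself does not, unless the operator
// new(0) special case applies.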
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

using namespace __lsan;

extern "C" {
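// AllocatorStatAllocated counts bytes in live user allocations, while
// AllocatorStatMapped is the total address space the allocator has mapped
// from the OS, so __sanitizer_get_heap_size() is in effect an upper bound on
// __sanitizer_get_current_allocated_bytes().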
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}
}  // extern "C"