//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
    : SizeClassAllocator::AllocatorCache {};

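// A minimal usage sketch (illustrative only): a tool would typically keep one
// cache per thread, e.g. inside its per-thread context. "MyAllocator" and
// "global_stats" below are hypothetical names, standing in for a concrete
// SizeClassAllocator64/32 instantiation and an AllocatorGlobalStats object;
// allocator initialization is omitted.
//
//   static MyAllocator allocator;  // shared, internally synchronized
//   static THREADLOCAL SizeClassAllocatorLocalCache<MyAllocator> cache;
//
//   void ThreadInit() { cache.Init(&global_stats); }
//   void *ThreadAlloc(uptr size) {
//     const uptr class_id = MyAllocator::SizeClassMapT::ClassID(size);
//     return cache.Allocate(&allocator, class_id);  // no allocator lock on
//                                                   // the fast path
//   }
//   void ThreadDealloc(uptr class_id, void *p) {
//     cache.Deallocate(&allocator, class_id, p);    // cached locally
//   }
//   void ThreadExit() { cache.Destroy(&allocator, &global_stats); }
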
// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  // Returns a chunk of the class_id size class, refilling the per-class cache
  // from the allocator when it is empty. Returns nullptr if the refill fails.
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    CompactPtrT chunk = c->chunks[--c->count];
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
  }

  // Returns chunk p of the class_id size class to the per-class cache,
  // draining half of the cache back to the allocator when it is full.
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

  // Returns all cached chunks of every size class to the allocator.
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  // Per size class state: a bounded stack of cached chunks. max_count is
  // filled in lazily by InitCache() and is twice the refill/drain batch size.
  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  // Lazily initializes max_count and class_size for every size class the
  // first time any class of this cache is used.
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
      c->class_size = size;
    }
    DCHECK_NE(c->max_count, 0UL);
  }

  // Fetches max_count / 2 chunks for class_id from the allocator.
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    const uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }

  // Returns the last |count| cached chunks of class_id to the allocator.
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    CHECK_GE(c->count, count);
    const uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id. If the class keeps its
  // batches in a separate size class, a fresh batch is allocated from that
  // class; otherwise the caller-provided b is used, since a chunk of this
  // class is large enough to hold the batch in place.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b, freeing its memory only if the batch was
  // allocated from a separate size class.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  // Returns a chunk of the class_id size class, refilling the per-class cache
  // from a TransferBatch when it is empty. Returns nullptr if the refill fails.
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return res;
  }

  // Returns chunk p of the class_id size class to the per-class cache,
  // draining half of the cache back to the allocator when it is full.
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id);
    c->batch[c->count++] = p;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

  // Returns all cached chunks of every size class to the allocator.
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  // Lazily initializes max_count, class_size and batch_class_id for every size
  // class the first time any class of this cache is used.
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      const uptr max_cached = TransferBatch::MaxCached(size);
      c->max_count = 2 * max_cached;
      c->class_size = size;
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
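      // Illustrative example with hypothetical numbers (not taken from any
      // real SizeClassMap): when kUseSeparateSizeClassForBatch is false, a
      // small class (say 32 bytes) whose batches must describe dozens of
      // cached pointers cannot fit a TransferBatch inside one of its own
      // chunks, so it gets a non-zero batch_class_id; a multi-KB class easily
      // can, and gets batch_class_id == 0.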
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (size <
          TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
              batch_class_id : 0;
      }
    }
    DCHECK_NE(c->max_count, 0UL);
  }

  // Refills the per-class cache from a TransferBatch obtained from the
  // allocator, then destroys the (now empty) batch.
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }

  // Packs up to max_count / 2 cached chunks into a TransferBatch and returns
  // it to the allocator.
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
                      uptr class_id) {
    const uptr count = Min(c->max_count / 2, c->count);
    const uptr first_idx_to_drain = c->count - count;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non-recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b)) {
      Report("FATAL: Internal error: %s's allocator failed to allocate a "
             "transfer batch.\n", SanitizerToolName);
      Die();
    }
    b->SetFromArray(&c->batch[first_idx_to_drain], count);
    c->count -= count;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};