//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Cache used by SizeClassAllocator64.
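//
// A minimal usage sketch (illustrative only; in the real allocator these
// calls are made by the combined allocator, the cache normally lives in
// thread-local storage, and `Allocator64`, `allocator` and `global_stats`
// are placeholder names):
//
//   SizeClassAllocator64LocalCache<Allocator64> cache;
//   cache.Init(&global_stats);                 // register stats
//   void *p = cache.Allocate(&allocator, class_id);
//   cache.Deallocate(&allocator, class_id, p);
//   cache.Destroy(&allocator, &global_stats);  // drain and unregister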
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

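  // Drains all cached chunks back to the allocator and unregisters the stats.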
  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

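  // Pops the most recently cached chunk of the given size class, refilling
  // the cache from the allocator when it is empty. Returns nullptr if the
  // refill fails.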
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    CompactPtrT chunk = c->chunks[--c->count];
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
  }

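  // Pushes the chunk back onto the per-class cache; when the cache is full,
  // half of it is returned to the allocator first.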
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to a check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

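  // Returns all cached chunks of every size class back to the allocator.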
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

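  // Per-class cache state: a LIFO array of compact chunk pointers.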
  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

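  // Lazily initializes max_count and class_size for every size class the
  // first time the cache is used; max_count == 0 marks an uninitialized cache.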
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
      c->class_size = size;
    }
    DCHECK_NE(c->max_count, 0UL);
  }

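  // Fetches max_count / 2 chunks for class_id from the allocator into the
  // cache; returns false if the allocator cannot provide them.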
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    const uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }

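  // Returns the `count` most recently cached chunks of class_id back to the
  // allocator.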
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    CHECK_GE(c->count, count);
    const uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id: a freshly allocated batch
  // from batch_class_id if batches for this class are stored in a separate
  // size class, or b itself (one of the class's own chunks) otherwise.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

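  // Pops the most recently cached pointer of the given size class, refilling
  // the cache from the allocator (via a TransferBatch) when it is empty.
  // Returns nullptr if the refill fails.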
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return res;
  }

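  // Pushes the pointer back onto the per-class cache; when the cache is full,
  // half of it is drained to the allocator first.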
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to a check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id);
    c->batch[c->count++] = p;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

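  // Returns all cached pointers of every size class back to the allocator.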
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

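  // Per-class cache state: a LIFO array of cached pointers plus the size
  // class used to allocate TransferBatch objects for this class (0 means a
  // batch is stored within one of the class's own chunks).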
  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

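  // Lazily initializes max_count, class_size and batch_class_id for every
  // size class the first time the cache is used; max_count == 0 marks an
  // uninitialized cache.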
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      const uptr max_cached = TransferBatch::MaxCached(size);
      c->max_count = 2 * max_cached;
      c->class_size = size;
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (size <
          TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
              batch_class_id : 0;
      }
    }
    DCHECK_NE(c->max_count, 0UL);
  }

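  // Fetches a TransferBatch for class_id from the allocator, copies its
  // contents into the cache and destroys the batch. Returns false if no
  // batch could be obtained.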
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }

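  // Packs up to max_count / 2 of the most recently cached pointers into a
  // TransferBatch and returns it to the allocator. Dies if a batch cannot
  // be allocated.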
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
                      uptr class_id) {
    const uptr count = Min(c->max_count / 2, c->count);
    const uptr first_idx_to_drain = c->count - count;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non-recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b)) {
      Report("FATAL: Internal error: %s's allocator failed to allocate a "
             "transfer batch.\n", SanitizerToolName);
      Die();
    }
    b->SetFromArray(&c->batch[first_idx_to_drain], count);
    c->count -= count;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};