//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine caches a specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue exceeds a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Callback> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];

  void init(void *ptr, uptr size) {
    count = 1;
    batch[0] = ptr;
    this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr quarantined_size() const {
    return size - sizeof(QuarantineBatch);
  }

  void push_back(void *ptr, uptr size) {
    CHECK_LT(count, kSize);
    batch[count++] = ptr;
    this->size += size;
  }

  bool can_merge(const QuarantineBatch* const from) const {
    return count + from->count <= kSize;
  }

  void merge(QuarantineBatch* const from) {
    CHECK_LE(count + from->count, kSize);
    CHECK_GE(size, sizeof(QuarantineBatch));

    for (uptr i = 0; i < from->count; ++i)
      batch[count + i] = from->batch[i];
    count += from->count;
    size += from->quarantined_size();

    from->count = 0;
    from->size = sizeof(QuarantineBatch);
  }
};
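
// Worked example of the accounting above (illustrative values): after
// init(p, 40), the batch's size is 40 + sizeof(QuarantineBatch) and
// quarantined_size() == 40; a subsequent push_back(q, 24) brings
// quarantined_size() to 64. Merging a donor batch with
// quarantined_size() == 16 into this one yields quarantined_size() == 80,
// while the donor is left empty with size == sizeof(QuarantineBatch).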

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *Callback::Allocate(uptr size);
// void Callback::Deallocate(void *ptr);
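//
// A minimal callback might look as follows (an illustrative sketch only;
// Chunk, ReturnToAllocator, InternalAlloc, and InternalFree stand in for
// whatever the embedding tool actually uses):
//
//   struct ExampleCallback {
//     void Recycle(Chunk *ptr) { ReturnToAllocator(ptr); }
//     void *Allocate(uptr size) { return InternalAlloc(size); }  // batch storage
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//   };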
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // Thread local quarantine size can be zero only when global quarantine size
    // is zero (it allows us to perform just one atomic read per Put() call).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);

    atomic_store_relaxed(&max_size_, size);
    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.
    atomic_store_relaxed(&max_cache_size_, cache_size);

    cache_mutex_.Init();
    recycle_mutex_.Init();
  }
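
  // For example (illustrative values only):
  //   quarantine.Init(256 << 20 /* 256Mb global */, 1 << 20 /* 1Mb per thread */);
  // sets min_size_ to 90% of the global limit; a recycle pass drains the
  // global queue down to that watermark, leaving headroom for newly
  // quarantined chunks.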

  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
  uptr GetCacheSize() const {
    return atomic_load_relaxed(&max_cache_size_);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr cache_size = GetCacheSize();
    if (cache_size) {
      c->Enqueue(cb, ptr, size);
    } else {
      // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
      cb.Recycle(ptr);
    }
    // Check the cache size anyway, to accommodate a runtime change of
    // cache_size.
    if (c->Size() > cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(atomic_load_relaxed(&min_size_), cb);
  }

  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    recycle_mutex_.Lock();
    Recycle(0, cb);
  }

  void PrintStats() const {
    // Assumes that the world is stopped, just like the allocator's
    // PrintStats() does.
    Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
           GetSize() >> 20, GetCacheSize() >> 10);
    cache_.PrintStats();
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];
  StaticSpinMutex cache_mutex_;
  StaticSpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

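  // Called with recycle_mutex_ held (Drain() and DrainAndRecycle() acquire
  // it first); releases it once the batches to recycle have been extracted,
  // before the chunks are handed back to the callback.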
  void NOINLINE Recycle(uptr min_size, Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      // Go over the batches and merge partially filled ones to save memory;
      // otherwise the batch headers themselves (whose memory is counted
      // against the quarantine limit) can outweigh the user's quarantined
      // chunks, which would defeat the purpose of the quarantine.
      uptr cache_size = cache_.Size();
      uptr overhead_size = cache_.OverheadSize();
      CHECK_GE(cache_size, overhead_size);
      // Do the merge only when the overhead exceeds this predefined limit
      // (which might require some tuning). This avoids a merge attempt when
      // the batch list is unlikely to contain batches suitable for merging.
      const uptr kOverheadThresholdPercents = 100;
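      // With the 100% threshold above, the condition below simplifies to
      // overhead_size > cache_size / 2: merging kicks in once batch headers
      // account for more than half of all quarantined bytes; equivalently,
      // once the overhead exceeds the user chunks' bytes themselves.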
      if (cache_size > overhead_size &&
          overhead_size * (100 + kOverheadThresholdPercents) >
              cache_size * kOverheadThresholdPercents) {
        cache_.MergeBatches(&tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined chunks.
      while (cache_.Size() > min_size) {
        tmp.EnqueueBatch(cache_.DequeueBatch());
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
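// Example of embedding the quarantine into an allocator (an illustrative
// sketch; QCallback, Chunk, and the size constants are stand-ins for
// whatever the embedding tool defines):
//
//   static Quarantine<QCallback, Chunk> quarantine(LINKER_INITIALIZED);
//   quarantine.Init(kGlobalQuarantineSize, kThreadLocalQuarantineSize);
//   ...
//   // On every deallocation:
//   quarantine.Put(&thread_local_cache, QCallback(), chunk, chunk_size);
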
// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total memory used, including internal accounting.
  uptr Size() const {
    return atomic_load_relaxed(&size_);
  }

  // Memory used for internal accounting.
  uptr OverheadSize() const {
    return list_.size() * sizeof(QuarantineBatch);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
      CHECK(b);
      b->init(ptr, size);
      EnqueueBatch(b);
    } else {
      list_.back()->push_back(ptr, size);
      SizeAdd(size);
    }
  }

  void Transfer(QuarantineCache *from_cache) {
    list_.append_back(&from_cache->list_);
    SizeAdd(from_cache->Size());

    atomic_store_relaxed(&from_cache->size_, 0);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

  void MergeBatches(QuarantineCache *to_deallocate) {
    uptr extracted_size = 0;
    QuarantineBatch *current = list_.front();
    while (current && current->next) {
      if (current->can_merge(current->next)) {
        QuarantineBatch *extracted = current->next;
        // Move all the chunks into the current batch.
        current->merge(extracted);
        CHECK_EQ(extracted->count, 0);
        CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
        // Remove the emptied batch from the list and account for its size.
        list_.extract(current, extracted);
        extracted_size += extracted->size;
        // Add it to the deallocation list.
        to_deallocate->EnqueueBatch(extracted);
      } else {
        current = current->next;
      }
    }
    SizeSub(extracted_size);
  }

  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_overhead_bytes = 0;
    uptr total_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_bytes += (*it).size;
      total_overhead_bytes += (*it).size - (*it).quarantined_size();
      total_quarantine_chunks += (*it).count;
    }
    uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
    int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
        0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
    uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
    int memory_overhead_percent = total_quarantined_bytes == 0 ?
        0 : total_overhead_bytes * 100 / total_quarantined_bytes;
    Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
           "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
           "\n",
           batch_count, total_bytes, total_quarantined_bytes,
           total_quarantine_chunks, quarantine_chunks_capacity,
           chunks_usage_percent, memory_overhead_percent);
  }

 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;

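  // Note that the updates below are intentionally non-atomic
  // read-modify-write sequences: each cache is either thread-local or
  // protected by the owner's cache_mutex_, so there is never a concurrent
  // writer; the relaxed atomics only keep unsynchronized readers of Size()
  // (e.g. the check in Quarantine::Drain()) well-defined.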
  void SizeAdd(uptr add) {
    atomic_store_relaxed(&size_, Size() + add);
  }
  void SizeSub(uptr sub) {
    atomic_store_relaxed(&size_, Size() - sub);
  }
};

} // namespace __sanitizer

#endif // SANITIZER_QUARANTINE_H