//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine holds a specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];

  void init(void *ptr, uptr size) {
    count = 1;
    batch[0] = ptr;
    this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr quarantined_size() const {
    return size - sizeof(QuarantineBatch);
  }

  void push_back(void *ptr, uptr size) {
    CHECK_LT(count, kSize);
    batch[count++] = ptr;
    this->size += size;
  }

  bool can_merge(const QuarantineBatch* const from) const {
    return count + from->count <= kSize;
  }

  void merge(QuarantineBatch* const from) {
    CHECK_LE(count + from->count, kSize);
    CHECK_GE(size, sizeof(QuarantineBatch));

    for (uptr i = 0; i < from->count; ++i)
      batch[count + i] = from->batch[i];
    count += from->count;
    size += from->quarantined_size();

    from->count = 0;
    from->size = sizeof(QuarantineBatch);
  }
};
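
// For example, a batch init()'ed with a 128-byte chunk has
// size == 128 + sizeof(QuarantineBatch); after a push_back() of a 64-byte
// chunk, size == 192 + sizeof(QuarantineBatch) and quarantined_size() == 192.
// merge() moves both the chunk pointers and the payload bytes, leaving |from|
// empty with size == sizeof(QuarantineBatch).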

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8 KiB.

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *Callback::Allocate(uptr size);
// void Callback::Deallocate(void *ptr);
// (A sketch of a conforming callback follows the class definition below.)
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // The thread-local quarantine size can be zero only when the global
    // quarantine size is zero (this lets Put() get away with a single atomic
    // read).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);

    atomic_store_relaxed(&max_size_, size);
    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.
    atomic_store_relaxed(&max_cache_size_, cache_size);
  }
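
  // For example, Init(0, 0) disables the quarantine entirely, while
  // Init(1 << 20, 0) would trip the CHECK above: a nonzero global size
  // requires a nonzero thread-local cache size.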

  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
  uptr GetCacheSize() const {
    return atomic_load_relaxed(&max_cache_size_);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr cache_size = GetCacheSize();
    if (cache_size) {
      c->Enqueue(cb, ptr, size);
    } else {
      // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
      cb.Recycle(ptr);
    }
    // Check the cache size anyway to accommodate a runtime change of
    // cache_size.
    if (c->Size() > cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(atomic_load_relaxed(&min_size_), cb);
  }

  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    recycle_mutex_.Lock();
    Recycle(0, cb);
  }

  void PrintStats() const {
    // This assumes that the world is stopped, just as the allocator's
    // PrintStats does.
    Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
           GetSize() >> 20, GetCacheSize() >> 10);
    cache_.PrintStats();
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(uptr min_size, Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      // Go over the batches and merge partially filled ones to save some
      // memory; otherwise the batches themselves (whose memory is counted
      // against the quarantine limit) can outweigh the user's actual
      // quarantined chunks, which defeats the purpose of the quarantine.
      uptr cache_size = cache_.Size();
      uptr overhead_size = cache_.OverheadSize();
      CHECK_GE(cache_size, overhead_size);
      // Do the merge only when the overhead exceeds this predefined limit
      // (which might require some tuning). It saves us a merge attempt when
      // the batch list is unlikely to contain batches suitable for merging.
      const uptr kOverheadThresholdPercents = 100;
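      // The check below is equivalent to asking whether overhead_size exceeds
      // kOverheadThresholdPercents% of the quarantined payload bytes
      // (cache_size - overhead_size). E.g. with cache_size == 4 MB and
      // overhead_size == 2.5 MB, 2.5 * 200 > 4 * 100, so batches are merged;
      // with overhead_size == 1 MB (payload 3 MB), 1 * 200 < 4 * 100 and the
      // merge is skipped.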
      if (cache_size > overhead_size &&
          overhead_size * (100 + kOverheadThresholdPercents) >
              cache_size * kOverheadThresholdPercents) {
        cache_.MergeBatches(&tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined chunks.
      while (cache_.Size() > min_size) {
        tmp.EnqueueBatch(cache_.DequeueBatch());
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
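
// A minimal sketch of a conforming callback and its use, assuming the
// internal allocator helpers InternalAlloc()/InternalFree() declared in
// sanitizer_allocator_internal.h; real tools (e.g. ASan) plug in their own
// allocator instead, and the names below are hypothetical:
//
//   struct ExampleCallback {
//     void Recycle(void *ptr) { InternalFree(ptr); }  // chunk leaves quarantine
//     void *Allocate(uptr size) { return InternalAlloc(size); }  // batch storage
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//   };
//
//   static Quarantine<ExampleCallback, void> quarantine(LINKER_INITIALIZED);
//   quarantine.Init(1 << 24 /* 16 MB global */, 1 << 18 /* 256 KB local */);
//
//   // On every free(), with |cache| being this thread's Cache instance:
//   quarantine.Put(&cache, ExampleCallback(), chunk, chunk_size);
//   // At thread exit:
//   quarantine.DrainAndRecycle(&cache, ExampleCallback());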

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total memory used, including internal accounting.
  uptr Size() const {
    return atomic_load_relaxed(&size_);
  }

  // Memory used for internal accounting.
  uptr OverheadSize() const {
    return list_.size() * sizeof(QuarantineBatch);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
      CHECK(b);
      b->init(ptr, size);
      EnqueueBatch(b);
    } else {
      list_.back()->push_back(ptr, size);
      SizeAdd(size);
    }
  }
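
  // For example, the first Enqueue() into an empty cache allocates a batch,
  // so Size() grows by sizeof(QuarantineBatch) + size; the next 1020 calls
  // (up to kSize == 1021 chunks per batch) reuse it and add only |size|.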

  void Transfer(QuarantineCache *from_cache) {
    list_.append_back(&from_cache->list_);
    SizeAdd(from_cache->Size());

    atomic_store_relaxed(&from_cache->size_, 0);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

  void MergeBatches(QuarantineCache *to_deallocate) {
    uptr extracted_size = 0;
    QuarantineBatch *current = list_.front();
    while (current && current->next) {
      if (current->can_merge(current->next)) {
        QuarantineBatch *extracted = current->next;
        // Move all the chunks into the current batch.
        current->merge(extracted);
        CHECK_EQ(extracted->count, 0);
        CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its size.
        list_.extract(current, extracted);
        extracted_size += extracted->size;
        // Add it to the deallocation list.
        to_deallocate->EnqueueBatch(extracted);
      } else {
        current = current->next;
      }
    }
    SizeSub(extracted_size);
  }
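
  // For example, adjacent batches holding 100 and 200 chunks can_merge()
  // since 300 <= kSize. After merge(), the first batch holds all 300 chunks
  // and the emptied one, now exactly sizeof(QuarantineBatch) bytes, is moved
  // to |to_deallocate| for later release by the caller.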

  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_overhead_bytes = 0;
    uptr total_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_bytes += (*it).size;
      total_overhead_bytes += (*it).size - (*it).quarantined_size();
      total_quarantine_chunks += (*it).count;
    }
    uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
    int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
        0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
    uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
    int memory_overhead_percent = total_quarantined_bytes == 0 ?
        0 : total_overhead_bytes * 100 / total_quarantined_bytes;
    Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
           "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
           "\n",
           batch_count, total_bytes, total_quarantined_bytes,
           total_quarantine_chunks, quarantine_chunks_capacity,
           chunks_usage_percent, memory_overhead_percent);
  }

 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;

  void SizeAdd(uptr add) {
    atomic_store_relaxed(&size_, Size() + add);
  }
  void SizeSub(uptr sub) {
    atomic_store_relaxed(&size_, Size() - sub);
  }
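
  // SizeAdd()/SizeSub() are plain load-then-store sequences rather than
  // atomic increments. This relies on each cache being mutated only by its
  // owning thread or, for the global cache, under cache_mutex_; concurrent
  // Size() readers may simply observe a slightly stale value.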
};

} // namespace __sanitizer

#endif // SANITIZER_QUARANTINE_H