//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine caches a specified amount of memory in per-thread caches and
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Callback> class QuarantineCache;

struct QuarantineBatch {
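  // kSize is picked so that the whole batch (three header words plus kSize
  // pointer slots) occupies roughly 8 KB on typical 64-bit targets; see the
  // COMPILER_CHECK below.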
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8 KB.

// The Callback class must provide:
//   void Recycle(Node *ptr);
//   void *Allocate(uptr size);
//   void Deallocate(void *ptr);
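//
// A rough usage sketch (illustrative only, not part of this header): a tool
// defines a small callback with these three members and routes deferred frees
// through Put(). ToolCallback, Chunk, GetToolThreadCache, chunk, chunk_size,
// kQuarantineMaxBytes and kThreadCacheBytes below are hypothetical names.
//
//   struct ToolCallback {
//     void Recycle(Chunk *c);      // Finally release the user chunk.
//     void *Allocate(uptr size);   // Allocate storage for a QuarantineBatch.
//     void Deallocate(void *ptr);  // Free storage for a QuarantineBatch.
//   };
//
//   static Quarantine<ToolCallback, Chunk> quarantine(LINKER_INITIALIZED);
//   // At startup:
//   quarantine.Init(kQuarantineMaxBytes, kThreadCacheBytes);
//   // On every deferred free:
//   quarantine.Put(GetToolThreadCache(), ToolCallback(), chunk, chunk_size);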
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    max_size_ = size;
    min_size_ = size / 10 * 9;  // 90% of max size.
    max_cache_size_ = cache_size;
  }

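  // Put() appends a chunk to the caller's per-thread cache; once that cache
  // grows past max_cache_size_ it is drained into the global cache, and once
  // the global cache grows past max_size_ the oldest batches are recycled
  // until it shrinks back to min_size_.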
  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  uptr max_size_;
  uptr min_size_;
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size_) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

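  // Recycles every chunk in *c. Batch entries are software-prefetched a few
  // slots ahead of the recycling loop so that the (likely cold) chunk headers
  // are already in cache when cb.Recycle() touches them.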
  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      for (uptr i = 0; i < kPrefetch && i < b->count; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0; i < b->count; i++) {
        // Stay kPrefetch slots ahead, but never read past the populated part
        // of the batch.
        if (i + kPrefetch < b->count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

// Per-thread cache of memory blocks.
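// A cache is an intrusive list of QuarantineBatches plus a byte counter. It
// does no locking of its own: per-thread instances are meant to be used only
// by their owning thread, and the global instance inside Quarantine is always
// manipulated under cache_mutex_.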
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return 0;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

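  // size_ is updated with a plain load-modify-store of relaxed atomics rather
  // than an atomic increment. This assumes updates come either from the owning
  // thread or from a thread holding Quarantine's cache_mutex_, so only the
  // relaxed Size() reads can race, and those may simply observe a stale value.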
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};
}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_QUARANTINE_H