//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;

// SizeClassAllocator64 -- allocator for 64-bit address space.
// The template parameter Params is a class containing the actual parameters.
//
// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
// Otherwise SpaceBeg=kSpaceBeg (fixed address).
// kSpaceSize is a power of two.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.

// FreeArray is an array of freed chunks (stored as 4-byte offsets).
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray

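// A typical Params, sketched for illustration only (the struct name and the
// concrete constants below are hypothetical; each tool defines its own):
//
//   struct AP64 {
//     static const uptr kSpaceBeg = ~(uptr)0;          // Dynamic SpaceBeg.
//     static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
//     static const uptr kMetadataSize = 0;
//     typedef DefaultSizeClassMap SizeClassMap;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags = 0;
//     using AddressSpaceView = LocalAddressSpaceView;
//   };
//   typedef SizeClassAllocator64<AP64> Allocator64;
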
struct SizeClassAllocator64FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
  };
};

template <class Params>
class SizeClassAllocator64 {
 public:
  using AddressSpaceView = typename Params::AddressSpaceView;
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const uptr kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static const bool kRandomShuffleChunks =
      Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;

  typedef SizeClassAllocator64<Params> ThisT;
  typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;

  // When we know the size class (the region base) we can represent a pointer
  // as a 4-byte integer (offset from the region start shifted right by 4).
  typedef u32 CompactPtrT;
  static const uptr kCompactPtrScale = 4;
  CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
    return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
  }
  uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
    return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
  }
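  // Example: the chunk at base + 96 compacts to 96 >> 4 == 6, and
  // CompactPtrToPointer(base, 6) == base + 96 recovers the pointer. The
  // encoding is lossless only because chunk offsets within a region are
  // 16-byte aligned.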

  void Init(s32 release_to_os_interval_ms) {
    uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
    if (kUsingConstantSpaceBeg) {
      CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,
                                             PrimaryAllocatorName, kSpaceBeg));
    } else {
      NonConstSpaceBeg = address_range.Init(TotalSpaceSize,
                                            PrimaryAllocatorName);
      CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
    }
    SetReleaseToOSIntervalMs(release_to_os_interval_ms);
    MapWithCallbackOrDie(SpaceEnd(), AdditionalSize(),
                         "SizeClassAllocator: region info");
    // Check that the RegionInfo array is aligned on the cache line size.
    DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
  }

  s32 ReleaseToOSIntervalMs() const {
    return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
                 memory_order_relaxed);
  }

  void ForceReleaseToOS() {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
      MaybeReleaseToOS(class_id, true /*force*/);
    }
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
                                  const CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    BlockingMutexLock l(&region->mutex);
    uptr old_num_chunks = region->num_freed_chunks;
    uptr new_num_freed_chunks = old_num_chunks + n_chunks;
    // Failure to allocate free array space while releasing memory is
    // non-recoverable.
    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
                                       new_num_freed_chunks))) {
      Report("FATAL: Internal error: %s's allocator exhausted the free list "
             "space for size class %zd (%zd bytes).\n", SanitizerToolName,
             class_id, ClassIdToSize(class_id));
      Die();
    }
    for (uptr i = 0; i < n_chunks; i++)
      free_array[old_num_chunks + i] = chunks[i];
    region->num_freed_chunks = new_num_freed_chunks;
    region->stats.n_freed += n_chunks;

    MaybeReleaseToOS(class_id, false /*force*/);
  }

  NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
                                 CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    BlockingMutexLock l(&region->mutex);
    if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
      if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
                                      n_chunks - region->num_freed_chunks)))
        return false;
      CHECK_GE(region->num_freed_chunks, n_chunks);
    }
    region->num_freed_chunks -= n_chunks;
    uptr base_idx = region->num_freed_chunks;
    for (uptr i = 0; i < n_chunks; i++)
      chunks[i] = free_array[base_idx + i];
    region->stats.n_allocated += n_chunks;
    return true;
  }

  bool PointerIsMine(const void *p) const {
    uptr P = reinterpret_cast<uptr>(p);
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return P / kSpaceSize == kSpaceBeg / kSpaceSize;
    return P >= SpaceBeg() && P < SpaceEnd();
  }
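  // Example for the aligned fast path above: ASan-style constants
  // kSpaceBeg == 0x600000000000 and kSpaceSize == 0x40000000000 (2^42)
  // satisfy kSpaceBeg % kSpaceSize == 0, so the check compiles down to one
  // division by a constant and a comparison. (Illustrative values only.)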

  uptr GetRegionBegin(const void *p) {
    if (kUsingConstantSpaceBeg)
      return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
    uptr space_beg = SpaceBeg();
    return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
        space_beg;
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) const {
    return SpaceBeg() + kRegionSize * class_id;
  }

  uptr GetSizeClass(const void *p) {
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
    return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
           kNumClassesRounded;
  }

  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = ClassIdToSize(class_id);
    if (!size) return nullptr;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = GetRegionBegin(p);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    if (class_id >= kNumClasses) return nullptr;
    const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = ClassIdToSize(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
                                    (1 + chunk_idx) * kMetadataSize);
  }
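  // Per the Region layout above, MetaChunks grow downwards from
  // GetMetadataEnd(region_beg): chunk 0's metadata occupies the highest
  // kMetadataSize bytes below that end, chunk 1's the kMetadataSize bytes
  // below it, and so on.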

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallbackOrDie(SpaceBeg(), kSpaceSize + AdditionalSize());
  }

  static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
                                uptr stats_size) {
    for (uptr class_id = 0; class_id < stats_size; class_id++)
      if (stats[class_id] == start)
        stats[class_id] = rss;
  }

  void PrintStats(uptr class_id, uptr rss) {
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user == 0) return;
    uptr in_use = region->stats.n_allocated - region->stats.n_freed;
    uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
    Printf(
        "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
        "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
        "last released: %6zdK region: 0x%zx\n",
        region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
        region->mapped_user >> 10, region->stats.n_allocated,
        region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
        rss >> 10, region->rtoi.num_releases,
        region->rtoi.last_released_bytes >> 10,
        SpaceBeg() + kRegionSize * class_id);
  }

  void PrintStats() {
    uptr rss_stats[kNumClasses];
    for (uptr class_id = 0; class_id < kNumClasses; class_id++)
      rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
    GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);

    uptr total_mapped = 0;
    uptr total_rss = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      if (region->mapped_user != 0) {
        total_mapped += region->mapped_user;
        total_rss += rss_stats[class_id];
      }
      n_allocated += region->stats.n_allocated;
      n_freed += region->stats.n_freed;
    }

    Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in "
           "%zd allocations; remains %zd\n", total_mapped >> 20,
           total_rss >> 20, n_allocated, n_allocated - n_freed);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++)
      PrintStats(class_id, rss_stats[class_id]);
  }

  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = ClassIdToSize(class_id);
      uptr region_beg = SpaceBeg() + class_id * kRegionSize;
      uptr region_allocated_user_size =
          AddressSpaceView::Load(region)->allocated_user;
      for (uptr chunk = region_beg;
           chunk < region_beg + region_allocated_user_size;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }

  static uptr ClassIdToSize(uptr class_id) {
    return SizeClassMap::Size(class_id);
  }

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;

  // A packed array of counters. Each counter occupies 2^n bits, enough to
  // store the counter's max_value. The ctor will try to allocate the required
  // buffer via mapper->MapPackedCounterArrayBuffer and the caller is expected
  // to check whether the initialization was successful by checking the
  // IsAllocated() result. For performance reasons none of the accessors check
  // the validity of the arguments; it is assumed that index is always in the
  // [0, n) range and that the value is not incremented past max_value.
  template<class MemoryMapperT>
  class PackedCounterArray {
   public:
    PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
        : n(num_counters), memory_mapper(mapper) {
      CHECK_GT(num_counters, 0);
      CHECK_GT(max_value, 0);
      constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
      // Rounding the counter storage size up to a power of two allows for
      // using bit shifts when calculating a particular counter's index and
      // offset.
      uptr counter_size_bits =
          RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
      CHECK_LE(counter_size_bits, kMaxCounterBits);
      counter_size_bits_log = Log2(counter_size_bits);
      counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);

      uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
      CHECK_GT(packing_ratio, 0);
      packing_ratio_log = Log2(packing_ratio);
      bit_offset_mask = packing_ratio - 1;
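      // Example: for max_value == 5, MostSignificantSetBitIndex(5) == 2, so
      // counter_size_bits == RoundUpToPowerOfTwo(3) == 4; then counter_mask
      // == 0xf and packing_ratio == 16 counters per u64 buffer element.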

      buffer_size =
          (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) *
          sizeof(*buffer);
      buffer = reinterpret_cast<u64*>(
          memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
    }
    ~PackedCounterArray() {
      if (buffer) {
        memory_mapper->UnmapPackedCounterArrayBuffer(
            reinterpret_cast<uptr>(buffer), buffer_size);
      }
    }

    bool IsAllocated() const {
      return !!buffer;
    }

    u64 GetCount() const {
      return n;
    }

    uptr Get(uptr i) const {
      DCHECK_LT(i, n);
      uptr index = i >> packing_ratio_log;
      uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
      return (buffer[index] >> bit_offset) & counter_mask;
    }

    void Inc(uptr i) const {
      DCHECK_LT(Get(i), counter_mask);
      uptr index = i >> packing_ratio_log;
      uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
      buffer[index] += 1ULL << bit_offset;
    }

    void IncRange(uptr from, uptr to) const {
      DCHECK_LE(from, to);
      for (uptr i = from; i <= to; i++)
        Inc(i);
    }

   private:
    const u64 n;
    u64 counter_size_bits_log;
    u64 counter_mask;
    u64 packing_ratio_log;
    u64 bit_offset_mask;

    MemoryMapperT* const memory_mapper;
    u64 buffer_size;
    u64* buffer;
  };

  template<class MemoryMapperT>
  class FreePagesRangeTracker {
   public:
    explicit FreePagesRangeTracker(MemoryMapperT* mapper)
        : memory_mapper(mapper),
          page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
          in_the_range(false), current_page(0), current_range_start_page(0) {}

    void NextPage(bool freed) {
      if (freed) {
        if (!in_the_range) {
          current_range_start_page = current_page;
          in_the_range = true;
        }
      } else {
        CloseOpenedRange();
      }
      current_page++;
    }

    void Done() {
      CloseOpenedRange();
    }

   private:
    void CloseOpenedRange() {
      if (in_the_range) {
        memory_mapper->ReleasePageRangeToOS(
            current_range_start_page << page_size_scaled_log,
            current_page << page_size_scaled_log);
        in_the_range = false;
      }
    }

    MemoryMapperT* const memory_mapper;
    const uptr page_size_scaled_log;
    bool in_the_range;
    uptr current_page;
    uptr current_range_start_page;
  };
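  // Example: the call sequence NextPage(true), NextPage(true),
  // NextPage(false), Done() issues a single ReleasePageRangeToOS call
  // covering scaled pages [0, 2); the third, still-used page closes the
  // open range and is itself kept.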

  // Iterates over the free_array to identify memory pages containing freed
  // chunks only and returns these pages back to the OS.
  // allocated_pages_count is the total number of pages allocated for the
  // current bucket.
  template<class MemoryMapperT>
  static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
                                    uptr free_array_count, uptr chunk_size,
                                    uptr allocated_pages_count,
                                    MemoryMapperT *memory_mapper) {
    const uptr page_size = GetPageSizeCached();

    // Figure out the number of chunks per page and whether we can take a fast
    // path (the number of chunks per page is the same for all pages).
    uptr full_pages_chunk_count_max;
    bool same_chunk_count_per_page;
    if (chunk_size <= page_size && page_size % chunk_size == 0) {
      // Same number of chunks per page, no crossovers.
      full_pages_chunk_count_max = page_size / chunk_size;
      same_chunk_count_per_page = true;
    } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
        chunk_size % (page_size % chunk_size) == 0) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks, but all pages contain the same
      // number of chunks.
      full_pages_chunk_count_max = page_size / chunk_size + 1;
      same_chunk_count_per_page = true;
    } else if (chunk_size <= page_size) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks.
      full_pages_chunk_count_max = page_size / chunk_size + 2;
      same_chunk_count_per_page = false;
    } else if (chunk_size > page_size && chunk_size % page_size == 0) {
      // One chunk covers multiple pages, no crossovers.
      full_pages_chunk_count_max = 1;
      same_chunk_count_per_page = true;
    } else if (chunk_size > page_size) {
      // One chunk covers multiple pages, and some chunks are crossing page
      // boundaries. Some pages contain one chunk, some contain two.
      full_pages_chunk_count_max = 2;
      same_chunk_count_per_page = false;
    } else {
      UNREACHABLE("All chunk_size/page_size ratios must be handled.");
    }
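    // Example: page_size == 4096 and chunk_size == 48 hit the second case
    // above (4096 % 48 == 16 and 48 % 16 == 0): every page overlaps parts of
    // exactly 4096 / 48 + 1 == 86 chunks, so the fast path below applies.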

    PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
                                               full_pages_chunk_count_max,
                                               memory_mapper);
    if (!counters.IsAllocated())
      return;

    const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
    const uptr page_size_scaled = page_size >> kCompactPtrScale;
    const uptr page_size_scaled_log = Log2(page_size_scaled);

    // Iterate over free chunks and count how many free chunks affect each
    // allocated page.
    if (chunk_size <= page_size && page_size % chunk_size == 0) {
      // Each chunk affects one page only.
      for (uptr i = 0; i < free_array_count; i++)
        counters.Inc(free_array[i] >> page_size_scaled_log);
    } else {
      // In all other cases chunks might affect more than one page.
      for (uptr i = 0; i < free_array_count; i++) {
        counters.IncRange(
            free_array[i] >> page_size_scaled_log,
            (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
      }
    }

    // Iterate over pages detecting ranges of pages with chunk counters equal
    // to the expected number of chunks for the particular page.
    FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
    if (same_chunk_count_per_page) {
      // Fast path, every page has the same number of chunks affecting it.
      for (uptr i = 0; i < counters.GetCount(); i++)
        range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
    } else {
      // Slow path, go through the pages keeping count of how many chunks
      // affect each page.
      const uptr pn =
          chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
      const uptr pnc = pn * chunk_size_scaled;
      // The idea is to increment the current page pointer by the first chunk
      // size, the middle portion size (the portion of the page covered by
      // chunks except the first and the last one) and then the last chunk
      // size, adding up the number of chunks on the current page and checking
      // on every step whether the page boundary was crossed.
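      // Example: chunk_size == 96 (chunk_size_scaled == 6, page_size_scaled
      // == 256) gives pn == 42 and pnc == 252. For page 0 the loop counts 42
      // chunks plus one more when current_boundary (252) is still below the
      // page boundary (256), i.e. 43 chunks touch the page; page 1 gets 44
      // (one chunk crossing in, 42 within, and one crossing out).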
      uptr prev_page_boundary = 0;
      uptr current_boundary = 0;
      for (uptr i = 0; i < counters.GetCount(); i++) {
        uptr page_boundary = prev_page_boundary + page_size_scaled;
        uptr chunks_per_page = pn;
        if (current_boundary < page_boundary) {
          if (current_boundary > prev_page_boundary)
            chunks_per_page++;
          current_boundary += pnc;
          if (current_boundary < page_boundary) {
            chunks_per_page++;
            current_boundary += chunk_size_scaled;
          }
        }
        prev_page_boundary = page_boundary;

        range_tracker.NextPage(counters.Get(i) == chunks_per_page);
      }
    }
    range_tracker.Done();
  }

 private:
  friend class MemoryMapper;

  ReservedAddressRange address_range;

  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  // FreeArray is the array of freed chunks (stored as 4-byte offsets).
  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
  // elements, but in reality this will not happen. For simplicity we
  // dedicate 1/8 of the region's virtual space to FreeArray.
  static const uptr kFreeArraySize = kRegionSize / 8;

  static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
  uptr NonConstSpaceBeg;
  uptr SpaceBeg() const {
    return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
  }
  uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // kRegionSize must be <= 2^36, see CompactPtrT.
  COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
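  // (A 32-bit CompactPtrT scaled by kCompactPtrScale == 4 can encode offsets
  // up to 2^32 * 2^4 == 2^36 bytes, which is where the upper bound comes
  // from.)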
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;
  // Call mmap for free array memory with at least this size.
  static const uptr kFreeArrayMapSize = 1 << 16;

  atomic_sint32_t release_to_os_interval_ms_;

  struct Stats {
    uptr n_allocated;
    uptr n_freed;
  };

  struct ReleaseToOsInfo {
    uptr n_freed_at_last_release;
    uptr num_releases;
    u64 last_release_at_ns;
    u64 last_released_bytes;
  };

  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
    BlockingMutex mutex;
    uptr num_freed_chunks;  // Number of elements in the free array.
    uptr mapped_free_array;  // Bytes mapped for the free array.
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
    u32 rand_state;  // Seed for random shuffle, used if kRandomShuffleChunks.
    bool exhausted;  // Whether region is out of space for new chunks.
    Stats stats;
    ReleaseToOsInfo rtoi;
  };
  COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);

  RegionInfo *GetRegionInfo(uptr class_id) const {
    DCHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
    return &regions[class_id];
  }

  uptr GetMetadataEnd(uptr region_beg) const {
    return region_beg + kRegionSize - kFreeArraySize;
  }

  uptr GetChunkIdx(uptr chunk, uptr size) const {
    if (!kUsingConstantSpaceBeg)
      chunk -= SpaceBeg();

    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32 bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }

  CompactPtrT *GetFreeArray(uptr region_beg) const {
    return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
  }

  bool MapWithCallback(uptr beg, uptr size, const char *name) {
    uptr mapped = address_range.Map(beg, size, name);
    if (UNLIKELY(!mapped))
      return false;
    CHECK_EQ(beg, mapped);
    MapUnmapCallback().OnMap(beg, size);
    return true;
  }

  void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
    CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallbackOrDie(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    address_range.Unmap(beg, size);
  }

  bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
                            uptr num_freed_chunks) {
    uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
    if (region->mapped_free_array < needed_space) {
      uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
      CHECK_LE(new_mapped_free_array, kFreeArraySize);
      uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
                             region->mapped_free_array;
      uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
      if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size,
                                    "SizeClassAllocator: freearray")))
        return false;
      region->mapped_free_array = new_mapped_free_array;
    }
    return true;
  }
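  // Note: with kFreeArrayMapSize == 1 << 16 the free array grows in 64K
  // steps, i.e. room for 16384 more CompactPtrT entries per map call.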

  // Check whether this size class is exhausted.
  bool IsRegionExhausted(RegionInfo *region, uptr class_id,
                         uptr additional_map_size) {
    if (LIKELY(region->mapped_user + region->mapped_meta +
               additional_map_size <= kRegionSize - kFreeArraySize))
      return false;
    if (!region->exhausted) {
      region->exhausted = true;
      Printf("%s: Out of memory. ", SanitizerToolName);
      Printf("The process has exhausted %zuMB for size class %zu.\n",
             kRegionSize >> 20, ClassIdToSize(class_id));
    }
    return true;
  }

  NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
                                  RegionInfo *region, uptr requested_count) {
    // region->mutex is held.
    const uptr region_beg = GetRegionBeginBySizeClass(class_id);
    const uptr size = ClassIdToSize(class_id);

    const uptr total_user_bytes =
        region->allocated_user + requested_count * size;
    // Map more space for chunks, if necessary.
    if (LIKELY(total_user_bytes > region->mapped_user)) {
      if (UNLIKELY(region->mapped_user == 0)) {
        if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
          // The random state is initialized from ASLR.
          region->rand_state = static_cast<u32>(region_beg >> 12);
        // Postpone the first release-to-OS attempt for ReleaseToOSIntervalMs,
        // preventing just allocated memory from being released sooner than
        // necessary and also preventing extraneous ReleaseMemoryPagesToOS
        // calls for short-lived processes.
        // Do it only when the feature is turned on, to avoid a potentially
        // extraneous syscall.
        if (ReleaseToOSIntervalMs() >= 0)
          region->rtoi.last_release_at_ns = MonotonicNanoTime();
      }
      // Do the mmap for the user memory.
      const uptr user_map_size =
          RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
      if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
        return false;
      if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
                                    user_map_size,
                                    "SizeClassAllocator: region data")))
        return false;
      stat->Add(AllocatorStatMapped, user_map_size);
      region->mapped_user += user_map_size;
    }
    const uptr new_chunks_count =
        (region->mapped_user - region->allocated_user) / size;

    if (kMetadataSize) {
      // Calculate the required space for metadata.
      const uptr total_meta_bytes =
          region->allocated_meta + new_chunks_count * kMetadataSize;
      const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
          RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
      // Map more space for metadata, if necessary.
      if (meta_map_size) {
        if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
          return false;
        if (UNLIKELY(!MapWithCallback(
            GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
            meta_map_size, "SizeClassAllocator: region metadata")))
          return false;
        region->mapped_meta += meta_map_size;
      }
    }

    // If necessary, allocate more space for the free array and populate it
    // with newly allocated chunks.
    const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
      return false;
    CompactPtrT *free_array = GetFreeArray(region_beg);
    for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
         i++, chunk += size)
      free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
    if (kRandomShuffleChunks)
      RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
                    &region->rand_state);

    // All necessary memory is mapped and now it is safe to advance all
    // 'allocated_*' counters.
    region->num_freed_chunks += new_chunks_count;
    region->allocated_user += new_chunks_count * size;
    CHECK_LE(region->allocated_user, region->mapped_user);
    region->allocated_meta += new_chunks_count * kMetadataSize;
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    region->exhausted = false;

    // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
    // MaybeReleaseToOS from releasing just allocated pages or protect these
    // not yet used chunks some other way.

    return true;
  }

  class MemoryMapper {
   public:
    MemoryMapper(const ThisT& base_allocator, uptr class_id)
        : allocator(base_allocator),
          region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
          released_ranges_count(0),
          released_bytes(0) {
    }

    uptr GetReleasedRangesCount() const {
      return released_ranges_count;
    }

    uptr GetReleasedBytes() const {
      return released_bytes;
    }

    uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
      // TODO(alekseyshl): The idea to explore is to check if we have enough
      // space between num_freed_chunks*sizeof(CompactPtrT) and
      // mapped_free_array to fit buffer_size bytes and use that space instead
      // of mapping a temporary one.
      return reinterpret_cast<uptr>(
          MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters"));
    }

    void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
      UnmapOrDie(reinterpret_cast<void *>(buffer), buffer_size);
    }

    // Releases the [from, to) range of pages back to the OS.
    void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
      const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
      const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
      ReleaseMemoryPagesToOS(from_page, to_page);
      released_ranges_count++;
      released_bytes += to_page - from_page;
    }

   private:
    const ThisT& allocator;
    const uptr region_base;
    uptr released_ranges_count;
    uptr released_bytes;
  };

  // Attempts to release RAM occupied by freed chunks back to the OS. The
  // region is expected to be locked.
  void MaybeReleaseToOS(uptr class_id, bool force) {
    RegionInfo *region = GetRegionInfo(class_id);
    const uptr chunk_size = ClassIdToSize(class_id);
    const uptr page_size = GetPageSizeCached();

    uptr n = region->num_freed_chunks;
    if (n * chunk_size < page_size)
      return;  // No chance to release anything.
    if ((region->stats.n_freed -
         region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
      return;  // Nothing new to release.
    }

    if (!force) {
      s32 interval_ms = ReleaseToOSIntervalMs();
      if (interval_ms < 0)
        return;

      if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
          MonotonicNanoTime()) {
        return;  // Memory was returned recently.
      }
    }

    MemoryMapper memory_mapper(*this, class_id);

    ReleaseFreeMemoryToOS<MemoryMapper>(
        GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
        RoundUpTo(region->allocated_user, page_size) / page_size,
        &memory_mapper);

    if (memory_mapper.GetReleasedRangesCount() > 0) {
      region->rtoi.n_freed_at_last_release = region->stats.n_freed;
      region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
      region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
    }
    region->rtoi.last_release_at_ns = MonotonicNanoTime();
  }
};