//===-- primary64.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY64_H_
#define SCUDO_PRIMARY64_H_

#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "memtag.h"
#include "options.h"
#include "release.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
//
// It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided
// into Regions, specific to each size class. Note that the base of that
// mapping is random (based on the platform-specific map() capabilities). If
// PrimaryEnableRandomOffset is set, each Region actually starts at a random
// offset from its base.
//
// Regions are mapped incrementally on demand to fulfill allocation requests,
// those mappings being split into equally sized Blocks based on the size class
// they belong to. The Blocks created are shuffled to prevent predictable
// address patterns (the predictability increases with the size of the Blocks).
//
// The 1st Region (for size class 0) holds the TransferBatches. This is a
// structure used to transfer arrays of available pointers from the size class
// freelist to the thread-specific freelist, and back.
//
// The memory used by this allocator is never unmapped, but can be partially
// released if the platform allows for it.
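//
// Illustrative example (values are hypothetical, not tied to any particular
// Config): with PrimaryRegionSizeLog == 30 and 32 size classes, init()
// reserves 32 * 2^30 bytes (32 GiB) of address space with MAP_NOACCESS, and
// each size class owns a 1 GiB Region within that reservation, which is then
// mapped in MapSizeIncrement steps as allocations require it.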

template <typename Config> class SizeClassAllocator64 {
public:
  typedef typename Config::PrimaryCompactPtrT CompactPtrT;
  static const uptr CompactPtrScale = Config::PrimaryCompactPtrScale;
  static const uptr GroupSizeLog = Config::PrimaryGroupSizeLog;
  typedef typename Config::SizeClassMap SizeClassMap;
  typedef SizeClassAllocator64<Config> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef typename CacheT::TransferBatch TransferBatch;
  typedef typename CacheT::BatchGroup BatchGroup;

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? roundUpTo(sizeof(TransferBatch), 1U << CompactPtrScale)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

  void init(s32 ReleaseToOsInterval) {
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    DCHECK_EQ(PrimaryBase, 0U);
    // Reserve the space required for the Primary.
    PrimaryBase = reinterpret_cast<uptr>(
        map(nullptr, PrimarySize, nullptr, MAP_NOACCESS, &Data));

    u32 Seed;
    const u64 Time = getMonotonicTime();
    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
      Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
    const uptr PageSize = getPageSizeCached();
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      // The actual start of a region is offset by a random number of pages
      // when PrimaryEnableRandomOffset is set.
      Region->RegionBeg = getRegionBaseByClassId(I) +
                          (Config::PrimaryEnableRandomOffset
                               ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
                               : 0);
      Region->RandState = getRandomU32(&Seed);
      Region->ReleaseInfo.LastReleaseAtNs = Time;
    }
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void unmapTestOnly() {
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      *Region = {};
    }
    if (PrimaryBase)
      unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL,
            &Data);
    PrimaryBase = 0U;
  }

  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    RegionInfo *Region = getRegionInfo(ClassId);
    ScopedLock L(Region->Mutex);
    TransferBatch *B = popBatchImpl(C, ClassId);
    if (UNLIKELY(!B)) {
      if (UNLIKELY(!populateFreeList(C, ClassId, Region)))
        return nullptr;
      B = popBatchImpl(C, ClassId);
      // If `populateFreeList` succeeded, we are supposed to get free blocks.
      DCHECK_NE(B, nullptr);
    }
    Region->Stats.PoppedBlocks += B->getCount();
    return B;
  }

  // Push the array of free blocks to the designated batch group.
  void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
    DCHECK_LT(ClassId, NumClasses);
    DCHECK_GT(Size, 0);

    RegionInfo *Region = getRegionInfo(ClassId);
    if (ClassId == SizeClassMap::BatchClassId) {
      ScopedLock L(Region->Mutex);
      // Constructing a batch group in the free list will use two blocks in
      // BatchClassId. If we are pushing BatchClassId blocks, we will use the
      // blocks in the array directly (we can't delegate to the local cache,
      // which would cause a recursive allocation). However, the number of free
      // blocks may be less than two. Therefore, populate the free list before
      // inserting the blocks.
      if (Size == 1 && UNLIKELY(!populateFreeList(C, ClassId, Region)))
        return;
      pushBlocksImpl(C, ClassId, Array, Size);
      Region->Stats.PushedBlocks += Size;
      return;
    }

    // TODO(chiahungduan): Consider not doing grouping if the group size is not
    // greater than the block size with a certain scale.

    // Sort the blocks so that blocks belonging to the same group can be pushed
    // together.
    bool SameGroup = true;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
        SameGroup = false;
      CompactPtrT Cur = Array[I];
      u32 J = I;
      while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
        Array[J] = Array[J - 1];
        --J;
      }
      Array[J] = Cur;
    }

    ScopedLock L(Region->Mutex);
    pushBlocksImpl(C, ClassId, Array, Size, SameGroup);

    Region->Stats.PushedBlocks += Size;
    if (ClassId != SizeClassMap::BatchClassId)
      releaseToOSMaybe(Region, ClassId);
  }

  void disable() {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getRegionInfo(static_cast<uptr>(I))->Mutex.lock();
    }
    getRegionInfo(SizeClassMap::BatchClassId)->Mutex.lock();
  }

  void enable() {
    getRegionInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getRegionInfo(I)->Mutex.unlock();
    }
  }

  template <typename F> void iterateOverBlocks(F Callback) {
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      const RegionInfo *Region = getRegionInfo(I);
      const uptr BlockSize = getSizeByClassId(I);
      const uptr From = Region->RegionBeg;
      const uptr To = From + Region->AllocatedUser;
      for (uptr Block = From; Block < To; Block += BlockSize)
        Callback(Block);
    }
  }

  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      RegionInfo *Region = getRegionInfo(I);
      if (Region->MappedUser)
        TotalMapped += Region->MappedUser;
      PoppedBlocks += Region->Stats.PoppedBlocks;
      PushedBlocks += Region->Stats.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator64: %zuM mapped (%uM rss) in %zu "
                "allocations; remains %zu\n",
                TotalMapped >> 20, 0U, PoppedBlocks,
                PoppedBlocks - PushedBlocks);

    for (uptr I = 0; I < NumClasses; I++)
      getStats(Str, I, 0);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
          Config::PrimaryMinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    // Not supported by the Primary, but not an error either.
    return true;
  }

  uptr releaseToOS() {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock L(Region->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Region, I, /*Force=*/true);
    }
    return TotalReleasedBytes;
  }

  const char *getRegionInfoArrayAddress() const {
    return reinterpret_cast<const char *>(RegionInfoArray);
  }

  static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); }

  uptr getCompactPtrBaseByClassId(uptr ClassId) {
    // If we are not compacting pointers, base everything off of 0.
    if (sizeof(CompactPtrT) == sizeof(uptr) && CompactPtrScale == 0)
      return 0;
    return getRegionInfo(ClassId)->RegionBeg;
  }

  CompactPtrT compactPtr(uptr ClassId, uptr Ptr) {
    DCHECK_LE(ClassId, SizeClassMap::LargestClassId);
    return compactPtrInternal(getCompactPtrBaseByClassId(ClassId), Ptr);
  }

  void *decompactPtr(uptr ClassId, CompactPtrT CompactPtr) {
    DCHECK_LE(ClassId, SizeClassMap::LargestClassId);
    return reinterpret_cast<void *>(
        decompactPtrInternal(getCompactPtrBaseByClassId(ClassId), CompactPtr));
  }

  static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
    const RegionInfo *RegionInfoArray =
        reinterpret_cast<const RegionInfo *>(RegionInfoData);
    uptr ClassId;
    uptr MinDistance = -1UL;
    for (uptr I = 0; I != NumClasses; ++I) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      uptr Begin = RegionInfoArray[I].RegionBeg;
      uptr End = Begin + RegionInfoArray[I].AllocatedUser;
      if (Begin > End || End - Begin < SizeClassMap::getSizeByClassId(I))
        continue;
      uptr RegionDistance;
      if (Begin <= Ptr) {
        if (Ptr < End)
          RegionDistance = 0;
        else
          RegionDistance = Ptr - End;
      } else {
        RegionDistance = Begin - Ptr;
      }

      if (RegionDistance < MinDistance) {
        MinDistance = RegionDistance;
        ClassId = I;
      }
    }

    BlockInfo B = {};
    if (MinDistance <= 8192) {
      B.RegionBegin = RegionInfoArray[ClassId].RegionBeg;
      B.RegionEnd = B.RegionBegin + RegionInfoArray[ClassId].AllocatedUser;
      B.BlockSize = SizeClassMap::getSizeByClassId(ClassId);
      B.BlockBegin =
          B.RegionBegin + uptr(sptr(Ptr - B.RegionBegin) / sptr(B.BlockSize) *
                               sptr(B.BlockSize));
      while (B.BlockBegin < B.RegionBegin)
        B.BlockBegin += B.BlockSize;
      while (B.RegionEnd < B.BlockBegin + B.BlockSize)
        B.BlockBegin -= B.BlockSize;
    }
    return B;
  }

  AtomicOptions Options;

private:
  static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr PrimarySize = RegionSize * NumClasses;

  static const uptr MapSizeIncrement = Config::PrimaryMapSizeIncrement;
  // Fill at most this number of batches from the newly mapped memory.
  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;

  struct RegionStats {
    uptr PoppedBlocks;
    uptr PushedBlocks;
  };

  struct ReleaseToOsInfo {
    uptr PushedBlocksAtLastRelease;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct UnpaddedRegionInfo {
    HybridMutex Mutex;
    SinglyLinkedList<BatchGroup> FreeList;
    uptr RegionBeg = 0;
    RegionStats Stats = {};
    u32 RandState = 0;
    uptr MappedUser = 0;    // Bytes mapped for user memory.
    uptr AllocatedUser = 0; // Bytes allocated for user memory.
    MapPlatformData Data = {};
    ReleaseToOsInfo ReleaseInfo = {};
    bool Exhausted = false;
  };
  struct RegionInfo : UnpaddedRegionInfo {
    char Padding[SCUDO_CACHE_LINE_SIZE -
                 (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)] = {};
  };
  static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");

  uptr PrimaryBase = 0;
  MapPlatformData Data = {};
  atomic_s32 ReleaseToOsIntervalMs = {};
  alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];

  RegionInfo *getRegionInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &RegionInfoArray[ClassId];
  }

  uptr getRegionBaseByClassId(uptr ClassId) const {
    return PrimaryBase + (ClassId << Config::PrimaryRegionSizeLog);
  }

  static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) {
    return static_cast<CompactPtrT>((Ptr - Base) >> CompactPtrScale);
  }

  static uptr decompactPtrInternal(uptr Base, CompactPtrT CompactPtr) {
    return Base + (static_cast<uptr>(CompactPtr) << CompactPtrScale);
  }
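  // Illustrative example (not tied to any particular Config): with a
  // hypothetical CompactPtrScale of 4, a block at Base + 0x12340 compacts to
  // (0x12340 >> 4) == 0x1234, and decompacting yields Base + (0x1234 << 4),
  // i.e. the original address. The round trip is exact as long as block
  // addresses are aligned to 2^CompactPtrScale bytes.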

  static uptr compactPtrGroup(CompactPtrT CompactPtr) {
    return static_cast<uptr>(CompactPtr) >> (GroupSizeLog - CompactPtrScale);
  }
  static uptr batchGroupBase(uptr Base, uptr GroupId) {
    return (GroupId << GroupSizeLog) + Base;
  }
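  // Illustrative example (hypothetical values): with GroupSizeLog == 20 (1 MiB
  // groups) and CompactPtrScale == 4, a block at Base + 0x2ABCD0 has the
  // compact pointer 0x2ABCD, so compactPtrGroup() returns 0x2ABCD >> 16 == 2,
  // and batchGroupBase(Base, 2) returns Base + 0x200000, the start of that
  // group relative to Base. In other words, the group id of a block is simply
  // (Ptr - Base) >> GroupSizeLog.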

  // Push the blocks to their batch group. The layout will be like:
  //
  // FreeList -> BG -> BG -> BG
  //             |     |     |
  //             v     v     v
  //             TB    TB    TB
  //             |
  //             v
  //             TB
  //
  // Each BatchGroup (BG) is associated with a unique group id and its free
  // blocks are managed by a list of TransferBatches (TB). To reduce the cost
  // of inserting blocks, the BGs are kept sorted and the input `Array` is
  // expected to be sorted as well, which makes maintaining the sorted order
  // cheaper. Pass `SameGroup=true` to indicate that all blocks in the array
  // belong to the same group; the group id of each block is then not checked.
  //
  // Note that this aims at better management of dirty pages, i.e., the RSS
  // usage won't grow indefinitely. There is one exception where a block may
  // not be put into its associated group: while populating new blocks, the
  // blocks may cross different groups. However, in most cases they fall into
  // the same group and are expected to be popped soon, so it's not worth
  // sorting an almost-sorted array; we simply pass `SameGroup=true` instead.
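  //
  // As a hypothetical example: pushing blocks whose compact pointers map to
  // group ids {3, 3, 7} appends the first two blocks to the TransferBatch list
  // of the BatchGroup with GroupId 3 (creating that group first if it does not
  // exist yet), then finds or creates the BatchGroup with GroupId 7 for the
  // last block, keeping the FreeList sorted by GroupId throughout.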
  //
  // The region mutex needs to be held while calling this method.
  void pushBlocksImpl(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size,
                      bool SameGroup = false) {
    DCHECK_GT(Size, 0U);
    RegionInfo *Region = getRegionInfo(ClassId);

    auto CreateGroup = [&](uptr GroupId) {
      BatchGroup *BG = nullptr;
      TransferBatch *TB = nullptr;
      if (ClassId == SizeClassMap::BatchClassId) {
        DCHECK_GE(Size, 2U);
        BG = reinterpret_cast<BatchGroup *>(
            decompactPtr(ClassId, Array[Size - 1]));
        BG->Batches.clear();

        TB = reinterpret_cast<TransferBatch *>(
            decompactPtr(ClassId, Array[Size - 2]));
        TB->clear();
      } else {
        BG = C->createGroup();
        BG->Batches.clear();

        TB = C->createBatch(ClassId, nullptr);
        TB->clear();
      }

      BG->GroupId = GroupId;
      BG->Batches.push_front(TB);
      BG->PushedBlocks = 0;
      BG->PushedBlocksAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch =
          TransferBatch::getMaxCached(getSizeByClassId(ClassId));

      return BG;
    };

    auto InsertBlocks = [&](BatchGroup *BG, CompactPtrT *Array, u32 Size) {
      SinglyLinkedList<TransferBatch> &Batches = BG->Batches;
      TransferBatch *CurBatch = Batches.front();
      DCHECK_NE(CurBatch, nullptr);

      for (u32 I = 0; I < Size;) {
        DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
        u16 UnusedSlots =
            static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
        if (UnusedSlots == 0) {
          CurBatch = C->createBatch(
              ClassId,
              reinterpret_cast<void *>(decompactPtr(ClassId, Array[I])));
          CurBatch->clear();
          Batches.push_front(CurBatch);
          UnusedSlots = BG->MaxCachedPerBatch;
        }
        // `UnusedSlots` is u16 so the result will also fit in u16.
        u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
        CurBatch->appendFromArray(&Array[I], AppendSize);
        I += AppendSize;
      }

      BG->PushedBlocks += Size;
    };

    BatchGroup *Cur = Region->FreeList.front();

    if (ClassId == SizeClassMap::BatchClassId) {
      if (Cur == nullptr) {
        // Don't need to classify BatchClassId.
        Cur = CreateGroup(/*GroupId=*/0);
        Region->FreeList.push_front(Cur);
      }
      InsertBlocks(Cur, Array, Size);
      return;
    }

    // In the following, `Cur` always points to the BatchGroup for blocks that
    // will be pushed next. `Prev` is the element right before `Cur`.
    BatchGroup *Prev = nullptr;

    while (Cur != nullptr && compactPtrGroup(Array[0]) > Cur->GroupId) {
      Prev = Cur;
      Cur = Cur->Next;
    }

    if (Cur == nullptr || compactPtrGroup(Array[0]) != Cur->GroupId) {
      Cur = CreateGroup(compactPtrGroup(Array[0]));
      if (Prev == nullptr)
        Region->FreeList.push_front(Cur);
      else
        Region->FreeList.insert(Prev, Cur);
    }

    // All the blocks are from the same group, just push without checking the
    // group id.
    if (SameGroup) {
      InsertBlocks(Cur, Array, Size);
      return;
    }

    // The blocks are sorted by group id. Determine the segments of blocks with
    // the same group id and push each segment to its group together.
    u32 Count = 1;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
        DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->GroupId);
        InsertBlocks(Cur, Array + I - Count, Count);

        while (Cur != nullptr && compactPtrGroup(Array[I]) > Cur->GroupId) {
          Prev = Cur;
          Cur = Cur->Next;
        }

        if (Cur == nullptr || compactPtrGroup(Array[I]) != Cur->GroupId) {
          Cur = CreateGroup(compactPtrGroup(Array[I]));
          DCHECK_NE(Prev, nullptr);
          Region->FreeList.insert(Prev, Cur);
        }

        Count = 1;
      } else {
        ++Count;
      }
    }

    InsertBlocks(Cur, Array + Size - Count, Count);
  }

  // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
  // group id will be considered first.
  //
  // The region mutex needs to be held while calling this method.
  TransferBatch *popBatchImpl(CacheT *C, uptr ClassId) {
    RegionInfo *Region = getRegionInfo(ClassId);
    if (Region->FreeList.empty())
      return nullptr;

    SinglyLinkedList<TransferBatch> &Batches =
        Region->FreeList.front()->Batches;
    DCHECK(!Batches.empty());

    TransferBatch *B = Batches.front();
    Batches.pop_front();
    DCHECK_NE(B, nullptr);
    DCHECK_GT(B->getCount(), 0U);

    if (Batches.empty()) {
      BatchGroup *BG = Region->FreeList.front();
      Region->FreeList.pop_front();

      // We don't keep a BatchGroup with zero blocks, to avoid empty-checking
      // while allocating. Note that the block used to construct the BatchGroup
      // is recorded as a free block in the last element of BatchGroup::Batches,
      // which means that once we pop the last TransferBatch, the block is
      // implicitly deallocated.
      if (ClassId != SizeClassMap::BatchClassId)
        C->deallocate(SizeClassMap::BatchClassId, BG);
    }

    return B;
  }

  NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, RegionInfo *Region) {
    const uptr Size = getSizeByClassId(ClassId);
    const u16 MaxCount = TransferBatch::getMaxCached(Size);

    const uptr RegionBeg = Region->RegionBeg;
    const uptr MappedUser = Region->MappedUser;
    const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
    // Map more space for blocks, if necessary.
    if (TotalUserBytes > MappedUser) {
      // Do the mmap for the user memory.
      const uptr MapSize =
          roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
      const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
      if (UNLIKELY(RegionBase + MappedUser + MapSize > RegionSize)) {
        if (!Region->Exhausted) {
          Region->Exhausted = true;
          ScopedString Str;
          getStats(&Str);
          Str.append(
              "Scudo OOM: The process has exhausted %zuM for size class %zu.\n",
              RegionSize >> 20, Size);
          Str.output();
        }
        return false;
      }
      if (MappedUser == 0)
        Region->Data = Data;
      if (UNLIKELY(!map(
              reinterpret_cast<void *>(RegionBeg + MappedUser), MapSize,
              "scudo:primary",
              MAP_ALLOWNOMEM | MAP_RESIZABLE |
                  (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG : 0),
              &Region->Data))) {
        return false;
      }
      Region->MappedUser += MapSize;
      C->getStats().add(StatMapped, MapSize);
    }

    const u32 NumberOfBlocks = Min(
        MaxNumBatches * MaxCount,
        static_cast<u32>((Region->MappedUser - Region->AllocatedUser) / Size));
    DCHECK_GT(NumberOfBlocks, 0);

    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatch::MaxNumCached;
    CompactPtrT ShuffleArray[ShuffleArraySize];
    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);

    const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
    uptr P = RegionBeg + Region->AllocatedUser;
    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
      ShuffleArray[I] = compactPtrInternal(CompactPtrBase, P);
    // No need to shuffle the blocks in the batch size class.
    if (ClassId != SizeClassMap::BatchClassId)
      shuffle(ShuffleArray, NumberOfBlocks, &Region->RandState);
    for (u32 I = 0; I < NumberOfBlocks;) {
      // `MaxCount` is u16 so the result will also fit in u16.
      const u16 N = static_cast<u16>(Min<u32>(MaxCount, NumberOfBlocks - I));
      // Note that the N blocks here may have different group ids. Given that
      // this only happens when crossing a group size boundary, treat them as
      // belonging to the same group here to avoid sorting the almost-sorted
      // blocks.
      pushBlocksImpl(C, ClassId, &ShuffleArray[I], N, /*SameGroup=*/true);
      I += N;
    }

    const uptr AllocatedUser = Size * NumberOfBlocks;
    C->getStats().add(StatFree, AllocatedUser);
    Region->AllocatedUser += AllocatedUser;

    return true;
  }

  void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
    RegionInfo *Region = getRegionInfo(ClassId);
    if (Region->MappedUser == 0)
      return;
    const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
    const uptr TotalChunks = Region->AllocatedUser / getSizeByClassId(ClassId);
    Str->append("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu total: %6zu rss: %6zuK releases: %6zu last "
                "released: %6zuK region: 0x%zx (0x%zx)\n",
                Region->Exhausted ? "F" : " ", ClassId,
                getSizeByClassId(ClassId), Region->MappedUser >> 10,
                Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks, InUse,
                TotalChunks, Rss >> 10, Region->ReleaseInfo.RangesReleased,
                Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
                getRegionBaseByClassId(ClassId));
  }

  NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
                                 bool Force = false) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr PageSize = getPageSizeCached();

    DCHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
    const uptr BytesInFreeList =
        Region->AllocatedUser -
        (Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks) * BlockSize;
    if (BytesInFreeList < PageSize)
      return 0; // No chance to release anything.
    const uptr BytesPushed = (Region->Stats.PushedBlocks -
                              Region->ReleaseInfo.PushedBlocksAtLastRelease) *
                             BlockSize;
    if (BytesPushed < PageSize)
      return 0; // Nothing new to release.

    bool CheckDensity = BlockSize < PageSize / 16U;
    // Releasing smaller blocks is expensive, so we want to make sure that a
    // significant amount of bytes are free, and that a good amount of batches
    // have been pushed to the freelist, before attempting to release.
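    // For instance (illustrative numbers only): with 1 MiB of AllocatedUser,
    // a non-forced release for such a small size class is skipped unless at
    // least 1 MiB / 16 = 64 KiB worth of blocks have been pushed since the
    // last release.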
    if (CheckDensity) {
      if (!Force && BytesPushed < Region->AllocatedUser / 16U)
        return 0;
    }

    if (!Force) {
      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
      if (IntervalMs < 0)
        return 0;
      if (Region->ReleaseInfo.LastReleaseAtNs +
              static_cast<u64>(IntervalMs) * 1000000 >
          getMonotonicTime()) {
        return 0; // Memory was returned recently.
      }
    }

    const uptr GroupSize = (1U << GroupSizeLog);
    const uptr AllocatedUserEnd = Region->AllocatedUser + Region->RegionBeg;
    ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
    PageReleaseContext Context(BlockSize, Region->AllocatedUser,
                               /*NumberOfRegions=*/1U);

    const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
    auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
      return decompactPtrInternal(CompactPtrBase, CompactPtr);
    };
    for (BatchGroup &BG : Region->FreeList) {
      const uptr PushedBytesDelta =
          BG.PushedBlocks - BG.PushedBlocksAtLastCheckpoint;
      if (PushedBytesDelta * BlockSize < PageSize)
        continue;

      // The group boundary does not necessarily have the same alignment as the
      // Region; a group may sit across a Region boundary, which means we may
      // have the following two cases,
      //
      // 1. Group boundary sits before RegionBeg.
      //
      //                (BatchGroupBeg)
      // batchGroupBase  RegionBeg       BatchGroupEnd
      //        |            |                |
      //        v            v                v
      //        +------------+----------------+
      //         \                           /
      //          ------   GroupSize   ------
      //
      // 2. Group boundary sits after RegionBeg.
      //
      //               (BatchGroupBeg)
      //    RegionBeg  batchGroupBase               BatchGroupEnd
      //        |           |                             |
      //        v           v                             v
      //        +-----------+-----------------------------+
      //                     \                           /
      //                      ------   GroupSize   ------
      //
      // Note that in the first case, the group range before RegionBeg is never
      // used. Therefore, while calculating the used group size, we should
      // exclude that part to get the correct size.
      const uptr BatchGroupBeg =
          Max(batchGroupBase(CompactPtrBase, BG.GroupId), Region->RegionBeg);
      DCHECK_GE(AllocatedUserEnd, BatchGroupBeg);
      const uptr BatchGroupEnd =
          batchGroupBase(CompactPtrBase, BG.GroupId) + GroupSize;
      const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
                                          ? BatchGroupEnd - BatchGroupBeg
                                          : AllocatedUserEnd - BatchGroupBeg;
      if (AllocatedGroupSize == 0)
        continue;

      // TransferBatches are pushed in front of BG.Batches. The first one may
      // not have all of its slots used.
      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
                             BG.Batches.front()->getCount();
      const uptr BytesInBG = NumBlocks * BlockSize;
      // Given the randomness property, we try to release the pages only if the
      // bytes used by free blocks exceed a certain proportion of the group
      // size. Note that this heuristic only applies when all the spaces in a
      // BatchGroup are allocated.
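      // As an illustration (numbers are hypothetical): with BlockSize == 32,
      // the threshold below evaluates to 100 - 1 - 32 / 16 = 97, i.e. this
      // group is only considered for release once its free blocks cover at
      // least 97% of its allocated size.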
      if (CheckDensity && (BytesInBG * 100U) / AllocatedGroupSize <
                              (100U - 1U - BlockSize / 16U)) {
        continue;
      }

      BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
      // Note that we don't always visit the blocks in each BatchGroup, so we
      // may miss the chance to release pages that cross BatchGroups.
      Context.markFreeBlocks(BG.Batches, DecompactPtr, Region->RegionBeg);
    }

    if (!Context.hasBlockMarked())
      return 0;

    auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);

    if (Recorder.getReleasedRangesCount() > 0) {
      Region->ReleaseInfo.PushedBlocksAtLastRelease =
          Region->Stats.PushedBlocks;
      Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
    }
    Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
    return Recorder.getReleasedBytes();
  }
};

} // namespace scudo

#endif // SCUDO_PRIMARY64_H_