//===-- release_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "list.h"
#include "release.h"
#include "size_class_map.h"

#include <string.h>

#include <algorithm>
#include <random>
#include <set>
#include <string>
#include <vector>

TEST(ScudoReleaseTest, PackedCounterArray) {
  for (scudo::uptr I = 0; I < SCUDO_WORDSIZE; I++) {
    // Various valid counter max values packed into one word.
    scudo::PackedCounterArray Counters2N(1, 1UL << I);
    EXPECT_EQ(sizeof(scudo::uptr), Counters2N.getBufferSize());
    // Check the "all bits set" values too.
    scudo::PackedCounterArray Counters2N1_1(1, ~0UL >> I);
    EXPECT_EQ(sizeof(scudo::uptr), Counters2N1_1.getBufferSize());
    // Verify the packing ratio: each counter is expected to be packed into the
    // closest power-of-2 number of bits.
    scudo::PackedCounterArray Counters(SCUDO_WORDSIZE, 1UL << I);
    EXPECT_EQ(sizeof(scudo::uptr) * scudo::roundUpToPowerOfTwo(I + 1),
              Counters.getBufferSize());
  }

  // Go through 1, 2, 4, 8, .. {32,64} bits per counter.
  for (scudo::uptr I = 0; (SCUDO_WORDSIZE >> I) != 0; I++) {
    // Make sure counters request one memory page for the buffer.
    const scudo::uptr NumCounters =
        (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
    scudo::PackedCounterArray Counters(NumCounters, 1UL << ((1UL << I) - 1));
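    // Increment the counters one at a time and check that the previously
    // incremented counter reads back exactly 1, i.e. adjacent counters do not
    // bleed into each other.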
    Counters.inc(0);
    for (scudo::uptr C = 1; C < NumCounters - 1; C++) {
      EXPECT_EQ(0UL, Counters.get(C));
      Counters.inc(C);
      EXPECT_EQ(1UL, Counters.get(C - 1));
    }
    EXPECT_EQ(0UL, Counters.get(NumCounters - 1));
    Counters.inc(NumCounters - 1);
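    // incRange() bumps every counter to 2, which only fits when the counters
    // are wider than a single bit, hence the I > 0 guard.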
    if (I > 0) {
      Counters.incRange(0, NumCounters - 1);
      for (scudo::uptr C = 0; C < NumCounters; C++)
        EXPECT_EQ(2UL, Counters.get(C));
    }
  }
}

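// Records released ranges as a string of page markers: '.' for pages kept
// around, 'x' for pages released, to be matched against the expected patterns
// in the test below.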
class StringRangeRecorder {
public:
  std::string ReportedPages;

  StringRangeRecorder()
      : PageSizeScaledLog(scudo::getLog2(scudo::getPageSizeCached())) {}

  void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
    From >>= PageSizeScaledLog;
    To >>= PageSizeScaledLog;
    EXPECT_LT(From, To);
    if (!ReportedPages.empty())
      EXPECT_LT(LastPageReported, From);
    ReportedPages.append(From - LastPageReported, '.');
    ReportedPages.append(To - From, 'x');
    LastPageReported = To;
  }

private:
  const scudo::uptr PageSizeScaledLog;
  scudo::uptr LastPageReported = 0;
};

TEST(ScudoReleaseTest, FreePagesRangeTracker) {
  // 'x' denotes a page to be released, '.' denotes a page to be kept around.
  const char *TestCases[] = {
      "",
      ".",
      "x",
      "........",
      "xxxxxxxxxxx",
      "..............xxxxx",
      "xxxxxxxxxxxxxxxxxx.....",
      "......xxxxxxxx........",
      "xxx..........xxxxxxxxxxxxxxx",
      "......xxxx....xxxx........",
      "xxx..........xxxxxxxx....xxxxxxx",
      "x.x.x.x.x.x.x.x.x.x.x.x.",
      ".x.x.x.x.x.x.x.x.x.x.x.x",
      ".x.x.x.x.x.x.x.x.x.x.x.x.",
      "x.x.x.x.x.x.x.x.x.x.x.x.x",
  };
  typedef scudo::FreePagesRangeTracker<StringRangeRecorder> RangeTracker;

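  // Feed each test case to the tracker one page at a time; adjacent released
  // pages are expected to be coalesced into a single call to the recorder.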
  for (auto TestCase : TestCases) {
    StringRangeRecorder Recorder;
    RangeTracker Tracker(&Recorder);
    for (scudo::uptr I = 0; TestCase[I] != 0; I++)
      Tracker.processNextPage(TestCase[I] == 'x');
    Tracker.finish();
    // Strip trailing '.'-pages before comparing the results, as they are never
    // reported to the recorder anyway.
    const char *LastX = strrchr(TestCase, 'x');
    std::string Expected(TestCase,
                         LastX == nullptr ? 0 : (LastX - TestCase + 1));
    EXPECT_STREQ(Expected.c_str(), Recorder.ReportedPages.c_str());
  }
}

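// Records the base address of every released page so that the test below can
// query, page by page, whether a given page was released.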
class ReleasedPagesRecorder {
public:
  std::set<scudo::uptr> ReportedPages;

  void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
    const scudo::uptr PageSize = scudo::getPageSizeCached();
    for (scudo::uptr I = From; I < To; I += PageSize)
      ReportedPages.insert(I);
  }
};

// Simplified version of a TransferBatch.
template <class SizeClassMap> struct FreeBatch {
  static const scudo::u32 MaxCount = SizeClassMap::MaxNumCachedHint;
  void clear() { Count = 0; }
  void add(scudo::uptr P) {
    DCHECK_LT(Count, MaxCount);
    Batch[Count++] = P;
  }
  scudo::u32 getCount() const { return Count; }
  scudo::uptr get(scudo::u32 I) const {
    DCHECK_LT(I, Count);
    return Batch[I];
  }
  // Required by scudo::SinglyLinkedList, which links batches through Next.
  FreeBatch *Next;

private:
  scudo::u32 Count;
  scudo::uptr Batch[MaxCount];
};

template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
  typedef FreeBatch<SizeClassMap> Batch;
  const scudo::uptr AllocatedPagesCount = 1024;
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  std::mt19937 R;
  scudo::u32 RandState = 42;

  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
    const scudo::uptr MaxBlocks = AllocatedPagesCount * PageSize / BlockSize;

    // Generate the random free list.
    std::vector<scudo::uptr> FreeArray;
    bool InFreeRange = false;
    scudo::uptr CurrentRangeEnd = 0;
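    // Split the blocks, in address order, into runs of random length (1 to 128
    // blocks each), with each run randomly marked as free or used.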
    for (scudo::uptr I = 0; I < MaxBlocks; I++) {
      if (I == CurrentRangeEnd) {
        InFreeRange = (scudo::getRandomU32(&RandState) & 1U) == 1;
        CurrentRangeEnd += (scudo::getRandomU32(&RandState) & 0x7f) + 1;
      }
      if (InFreeRange)
        FreeArray.push_back(I * BlockSize);
    }
    if (FreeArray.empty())
      continue;
    // Shuffle the array to ensure that the order is irrelevant.
    std::shuffle(FreeArray.begin(), FreeArray.end(), R);

    // Build the FreeList from the FreeArray.
    scudo::SinglyLinkedList<Batch> FreeList;
    FreeList.clear();
    Batch *CurrentBatch = nullptr;
    for (auto const &Block : FreeArray) {
      if (!CurrentBatch) {
        CurrentBatch = new Batch;
        CurrentBatch->clear();
        FreeList.push_back(CurrentBatch);
      }
      CurrentBatch->add(Block);
      if (CurrentBatch->getCount() == Batch::MaxCount)
        CurrentBatch = nullptr;
    }

    // Release the memory.
    ReleasedPagesRecorder Recorder;
    releaseFreeMemoryToOS(FreeList, 0, AllocatedPagesCount, BlockSize,
                          &Recorder);

    // Verify that no released page is touched by a used chunk, and that every
    // range of free chunks spanning one or more whole memory pages had those
    // pages released.
    scudo::uptr VerifiedReleasedPages = 0;
    std::set<scudo::uptr> FreeBlocks(FreeArray.begin(), FreeArray.end());

    scudo::uptr CurrentBlock = 0;
    InFreeRange = false;
    scudo::uptr CurrentFreeRangeStart = 0;
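    // Note that the loop below goes one block past the end of the region so
    // that a trailing free range is still closed and verified.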
    for (scudo::uptr I = 0; I <= MaxBlocks; I++) {
      const bool IsFreeBlock =
          FreeBlocks.find(CurrentBlock) != FreeBlocks.end();
      if (IsFreeBlock) {
        if (!InFreeRange) {
          InFreeRange = true;
          CurrentFreeRangeStart = CurrentBlock;
        }
      } else {
        // Verify that this used chunk does not touch any released page.
        const scudo::uptr StartPage = CurrentBlock / PageSize;
        const scudo::uptr EndPage = (CurrentBlock + BlockSize - 1) / PageSize;
        for (scudo::uptr J = StartPage; J <= EndPage; J++) {
          const bool PageReleased = Recorder.ReportedPages.find(J * PageSize) !=
                                    Recorder.ReportedPages.end();
          EXPECT_EQ(false, PageReleased);
        }

        if (InFreeRange) {
          InFreeRange = false;
          // Verify that all whole memory pages covered by this range of free
          // chunks were released.
          scudo::uptr P = scudo::roundUpTo(CurrentFreeRangeStart, PageSize);
          while (P + PageSize <= CurrentBlock) {
            const bool PageReleased =
                Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
            EXPECT_EQ(true, PageReleased);
            VerifiedReleasedPages++;
            P += PageSize;
          }
        }
      }

      CurrentBlock += BlockSize;
    }

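    // Every page reported by the recorder must have been matched by the
    // verification above, i.e. no page outside a whole free range was
    // released.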
    EXPECT_EQ(Recorder.ReportedPages.size(), VerifiedReleasedPages);

    while (!FreeList.empty()) {
      CurrentBatch = FreeList.front();
      FreeList.pop_front();
      delete CurrentBatch;
    }
  }
}

TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSDefault) {
  testReleaseFreeMemoryToOS<scudo::DefaultSizeClassMap>();
}

TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSAndroid) {
  testReleaseFreeMemoryToOS<scudo::AndroidSizeClassMap>();
}

TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSSvelte) {
  testReleaseFreeMemoryToOS<scudo::SvelteSizeClassMap>();
}