//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <vector>
#include <random>
#include <set>

using namespace __sanitizer;

// Too slow for debug build
#if !SANITIZER_DEBUG

#if SANITIZER_CAN_USE_ALLOCATOR64
#if SANITIZER_WINDOWS
// On Windows 64-bit there is no easy way to find a large enough fixed address
// space that is always available. Thus, a dynamically allocated address space
// is used instead (i.e. ~(uptr)0).
static const uptr kAllocatorSpace = ~(uptr)0;
static const uptr kAllocatorSize  =  0x8000000000ULL;  // 512G
static const u64 kAddressSpaceSize = 1ULL << 47;
typedef DefaultSizeClassMap SizeClassMap;
#elif SANITIZER_ANDROID && defined(__aarch64__)
static const uptr kAllocatorSpace = 0x3000000000ULL;
static const uptr kAllocatorSize  = 0x2000000000ULL;
static const u64 kAddressSpaceSize = 1ULL << 39;
typedef VeryCompactSizeClassMap SizeClassMap;
#else
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;
typedef DefaultSizeClassMap SizeClassMap;
#endif

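// Allocator parameter ("AP") structs for SizeClassAllocator64. Roughly:
// kSpaceBeg/kSpaceSize give the virtual address range the primary allocator
// manages (kSpaceBeg == ~(uptr)0 requests a dynamically chosen base, as noted
// above for Windows), kMetadataSize reserves per-chunk metadata bytes, and
// AddressSpaceView controls how allocator memory is read (in-process via
// LocalAddressSpaceView in these tests). A typical instantiation, mirroring
// the aliases further down, would be:
//   using MyAllocator = SizeClassAllocator64<AP64<LocalAddressSpaceView>>;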
template <typename AddressSpaceViewTy>
struct AP64 {  // Allocator Params. Short name for shorter demangled names.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Dyn {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Compact {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64VeryCompact {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = 1ULL << 37;
  static const uptr kMetadataSize = 16;
  typedef VeryCompactSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Dense {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef DenseSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceView>
using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>;
using Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64CompactASVT =
    SizeClassAllocator64<AP64Compact<AddressSpaceView>>;
using Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64VeryCompactASVT =
    SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>;
using Allocator64VeryCompact =
    Allocator64VeryCompactASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>;
using Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>;

#elif defined(__mips64)
static const u64 kAddressSpaceSize = 1ULL << 40;
#elif defined(__aarch64__)
static const u64 kAddressSpaceSize = 1ULL << 39;
#elif defined(__s390x__)
static const u64 kAddressSpaceSize = 1ULL << 53;
#elif defined(__s390__)
static const u64 kAddressSpaceSize = 1ULL << 31;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

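// The 32-bit allocator variants below track region ownership with a flat
// byte map: one byte per 2^kRegionSizeLog-sized region, hence
// kAddressSpaceSize >> kRegionSizeLog entries.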
static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

template <typename AddressSpaceViewTy>
struct AP32Compact {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  using ByteMap = FlatByteMap<kFlatByteMapSize, AddressSpaceView>;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
template <typename AddressSpaceView>
using Allocator32CompactASVT =
    SizeClassAllocator32<AP32Compact<AddressSpaceView>>;
using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, VeryCompactSizeClassMap) {
  TestSizeClassMap<VeryCompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}

TEST(SanitizerCommon, DenseSizeClassMap) {
  TestSizeClassMap<DenseSizeClassMap>();
}

template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {
    1, 16,  30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000
  };

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 4000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
  TestSizeClassAllocator<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
// FIXME(kostyak): find values so that these work on Android as well.
TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}

TEST(SanitizerCommon, SizeClassAllocator64Dense) {
  TestSizeClassAllocator<Allocator64Dense>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
  TestSizeClassAllocator<Allocator64VeryCompact>();
}
#endif
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

template <typename AddressSpaceViewTy>
struct AP32SeparateBatches {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef DefaultSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  using ByteMap = FlatByteMap<kFlatByteMapSize, AddressSpaceView>;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
};
template <typename AddressSpaceView>
using Allocator32SeparateBatchesASVT =
    SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>;
using Allocator32SeparateBatches =
    Allocator32SeparateBatchesASVT<LocalAddressSpaceView>;

TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
  TestSizeClassAllocator<Allocator32SeparateBatches>();
}

template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif

#endif
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= TotalSize / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
  // Does not have > 4GB per size class.
  SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
}
#endif
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

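// Counts OnMap/OnUnmap invocations so the tests below can verify that the
// allocators report their map/unmap activity through the callback.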
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS

template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP64WithCallback {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef TestMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<AP64WithCallback<>> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init(kReleaseToOSIntervalNever);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  const size_t kNumChunks = 128;
  uint32_t chunks[kNumChunks];
  a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
  // State + alloc + metadata + freearray.
  EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif
#endif

template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP32WithCallback {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  using ByteMap = FlatByteMap<kFlatByteMapSize, AddressSpaceView>;
  typedef TestMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<AP32WithCallback<>> Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init(kReleaseToOSIntervalNever);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack>  cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

// Don't test OOM conditions on Win64 because it causes other tests on the same
// machine to OOM.
#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  Allocator64 a;
  a.Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();

  const size_t kNumChunks = 128;
  uint32_t chunks[kNumChunks];
  bool allocation_failed = false;
  for (int i = 0; i < 1000000; i++) {
    if (!a.GetFromAllocator(&stats, 52, chunks, kNumChunks)) {
      allocation_failed = true;
      break;
    }
  }
  EXPECT_EQ(allocation_failed, true);

  a.TestOnlyUnmap();
}
#endif

TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<NoOpMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments. Test with 256MB alignment on x64 non-Windows machines.
  // Windows doesn't overcommit, and many machines do not have 25.6GB of swap.
  uptr max_alignment =
      (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}

template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  std::mt19937 r;

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    std::shuffle(allocated.begin(), allocated.end(), r);

    // Test ForEachChunk(...)
    {
      std::set<void *> reported_chunks;
      auto cb = [](uptr chunk, void *arg) {
        auto reported_chunks_ptr = reinterpret_cast<std::set<void *> *>(arg);
        auto pair =
            reported_chunks_ptr->insert(reinterpret_cast<void *>(chunk));
        // Check chunk is never reported more than once.
        ASSERT_TRUE(pair.second);
      };
      a->ForEachChunk(cb, reinterpret_cast<void *>(&reported_chunks));
      for (const auto &allocated_ptr : allocated) {
        ASSERT_NE(reported_chunks.find(allocated_ptr), reported_chunks.end());
      }
    }

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
  TestCombinedAllocator<Allocator64Dynamic,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Dynamic> > ();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
  TestCombinedAllocator<Allocator64VeryCompact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64VeryCompact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}

template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init(kReleaseToOSIntervalNever);
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Dynamic> >();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64VeryCompact> >();
}
#endif
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

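// Worker for the leak test below: allocates once through the shared static
// cache and drains the cache back to the allocator, so repeated threads
// should not increase the allocator's total memory use.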
void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init(kReleaseToOSIntervalNever);
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
    PTHREAD_JOIN(t, 0);
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init(kReleaseToOSIntervalNever);
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
  PTHREAD_JOIN(t, 0);

  allocator.TestOnlyUnmap();
}
#endif

TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, LargeAlloc) {
  void *p = InternalAlloc(10 << 20);
  InternalFree(p);
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalMmapVector<int> int_buf(kSize);
    EXPECT_EQ((uptr)kSize, int_buf.size()); // NOLINT
  }
  InternalMmapVector<char> char_buf(kSize);
  EXPECT_EQ((uptr)kSize, char_buf.size()); // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

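// ForEachChunk callback that records every reported chunk in a std::set.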
void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}

template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
  TestSizeClassAllocatorIteration<Allocator64Dynamic>();
}
#endif
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<NoOpMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<NoOpMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  a.ForceLock();
  for (uptr i = 0; i < kNumAllocs  * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}


// Don't test OOM conditions on Win64 because it causes other tests on the same
// machine to OOM.
#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
typedef SizeClassMap<3, 4, 8, 63, 128, 16> SpecialSizeClassMap;
template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP64_SpecialSizeClassMap {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef SpecialSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<>> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.

  const uptr kClassID = 107;
  const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
  EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);

  const uptr Class2 = 100;
  const uptr Size2 = SpecialSizeClassMap::Size(Class2);
  ASSERT_EQ(Size2 * 8, kRegionSize);
  char *p[7];
  for (int i = 0; i < 7; i++) {
    p[i] = (char*)cache.Allocate(a, Class2);
    EXPECT_NE(p[i], nullptr);
    fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
    p[i][Size2 - 1] = 42;
    if (i) ASSERT_LT(p[i - 1], p[i]);
  }
  EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
  cache.Deallocate(a, Class2, p[0]);
  cache.Drain(a);
  ASSERT_EQ(p[6][Size2 - 1], 42);
  a->TestOnlyUnmap();
  delete a;
}

#endif

#if SANITIZER_CAN_USE_ALLOCATOR64

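// Memory mapper stub that never maps anything: it only records the buffer
// size that PackedCounterArray requested, so the packing arithmetic can be
// checked without touching real memory.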
class NoMemoryMapper {
 public:
  uptr last_request_buffer_size;

  NoMemoryMapper() : last_request_buffer_size(0) {}

  uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
    last_request_buffer_size = buffer_size;
    return 0;
  }
  void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {}
};

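// Memory mapper that hands out a single zeroed page surrounded by PROT_NONE
// guard pages, so any out-of-bounds access by PackedCounterArray faults
// immediately.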
class RedZoneMemoryMapper {
 public:
  RedZoneMemoryMapper() {
    const auto page_size = GetPageSize();
    buffer = MmapOrDie(3ULL * page_size, "");
    MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);
    MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);
  }
  ~RedZoneMemoryMapper() {
    UnmapOrDie(buffer, 3 * GetPageSize());
  }

  uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
    const auto page_size = GetPageSize();
    CHECK_EQ(buffer_size, page_size);
    memset(reinterpret_cast<void*>(reinterpret_cast<uptr>(buffer) + page_size),
           0, page_size);
    return reinterpret_cast<uptr>(buffer) + page_size;
  }
  void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {}

 private:
  void *buffer;
};

TEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) {
  NoMemoryMapper no_memory_mapper;
  typedef Allocator64::PackedCounterArray<NoMemoryMapper>
      NoMemoryPackedCounterArray;

  for (int i = 0; i < 64; i++) {
    // Various valid counter max values packed into one word.
    NoMemoryPackedCounterArray counters_2n(1, 1ULL << i, &no_memory_mapper);
    EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);

    // Check the "all bit set" values too.
    NoMemoryPackedCounterArray counters_2n1_1(1, ~0ULL >> i, &no_memory_mapper);
    EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);

    // Verify the packing ratio: each counter is expected to be packed into
    // the closest power-of-2 number of bits.
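    // E.g. 64 counters with a max value of 2^i each need i + 1 bits per
    // counter, rounded up to a power of two, i.e.
    // 64 * RoundUpToPowerOfTwo(i + 1) bits == 8 * RoundUpToPowerOfTwo(i + 1)
    // bytes of buffer.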
    NoMemoryPackedCounterArray counters(64, 1ULL << i, &no_memory_mapper);
    EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),
              no_memory_mapper.last_request_buffer_size);
  }

  RedZoneMemoryMapper memory_mapper;
  typedef Allocator64::PackedCounterArray<RedZoneMemoryMapper>
      RedZonePackedCounterArray;
  // Go through 1, 2, 4, 8, .. 64 bits per counter.
  for (int i = 0; i < 7; i++) {
    // Make sure counters request one memory page for the buffer.
    const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i);
    RedZonePackedCounterArray counters(kNumCounters,
                                       1ULL << ((1 << i) - 1),
                                       &memory_mapper);
    counters.Inc(0);
    for (u64 c = 1; c < kNumCounters - 1; c++) {
      ASSERT_EQ(0ULL, counters.Get(c));
      counters.Inc(c);
      ASSERT_EQ(1ULL, counters.Get(c - 1));
    }
    ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));
    counters.Inc(kNumCounters - 1);

    if (i > 0) {
      counters.IncRange(0, kNumCounters - 1);
      for (u64 c = 0; c < kNumCounters; c++)
        ASSERT_EQ(2ULL, counters.Get(c));
    }
  }
}

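// Records released page ranges as a string: 'x' marks a released page and
// '.' marks a page that was kept, matching the legend used in the test cases
// below.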
class RangeRecorder {
 public:
  std::string reported_pages;

  RangeRecorder()
      : page_size_scaled_log(
            Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
        last_page_reported(0) {}

  void ReleasePageRangeToOS(u32 from, u32 to) {
    from >>= page_size_scaled_log;
    to >>= page_size_scaled_log;
    ASSERT_LT(from, to);
    if (!reported_pages.empty())
      ASSERT_LT(last_page_reported, from);
    reported_pages.append(from - last_page_reported, '.');
    reported_pages.append(to - from, 'x');
    last_page_reported = to;
  }
 private:
  const uptr page_size_scaled_log;
  u32 last_page_reported;
};

TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {
  typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker;

  // 'x' denotes a page to be released, '.' denotes a page to be kept around.
  const char* test_cases[] = {
      "",
      ".",
      "x",
      "........",
      "xxxxxxxxxxx",
      "..............xxxxx",
      "xxxxxxxxxxxxxxxxxx.....",
      "......xxxxxxxx........",
      "xxx..........xxxxxxxxxxxxxxx",
      "......xxxx....xxxx........",
      "xxx..........xxxxxxxx....xxxxxxx",
      "x.x.x.x.x.x.x.x.x.x.x.x.",
      ".x.x.x.x.x.x.x.x.x.x.x.x",
      ".x.x.x.x.x.x.x.x.x.x.x.x.",
      "x.x.x.x.x.x.x.x.x.x.x.x.x",
  };

  for (auto test_case : test_cases) {
    RangeRecorder range_recorder;
    RangeTracker tracker(&range_recorder);
    for (int i = 0; test_case[i] != 0; i++)
      tracker.NextPage(test_case[i] == 'x');
    tracker.Done();
    // Strip trailing '.'-pages before comparing the results as they are not
    // going to be reported to range_recorder anyway.
    const char* last_x = strrchr(test_case, 'x');
    std::string expected(
        test_case,
        last_x == nullptr ? 0 : (last_x - test_case + 1));
    EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str());
  }
}

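// Memory mapper that remembers which pages ReleasePageRangeToOS reported as
// released; the counter buffer is backed by calloc/free instead of a real
// mapping.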
class ReleasedPagesTrackingMemoryMapper {
 public:
  std::set<u32> reported_pages;

  uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
    reported_pages.clear();
    return reinterpret_cast<uptr>(calloc(1, buffer_size));
  }
  void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
    free(reinterpret_cast<void*>(buffer));
  }

  void ReleasePageRangeToOS(u32 from, u32 to) {
    uptr page_size_scaled =
        GetPageSizeCached() >> Allocator64::kCompactPtrScale;
    for (u32 i = from; i < to; i += page_size_scaled)
      reported_pages.insert(i);
  }
};

template <class Allocator>
void TestReleaseFreeMemoryToOS() {
  ReleasedPagesTrackingMemoryMapper memory_mapper;
  const uptr kAllocatedPagesCount = 1024;
  const uptr page_size = GetPageSizeCached();
  const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale;
  std::mt19937 r;
  uint32_t rnd_state = 42;

  for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID;
      class_id++) {
    const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id);
    const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale;
    const uptr max_chunks =
        kAllocatedPagesCount * GetPageSizeCached() / chunk_size;

    // Generate the random free list.
    std::vector<u32> free_array;
    bool in_free_range = false;
    uptr current_range_end = 0;
    for (uptr i = 0; i < max_chunks; i++) {
      if (i == current_range_end) {
        in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;
        current_range_end += my_rand_r(&rnd_state) % 100 + 1;
      }
      if (in_free_range)
        free_array.push_back(i * chunk_size_scaled);
    }
    if (free_array.empty())
      continue;
    // Shuffle free_array to verify that ReleaseFreeMemoryToOS does not depend on
    // the list ordering.
    std::shuffle(free_array.begin(), free_array.end(), r);

    Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
                                     chunk_size, kAllocatedPagesCount,
                                     &memory_mapper);

    // Verify that there are no released pages touched by used chunks and all
    // ranges of free chunks big enough to contain the entire memory pages had
    // these pages released.
    uptr verified_released_pages = 0;
    std::set<u32> free_chunks(free_array.begin(), free_array.end());

    u32 current_chunk = 0;
    in_free_range = false;
    u32 current_free_range_start = 0;
    for (uptr i = 0; i <= max_chunks; i++) {
      bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end();

      if (is_free_chunk) {
        if (!in_free_range) {
          in_free_range = true;
          current_free_range_start = current_chunk;
        }
      } else {
        // Verify that this used chunk does not touch any released page.
        for (uptr i_page = current_chunk / page_size_scaled;
             i_page <= (current_chunk + chunk_size_scaled - 1) /
                       page_size_scaled;
             i_page++) {
          bool page_released =
              memory_mapper.reported_pages.find(i_page * page_size_scaled) !=
              memory_mapper.reported_pages.end();
          ASSERT_EQ(false, page_released);
        }

        if (in_free_range) {
          in_free_range = false;
          // Verify that all entire memory pages covered by this range of free
          // chunks were released.
          u32 page = RoundUpTo(current_free_range_start, page_size_scaled);
          while (page + page_size_scaled <= current_chunk) {
            bool page_released =
                memory_mapper.reported_pages.find(page) !=
                memory_mapper.reported_pages.end();
            ASSERT_EQ(true, page_released);
            verified_released_pages++;
            page += page_size_scaled;
          }
        }
      }

      current_chunk += chunk_size_scaled;
    }

    ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages);
  }
}

TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64>();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64Compact>();
}

TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();
}
#endif  // !SANITIZER_ANDROID

#endif  // SANITIZER_CAN_USE_ALLOCATOR64

TEST(SanitizerCommon, TwoLevelByteMap) {
  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
  const u64 n = kSize1 * kSize2;
  TwoLevelByteMap<kSize1, kSize2> m;
  m.Init();
  for (u64 i = 0; i < n; i += 7) {
    m.set(i, (i % 100) + 1);
  }
  for (u64 j = 0; j < n; j++) {
    if (j % 7)
      EXPECT_EQ(m[j], 0);
    else
      EXPECT_EQ(m[j], (j % 100) + 1);
  }

  m.TestOnlyUnmap();
}

template <typename AddressSpaceView>
using TestByteMapASVT =
    TwoLevelByteMap<1 << 12, 1 << 13, AddressSpaceView, TestMapUnmapCallback>;
using TestByteMap = TestByteMapASVT<LocalAddressSpaceView>;

struct TestByteMapParam {
  TestByteMap *m;
  size_t shard;
  size_t num_shards;
};

void *TwoLevelByteMapUserThread(void *param) {
  TestByteMapParam *p = (TestByteMapParam*)param;
  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
    size_t val = (i % 100) + 1;
    p->m->set(i, val);
    EXPECT_EQ((*p->m)[i], val);
  }
  return 0;
}

TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
  TestByteMap m;
  m.Init();
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  static const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  TestByteMapParam p[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    p[i].m = &m;
    p[i].shard = i;
    p[i].num_shards = kNumThreads;
    PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
  m.TestOnlyUnmap();
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}

#endif  // #if !SANITIZER_DEBUG