//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "chunk.h"
#include "combined.h"

#include <algorithm>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <stdlib.h>
#include <string.h>
#include <thread>
#include <utility>
#include <vector>

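// Common test parameters: all test allocations are attributed to the Malloc
// origin, and the minimum alignment log is 3 (8 bytes) on 32-bit platforms
// and 4 (16 bytes) on 64-bit ones.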
static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

// Fuchsia complains that the function is not used.
UNUSED static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android; without this we can end
  // up spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

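// Predicts whether a (Size, Alignment) request will be served by the primary
// allocator, by mirroring the needed-size computation that the combined
// allocator performs before picking a backend.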
template <class AllocatorT>
bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUpTo(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}

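// With memory tagging active, a write one byte before the chunk (into the
// header granule) is expected to trap. A write one byte past the rounded-up
// size is also expected to trap for primary allocations under tagging, and
// for minimally aligned secondary allocations.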
template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUpTo(Size, MinAlignment);
  if (Allocator->useMemoryTaggingTestOnly())
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[-1] = 0xaa;
        },
        "");
  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
          ? Allocator->useMemoryTaggingTestOnly()
          : Alignment == MinAlignment) {
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[Size] = 0xaa;
        },
        "");
  }
}

template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
  TestAllocator() {
    this->initThreadMaybe();
    if (scudo::archSupportsMemoryTagging() &&
        !scudo::systemDetectsMemoryTagFaultsTestOnly())
      this->disableMemoryTagging();
  }
  ~TestAllocator() { this->unmapTestOnly(); }

  void *operator new(size_t size) {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
    return p;
  }

  void operator delete(void *ptr) { free(ptr); }
};

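// Typed test fixture: creates a fresh TestAllocator for each test, enabling
// the Quarantine only for the AndroidConfig variant.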
template <class TypeParam> struct ScudoCombinedTest : public Test {
  ScudoCombinedTest() {
    UseQuarantine = std::is_same<TypeParam, scudo::AndroidConfig>::value;
    Allocator = std::make_unique<AllocatorT>();
  }
  ~ScudoCombinedTest() {
    Allocator->releaseToOS();
    UseQuarantine = true;
  }

  void RunTest();

  void BasicTest(scudo::uptr SizeLog);

  using AllocatorT = TestAllocator<TypeParam>;
  std::unique_ptr<AllocatorT> Allocator;
};

template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;

#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig)                    \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig)                    \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<scudo::TYPE>;                   \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<scudo::TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
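// For example, SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) defines a
// ScudoCombinedTestIsOwned<TypeParam> struct whose Run() holds the test body,
// and stamps out one TEST_F per configuration, such as
// ScudoCombinedTestIsOwned_DefaultConfig.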

SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
  auto *Allocator = this->Allocator.get();
  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
  EXPECT_FALSE(
      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));

  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    StackBuffer[I] = 0x42U;
  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    EXPECT_EQ(StackBuffer[I], 0x42U);
}

template <class Config>
void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
  auto *Allocator = this->Allocator.get();

  // Allocate and deallocate a bunch of chunks, with a wide range of sizes and
  // alignments, focusing on sizes that could trigger weird behaviors (e.g., a
  // power of two plus or minus a small delta).
  for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
    const scudo::uptr Align = 1U << AlignLog;
    for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
      if (static_cast<scudo::sptr>(1U << SizeLog) + Delta < 0)
        continue;
      const scudo::uptr Size = (1U << SizeLog) + Delta;
      void *P = Allocator->allocate(Size, Origin, Align);
      EXPECT_NE(P, nullptr);
      EXPECT_TRUE(Allocator->isOwned(P));
      EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
      EXPECT_LE(Size, Allocator->getUsableSize(P));
      memset(P, 0xaa, Size);
      checkMemoryTaggingMaybe(Allocator, P, Size, Align);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

#define SCUDO_MAKE_BASIC_TEST(SizeLog)                                         \
  SCUDO_TYPED_TEST(ScudoCombinedDeathTest, BasicCombined##SizeLog) {           \
    this->BasicTest(SizeLog);                                                  \
  }

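// Instantiate the basic test for every size log from 2^0 up to 2^20, so that
// the exercised sizes span the primary size classes and beyond.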
SCUDO_MAKE_BASIC_TEST(0)
SCUDO_MAKE_BASIC_TEST(1)
SCUDO_MAKE_BASIC_TEST(2)
SCUDO_MAKE_BASIC_TEST(3)
SCUDO_MAKE_BASIC_TEST(4)
SCUDO_MAKE_BASIC_TEST(5)
SCUDO_MAKE_BASIC_TEST(6)
SCUDO_MAKE_BASIC_TEST(7)
SCUDO_MAKE_BASIC_TEST(8)
SCUDO_MAKE_BASIC_TEST(9)
SCUDO_MAKE_BASIC_TEST(10)
SCUDO_MAKE_BASIC_TEST(11)
SCUDO_MAKE_BASIC_TEST(12)
SCUDO_MAKE_BASIC_TEST(13)
SCUDO_MAKE_BASIC_TEST(14)
SCUDO_MAKE_BASIC_TEST(15)
SCUDO_MAKE_BASIC_TEST(16)
SCUDO_MAKE_BASIC_TEST(17)
SCUDO_MAKE_BASIC_TEST(18)
SCUDO_MAKE_BASIC_TEST(19)
SCUDO_MAKE_BASIC_TEST(20)

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroContents returns a zeroed out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroFill returns a zeroed out block.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying PatternOrZeroFill returns a pattern or zero filled
  // block. The primary allocator only produces pattern filled blocks if MTE
  // is disabled, so we only require pattern filled blocks in that case.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
                                                          1U << MinAlignLog) &&
            !Allocator->useMemoryTaggingTestOnly())
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, BlockReuse) {
  auto *Allocator = this->Allocator.get();

  // Verify that a chunk will eventually end up being reused.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->getHeaderTaggedPointer(P) ==
        Allocator->getHeaderTaggedPointer(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a chunk all the way up to a secondary allocation, verifying
  // that we preserve the data in the process.
  scudo::uptr Size = 16;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, Size);
  while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
    void *NewP = Allocator->reallocate(P, Size * 2);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < Size; J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    memset(reinterpret_cast<char *>(NewP) + Size, Marker, Size);
    Size *= 2U;
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
  auto *Allocator = this->Allocator.get();

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default class size maps.
  constexpr scudo::uptr ReallocSize =
      TypeParam::Primary::SizeClassMap::MaxSize - 64;
  void *P = Allocator->allocate(ReallocSize, Origin);
  const char Marker = 0xab;
  memset(P, Marker, ReallocSize);
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    const scudo::uptr NewSize = ReallocSize + Delta;
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);
    for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
  auto *Allocator = this->Allocator.get();
  // Allocate a bunch of chunks, then iterate over all of them, ensuring they
  // are the ones we allocated. This requires the allocator to not have any
  // other allocated chunk at this point (e.g., it won't work with the
  // Quarantine).
  // FIXME: Make this work with UseQuarantine and tagging enabled. The
  // internals of iterateOverChunks read headers through both tagged and
  // untagged pointers, so one of the two will fail.
  if (!UseQuarantine) {
    std::vector<void *> V;
    for (scudo::uptr I = 0; I < 64U; I++)
      V.push_back(Allocator->allocate(
          rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
    Allocator->disable();
    Allocator->iterateOverChunks(
        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
        [](uintptr_t Base, size_t Size, void *Arg) {
          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
          void *P = reinterpret_cast<void *>(Base);
          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
        },
        reinterpret_cast<void *>(&V));
    Allocator->enable();
    for (auto P : V)
      Allocator->deallocate(P, Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
  auto *Allocator = this->Allocator.get();

  // Check that use-after-free is detected.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!Allocator->useMemoryTaggingTestOnly())
      continue;
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[0] = 0xaa;
        },
        "");
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[Size - 1] = 0xaa;
        },
        "");
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
  auto *Allocator = this->Allocator.get();

  if (Allocator->useMemoryTaggingTestOnly()) {
    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 0xaa, "");
    scudo::ScopedDisableMemoryTagChecks NoTagChecks;
    Allocator->disableMemoryTagging();
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(scudo::untagPointer(P), P);
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    Allocator->releaseToOS();
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
  auto *Allocator = this->Allocator.get();

  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allow
  // us to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  EXPECT_TRUE(!TSD->Cache.isEmpty());
  TSD->Cache.drain();
  EXPECT_TRUE(TSD->Cache.isEmpty());
  if (UnlockRequired)
    TSD->unlock();
}

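// Spawn 32 threads that block on a shared condition variable, then allocate
// and deallocate concurrently once released, stressing the TSD registry and
// the per-thread caches.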
SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  auto *Allocator = this->Allocator.get();
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = std::rand() % 4096U;
        void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
        if (P)
          V.push_back(std::make_pair(P, Size));
      }
      while (!V.empty()) {
        auto Pair = V.back();
        Allocator->deallocate(Pair.first, Origin, Pair.second);
        V.pop_back();
      }
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS();
}

// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
TEST(ScudoCombinedDeathTest, SKIP_ON_FUCHSIA(testSEGV)) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::MapPlatformData Data = {};
  void *P = scudo::map(nullptr, Size, "testSEGV", MAP_NOACCESS, &Data);
  EXPECT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  scudo::unmap(P, Size, UNMAP_ALL, &Data);
}

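// A size class map with exactly four classes: 1024, 2048, 4096 and 8192
// bytes (MinSizeLog 10 through MaxSizeLog 13, one bit per step).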
struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u16 MaxNumCachedHint = 8;
  static const scudo::uptr MaxBytesCachedLog = 12;
  static const scudo::uptr SizeDelta = 0;
};

static const scudo::uptr DeathRegionSizeLog = 21U;
struct DeathConfig {
  static const bool MaySupportMemoryTagging = false;

  // Tiny allocator: its Primary only serves chunks of four sizes.
  using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
  typedef scudo::SizeClassAllocator64<DeathConfig> Primary;
  static const scudo::uptr PrimaryRegionSizeLog = DeathRegionSizeLog;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;
  static const bool PrimaryEnableRandomOffset = true;
  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
  static const scudo::uptr PrimaryGroupSizeLog = 18;

  typedef scudo::MapAllocatorNoCache SecondaryCache;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
};

TEST(ScudoCombinedDeathTest, DeathCombined) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::SizeClassMap::LargestClassId; ClassId++) {
    const scudo::uptr Size =
        DeathConfig::SizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::SizeClassMap::LargestClassId - ClassId + 1);
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
SCUDO_TYPED_TEST(ScudoCombinedTest, ReleaseToOS) {
  auto *Allocator = this->Allocator.get();
  Allocator->releaseToOS();
}

SCUDO_TYPED_TEST(ScudoCombinedTest, OddEven) {
  auto *Allocator = this->Allocator.get();

  if (!Allocator->useMemoryTaggingTestOnly())
    return;

  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
    EXPECT_NE(Tag1 % 2, Tag2 % 2);
  };

  using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);

    std::set<scudo::uptr> Ptrs;
    bool Found = false;
    for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
      if (Ptrs.count(P - Size)) {
        Found = true;
        CheckOddEven(P, P - Size);
        break;
      }
      if (Ptrs.count(P + Size)) {
        Found = true;
        CheckOddEven(P, P + Size);
        break;
      }
      Ptrs.insert(P);
    }
    EXPECT_TRUE(Found);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> Ptrs(65536);

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // Test that when mem-init is disabled on a thread, calloc still works as
  // expected. This is tricky to ensure when MTE is enabled, so this test
  // tries to exercise the relevant code on our MTE path.
  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
    using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
    const scudo::uptr Size =
        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
    if (Size < 8)
      continue;
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin);
      memset(Ptrs[I], 0xaa, Size);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
      memset(Ptrs[I], 0xbb, Size - 8);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      for (scudo::uptr J = 0; J < Size; ++J)
        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], 0);
    }
  }

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
  auto *Allocator = this->Allocator.get();

  // Regression test: make realloc-in-place happen at the very right end of a
  // mapped region.
  constexpr int nPtrs = 10000;
  for (int i = 1; i < 32; ++i) {
    scudo::uptr Size = 16 * i - 1;
    std::vector<void *> Ptrs;
    for (int j = 0; j < nPtrs; ++j) {
      void *P = Allocator->allocate(Size, Origin);
      P = Allocator->reallocate(P, Size + 1);
      Ptrs.push_back(P);
    }

    for (int j = 0; j < nPtrs; ++j)
      Allocator->deallocate(Ptrs[j], Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
  auto *Allocator = this->Allocator.get();
  auto Size = Allocator->getRingBufferSize();
  ASSERT_GT(Size, 0u);
  EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
  auto *Allocator = this->Allocator.get();
  auto *Addr = Allocator->getRingBufferAddress();
  EXPECT_NE(Addr, nullptr);
  EXPECT_EQ(Addr, Allocator->getRingBufferAddress());
}

#if SCUDO_CAN_USE_PRIMARY64
#if SCUDO_TRUSTY

// TrustyConfig is designed for a domain-specific allocator. Add a basic test
// that covers only simple operations, and ensures the configuration compiles.
TEST(ScudoCombinedTest, BasicTrustyConfig) {
  using AllocatorT = scudo::Allocator<scudo::TrustyConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  for (scudo::uptr ClassId = 1U;
       ClassId <= scudo::TrustyConfig::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        scudo::TrustyConfig::SizeClassMap::getSizeByClassId(ClassId);
    void *p = Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin);
    ASSERT_NE(p, nullptr);
    // Return the chunk to the allocator it came from, rather than to libc's
    // free(), which knows nothing about this standalone instance.
    Allocator->deallocate(p, Origin);
  }

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  TSD->Cache.drain();
  if (UnlockRequired)
    TSD->unlock();

  Allocator->releaseToOS();
}

#endif
#endif

#if SCUDO_LINUX

SCUDO_TYPED_TEST(ScudoCombinedTest, SoftRssLimit) {
  auto *Allocator = this->Allocator.get();
  Allocator->setRssLimitsTestOnly(1, 0, true);

  size_t Megabyte = 1024 * 1024;
  size_t ChunkSize = 16;
  size_t Error = 256;

  std::vector<void *> Ptrs;
  for (size_t index = 0; index < Megabyte + Error; index += ChunkSize) {
    void *Ptr = Allocator->allocate(ChunkSize, Origin);
    Ptrs.push_back(Ptr);
  }

  EXPECT_EQ(nullptr, Allocator->allocate(ChunkSize, Origin));

  for (void *Ptr : Ptrs)
    Allocator->deallocate(Ptr, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, HardRssLimit) {
  auto *Allocator = this->Allocator.get();
  Allocator->setRssLimitsTestOnly(0, 1, false);

  size_t Megabyte = 1024 * 1024;

  EXPECT_DEATH(
      {
        disableDebuggerdMaybe();
        Allocator->allocate(Megabyte, Origin);
      },
      "");
}

#endif