//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_crc32.h"
#include "scudo_errors.h"
#include "scudo_flags.h"
#include "scudo_interface_internal.h"
#include "scudo_tsd.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#ifdef GWP_ASAN_HOOKS
# include "gwp_asan/guarded_pool_allocator.h"
# include "gwp_asan/optional/backtrace.h"
# include "gwp_asan/optional/options_parser.h"
#endif // GWP_ASAN_HOOKS

#include <errno.h>
#include <string.h>

namespace __scudo {

// Global static cookie, initialized at start-up.
static u32 Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a
  // result there is no reason to not use it here.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}

static BackendT &getBackend();

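// Illustrative sketch of the in-memory layout handled by the Chunk helpers
// below (an editorial aid, not part of the original allocator code). The
// packed header occupies the getHeaderSize() bytes right before the pointer
// handed to the user; a Primary-backed chunk that needed extra alignment also
// records, in its Offset field, how far the user pointer was pushed forward
// from the backend allocation, in multiples of MinAlignment:
//
//   BackendPtr                                UserPtr (returned to the user)
//   |                                         |
//   v                                         v
//   +-------------------------------+---------+--------------------------+
//   | Offset << MinAlignmentLog     | packed  | user data ...            |
//   | bytes of padding (often 0)    | header  |                          |
//   +-------------------------------+---------+--------------------------+
//
// getBackendPtr() walks back from the user pointer using getHeaderSize() and
// the Offset field of a local copy of the header.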
namespace Chunk {
  static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
    return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
                                                  getHeaderSize());
  }
  static INLINE
  const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
    return reinterpret_cast<const AtomicPackedHeader *>(
        reinterpret_cast<uptr>(Ptr) - getHeaderSize());
  }

  static INLINE bool isAligned(const void *Ptr) {
    return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
  }

  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it hasn't been tampered with in the
  // meantime. To prevent this, we work with a local copy of the header.
  static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
    return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
        getHeaderSize() - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the number of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
    const uptr ClassId = Header->ClassId;
    if (ClassId)
      return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
          (Header->Offset << MinAlignmentLog);
    return SecondaryT::GetActuallyAllocatedSize(
        getBackendPtr(Ptr, Header)) - getHeaderSize();
  }

  // Returns the size the user requested when allocating the chunk.
  static INLINE uptr getSize(const void *Ptr, UnpackedHeader *Header) {
    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
    if (Header->ClassId)
      return SizeOrUnusedBytes;
    return SecondaryT::GetActuallyAllocatedSize(
        getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
  }

  // Compute the checksum of the chunk pointer and its header.
  static INLINE u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
                                 HeaderHolder, ARRAY_SIZE(HeaderHolder));
    return static_cast<u16>(Crc);
  }

  // Checks the validity of a chunk by verifying its checksum. It doesn't
  // incur termination in the event of an invalid chunk.
  static INLINE bool isValid(const void *Ptr) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    UnpackedHeader NewUnpackedHeader =
        bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum ==
            computeChecksum(Ptr, &NewUnpackedHeader));
  }

  // Ensure that ChunkAvailable is 0, so that if a 0 checksum is ever valid
  // for a fully nulled out header, its state will be available anyway.
  COMPILER_CHECK(ChunkAvailable == 0);

  // Loads and unpacks the header, verifying the checksum in the process.
  static INLINE
  void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
                 computeChecksum(Ptr, NewUnpackedHeader)))
      dieWithMessage("corrupted chunk header at address %p\n", Ptr);
  }

  // Packs and stores the header, computing the checksum in the process.
  static INLINE void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
  }

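  // A sketch of how the rest of this file typically uses the helpers above
  // (editorial illustration only): the header is loaded and checksum-verified
  // into a local copy, the expected state is validated, a modified copy is
  // written back, and the compare-exchange variant below detects concurrent
  // tampering.
  //
  //   UnpackedHeader Header;
  //   Chunk::loadHeader(Ptr, &Header);       // dies on checksum mismatch
  //   if (Header.State != ChunkAllocated)    // validate the expected state
  //     dieWithMessage(...);
  //   UnpackedHeader NewHeader = Header;
  //   NewHeader.State = ChunkQuarantine;     // mutate the local copy only
  //   Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);
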
  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  static INLINE void compareExchangeHeader(void *Ptr,
                                           UnpackedHeader *NewUnpackedHeader,
                                           UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    if (UNLIKELY(!atomic_compare_exchange_strong(
            getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
            memory_order_relaxed)))
      dieWithMessage("race on chunk header at address %p\n", Ptr);
  }
} // namespace Chunk

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCacheT *Cache)
      : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(void *Ptr) {
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkQuarantine))
      dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
    UnpackedHeader NewHeader = Header;
    NewHeader.State = ChunkAvailable;
    Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);
    void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
    if (Header.ClassId)
      getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
    else
      getBackend().deallocateSecondary(BackendPtr);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  void *Allocate(uptr Size) {
    const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
    return getBackend().allocatePrimary(Cache_, BatchClassId);
  }

  void Deallocate(void *Ptr) {
    const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
    getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
  }

  AllocatorCacheT *Cache_;
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
};

typedef Quarantine<QuarantineCallback, void> QuarantineT;
typedef QuarantineT::Cache QuarantineCacheT;
COMPILER_CHECK(sizeof(QuarantineCacheT) <=
               sizeof(ScudoTSD::QuarantineCachePlaceHolder));

QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
  return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
}

#ifdef GWP_ASAN_HOOKS
static gwp_asan::GuardedPoolAllocator GuardedAlloc;
#endif // GWP_ASAN_HOOKS

struct Allocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  BackendT Backend;
  QuarantineT Quarantine;

  u32 QuarantineChunksUpToSize;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  bool CheckRssLimit;
  uptr HardRssLimitMb;
  uptr SoftRssLimitMb;
  atomic_uint8_t RssLimitExceeded;
  atomic_uint64_t RssLastCheckedAtNS;

  explicit Allocator(LinkerInitialized)
      : Quarantine(LINKER_INITIALIZED) {}

  NOINLINE void performSanityChecks();

  void init() {
    SanitizerToolName = "Scudo";
    PrimaryAllocatorName = "ScudoPrimary";
    SecondaryAllocatorName = "ScudoSecondary";

    initFlags();

    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform;
    // if so, opt for the CRC32 hardware version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    Backend.init(common_flags()->allocator_release_to_os_interval_ms);
    HardRssLimitMb = common_flags()->hard_rss_limit_mb;
    SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
    Quarantine.Init(
        static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
        static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
    QuarantineChunksUpToSize = (Quarantine.GetCacheSize() == 0) ? 0 :
        getFlags()->QuarantineChunksUpToSize;
    DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
    DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
    ZeroContents = getFlags()->ZeroContents;

    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
                            /*blocking=*/false))) {
      Cookie = static_cast<u32>((NanoTime() >> 12) ^
                                (reinterpret_cast<uptr>(this) >> 4));
    }

    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
    if (CheckRssLimit)
      atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
  }

  // Helper function that checks for a valid Scudo chunk. nullptr isn't.
  bool isValidPointer(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return false;
    if (!Chunk::isAligned(Ptr))
      return false;
    return Chunk::isValid(Ptr);
  }

  NOINLINE bool isRssLimitExceeded();

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    initThreadMaybe();

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.shouldSample())) {
      if (void *Ptr = GuardedAlloc.allocate(Size))
        return Ptr;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(Alignment > MaxAlignment)) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
    }
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;

    const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
        Chunk::getHeaderSize();
    const uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
    if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
        UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize);
    }

    if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportRssLimitExceeded();
    }

    // Primary and Secondary backed allocations have a different treatment. We
    // deal with alignment requirements of Primary serviced allocations here,
    // but the Secondary will take care of its own alignment needs.
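    // Worked example (illustrative figures only, assuming a 16-byte header
    // and a MinAlignment of 16): for Size = 24 and Alignment = 64, NeededSize
    // is RoundUpTo(24, 16) + 16 = 48 and AlignedSize is 48 + (64 - 16) = 96,
    // which is what the Primary is asked to accommodate. If the backend block
    // starts at 0x1000, UserPtr starts at 0x1010, gets rounded up to 0x1040,
    // and Offset becomes (0x1040 - 0x1010) >> 4 = 3; the 24 user bytes then
    // end at 0x1058, within the 96-byte backend block ending at 0x1060.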
    void *BackendPtr;
    uptr BackendSize;
    u8 ClassId;
    if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
      BackendSize = AlignedSize;
      ClassId = SizeClassMap::ClassID(BackendSize);
      bool UnlockRequired;
      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
      BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
      if (UnlockRequired)
        TSD->unlock();
    } else {
      BackendSize = NeededSize;
      ClassId = 0;
      BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
    }
    if (UNLIKELY(!BackendPtr)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      reportOutOfMemory(Size);
    }

    // If requested, we will zero out the entire contents of the returned chunk.
    if ((ForceZeroContents || ZeroContents) && ClassId)
      memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));

    UnpackedHeader Header = {};
    uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
    if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
      // Since the Secondary takes care of alignment, a non-aligned pointer
      // means it is from the Primary. It is also the only case where the
      // offset field of the header would be non-zero.
      DCHECK(ClassId);
      const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
      Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
      UserPtr = AlignedUserPtr;
    }
    DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
    Header.State = ChunkAllocated;
    Header.AllocType = Type;
    if (ClassId) {
      Header.ClassId = ClassId;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The Secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and
      // the next page boundary.
      const uptr PageSize = GetPageSizeCached();
      const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
    void *Ptr = reinterpret_cast<void *>(UserPtr);
    Chunk::storeHeader(Ptr, &Header);
    if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
      __sanitizer_malloc_hook(Ptr, Size);
    return Ptr;
  }

  // Place a chunk in the quarantine or directly deallocate it in the event of
  // a zero-sized quarantine, or if the size of the chunk is greater than the
  // quarantine chunk size threshold.
  void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
                                   uptr Size) {
    const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
    if (BypassQuarantine) {
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkAvailable;
      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
      void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
      if (Header->ClassId) {
        bool UnlockRequired;
        ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
        getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
                                       Header->ClassId);
        if (UnlockRequired)
          TSD->unlock();
      } else {
        getBackend().deallocateSecondary(BackendPtr);
      }
    } else {
      // If a small amount of memory was allocated with a larger alignment, we
      // want to take that into account. Otherwise the Quarantine would be
      // filled with tiny chunks, taking up a lot of VA memory. This is an
      // approximation of the usable size, which allows us to avoid calling
      // GetActuallyAllocatedSize.
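      // For instance (illustrative figures, assuming a MinAlignment of 16), a
      // 24-byte chunk that was allocated with 64-byte alignment and ended up
      // with Offset = 3 is accounted for as 24 + (3 << 4) = 72 bytes in the
      // quarantine, rather than just 24.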
      const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
      bool UnlockRequired;
      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
      Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
                     Ptr, EstimatedSize);
      if (UnlockRequired)
        TSD->unlock();
    }
  }

  // Deallocates a Chunk, which means either adding it to the quarantine or
  // directly returning it to the backend if criteria are met.
  void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment,
                  AllocType Type) {
    // For a deallocation, we only ensure minimal initialization, meaning the
    // thread-local data will be left uninitialized for now (when using ELF
    // TLS); the fallback cache will be used instead. This is a workaround for
    // a situation where the only heap operation performed in a thread is a
    // free past the TLS destructors, which would otherwise leave initialized
    // thread-specific data never properly destroyed. Any other heap operation
    // will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);
    if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
      __sanitizer_free_hook(Ptr);
    if (UNLIKELY(!Ptr))
      return;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
      GuardedAlloc.deallocate(Ptr);
      return;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!Chunk::isAligned(Ptr)))
      dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (Header.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.AllocType != FromMemalign || Type != FromMalloc)
          dieWithMessage("allocation type mismatch when deallocating address "
                         "%p\n", Ptr);
      }
    }
    const uptr Size = Chunk::getSize(Ptr, &Header);
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size)
        dieWithMessage("invalid sized delete when deallocating address %p\n",
                       Ptr);
    }
    (void)DeleteAlignment; // TODO(kostyak): verify that the alignment matches.
    quarantineOrDeallocateChunk(Ptr, &Header, Size);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
      size_t OldSize = GuardedAlloc.getSize(OldPtr);
      void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
      if (NewPtr)
        memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
      GuardedAlloc.deallocate(OldPtr);
      return NewPtr;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!Chunk::isAligned(OldPtr)))
      dieWithMessage("misaligned address when reallocating address %p\n",
                     OldPtr);
    UnpackedHeader OldHeader;
    Chunk::loadHeader(OldPtr, &OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when reallocating address %p\n",
                     OldPtr);
    if (DeallocationTypeMismatch) {
      if (UNLIKELY(OldHeader.AllocType != FromMalloc))
        dieWithMessage("allocation type mismatch when reallocating address "
                       "%p\n", OldPtr);
    }
    const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.ClassId ? NewSize : UsableSize - NewSize;
      Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
      return GuardedAlloc.getSize(Ptr);
#endif // GWP_ASAN_HOOKS

    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
    return Chunk::getUsableSize(Ptr, &Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportCallocOverflow(NMemB, Size);
    }
    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
  }

  void commitBack(ScudoTSD *TSD) {
    Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
    Backend.destroyCache(&TSD->Cache);
  }

  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    Backend.getStats(stats);
    return stats[StatType];
  }

  bool canReturnNull() {
    initThreadMaybe();
    return AllocatorMayReturnNull();
  }

  void setRssLimit(uptr LimitMb, bool HardLimit) {
    if (HardLimit)
      HardRssLimitMb = LimitMb;
    else
      SoftRssLimitMb = LimitMb;
    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
  }

  void printStats() {
    initThreadMaybe();
    Backend.printStats();
  }
};

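// Illustrative numbers for the sanity checks below (editorial example, not
// actual configuration values): if SizeClassMap::kMaxSize were 1 << 16 and
// MinAlignment were 16 bytes, MaxPrimaryAlignment would be
// 1 << MostSignificantSetBitIndex(65536 - 16) = 32768, and with a 16-byte
// header MaxOffset would be (32768 - 16) >> 4 = 2047, so the Offset bitfield
// would need at least 11 bits for the check to pass.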
NOINLINE void Allocator::performSanityChecks() {
  // Verify that the header offset field can hold the maximum offset. In the
  // case of the Secondary allocator, it takes care of alignment and the
  // offset will always be 0. In the case of the Primary, the worst case
  // scenario happens in the last size class, when the backend allocation
  // would already be aligned on the requested alignment, which would happen
  // to be the maximum alignment that would fit in that size class. As a
  // result, the maximum offset will be at most the maximum alignment for the
  // last size class minus the header size, in multiples of MinAlignment.
  UnpackedHeader Header = {};
  const uptr MaxPrimaryAlignment =
      1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
  const uptr MaxOffset =
      (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
  Header.Offset = MaxOffset;
  if (Header.Offset != MaxOffset)
    dieWithMessage("maximum possible offset doesn't fit in header\n");
  // Verify that we can fit the maximum size or amount of unused bytes in the
  // header. Given that the Secondary fits the allocation to a page, the worst
  // case scenario happens in the Primary. It will depend on the second to
  // last and last class sizes, as well as the dynamic base for the Primary.
  // The following is an over-approximation that works for our needs.
  const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
  Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
  if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
    dieWithMessage("maximum possible unused bytes doesn't fit in header\n");

  const uptr LargestClassId = SizeClassMap::kLargestClassID;
  Header.ClassId = LargestClassId;
  if (Header.ClassId != LargestClassId)
    dieWithMessage("largest class ID doesn't fit in header\n");
}

// Opportunistic RSS limit check. This will update the RSS limit status, if
// it can, every 250ms, otherwise it will just return the current one.
NOINLINE bool Allocator::isRssLimitExceeded() {
  u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
  const u64 CurrentCheck = MonotonicNanoTime();
  if (LIKELY(CurrentCheck < LastCheck + (250ULL * 1000000ULL)))
    return atomic_load_relaxed(&RssLimitExceeded);
  if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
                                    CurrentCheck, memory_order_relaxed))
    return atomic_load_relaxed(&RssLimitExceeded);
  // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
  //                RSS from /proc/self/statm by default. We might want to
  //                call getrusage directly, even if it's less accurate.
  const uptr CurrentRssMb = GetRSS() >> 20;
  if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
    dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
                   HardRssLimitMb, CurrentRssMb);
  if (SoftRssLimitMb) {
    if (atomic_load_relaxed(&RssLimitExceeded)) {
      if (CurrentRssMb <= SoftRssLimitMb)
        atomic_store_relaxed(&RssLimitExceeded, false);
    } else {
      if (CurrentRssMb > SoftRssLimitMb) {
        atomic_store_relaxed(&RssLimitExceeded, true);
        Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
               SoftRssLimitMb, CurrentRssMb);
      }
    }
  }
  return atomic_load_relaxed(&RssLimitExceeded);
}

static Allocator Instance(LINKER_INITIALIZED);

static BackendT &getBackend() {
  return Instance.Backend;
}

void initScudo() {
  Instance.init();
#ifdef GWP_ASAN_HOOKS
  gwp_asan::options::initOptions();
  gwp_asan::options::Options &Opts = gwp_asan::options::getOptions();
  Opts.Backtrace = gwp_asan::options::getBacktraceFunction();
  GuardedAlloc.init(Opts);

  if (Opts.InstallSignalHandlers)
    gwp_asan::crash_handler::installSignalHandlers(
        &GuardedAlloc, __sanitizer::Printf,
        gwp_asan::options::getPrintBacktraceFunction(), Opts.Backtrace);
#endif // GWP_ASAN_HOOKS
}

void ScudoTSD::init() {
  getBackend().initCache(&Cache);
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoTSD::commitBack() {
  Instance.commitBack(this);
}

void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) {
  if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
    errno = EINVAL;
    if (Instance.canReturnNull())
      return nullptr;
    reportAllocationAlignmentNotPowerOfTwo(Alignment);
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type));
}

void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) {
  Instance.deallocate(Ptr, Size, Alignment, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, 0, FromMalloc);
    return nullptr;
  }
  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
}

void *scudoValloc(uptr Size) {
  return SetErrnoOnNull(
      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
}

void *scudoPvalloc(uptr Size) {
  const uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
    errno = ENOMEM;
    if (Instance.canReturnNull())
      return nullptr;
    reportPvallocOverflow(Size);
  }
  // pvalloc(0) should allocate one page.
  Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
    if (!Instance.canReturnNull())
      reportInvalidPosixMemalignAlignment(Alignment);
    return EINVAL;
  }
  void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  *MemPtr = Ptr;
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
    errno = EINVAL;
    if (Instance.canReturnNull())
      return nullptr;
    reportInvalidAlignedAllocAlignment(Size, Alignment);
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

} // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
  return Size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *Ptr, uptr Size) {
  (void)Ptr;
  (void)Size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
  (void)Ptr;
}
#endif

// Interface functions

void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
  if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
    return;
  Instance.setRssLimit(LimitMb, !!HardLimit);
}

void __scudo_print_stats() {
  Instance.printStats();
}