sanitizer_allocator.h revision 274201
//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#include "sanitizer_lfstack.h"

namespace __sanitizer {

// Depending on allocator_may_return_null either return 0 or crash.
void *AllocatorReturnNull();

// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
// Next 4 classes: 256 + i * 64 (i = 1 to 4).
// Next 4 classes: 512 + i * 128 (i = 1 to 4).
// ...
// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
//
// This structure of the size class map gives us:
// - Efficient table-free class-to-size and size-to-class functions.
// - Difference between two consecutive size classes is between 14% and 25%.
//
// This class also gives a hint to a thread-caching allocator about the amount
// of chunks that need to be cached per-thread:
// - kMaxNumCached is the maximal number of chunks per size class.
// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
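//
// For example, a 100-byte request falls into class 7 (rounded up to a
// 112-byte chunk, class_id * 16), and a 1000-byte request falls into
// class 24 (a 1024-byte chunk), so Size(ClassID(s)) >= s always holds.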
//
// Part of output of SizeClassMap::Print():
// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
//
// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
//
// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
//
// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
//
// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
//
// ...
//
// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
//
// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52

template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
class SizeClassMap {
  static const uptr kMinSizeLog = 4;
  static const uptr kMidSizeLog = kMinSizeLog + 4;
  static const uptr kMinSize = 1 << kMinSizeLog;
  static const uptr kMidSize = 1 << kMidSizeLog;
  static const uptr kMidClass = kMidSize / kMinSize;
  static const uptr S = 2;
  static const uptr M = (1 << S) - 1;

 public:
  static const uptr kMaxNumCached = kMaxNumCachedT;
  // We transfer chunks between central and thread-local free lists in batches.
  // For small size classes we allocate batches separately.
  // For large size classes we use one of the chunks to store the batch.
  struct TransferBatch {
    TransferBatch *next;
    uptr count;
    void *batch[kMaxNumCached];
  };

  static const uptr kMaxSize = 1UL << kMaxSizeLog;
  static const uptr kNumClasses =
      kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
  COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
  static const uptr kNumClassesRounded =
      kNumClasses == 32 ? 32 :
      kNumClasses <= 64 ? 64 :
      kNumClasses <= 128 ? 128 : 256;

  static uptr Size(uptr class_id) {
    if (class_id <= kMidClass)
      return kMinSize * class_id;
    class_id -= kMidClass;
    uptr t = kMidSize << (class_id >> S);
    return t + (t >> S) * (class_id & M);
  }

  static uptr ClassID(uptr size) {
    if (size <= kMidSize)
      return (size + kMinSize - 1) >> kMinSizeLog;
    if (size > kMaxSize) return 0;
    uptr l = MostSignificantSetBitIndex(size);
    uptr hbits = (size >> (l - S)) & M;
    uptr lbits = size & ((1 << (l - S)) - 1);
    uptr l1 = l - kMidSizeLog;
    return kMidClass + (l1 << S) + hbits + (lbits > 0);
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id == 0) return 0;
    uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
    return Max<uptr>(1, Min(kMaxNumCached, n));
  }

  static void Print() {
    uptr prev_s = 0;
    uptr total_cached = 0;
    for (uptr i = 0; i < kNumClasses; i++) {
      uptr s = Size(i);
      if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
        Printf("\n");
      uptr d = s - prev_s;
      uptr p = prev_s ? (d * 100 / prev_s) : 0;
      uptr l = s ? MostSignificantSetBitIndex(s) : 0;
      uptr cached = MaxCached(i) * s;
      Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
             "cached: %zd %zd; id %zd\n",
             i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
      total_cached += cached;
      prev_s = s;
    }
    Printf("Total cached: %zd\n", total_cached);
  }

  static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
    return Size(class_id) < sizeof(TransferBatch) -
        sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
  }

  static void Validate() {
    for (uptr c = 1; c < kNumClasses; c++) {
      // Printf("Validate: c%zd\n", c);
      uptr s = Size(c);
      CHECK_NE(s, 0U);
      CHECK_EQ(ClassID(s), c);
      if (c != kNumClasses - 1)
        CHECK_EQ(ClassID(s + 1), c + 1);
      CHECK_EQ(ClassID(s - 1), c);
      if (c)
        CHECK_GT(Size(c), Size(c-1));
    }
    CHECK_EQ(ClassID(kMaxSize + 1), 0);

    for (uptr s = 1; s <= kMaxSize; s++) {
      uptr c = ClassID(s);
      // Printf("s%zd => c%zd\n", s, c);
      CHECK_LT(c, kNumClasses);
      CHECK_GE(Size(c), s);
      if (c > 0)
        CHECK_LT(Size(c-1), s);
    }
  }
};

typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
typedef SizeClassMap<17, 64, 14> CompactSizeClassMap;
template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;

// Memory allocator statistics
enum AllocatorStat {
  AllocatorStatMalloced,
  AllocatorStatFreed,
  AllocatorStatMmapped,
  AllocatorStatUnmapped,
  AllocatorStatCount
};

typedef u64 AllocatorStatCounters[AllocatorStatCount];

// Per-thread stats, live in per-thread cache.
class AllocatorStats {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void Add(AllocatorStat i, u64 v) {
    v += atomic_load(&stats_[i], memory_order_relaxed);
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  void Set(AllocatorStat i, u64 v) {
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  u64 Get(AllocatorStat i) const {
    return atomic_load(&stats_[i], memory_order_relaxed);
  }

 private:
  friend class AllocatorGlobalStats;
  AllocatorStats *next_;
  AllocatorStats *prev_;
  atomic_uint64_t stats_[AllocatorStatCount];
};

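// A minimal usage sketch (illustrative only; the names global_stats and
// thread_stats are hypothetical): each thread owns an AllocatorStats that is
// registered with a single AllocatorGlobalStats (defined below), and totals
// are read back through AllocatorStatCounters:
//
//   AllocatorGlobalStats global_stats;   // one per process
//   AllocatorStats thread_stats;         // one per thread, lives in the cache
//   global_stats.Init();
//   thread_stats.Init();
//   global_stats.Register(&thread_stats);
//   thread_stats.Add(AllocatorStatMalloced, 128);
//   AllocatorStatCounters totals;
//   global_stats.Get(totals);            // sums over all registered threads
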
// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    next_ = this;
    prev_ = this;
  }

  void Register(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->next_ = next_;
    s->prev_ = this;
    next_->prev_ = s;
    next_ = s;
  }

  void Unregister(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->prev_->next_ = s->next_;
    s->next_->prev_ = s->prev_;
    for (int i = 0; i < AllocatorStatCount; i++)
      Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
  }

  void Get(AllocatorStatCounters s) const {
    internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
    SpinMutexLock l(&mu_);
    const AllocatorStats *stats = this;
    for (;;) {
      for (int i = 0; i < AllocatorStatCount; i++)
        s[i] += stats->Get(AllocatorStat(i));
      stats = stats->next_;
      if (stats == this)
        break;
    }
  }

 private:
  mutable SpinMutex mu_;
};

// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const { }
};

// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);

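// A sketch of a non-trivial callback (hypothetical; tools such as TSan plug
// in a hook like this to keep their shadow state in sync): any class with
// OnMap/OnUnmap methods matching NoOpMapUnmapCallback can be passed to the
// allocators below as the MapUnmapCallback template parameter.
//
//   struct CountingMapUnmapCallback {
//     void OnMap(uptr p, uptr size) const { /* e.g. account the mapping */ }
//     void OnUnmap(uptr p, uptr size) const { /* e.g. release accounting */ }
//   };
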
// SizeClassAllocator64 -- allocator for 64-bit address space.
//
// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator64 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    CHECK_EQ(kSpaceBeg,
             reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
    MapWithCallback(kSpaceEnd, AdditionalSize());
  }

  void MapWithCallback(uptr beg, uptr size) {
    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
        alignment <= SizeClassMap::kMaxSize;
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    Batch *b = region->free_list.Pop();
    if (b == 0)
      b = PopulateFreeList(stat, c, class_id, region);
    region->n_allocated += b->count;
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    RegionInfo *region = GetRegionInfo(class_id);
    CHECK_GT(b->count, 0);
    region->free_list.Push(b);
    region->n_freed += b->count;
  }

  static bool PointerIsMine(const void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(const void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded;
  }

  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    if (!size) return 0;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    if (class_id >= kNumClasses) return 0;
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return 0;
  }

  static uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
  }

  void PrintStats() {
    uptr total_mapped = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      total_mapped += region->mapped_user;
      n_allocated += region->n_allocated;
      n_freed += region->n_freed;
    }
    Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
           "remains %zd\n",
           total_mapped >> 20, n_allocated, n_allocated - n_freed);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      if (region->mapped_user == 0) continue;
      Printf("  %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
             class_id,
             SizeClassMap::Size(class_id),
             region->mapped_user >> 10,
             region->n_allocated,
             region->n_allocated - region->n_freed);
    }
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = SizeClassMap::Size(class_id);
      uptr region_beg = kSpaceBeg + class_id * kRegionSize;
      for (uptr chunk = region_beg;
           chunk < region_beg + region->allocated_user;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;

 private:
  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize;
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 14;
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;

  struct RegionInfo {
    BlockingMutex mutex;
    LFStack<Batch> free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
    uptr n_allocated, n_freed;  // Just stats.
  };
  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
    return &regions[class_id];
  }

  static uptr GetChunkIdx(uptr chunk, uptr size) {
    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32-bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }

  NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                                   uptr class_id, RegionInfo *region) {
    BlockingMutexLock l(&region->mutex);
    Batch *b = region->free_list.Pop();
    if (b)
      return b;
    uptr size = SizeClassMap::Size(class_id);
    uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + count * size;
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    if (end_idx + size > region->mapped_user) {
      // Do the mmap for the user memory.
      uptr map_size = kUserMapSize;
      while (end_idx + size > region->mapped_user + map_size)
        map_size += kUserMapSize;
      CHECK_GE(region->mapped_user + map_size, end_idx);
      MapWithCallback(region_beg + region->mapped_user, map_size);
      stat->Add(AllocatorStatMmapped, map_size);
      region->mapped_user += map_size;
    }
    uptr total_count = (region->mapped_user - beg_idx - size)
        / size / count * count;
    region->allocated_meta += total_count * kMetadataSize;
    if (region->allocated_meta > region->mapped_meta) {
      uptr map_size = kMetaMapSize;
      while (region->allocated_meta > region->mapped_meta + map_size)
        map_size += kMetaMapSize;
      // Do the mmap for the metadata.
      CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
      MapWithCallback(region_beg + kRegionSize -
                      region->mapped_meta - map_size, map_size);
      region->mapped_meta += map_size;
    }
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    if (region->mapped_user + region->mapped_meta > kRegionSize) {
      Printf("%s: Out of memory. Dying. ", SanitizerToolName);
      Printf("The process has exhausted %zuMB for size class %zu.\n",
             kRegionSize / 1024 / 1024, size);
      Die();
    }
    for (;;) {
      if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
        b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
      else
        b = (Batch*)(region_beg + beg_idx);
      b->count = count;
      for (uptr i = 0; i < count; i++)
        b->batch[i] = (void*)(region_beg + beg_idx + i * size);
      region->allocated_user += count * size;
      CHECK_LE(region->allocated_user, region->mapped_user);
      beg_idx += count * size;
      if (beg_idx + count * size + size > region->mapped_user)
        break;
      CHECK_GT(b->count, 0);
      region->free_list.Push(b);
    }
    return b;
  }
};

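// An illustrative instantiation (a sketch; the constants and the typedef name
// are hypothetical, similar to what a 64-bit tool might use, and are not
// defined in this file): reserve a 4T space at a fixed address and keep
// 16 bytes of metadata per chunk.
//
//   static const uptr kAllocatorSpace = 0x600000000000ULL;   // kSpaceBeg
//   static const uptr kAllocatorSize  = 0x40000000000ULL;    // 4T, kSpaceSize
//   typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
//       16 /*kMetadataSize*/, DefaultSizeClassMap> PrimaryAllocator64;
//
// The COMPILER_CHECKs above require kSpaceBeg to be a multiple of kSpaceSize
// and each of the 64 regions (kSpaceSize / kNumClassesRounded) to be >= 2^32;
// both hold for these values.
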
// Maps integers in range [0, kSize) to u8 values.
template<u64 kSize>
class FlatByteMap {
 public:
  void TestOnlyInit() {
    internal_memset(map_, 0, sizeof(map_));
  }

  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize);
    CHECK_EQ(0U, map_[idx]);
    map_[idx] = val;
  }
  u8 operator[] (uptr idx) {
    CHECK_LT(idx, kSize);
    // FIXME: CHECK may be too expensive here.
    return map_[idx];
  }
 private:
  u8 map_[kSize];
};

// FIXME: Also implement TwoLevelByteMap.

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is
// less efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing the objects of this class should be
// cache-line aligned.
template <const uptr kSpaceBeg, const u64 kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          const uptr kRegionSizeLog,
          class ByteMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  void *MapWithCallback(uptr size) {
    size = RoundUpTo(size, GetPageSizeCached());
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
        alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty())
      PopulateFreeList(stat, c, sci, class_id);
    CHECK(!sci->free_list.empty());
    Batch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    CHECK_GT(b->count, 0);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(const void *p) {
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<Batch> free_list;
    char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                                       "SizeClassAllocator32"));
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMmapped, kRegionSize);
    CHECK_EQ(0U, (res & (kRegionSize - 1)));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = SizeClassMap::Size(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = SizeClassMap::MaxCached(class_id);
    Batch *b = 0;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (b == 0) {
        if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
          b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
        else
          b = (Batch*)i;
        b->count = 0;
      }
      b->batch[b->count++] = (void*)i;
      if (b->count == max_count) {
        CHECK_GT(b->count, 0);
        sci->free_list.push_back(b);
        b = 0;
      }
    }
    if (b) {
      CHECK_GT(b->count, 0);
      sci->free_list.push_back(b);
    }
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};

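// An illustrative 32-bit instantiation (a sketch; the names and constants are
// hypothetical, not defined in this file): cover the whole 2^32 address space
// with 1M regions, so the FlatByteMap holds 2^32 / 2^20 = 4096 entries, one
// size class id per possible region.
//
//   static const uptr kRegionSizeLog32 = 20;                    // 1M regions
//   typedef FlatByteMap<(1ULL << (32 - kRegionSizeLog32))> ByteMap32;
//   typedef SizeClassAllocator32<0 /*kSpaceBeg*/, 1ULL << 32 /*kSpaceSize*/,
//       16 /*kMetadataSize*/, CompactSizeClassMap, kRegionSizeLog32,
//       ByteMap32> PrimaryAllocator32;
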
// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  typedef SizeClassAllocator Allocator;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(allocator, class_id);
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(allocator, class_id);
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  typedef typename SizeClassMap::TransferBatch Batch;
  struct PerClass {
    uptr count;
    uptr max_count;
    void *batch[2 * SizeClassMap::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCached(i);
    }
  }

  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
    CHECK_GT(b->count, 0);
    for (uptr i = 0; i < b->count; i++)
      c->batch[i] = b->batch[i];
    c->count = b->count;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
  }

  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
    else
      b = (Batch*)c->batch[0];
    uptr cnt = Min(c->max_count / 2, c->count);
    for (uptr i = 0; i < cnt; i++) {
      b->batch[i] = c->batch[i];
      c->batch[i] = c->batch[i + c->max_count / 2];
    }
    b->count = cnt;
    c->count -= cnt;
    CHECK_GT(b->count, 0);
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};

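// A sketch of the intended per-thread flow (illustrative; "primary" and
// "cache" are hypothetical names, and PrimaryAllocator64 refers to the
// instantiation sketched above):
//
//   typedef PrimaryAllocator64::AllocatorCache Cache;  // this struct
//   static PrimaryAllocator64 primary;                 // shared
//   static THREADLOCAL Cache cache;                    // one per thread, POD
//   ...
//   cache.Init(0);                                     // or pass global stats
//   void *p = cache.Allocate(&primary, primary.ClassID(40));
//   cache.Deallocate(&primary, primary.GetSizeClass(p), p);
//   cache.Drain(&primary);                             // e.g. at thread exit
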
// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    page_size_ = GetPageSizeCached();
  }

  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    if (map_size < size) return AllocatorReturnNull();  // Overflow.
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDie(map_size, "LargeMmapAllocator"));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK_EQ(0, res & (alignment - 1));
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      uptr idx = n_chunks_++;
      chunks_sorted_ = false;
      CHECK_LT(idx, kMaxNumChunks);
      h->chunk_idx = idx;
      chunks_[idx] = h;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatMalloced, map_size);
      stat->Add(AllocatorStatMmapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[n_chunks_ - 1];
      chunks_[idx]->chunk_idx = idx;
      n_chunks_--;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Add(AllocatorStatFreed, h->map_size);
      stat->Add(AllocatorStatUnmapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) {
    return GetBlockBegin(p) != 0;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(const void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return 0;
    Header *h = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return 0;
    return GetUser(h);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return 0;
    if (!chunks_sorted_) {
      // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
      SortArray(reinterpret_cast<uptr*>(chunks_), n);
      for (uptr i = 0; i < n; i++)
        chunks_[i]->chunk_idx = i;
      chunks_sorted_ = true;
      min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
      max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
          chunks_[n - 1]->map_size;
    }
    if (p < min_mmap_ || p >= max_mmap_)
      return 0;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks_[mid]))
        end = mid - 1;  // We are not interested in chunks_[mid].
      else
        beg = mid;  // chunks_[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks_[end]))
        beg = end;
    }

    Header *h = chunks_[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return 0;
    return GetUser(h);
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    mutex_.Lock();
  }

  void ForceUnlock() {
    mutex_.Unlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr i = 0; i < n_chunks_; i++)
      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
  }

 private:
  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(Header *h) {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header *chunks_[kMaxNumChunks];
  uptr n_chunks_;
  uptr min_mmap_, max_mmap_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  SpinMutex mutex_;
};

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
    stats_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return AllocatorReturnNull();
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res)
      internal_memset(res, 0, size);
    return res;
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(void *p) {
    return primary_.PointerIsMine(p);
  }

  void *GetMetaData(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    primary_.ForEachChunk(callback, arg);
    secondary_.ForEachChunk(callback, arg);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
};

// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H
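
// How the pieces above are typically combined into a complete allocator
// (a sketch; the names are hypothetical and each tool defines its own,
// reusing the PrimaryAllocator64 constants sketched earlier):
//
//   typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 16,
//       DefaultSizeClassMap> Primary;
//   typedef SizeClassAllocatorLocalCache<Primary> Cache;
//   typedef LargeMmapAllocator<> Secondary;
//   typedef CombinedAllocator<Primary, Cache, Secondary> Allocator;
//
// CombinedAllocator routes small requests to the Primary through a per-thread
// Cache and everything else to the Secondary, which is exactly the split
// described in the comment above CombinedAllocator.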