// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//
// Platform abstraction for obtaining raw memory from the operating system
// (sbrk, mmap, /dev/mem, VirtualAlloc), plus hooks for returning memory to
// the system (TCMalloc_SystemRelease) and re-committing it
// (TCMalloc_SystemCommit). Which paths are compiled in is selected by the
// HAVE(...)/OS(...) feature macros and the WTF_CHANGES WebKit guard.

#include "config.h"
#if !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC)
#include "TCSystemAlloc.h"

#include "Assertions.h"
#include "CheckedArithmetic.h"
#include "TCSpinLock.h"
#include "VMTags.h"
#include <algorithm>
#include <stdint.h>

#if OS(WINDOWS)
#include "windows.h"
#else
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#endif

// Some platforms spell the anonymous-mapping flag MAP_ANON only.
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

using namespace std;

// Structure for discovering alignment: the minimum alignment any caller can
// rely on is the strictest alignment among these common member types.
union MemoryAligner {
  void*  p;
  double d;
  size_t s;
};

// Serializes all allocation attempts in TCMalloc_SystemAlloc.
static SpinLock spinlock = SPINLOCK_INITIALIZER;

// Page size is initialized on demand (via getpagesize()/GetSystemInfo the
// first time an allocator that needs it runs).
static size_t pagesize = 0;

// Configuration parameters.
//
// if use_devmem is true, either use_sbrk or use_mmap must also be true.
// For 2.2 kernels, it looks like the sbrk address space (500MBish) and
// the mmap address space (1300MBish) are disjoint, so we need both allocators
// to get as much virtual memory as possible.
#ifndef WTF_CHANGES
static bool use_devmem = false;
static bool use_sbrk = false;
#endif

#if HAVE(MMAP)
static bool use_mmap = true;
#endif

#if HAVE(VIRTUALALLOC)
static bool use_VirtualAlloc = true;
#endif

// Flags to keep us from retrying allocators that failed. They are reset once
// per call in TCMalloc_SystemAlloc when every allocator has failed.
static bool devmem_failure = false;
static bool sbrk_failure = false;
static bool mmap_failure = false;
static bool VirtualAlloc_failure = false;

#ifndef WTF_CHANGES
DEFINE_int32(malloc_devmem_start, 0,
             "Physical memory starting location in MB for /dev/mem allocation."
             "  Setting this to 0 disables /dev/mem allocation");
DEFINE_int32(malloc_devmem_limit, 0,
             "Physical memory limit location in MB for /dev/mem allocation."
             "  Setting this to 0 means no limit.");
#endif

#ifndef WTF_CHANGES

// Attempt to allocate `size` bytes aligned to `alignment` by growing the
// program break. On success returns the aligned pointer and stores the
// (rounded-up) byte count in *actual_size; on failure sets sbrk_failure and
// returns NULL. May over-allocate up to alignment-1 bytes to find an aligned
// region; the extra bytes are simply leaked back into the heap.
static void* TrySbrk(size_t size, size_t *actual_size, size_t alignment) {
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  void* result = sbrk(size);
  if (result == reinterpret_cast<void*>(-1)) {
    sbrk_failure = true;
    return NULL;
  }

  // Is it aligned?
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  if ((ptr & (alignment-1)) == 0)  return result;

  // Try to get more memory for alignment
  size_t extra = alignment - (ptr & (alignment-1));
  void* r2 = sbrk(extra);
  if (reinterpret_cast<uintptr_t>(r2) == (ptr + size)) {
    // Contiguous with previous result: shift the returned window up by
    // `extra` bytes so it is aligned; total span is still `size` bytes.
    return reinterpret_cast<void*>(ptr + extra);
  }

  // Give up and ask for "size + alignment - 1" bytes so
  // that we can find an aligned region within it.
  result = sbrk(size + alignment - 1);
  if (result == reinterpret_cast<void*>(-1)) {
    sbrk_failure = true;
    return NULL;
  }
  ptr = reinterpret_cast<uintptr_t>(result);
  if ((ptr & (alignment-1)) != 0) {
    ptr += alignment - (ptr & (alignment-1));
  }
  return reinterpret_cast<void*>(ptr);
}

#endif /* ifndef(WTF_CHANGES) */

#if HAVE(MMAP)

// Attempt to allocate `size` bytes aligned to `alignment` (at least one page)
// with anonymous mmap. The mapping is padded with one PROT_NONE page at each
// end so that buffer underruns/overruns fault instead of corrupting adjacent
// memory. On success returns the aligned pointer (just past the leading guard
// page, plus any alignment adjustment) and stores the rounded size in
// *actual_size; on failure sets mmap_failure and returns NULL.
static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) {
  // Enforce page alignment
  if (pagesize == 0) pagesize = getpagesize();
  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  // Ask for extra memory if alignment > pagesize
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }
  // Checked<> guards the size arithmetic against overflow; the extra
  // 2 * pagesize is for the two guard pages.
  Checked<size_t> mapSize = Checked<size_t>(size) + extra + 2 * pagesize;
  void* result = mmap(NULL, mapSize.unsafeGet(),
                      PROT_READ | PROT_WRITE,
                      MAP_PRIVATE|MAP_ANONYMOUS,
                      VM_TAG_FOR_TCMALLOC_MEMORY, 0);
  if (result == reinterpret_cast<void*>(MAP_FAILED)) {
    mmap_failure = true;
    return NULL;
  }
  // Remap the first and last page of the region PROT_NONE to act as guard
  // pages.
  mmap(result, pagesize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_TCMALLOC_MEMORY, 0);
  mmap(static_cast<char*>(result) + (mapSize - pagesize).unsafeGet(), pagesize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_TCMALLOC_MEMORY, 0);
  result = static_cast<char*>(result) + pagesize;
  // Adjust the return memory so it is aligned
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused memory to the system
  if (adjust > 0) {
    munmap(reinterpret_cast<void*>(ptr), adjust);
  }
  if (adjust < extra) {
    munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
  }

  ptr += adjust;
  return reinterpret_cast<void*>(ptr);
}

#endif /* HAVE(MMAP) */

#if HAVE(VIRTUALALLOC)

// Attempt to allocate `size` bytes aligned to `alignment` (at least one page)
// with VirtualAlloc. On success returns the aligned pointer and stores the
// rounded size in *actual_size; on failure sets VirtualAlloc_failure and
// returns NULL. Over-allocated head/tail regions cannot be released
// individually on Windows, so they are decommitted instead.
static void* TryVirtualAlloc(size_t size, size_t *actual_size, size_t alignment) {
  // Enforce page alignment
  if (pagesize == 0) {
    SYSTEM_INFO system_info;
    GetSystemInfo(&system_info);
    pagesize = system_info.dwPageSize;
  }

  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  // Ask for extra memory if alignment > pagesize
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }
  void* result = VirtualAlloc(NULL, size + extra,
                              MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
                              PAGE_READWRITE);

  if (result == NULL) {
    VirtualAlloc_failure = true;
    return NULL;
  }

  // Adjust the return memory so it is aligned
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused memory to the system - we'd like to release but the best we can do
  // is decommit, since Windows only lets you free the whole allocation.
  if (adjust > 0) {
    VirtualFree(reinterpret_cast<void*>(ptr), adjust, MEM_DECOMMIT);
  }
  if (adjust < extra) {
    VirtualFree(reinterpret_cast<void*>(ptr + adjust + size), extra-adjust, MEM_DECOMMIT);
  }

  ptr += adjust;
  return reinterpret_cast<void*>(ptr);
}

#endif /* HAVE(VIRTUALALLOC) */

#ifndef WTF_CHANGES
// Attempt to allocate `size` bytes aligned to `alignment` by mapping physical
// memory from /dev/mem, starting at FLAGS_malloc_devmem_start MB and bounded
// by FLAGS_malloc_devmem_limit MB (0 = no limit). Returns NULL without
// setting devmem_failure while the start flag is still 0 (unconfigured), so
// we get retried on later calls.
static void* TryDevMem(size_t size, size_t *actual_size, size_t alignment) {
  static bool initialized = false;
  static off_t physmem_base;   // next physical memory address to allocate
  static off_t physmem_limit;  // maximum physical address allowed
  static int physmem_fd;       // file descriptor for /dev/mem

  // Check if we should use /dev/mem allocation.  Note that it may take
  // a while to get this flag initialized, so meanwhile we fall back to
  // the next allocator.  (It looks like 7MB gets allocated before
  // this flag gets initialized -khr.)
  if (FLAGS_malloc_devmem_start == 0) {
    // NOTE: not a devmem_failure - we'd like TCMalloc_SystemAlloc to
    // try us again next time.
    return NULL;
  }

  if (!initialized) {
    physmem_fd = open("/dev/mem", O_RDWR);
    if (physmem_fd < 0) {
      devmem_failure = true;
      return NULL;
    }
    physmem_base = FLAGS_malloc_devmem_start*1024LL*1024LL;
    physmem_limit = FLAGS_malloc_devmem_limit*1024LL*1024LL;
    initialized = true;
  }

  // Enforce page alignment
  if (pagesize == 0) pagesize = getpagesize();
  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  // Ask for extra memory if alignment > pagesize
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }

  // check to see if we have any memory left
  if (physmem_limit != 0 && physmem_base + size + extra > physmem_limit) {
    devmem_failure = true;
    return NULL;
  }
  void *result = mmap(0, size + extra, PROT_READ | PROT_WRITE,
                      MAP_SHARED, physmem_fd, physmem_base);
  if (result == reinterpret_cast<void*>(MAP_FAILED)) {
    devmem_failure = true;
    return NULL;
  }
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);

  // Adjust the return memory so it is aligned
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused virtual memory to the system
  if (adjust > 0) {
    munmap(reinterpret_cast<void*>(ptr), adjust);
  }
  if (adjust < extra) {
    munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
  }

  ptr += adjust;
  // Advance the physical-memory cursor past what we handed out.
  physmem_base += adjust + size;

  return reinterpret_cast<void*>(ptr);
}
#endif

// Allocate `size` bytes aligned to at least `alignment` (raised to
// sizeof(MemoryAligner) if smaller) directly from the OS. On success, if
// actual_size is non-NULL it receives the number of bytes actually obtained
// (>= size). Returns NULL only if every compiled-in allocator fails twice.
// Thread-safe: the whole attempt sequence runs under `spinlock`.
void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) {
  // Discard requests that overflow
  if (size + alignment < size) return NULL;

  SpinLockHolder lock_holder(&spinlock);

  // Enforce minimum alignment
  if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner);

  // Try twice, once avoiding allocators that failed before, and once
  // more trying all allocators even if they failed before.
  for (int i = 0; i < 2; i++) {

#ifndef WTF_CHANGES
    if (use_devmem && !devmem_failure) {
      void* result = TryDevMem(size, actual_size, alignment);
      if (result != NULL) return result;
    }

    if (use_sbrk && !sbrk_failure) {
      void* result = TrySbrk(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

#if HAVE(MMAP)
    if (use_mmap && !mmap_failure) {
      void* result = TryMmap(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

#if HAVE(VIRTUALALLOC)
    if (use_VirtualAlloc && !VirtualAlloc_failure) {
      void* result = TryVirtualAlloc(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

    // nothing worked - reset failure flags and try again
    devmem_failure = false;
    sbrk_failure = false;
    mmap_failure = false;
    VirtualAlloc_failure = false;
  }
  return NULL;
}

#if HAVE(MADV_FREE_REUSE)

// Tell the kernel the pages in [start, start+length) are reusable
// (Darwin-style MADV_FREE_REUSABLE), retrying on EAGAIN.
void TCMalloc_SystemRelease(void* start, size_t length)
{
    int madviseResult;

    while ((madviseResult = madvise(start, length, MADV_FREE_REUSABLE)) == -1 && errno == EAGAIN) { }

    // Although really advisory, if madvise fail, we want to know about it.
    ASSERT_UNUSED(madviseResult, madviseResult != -1);
}

#elif HAVE(MADV_FREE) || HAVE(MADV_DONTNEED)

// Advise the kernel that the whole pages inside [start, start+length) may be
// discarded. The partial pages at either end are left untouched.
void TCMalloc_SystemRelease(void* start, size_t length)
{
    // MADV_FREE clears the modified bit on pages, which allows
    // them to be discarded immediately.
#if HAVE(MADV_FREE)
    const int advice = MADV_FREE;
#else
    const int advice = MADV_DONTNEED;
#endif
    if (pagesize == 0) pagesize = getpagesize();
    const size_t pagemask = pagesize - 1;

    size_t new_start = reinterpret_cast<size_t>(start);
    size_t end = new_start + length;
    size_t new_end = end;

    // Round up the starting address and round down the ending address
    // to be page aligned:
    new_start = (new_start + pagesize - 1) & ~pagemask;
    new_end = new_end & ~pagemask;

    ASSERT((new_start & pagemask) == 0);
    ASSERT((new_end & pagemask) == 0);
    ASSERT(new_start >= reinterpret_cast<size_t>(start));
    ASSERT(new_end <= end);

    if (new_end > new_start) {
        // Note -- ignoring most return codes, because if this fails it
        // doesn't matter...
        while (madvise(reinterpret_cast<char*>(new_start), new_end - new_start,
                       advice) == -1 &&
               errno == EAGAIN) {
            // NOP
        }
    }
}

#elif HAVE(MMAP)

// No madvise support: discard the pages by mapping fresh anonymous memory
// over the range with MAP_FIXED.
void TCMalloc_SystemRelease(void* start, size_t length)
{
    void* newAddress = mmap(start, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    // If the mmap failed then that's ok, we just won't return the memory to the system.
    ASSERT_UNUSED(newAddress, newAddress == start || newAddress == reinterpret_cast<void*>(MAP_FAILED));
}

#elif HAVE(VIRTUALALLOC)

// Windows: decommit the range so its physical pages are returned while the
// address space stays reserved.
void TCMalloc_SystemRelease(void* start, size_t length)
{
    if (VirtualFree(start, length, MEM_DECOMMIT))
        return;

    // The decommit may fail if the memory region consists of allocations
    // from more than one call to VirtualAlloc. In this case, fall back to
    // using VirtualQuery to retrieve the allocation boundaries and decommit
    // them each individually.

    char* ptr = static_cast<char*>(start);
    char* end = ptr + length;
    MEMORY_BASIC_INFORMATION info;
    while (ptr < end) {
        size_t resultSize = VirtualQuery(ptr, &info, sizeof(info));
        ASSERT_UNUSED(resultSize, resultSize == sizeof(info));

        // Decommit at most to the end of the requested range, even if the
        // VirtualAlloc region extends beyond it.
        size_t decommitSize = min<size_t>(info.RegionSize, end - ptr);
        BOOL success = VirtualFree(ptr, decommitSize, MEM_DECOMMIT);
        ASSERT_UNUSED(success, success);
        ptr += decommitSize;
    }
}

#else

// Platforms that don't support returning memory use an empty inline version of TCMalloc_SystemRelease
// declared in TCSystemAlloc.h

#endif

#if HAVE(MADV_FREE_REUSE)

// Mark previously released (MADV_FREE_REUSABLE) pages as in use again,
// retrying on EAGAIN.
void TCMalloc_SystemCommit(void* start, size_t length)
{
    while (madvise(start, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
}

#elif HAVE(VIRTUALALLOC)

// Windows: re-commit a range previously decommitted by
// TCMalloc_SystemRelease.
void TCMalloc_SystemCommit(void* start, size_t length)
{
    if (VirtualAlloc(start, length, MEM_COMMIT, PAGE_READWRITE) == start)
        return;

    // The commit may fail if the memory region consists of allocations
    // from more than one call to VirtualAlloc. In this case, fall back to
    // using VirtualQuery to retrieve the allocation boundaries and commit them
    // each individually.

    char* ptr = static_cast<char*>(start);
    char* end = ptr + length;
    MEMORY_BASIC_INFORMATION info;
    while (ptr < end) {
        size_t resultSize = VirtualQuery(ptr, &info, sizeof(info));
        ASSERT_UNUSED(resultSize, resultSize == sizeof(info));

        size_t commitSize = min<size_t>(info.RegionSize, end - ptr);
        void* newAddress = VirtualAlloc(ptr, commitSize, MEM_COMMIT, PAGE_READWRITE);
        ASSERT_UNUSED(newAddress, newAddress == ptr);
        ptr += commitSize;
    }
}

#else

// Platforms that don't need to explicitly commit memory use an empty inline version of TCMalloc_SystemCommit
// declared in TCSystemAlloc.h

#endif

#endif // #if !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC)