1#define JEMALLOC_CHUNK_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7const char *opt_dss = DSS_DEFAULT;
/*
 * 0 means "unset"; the boot-time code below replaces it with a default
 * (LG_CHUNK_DEFAULT, or the OS allocation granularity on Windows).
 */
8size_t opt_lg_chunk = 0;
--- 318 unchanged lines hidden (view full) ---
327 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
328 * them if they are returned.
329 */
330static void *
331chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
332 bool *zero, bool *commit, dss_prec_t dss_prec)
333{
334 void *ret; |
335 336 assert(size != 0); 337 assert((size & chunksize_mask) == 0); 338 assert(alignment != 0); 339 assert((alignment & chunksize_mask) == 0); 340 |
341 /* "primary" dss. */ 342 if (have_dss && dss_prec == dss_prec_primary && (ret = 343 chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != 344 NULL) 345 return (ret); |
346 /* mmap -- tried regardless of dss_prec, between the primary and secondary dss attempts. */
347 if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
348 NULL) |
349 return (ret); 350 /* "secondary" dss. */ 351 if (have_dss && dss_prec == dss_prec_secondary && (ret = 352 chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != 353 NULL) 354 return (ret); 355 356 /* All strategies for allocation failed. */ --- 8 unchanged lines hidden (view full) --- 365 366 /* 367 * Directly call chunk_alloc_mmap() rather than chunk_alloc_core() 368 * because it's critical that chunk_alloc_base() return untouched 369 * demand-zeroed virtual memory. 370 */ 371 zero = true; 372 commit = true; |
	/*
	 * new_addr is NULL (no placement constraint) and chunksize is passed
	 * as the alignment, so the request is for a chunk-aligned mapping.
	 */
373 ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit); |
374 if (ret == NULL) 375 return (NULL); 376 if (config_valgrind) 377 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 378 379 return (ret); 380} 381 --- 21 unchanged lines hidden (view full) --- 403 return (ret); 404} 405 406static arena_t * 407chunk_arena_get(unsigned arena_ind) 408{ 409 arena_t *arena; 410 |
	/*
	 * NOTE(review): the false argument appears to mean "do not initialize
	 * the arena on demand" -- consistent with the assertion just below
	 * that the arena must already exist; confirm against arena_get()'s
	 * contract.
	 */
411 arena = arena_get(arena_ind, false); |
412 /* 413 * The arena we're allocating on behalf of must have been initialized 414 * already. 415 */ 416 assert(arena != NULL); 417 return (arena); 418} 419 --- 10 unchanged lines hidden (view full) --- 430 if (ret == NULL) 431 return (NULL); 432 if (config_valgrind) 433 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 434 435 return (ret); 436} 437 |
438static void * 439chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, 440 size_t size, size_t alignment, bool *zero, bool *commit) 441{ 442 443 assert(size != 0); 444 assert((size & chunksize_mask) == 0); 445 assert(alignment != 0); 446 assert((alignment & chunksize_mask) == 0); 447 448 return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained, 449 &arena->chunks_ad_retained, false, new_addr, size, alignment, zero, 450 commit, true)); 451} 452 |
/*
 * Arena-facing chunk allocation entry point: first try to recycle one of the
 * arena's retained chunks; only if that fails, ask the arena's chunk
 * allocation hook for new memory.
 */
453void *
454chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
455 size_t size, size_t alignment, bool *zero, bool *commit)
456{
457 void *ret;
458
	/* Make sure the hook table is initialized before it is consulted. */
459 chunk_hooks_assure_initialized(arena, chunk_hooks); |
460
461 ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size,
462 alignment, zero, commit);
463 if (ret == NULL) {
464 ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
465 commit, arena->ind);
466 if (ret == NULL)
467 return (NULL);
468 }
469 |
470 if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default) 471 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize); 472 return (ret); 473} 474 475static void 476chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, 477 extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, --- 242 unchanged lines hidden (view full) --- 720 if (info.dwPageSize & ((1U << LG_PAGE) - 1)) 721 return (true); 722 723 /* 724 * Configure chunksize (if not set) to match granularity (usually 64K), 725 * so pages_map will always take fast path. 726 */ 727 if (!opt_lg_chunk) { |
		/*
		 * ffs_u() returns the 1-based index of the lowest set bit, so
		 * subtracting 1 (next line) yields lg2 of the allocation
		 * granularity.  Assumes dwAllocationGranularity is a power of
		 * two -- TODO confirm against the Win32 SYSTEM_INFO docs.
		 */
728 opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity) |
729 - 1; 730 } 731#else 732 if (!opt_lg_chunk) 733 opt_lg_chunk = LG_CHUNK_DEFAULT; 734#endif 735 736 /* Set variables according to the value of opt_lg_chunk. */ 737 chunksize = (ZU(1) << opt_lg_chunk); 738 assert(chunksize >= PAGE); 739 chunksize_mask = chunksize - 1; 740 chunk_npages = (chunksize >> LG_PAGE); 741 742 if (have_dss && chunk_dss_boot()) 743 return (true); |
	/*
	 * Key the chunk rtree on the significant address bits only: pointer
	 * width in bits (ZU(1) << (LG_SIZEOF_PTR+3)) minus lg2 of the chunk
	 * size, since chunk addresses are chunksize-aligned and the low bits
	 * carry no information.
	 */
744 if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
745 opt_lg_chunk), chunks_rtree_node_alloc, NULL)) |
746 return (true); 747 748 return (false); 749} 750 751void 752chunk_prefork(void) 753{ --- 17 unchanged lines hidden --- |