#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default;
arena_bin_info_t arena_bin_info[NBINS];

size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun;		/* Max run size for arenas. */
size_t large_maxclass;		/* Max large size class. */
static size_t small_maxrun;	/* Max run size used for small size classes. */
static bool *small_run_tab;	/* Valid small run page multiples. */
unsigned nlclasses;		/* Number of large size classes. */
unsigned nhclasses;		/* Number of huge size classes. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to

--- 11 unchanged lines hidden ---

/******************************************************************************/

#define CHUNK_MAP_KEY ((uintptr_t)0x1U)

JEMALLOC_INLINE_C arena_chunk_map_misc_t *
arena_miscelm_key_create(size_t size)
{
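	/*
	 * The returned "miscelm" is not a real map element: it is the encoded
	 * run size with the low CHUNK_MAP_KEY bit set.  Such a key can be
	 * handed to the run tree comparator in place of a real element, and is
	 * recognized via arena_miscelm_is_key().
	 */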
	return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) |
	    CHUNK_MAP_KEY));
}

JEMALLOC_INLINE_C bool
arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm)
{

	return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0);
}

#undef CHUNK_MAP_KEY

JEMALLOC_INLINE_C size_t
arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
{

	assert(arena_miscelm_is_key(miscelm));

	return (arena_mapbits_size_decode((uintptr_t)miscelm));
}

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	assert(!arena_miscelm_is_key(miscelm));

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_INLINE_C int
arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

--- 225 unchanged lines hidden ---

}

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	unsigned regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %

--- 94 unchanged lines hidden ---

}

static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);

--- 17 unchanged lines hidden ---

	} else if (flag_dirty != 0) {
		/* The run is dirty, so all pages must be zeroed. */
		arena_run_zero(chunk, run_ind, need_pages);
	} else {
		/*
		 * The run is clean, so some pages may be zeroed (i.e.
		 * never before touched).
		 */
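		/*
		 * A page whose unzeroed mapbit is clear is guaranteed to still
		 * be zero-filled, so it is skipped below (and merely validated
		 * in debug builds).
		 */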
		size_t i;
		for (i = 0; i < need_pages; i++) {
			if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
			    != 0)
				arena_run_zero(chunk, run_ind+i, 1);
			else if (config_debug) {
				arena_run_page_validate_zeroed(chunk,
				    run_ind+i);
			} else {

--- 32 unchanged lines hidden ---

arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);

--- 255 unchanged lines hidden ---

		}
	} else
		arena->spare = chunk;
}

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
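	/*
	 * Size classes are indexed small first, then large, then huge, so
	 * subtracting NBINS and nlclasses rebases the index: the smallest huge
	 * class (size2index(usize) == NBINS + nlclasses) maps to hstats[0].
	 */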
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

--- 289 unchanged lines hidden ---

	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));
	assert(binind != BININD_INVALID);

--- 736 unchanged lines hidden ---

	}
	return (run);
}

static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
	arena_run_t *run;
	szind_t binind;
	arena_bin_info_t *bin_info;

	/* Look for a usable run. */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);
	/* No existing runs have any space available. */

--- 33 unchanged lines hidden ---

	return (NULL);
}

/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
	szind_t binind;
	arena_bin_info_t *bin_info;
	arena_run_t *run;

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];
	bin->runcur = NULL;
	run = arena_bin_nonfull_run_get(arena, bin);
	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
		/*
		 * Another thread updated runcur while this one ran without the
		 * bin lock in arena_bin_nonfull_run_get().
		 */
		void *ret;
		assert(bin->runcur->nfree > 0);
		ret = arena_run_reg_alloc(bin->runcur, bin_info);
		if (run != NULL) {
			arena_chunk_t *chunk;

			/*
			 * arena_run_alloc_small() may have allocated run, or
			 * it may have pulled run from the bin's run tree.

--- 17 unchanged lines hidden ---

	bin->runcur = run;

	assert(bin->runcur->nfree > 0);

	return (arena_run_reg_alloc(bin->runcur, bin_info));
}

void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
    uint64_t prof_accumbytes)
{
	unsigned i, nfill;
	arena_bin_t *bin;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(arena, prof_accumbytes))
		prof_idump();
	bin = &arena->bins[binind];
	malloc_mutex_lock(&bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tbin->lg_fill_div); i < nfill; i++) {
		arena_run_t *run;
		void *ptr;
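		/*
		 * Fast path: take a region from the current run; otherwise
		 * fall back to arena_bin_malloc_hard(), which refills
		 * bin->runcur.
		 */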
		if ((run = bin->runcur) != NULL && run->nfree > 0)
			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
		else
			ptr = arena_bin_malloc_hard(arena, bin);
		if (ptr == NULL) {
			/*
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must

--- 58 unchanged lines hidden ---

#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
    JEMALLOC_N(arena_redzone_corruption_impl);
#endif

static void
arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
{
	bool error = false;

	if (opt_junk_alloc) {
		size_t size = bin_info->reg_size;
		size_t redzone_size = bin_info->redzone_size;
		size_t i;

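		/*
		 * Regions are bracketed by redzones that junk allocation
		 * filled with 0xa5 bytes; any other byte value found here
		 * indicates an out-of-bounds write by the application.
		 */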
		for (i = 1; i <= redzone_size; i++) {
			uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
			if (*byte != 0xa5) {
				error = true;
				arena_redzone_corruption(ptr, size, false, i,
				    *byte);
				if (reset)
					*byte = 0xa5;

--- 33 unchanged lines hidden ---

#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
    JEMALLOC_N(arena_dalloc_junk_small_impl);
#endif

void
arena_quarantine_junk_small(void *ptr, size_t usize)
{
	szind_t binind;
	arena_bin_info_t *bin_info;

	cassert(config_fill);
	assert(opt_junk_free);
	assert(opt_quarantine);
	assert(usize <= SMALL_MAXCLASS);

	binind = size2index(usize);
	bin_info = &arena_bin_info[binind];
	arena_redzones_validate(ptr, bin_info, true);
}

void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
	void *ret;
	arena_bin_t *bin;
	arena_run_t *run;
	szind_t binind;

	binind = size2index(size);
	assert(binind < NBINS);
	bin = &arena->bins[binind];
	size = index2size(binind);

	malloc_mutex_lock(&bin->lock);
	if ((run = bin->runcur) != NULL && run->nfree > 0)

--- 67 unchanged lines hidden ---

	if (run == NULL) {
		malloc_mutex_unlock(&arena->lock);
		return (NULL);
	}
	miscelm = arena_run_to_miscelm(run);
	ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
	    random_offset);
	if (config_stats) {
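		/*
		 * Large stats are indexed from the first large size class:
		 * size2index(usize) - NBINS maps the smallest large class to
		 * lstats[0].
		 */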
		szind_t index = size2index(usize) - NBINS;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += usize;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}

--- 76 unchanged lines hidden ---

		assert(decommitted); /* Cause of OOM. */
		arena_run_dalloc(arena, run, dirty, false, decommitted);
		malloc_mutex_unlock(&arena->lock);
		return (NULL);
	}
	ret = arena_miscelm_to_rpages(miscelm);

	if (config_stats) {
		szind_t index = size2index(usize) - NBINS;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += usize;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}

--- 13 unchanged lines hidden ---

    bool zero, tcache_t *tcache)
{
	void *ret;

	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
	    && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special run placement. */
		ret = arena_malloc(tsd, arena, usize, zero, tcache);
	} else if (usize <= large_maxclass && alignment <= PAGE) {
		/*
		 * Large; alignment doesn't require special run placement.
		 * However, the cached pointer may be at a random offset from
		 * the base of the run, so do some bit manipulation to retrieve
		 * the base.
		 */
		ret = arena_malloc(tsd, arena, usize, zero, tcache);
		if (config_cache_oblivious)
			ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
	} else {
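		/*
		 * Alignment (or size) requires explicit placement: either a
		 * large run allocated at the requested alignment, or a huge
		 * allocation.
		 */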
		if (likely(usize <= large_maxclass)) {
			ret = arena_palloc_large(tsd, arena, usize, alignment,
			    zero);
		} else if (likely(alignment <= chunksize))
			ret = huge_malloc(tsd, arena, usize, zero, tcache);
		else {
			ret = huge_palloc(tsd, arena, usize, alignment, zero,
			    tcache);
		}
	}
	return (ret);
}

void
arena_prof_promoted(const void *ptr, size_t size)
{
	arena_chunk_t *chunk;
	size_t pageind;
	szind_t binind;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);
	assert(isalloc(ptr, false) == LARGE_MINCLASS);
	assert(isalloc(ptr, true) == LARGE_MINCLASS);
	assert(size <= SMALL_MAXCLASS);

--- 11 unchanged lines hidden ---

arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

	/* Dissociate run from bin. */
	if (run == bin->runcur)
		bin->runcur = NULL;
	else {
		szind_t binind = arena_bin_index(extent_node_arena_get(
		    &chunk->node), bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];

		if (bin_info->nregs != 1) {
			/*
			 * This block's conditional is necessary because if the
			 * run only contains one region, then it never gets
			 * inserted into the non-full runs tree.

--- 47 unchanged lines hidden ---

static void
arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_bits_t *bitselm, bool junked)
{
	size_t pageind, rpages_ind;
	arena_run_t *run;
	arena_bin_t *bin;
	arena_bin_info_t *bin_info;
	szind_t binind;

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
	run = &arena_miscelm_get(chunk, rpages_ind)->run;
	binind = run->binind;
	bin = &arena->bins[binind];
	bin_info = &arena_bin_info[binind];

--- 65 unchanged lines hidden ---

}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
    JEMALLOC_N(arena_dalloc_junk_large_impl);
#endif

static void
arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, bool junked)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
	arena_run_t *run = &miscelm->run;

	if (config_fill || config_stats) {
		size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
		    large_pad;

		if (!junked)
			arena_dalloc_junk_large(ptr, usize);
		if (config_stats) {
			szind_t index = size2index(usize) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= usize;
			arena->stats.lstats[index].ndalloc++;
			arena->stats.lstats[index].curruns--;
		}
	}

--- 30 unchanged lines hidden ---

	/*
	 * Shrink the run, and make trailing pages available for other
	 * allocations.
	 */
	malloc_mutex_lock(&arena->lock);
	arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
	    large_pad, true);
	if (config_stats) {
		szind_t oldindex = size2index(oldsize) - NBINS;
		szind_t index = size2index(size) - NBINS;

		arena->stats.ndalloc_large++;
		arena->stats.allocated_large -= oldsize;
		arena->stats.lstats[oldindex].ndalloc++;
		arena->stats.lstats[oldindex].curruns--;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	malloc_mutex_unlock(&arena->lock);
}

static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t npages = (oldsize + large_pad) >> LG_PAGE;
	size_t followsize;

	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
	    large_pad);

	/* Try to extend the run. */
	malloc_mutex_lock(&arena->lock);
	if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
	    pageind+npages) != 0)
		goto label_fail;
	followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
	if (oldsize + followsize >= usize_min) {
		/*
		 * The next run is available and sufficiently large.  Split the
		 * following run, then merge the first part with the existing
		 * allocation.
		 */
		arena_run_t *run;
		size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;

		usize = usize_max;
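		/*
		 * Step usize down one size class at a time until it fits in
		 * oldsize + followsize; the guard above (oldsize + followsize
		 * >= usize_min) guarantees the walk stops at or above
		 * usize_min.
		 */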
		while (oldsize + followsize < usize)
			usize = index2size(size2index(usize)-1);
		assert(usize >= usize_min);
		assert(usize >= oldsize);
		splitsize = usize - oldsize;
		if (splitsize == 0)
			goto label_fail;

		run = &arena_miscelm_get(chunk, pageind+npages)->run;
		if (arena_run_split_large(arena, run, splitsize, zero))
			goto label_fail;

		size = oldsize + splitsize;
		npages = (size + large_pad) >> LG_PAGE;

		/*
		 * Mark the extended run as dirty if either portion of the run
		 * was dirty before allocation.  This is rather pedantic,
		 * because there's not actually any sequence of events that

--- 7 unchanged lines hidden ---

		arena_mapbits_large_set(chunk, pageind, size + large_pad,
		    flag_dirty | (flag_unzeroed_mask &
		    arena_mapbits_unzeroed_get(chunk, pageind)));
		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
		    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
		    pageind+npages-1)));

		if (config_stats) {
			szind_t oldindex = size2index(oldsize) - NBINS;
			szind_t index = size2index(size) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= oldsize;
			arena->stats.lstats[oldindex].ndalloc++;
			arena->stats.lstats[oldindex].curruns--;

			arena->stats.nmalloc_large++;
			arena->stats.nrequests_large++;
			arena->stats.allocated_large += size;
			arena->stats.lstats[index].nmalloc++;
			arena->stats.lstats[index].nrequests++;
			arena->stats.lstats[index].curruns++;
		}
		malloc_mutex_unlock(&arena->lock);
		return (false);
	}
label_fail:
	malloc_mutex_unlock(&arena->lock);
	return (true);
}

#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
#endif
static void

--- 12 unchanged lines hidden ---

    JEMALLOC_N(arena_ralloc_junk_large_impl);
#endif

/*
 * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail when growing an object if the run following it is already in
 * use.
 */
static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	arena_chunk_t *chunk;
	arena_t *arena;

	if (oldsize == usize_max) {
		/* Current size class is compatible and maximal. */
		return (false);
	}

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	arena = extent_node_arena_get(&chunk->node);

	if (oldsize < usize_max) {
		bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
		    usize_min, usize_max, zero);
		if (config_fill && !ret && !zero) {
			if (unlikely(opt_junk_alloc)) {
				memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
				    isalloc(ptr, config_prof) - oldsize);
			} else if (unlikely(opt_zero)) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    isalloc(ptr, config_prof) - oldsize);
			}
		}
		return (ret);
	}

	assert(oldsize > usize_max);
	/* Fill before shrinking in order to avoid a race. */
	arena_ralloc_junk_large(ptr, oldsize, usize_max);
	arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
	return (false);
}

bool
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{
	size_t usize_min, usize_max;

	usize_min = s2u(size);
	usize_max = s2u(size + extra);
	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
		/*
		 * Avoid moving the allocation if the size class can be left
		 * the same.
		 */
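		/*
		 * For example, assuming 16-byte-spaced small classes, an
		 * allocation in the 48-byte class that is resized to 40 bytes
		 * with 5 bytes of extra still rounds to 48, so it can stay in
		 * place.
		 */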
		if (oldsize <= SMALL_MAXCLASS) {
			assert(arena_bin_info[size2index(oldsize)].reg_size ==
			    oldsize);
			if ((usize_max <= SMALL_MAXCLASS &&
			    size2index(usize_max) == size2index(oldsize)) ||
			    (size <= oldsize && usize_max >= oldsize))
				return (false);
		} else {
			if (usize_max > SMALL_MAXCLASS) {
				if (!arena_ralloc_large(ptr, oldsize, usize_min,
				    usize_max, zero))
					return (false);
			}
		}

		/* Reallocation would require a move. */
		return (true);
	} else {
		return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max,
		    zero));
	}
}

static void *
arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment == 0)
		return (arena_malloc(tsd, arena, usize, zero, tcache));
	usize = sa2u(usize, alignment);
	if (usize == 0)
		return (NULL);
	return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
}

void *
arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t usize;

	usize = s2u(size);
	if (usize == 0)
		return (NULL);

	if (likely(usize <= large_maxclass)) {
		size_t copysize;

		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero))
			return (ptr);

		/*
		 * size and oldsize are different enough that we need to move
		 * the object.  In that case, fall back to allocating new
		 * space and copying.
		 */
		ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
		    zero, tcache);
		if (ret == NULL)
			return (NULL);

		/*
		 * Junk/zero-filling was already done by
		 * ipalloc()/arena_malloc().
		 */

		copysize = (usize < oldsize) ? usize : oldsize;
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
		memcpy(ret, ptr, copysize);
		isqalloc(tsd, ptr, oldsize, tcache);
	} else {
		ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
		    zero, tcache);
	}
	return (ret);
}

dss_prec_t
arena_dss_prec_get(arena_t *arena)
{
	dss_prec_t ret;

--- 331 unchanged lines hidden ---

#undef SC

	return (false);
}

bool
arena_boot(void)
{
	unsigned i;

	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
	map_bias = 0;
	for (i = 0; i < 3; i++) {
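		/*
		 * Hypothetical illustration: if the unbiased header needs a
		 * bit more than three pages, pass 1 computes map_bias = 4;
		 * pass 2, with four fewer map entries, may compute 3; pass 3
		 * recomputes with the smaller bias and settles on the fixed
		 * point.
		 */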
		size_t header_size = offsetof(arena_chunk_t, map_bits) +
		    ((sizeof(arena_chunk_map_bits_t) +
		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
	}
	assert(map_bias > 0);

	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);

	arena_maxrun = chunksize - (map_bias << LG_PAGE);
	assert(arena_maxrun > 0);
	large_maxclass = index2size(size2index(chunksize)-1);
	if (large_maxclass > arena_maxrun) {
		/*
		 * For small chunk sizes it's possible for there to be fewer
		 * non-header pages available than are necessary to serve the
		 * size classes just below chunksize.
		 */
		large_maxclass = arena_maxrun;
	}
	assert(large_maxclass > 0);
	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
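	/*
	 * The class indices partition as NBINS small classes, then nlclasses
	 * large classes, then nhclasses huge classes, so the huge count is
	 * whatever remains of NSIZES.
	 */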
	nhclasses = NSIZES - nlclasses - NBINS;

	bin_info_init();
	return (small_run_size_init());
}

void
arena_prefork(arena_t *arena)

--- 36 unchanged lines hidden ---