arena.c (289900) | arena.c (296221) |
---|---|
1#define JEMALLOC_ARENA_C_ 2#include "jemalloc/internal/jemalloc_internal.h" 3 4/******************************************************************************/ 5/* Data. */ 6 | 1#define JEMALLOC_ARENA_C_ 2#include "jemalloc/internal/jemalloc_internal.h" 3 4/******************************************************************************/ 5/* Data. */ 6 |
7purge_mode_t opt_purge = PURGE_DEFAULT; 8const char *purge_mode_names[] = { 9 "ratio", 10 "decay", 11 "N/A" 12}; |
|
7ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; 8static ssize_t lg_dirty_mult_default; | 13ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; 14static ssize_t lg_dirty_mult_default; |
15ssize_t opt_decay_time = DECAY_TIME_DEFAULT; 16static ssize_t decay_time_default; 17 |
|
9arena_bin_info_t arena_bin_info[NBINS]; 10 11size_t map_bias; 12size_t map_misc_offset; 13size_t arena_maxrun; /* Max run size for arenas. */ 14size_t large_maxclass; /* Max large size class. */ | 18arena_bin_info_t arena_bin_info[NBINS]; 19 20size_t map_bias; 21size_t map_misc_offset; 22size_t arena_maxrun; /* Max run size for arenas. */ 23size_t large_maxclass; /* Max large size class. */ |
15static size_t small_maxrun; /* Max run size used for small size classes. */ | 24size_t run_quantize_max; /* Max run_quantize_*() input. */ 25static size_t small_maxrun; /* Max run size for small size classes. */ |
16static bool *small_run_tab; /* Valid small run page multiples. */ | 26static bool *small_run_tab; /* Valid small run page multiples. */ |
27static size_t *run_quantize_floor_tab; /* run_quantize_floor() memoization. */ 28static size_t *run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */ |
|
17unsigned nlclasses; /* Number of large size classes. */ 18unsigned nhclasses; /* Number of huge size classes. */ | 29unsigned nlclasses; /* Number of large size classes. */ 30unsigned nhclasses; /* Number of huge size classes. */ |
31static szind_t runs_avail_bias; /* Size index for first runs_avail tree. */ 32static szind_t runs_avail_nclasses; /* Number of runs_avail trees. */ |
|
19 20/******************************************************************************/ 21/* 22 * Function prototypes for static functions that are referenced prior to 23 * definition. 24 */ 25 | 33 34/******************************************************************************/ 35/* 36 * Function prototypes for static functions that are referenced prior to 37 * definition. 38 */ 39 |
26static void arena_purge(arena_t *arena, bool all); | 40static void arena_purge_to_limit(arena_t *arena, size_t ndirty_limit); |
27static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, 28 bool cleaned, bool decommitted); 29static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, 30 arena_run_t *run, arena_bin_t *bin); 31static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, 32 arena_run_t *run, arena_bin_t *bin); 33 34/******************************************************************************/ 35 | 41static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, 42 bool cleaned, bool decommitted); 43static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, 44 arena_run_t *run, arena_bin_t *bin); 45static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, 46 arena_run_t *run, arena_bin_t *bin); 47 48/******************************************************************************/ 49 |
36#define CHUNK_MAP_KEY ((uintptr_t)0x1U) 37 38JEMALLOC_INLINE_C arena_chunk_map_misc_t * 39arena_miscelm_key_create(size_t size) 40{ 41 42 return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) | 43 CHUNK_MAP_KEY)); 44} 45 46JEMALLOC_INLINE_C bool 47arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm) 48{ 49 50 return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0); 51} 52 53#undef CHUNK_MAP_KEY 54 | |
55JEMALLOC_INLINE_C size_t | 50JEMALLOC_INLINE_C size_t |
56arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm) | 51arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm) |
57{ | 52{ |
58 59 assert(arena_miscelm_is_key(miscelm)); 60 61 return (arena_mapbits_size_decode((uintptr_t)miscelm)); 62} 63 64JEMALLOC_INLINE_C size_t 65arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm) 66{ | |
67 arena_chunk_t *chunk; 68 size_t pageind, mapbits; 69 | 53 arena_chunk_t *chunk; 54 size_t pageind, mapbits; 55 |
70 assert(!arena_miscelm_is_key(miscelm)); 71 | |
72 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); 73 pageind = arena_miscelm_to_pageind(miscelm); 74 mapbits = arena_mapbits_get(chunk, pageind); 75 return (arena_mapbits_size_decode(mapbits)); 76} 77 78JEMALLOC_INLINE_C int | 56 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); 57 pageind = arena_miscelm_to_pageind(miscelm); 58 mapbits = arena_mapbits_get(chunk, pageind); 59 return (arena_mapbits_size_decode(mapbits)); 60} 61 62JEMALLOC_INLINE_C int |
79arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) | 63arena_run_addr_comp(const arena_chunk_map_misc_t *a, 64 const arena_chunk_map_misc_t *b) |
80{ 81 uintptr_t a_miscelm = (uintptr_t)a; 82 uintptr_t b_miscelm = (uintptr_t)b; 83 84 assert(a != NULL); 85 assert(b != NULL); 86 87 return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); 88} 89 90/* Generate red-black tree functions. */ 91rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, | 65{ 66 uintptr_t a_miscelm = (uintptr_t)a; 67 uintptr_t b_miscelm = (uintptr_t)b; 68 69 assert(a != NULL); 70 assert(b != NULL); 71 72 return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); 73} 74 75/* Generate red-black tree functions. */ 76rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, |
92 rb_link, arena_run_comp) | 77 rb_link, arena_run_addr_comp) |
93 94static size_t | 78 79static size_t |
95run_quantize(size_t size) | 80run_quantize_floor_compute(size_t size) |
96{ 97 size_t qsize; 98 99 assert(size != 0); 100 assert(size == PAGE_CEILING(size)); 101 102 /* Don't change sizes that are valid small run sizes. */ 103 if (size <= small_maxrun && small_run_tab[size >> LG_PAGE]) 104 return (size); 105 106 /* 107 * Round down to the nearest run size that can actually be requested 108 * during normal large allocation. Add large_pad so that cache index 109 * randomization can offset the allocation from the page boundary. 110 */ 111 qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad; 112 if (qsize <= SMALL_MAXCLASS + large_pad) | 81{ 82 size_t qsize; 83 84 assert(size != 0); 85 assert(size == PAGE_CEILING(size)); 86 87 /* Don't change sizes that are valid small run sizes. */ 88 if (size <= small_maxrun && small_run_tab[size >> LG_PAGE]) 89 return (size); 90 91 /* 92 * Round down to the nearest run size that can actually be requested 93 * during normal large allocation. Add large_pad so that cache index 94 * randomization can offset the allocation from the page boundary. 95 */ 96 qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad; 97 if (qsize <= SMALL_MAXCLASS + large_pad) |
113 return (run_quantize(size - large_pad)); | 98 return (run_quantize_floor_compute(size - large_pad)); |
114 assert(qsize <= size); 115 return (qsize); 116} 117 118static size_t | 99 assert(qsize <= size); 100 return (qsize); 101} 102 103static size_t |
119run_quantize_next(size_t size) | 104run_quantize_ceil_compute_hard(size_t size) |
120{ 121 size_t large_run_size_next; 122 123 assert(size != 0); 124 assert(size == PAGE_CEILING(size)); 125 126 /* 127 * Return the next quantized size greater than the input size. --- 17 unchanged lines hidden (view full) --- 145 if (large_run_size_next < size) 146 return (large_run_size_next); 147 return (size); 148 } 149 } 150} 151 152static size_t | 105{ 106 size_t large_run_size_next; 107 108 assert(size != 0); 109 assert(size == PAGE_CEILING(size)); 110 111 /* 112 * Return the next quantized size greater than the input size. --- 17 unchanged lines hidden (view full) --- 130 if (large_run_size_next < size) 131 return (large_run_size_next); 132 return (size); 133 } 134 } 135} 136 137static size_t |
153run_quantize_first(size_t size) | 138run_quantize_ceil_compute(size_t size) |
154{ | 139{ |
155 size_t qsize = run_quantize(size); | 140 size_t qsize = run_quantize_floor_compute(size); |
156 157 if (qsize < size) { 158 /* 159 * Skip a quantization that may have an adequately large run, 160 * because under-sized runs may be mixed in. This only happens 161 * when an unusual size is requested, i.e. for aligned 162 * allocation, and is just one of several places where linear 163 * search would potentially find sufficiently aligned available 164 * memory somewhere lower. 165 */ | 141 142 if (qsize < size) { 143 /* 144 * Skip a quantization that may have an adequately large run, 145 * because under-sized runs may be mixed in. This only happens 146 * when an unusual size is requested, i.e. for aligned 147 * allocation, and is just one of several places where linear 148 * search would potentially find sufficiently aligned available 149 * memory somewhere lower. 150 */ |
166 qsize = run_quantize_next(size); | 151 qsize = run_quantize_ceil_compute_hard(qsize); |
167 } 168 return (qsize); 169} 170 | 152 } 153 return (qsize); 154} 155 |
171JEMALLOC_INLINE_C int 172arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) | 156#ifdef JEMALLOC_JET 157#undef run_quantize_floor 158#define run_quantize_floor JEMALLOC_N(run_quantize_floor_impl) 159#endif 160static size_t 161run_quantize_floor(size_t size) |
173{ | 162{ |
174 int ret; 175 uintptr_t a_miscelm = (uintptr_t)a; 176 size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ? 177 arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a)); 178 size_t b_qsize = run_quantize(arena_miscelm_size_get(b)); | 163 size_t ret; |
179 | 164 |
180 /* 181 * Compare based on quantized size rather than size, in order to sort 182 * equally useful runs only by address. 183 */ 184 ret = (a_qsize > b_qsize) - (a_qsize < b_qsize); 185 if (ret == 0) { 186 if (!arena_miscelm_is_key(a)) { 187 uintptr_t b_miscelm = (uintptr_t)b; | 165 assert(size > 0); 166 assert(size <= run_quantize_max); 167 assert((size & PAGE_MASK) == 0); |
188 | 168 |
189 ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm); 190 } else { 191 /* 192 * Treat keys as if they are lower than anything else. 193 */ 194 ret = -1; 195 } 196 } | 169 ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1]; 170 assert(ret == run_quantize_floor_compute(size)); 171 return (ret); 172} 173#ifdef JEMALLOC_JET 174#undef run_quantize_floor 175#define run_quantize_floor JEMALLOC_N(run_quantize_floor) 176run_quantize_t *run_quantize_floor = JEMALLOC_N(run_quantize_floor_impl); 177#endif |
197 | 178 |
179#ifdef JEMALLOC_JET 180#undef run_quantize_ceil 181#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil_impl) 182#endif 183static size_t 184run_quantize_ceil(size_t size) 185{ 186 size_t ret; 187 188 assert(size > 0); 189 assert(size <= run_quantize_max); 190 assert((size & PAGE_MASK) == 0); 191 192 ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1]; 193 assert(ret == run_quantize_ceil_compute(size)); |
|
198 return (ret); 199} | 194 return (ret); 195} |
196#ifdef JEMALLOC_JET 197#undef run_quantize_ceil 198#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil) 199run_quantize_t *run_quantize_ceil = JEMALLOC_N(run_quantize_ceil_impl); 200#endif |
|
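
The right-hand revision splits run quantization into `run_quantize_floor()`/`run_quantize_ceil()` and backs both with memoization tables indexed by `(size >> LG_PAGE) - 1`, so the hot path becomes an assert-guarded table load instead of a recomputation. Below is a minimal standalone sketch of that memoization pattern; the `demo_*` names are hypothetical, and a toy round-down-to-power-of-two-pages function stands in for `run_quantize_floor_compute()`.

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define DEMO_LG_PAGE	12
#define DEMO_PAGE	((size_t)1 << DEMO_LG_PAGE)
#define DEMO_MAX_PAGES	64

static size_t demo_floor_tab[DEMO_MAX_PAGES];	/* memoized results */

/* Toy stand-in for the slow path: round down to a power-of-two page count. */
static size_t
demo_quantize_floor_compute(size_t size)
{
	size_t npages = size >> DEMO_LG_PAGE;
	size_t p = 1;

	while ((p << 1) <= npages)
		p <<= 1;
	return (p << DEMO_LG_PAGE);
}

/* Precompute every page-granular input once, at startup. */
static void
demo_quantize_init(void)
{
	size_t i;

	for (i = 1; i <= DEMO_MAX_PAGES; i++)
		demo_floor_tab[i - 1] = demo_quantize_floor_compute(i * DEMO_PAGE);
}

/* Hot path: same table indexing as the new run_quantize_floor(). */
static size_t
demo_quantize_floor(size_t size)
{
	assert(size > 0 && (size & (DEMO_PAGE - 1)) == 0);
	assert(size <= DEMO_MAX_PAGES * DEMO_PAGE);
	return (demo_floor_tab[(size >> DEMO_LG_PAGE) - 1]);
}

int
main(void)
{
	demo_quantize_init();
	printf("%zu -> %zu\n", 3 * DEMO_PAGE, demo_quantize_floor(3 * DEMO_PAGE));
	return (0);
}
```

In the real code the tables cover inputs up to `run_quantize_max` (declared near the top of this diff); the sketch only illustrates the per-page-count indexing and init/lookup split.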
200 | 201 |
201/* Generate red-black tree functions. */ 202rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, 203 arena_chunk_map_misc_t, rb_link, arena_avail_comp) | 202static arena_run_tree_t * 203arena_runs_avail_get(arena_t *arena, szind_t ind) 204{ |
204 | 205 |
206 assert(ind >= runs_avail_bias); 207 assert(ind - runs_avail_bias < runs_avail_nclasses); 208 209 return (&arena->runs_avail[ind - runs_avail_bias]); 210} 211 |
|
205static void 206arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 207 size_t npages) 208{ | 212static void 213arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 214 size_t npages) 215{ |
209 | 216 szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get( 217 arena_miscelm_get(chunk, pageind)))); |
210 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 211 LG_PAGE)); | 218 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 219 LG_PAGE)); |
212 arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk, 213 pageind)); | 220 arena_run_tree_insert(arena_runs_avail_get(arena, ind), 221 arena_miscelm_get(chunk, pageind)); |
214} 215 216static void 217arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 218 size_t npages) 219{ | 222} 223 224static void 225arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 226 size_t npages) 227{ |
220 | 228 szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get( 229 arena_miscelm_get(chunk, pageind)))); |
221 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 222 LG_PAGE)); | 230 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 231 LG_PAGE)); |
223 arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk, 224 pageind)); | 232 arena_run_tree_remove(arena_runs_avail_get(arena, ind), 233 arena_miscelm_get(chunk, pageind)); |
225} 226 227static void 228arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 229 size_t npages) 230{ 231 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 232 --- 54 unchanged lines hidden (view full) --- 287 arena->ndirty -= arena_chunk_dirty_npages(node); 288 } 289} 290 291JEMALLOC_INLINE_C void * 292arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) 293{ 294 void *ret; | 234} 235 236static void 237arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 238 size_t npages) 239{ 240 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 241 --- 54 unchanged lines hidden (view full) --- 296 arena->ndirty -= arena_chunk_dirty_npages(node); 297 } 298} 299 300JEMALLOC_INLINE_C void * 301arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) 302{ 303 void *ret; |
295 unsigned regind; | 304 size_t regind; |
296 arena_chunk_map_misc_t *miscelm; 297 void *rpages; 298 299 assert(run->nfree > 0); 300 assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); 301 | 305 arena_chunk_map_misc_t *miscelm; 306 void *rpages; 307 308 assert(run->nfree > 0); 309 assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); 310 |
302 regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info); | 311 regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info); |
303 miscelm = arena_run_to_miscelm(run); 304 rpages = arena_miscelm_to_rpages(miscelm); 305 ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + 306 (uintptr_t)(bin_info->reg_interval * regind)); 307 run->nfree--; 308 return (ret); 309} 310 311JEMALLOC_INLINE_C void 312arena_run_reg_dalloc(arena_run_t *run, void *ptr) 313{ 314 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 315 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 316 size_t mapbits = arena_mapbits_get(chunk, pageind); 317 szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); 318 arena_bin_info_t *bin_info = &arena_bin_info[binind]; | 312 miscelm = arena_run_to_miscelm(run); 313 rpages = arena_miscelm_to_rpages(miscelm); 314 ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + 315 (uintptr_t)(bin_info->reg_interval * regind)); 316 run->nfree--; 317 return (ret); 318} 319 320JEMALLOC_INLINE_C void 321arena_run_reg_dalloc(arena_run_t *run, void *ptr) 322{ 323 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 324 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 325 size_t mapbits = arena_mapbits_get(chunk, pageind); 326 szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); 327 arena_bin_info_t *bin_info = &arena_bin_info[binind]; |
319 unsigned regind = arena_run_regind(run, bin_info, ptr); | 328 size_t regind = arena_run_regind(run, bin_info, ptr); |
320 321 assert(run->nfree < bin_info->nregs); 322 /* Freeing an interior pointer can cause assertion failure. */ 323 assert(((uintptr_t)ptr - 324 ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + 325 (uintptr_t)bin_info->reg0_offset)) % 326 (uintptr_t)bin_info->reg_interval == 0); 327 assert((uintptr_t)ptr >= --- 31 unchanged lines hidden (view full) --- 359 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); 360 361 arena_run_page_mark_zeroed(chunk, run_ind); 362 for (i = 0; i < PAGE / sizeof(size_t); i++) 363 assert(p[i] == 0); 364} 365 366static void | 329 330 assert(run->nfree < bin_info->nregs); 331 /* Freeing an interior pointer can cause assertion failure. */ 332 assert(((uintptr_t)ptr - 333 ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + 334 (uintptr_t)bin_info->reg0_offset)) % 335 (uintptr_t)bin_info->reg_interval == 0); 336 assert((uintptr_t)ptr >= --- 31 unchanged lines hidden (view full) --- 368 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); 369 370 arena_run_page_mark_zeroed(chunk, run_ind); 371 for (i = 0; i < PAGE / sizeof(size_t); i++) 372 assert(p[i] == 0); 373} 374 375static void |
367arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) | 376arena_nactive_add(arena_t *arena, size_t add_pages) |
368{ 369 370 if (config_stats) { | 377{ 378 379 if (config_stats) { |
371 ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages 372 - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << | 380 size_t cactive_add = CHUNK_CEILING((arena->nactive + 381 add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << |
373 LG_PAGE); | 382 LG_PAGE); |
374 if (cactive_diff != 0) 375 stats_cactive_add(cactive_diff); | 383 if (cactive_add != 0) 384 stats_cactive_add(cactive_add); |
376 } | 385 } |
386 arena->nactive += add_pages; |
|
377} 378 379static void | 387} 388 389static void |
390arena_nactive_sub(arena_t *arena, size_t sub_pages) 391{ 392 393 if (config_stats) { 394 size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) - 395 CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE); 396 if (cactive_sub != 0) 397 stats_cactive_sub(cactive_sub); 398 } 399 arena->nactive -= sub_pages; 400} 401 402static void |
|
380arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, 381 size_t flag_dirty, size_t flag_decommitted, size_t need_pages) 382{ 383 size_t total_pages, rem_pages; 384 385 assert(flag_dirty == 0 || flag_decommitted == 0); 386 387 total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> 388 LG_PAGE; 389 assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == 390 flag_dirty); 391 assert(need_pages <= total_pages); 392 rem_pages = total_pages - need_pages; 393 394 arena_avail_remove(arena, chunk, run_ind, total_pages); 395 if (flag_dirty != 0) 396 arena_run_dirty_remove(arena, chunk, run_ind, total_pages); | 403arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, 404 size_t flag_dirty, size_t flag_decommitted, size_t need_pages) 405{ 406 size_t total_pages, rem_pages; 407 408 assert(flag_dirty == 0 || flag_decommitted == 0); 409 410 total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> 411 LG_PAGE; 412 assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == 413 flag_dirty); 414 assert(need_pages <= total_pages); 415 rem_pages = total_pages - need_pages; 416 417 arena_avail_remove(arena, chunk, run_ind, total_pages); 418 if (flag_dirty != 0) 419 arena_run_dirty_remove(arena, chunk, run_ind, total_pages); |
397 arena_cactive_update(arena, need_pages, 0); 398 arena->nactive += need_pages; | 420 arena_nactive_add(arena, need_pages); |
399 400 /* Keep track of trailing unused pages for later use. */ 401 if (rem_pages > 0) { 402 size_t flags = flag_dirty | flag_decommitted; 403 size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED : 404 0; 405 406 arena_mapbits_unallocated_set(chunk, run_ind+need_pages, --- 299 unchanged lines hidden (view full) --- 706 if (arena->spare != NULL) 707 chunk = arena_chunk_init_spare(arena); 708 else { 709 chunk = arena_chunk_init_hard(arena); 710 if (chunk == NULL) 711 return (NULL); 712 } 713 | 421 422 /* Keep track of trailing unused pages for later use. */ 423 if (rem_pages > 0) { 424 size_t flags = flag_dirty | flag_decommitted; 425 size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED : 426 0; 427 428 arena_mapbits_unallocated_set(chunk, run_ind+need_pages, --- 299 unchanged lines hidden (view full) --- 728 if (arena->spare != NULL) 729 chunk = arena_chunk_init_spare(arena); 730 else { 731 chunk = arena_chunk_init_hard(arena); 732 if (chunk == NULL) 733 return (NULL); 734 } 735 |
714 /* Insert the run into the runs_avail tree. */ | |
715 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); 716 717 return (chunk); 718} 719 720static void 721arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) 722{ --- 4 unchanged lines hidden (view full) --- 727 arena_maxrun); 728 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 729 arena_maxrun); 730 assert(arena_mapbits_dirty_get(chunk, map_bias) == 731 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 732 assert(arena_mapbits_decommitted_get(chunk, map_bias) == 733 arena_mapbits_decommitted_get(chunk, chunk_npages-1)); 734 | 736 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); 737 738 return (chunk); 739} 740 741static void 742arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) 743{ --- 4 unchanged lines hidden (view full) --- 748 arena_maxrun); 749 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 750 arena_maxrun); 751 assert(arena_mapbits_dirty_get(chunk, map_bias) == 752 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 753 assert(arena_mapbits_decommitted_get(chunk, map_bias) == 754 arena_mapbits_decommitted_get(chunk, chunk_npages-1)); 755 |
735 /* 736 * Remove run from the runs_avail tree, so that the arena does not use 737 * it. 738 */ | 756 /* Remove run from runs_avail, so that the arena does not use it. */ |
739 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); 740 741 if (arena->spare != NULL) { 742 arena_chunk_t *spare = arena->spare; 743 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; 744 bool committed; 745 746 arena->spare = chunk; --- 136 unchanged lines hidden (view full) --- 883 zero, &commit); 884 if (ret == NULL) { 885 /* Revert optimistic stats updates. */ 886 malloc_mutex_lock(&arena->lock); 887 if (config_stats) { 888 arena_huge_malloc_stats_update_undo(arena, usize); 889 arena->stats.mapped -= usize; 890 } | 757 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); 758 759 if (arena->spare != NULL) { 760 arena_chunk_t *spare = arena->spare; 761 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; 762 bool committed; 763 764 arena->spare = chunk; --- 136 unchanged lines hidden (view full) --- 901 zero, &commit); 902 if (ret == NULL) { 903 /* Revert optimistic stats updates. */ 904 malloc_mutex_lock(&arena->lock); 905 if (config_stats) { 906 arena_huge_malloc_stats_update_undo(arena, usize); 907 arena->stats.mapped -= usize; 908 } |
891 arena->nactive -= (usize >> LG_PAGE); | 909 arena_nactive_sub(arena, usize >> LG_PAGE); |
892 malloc_mutex_unlock(&arena->lock); 893 } 894 895 return (ret); 896} 897 898void * 899arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, --- 5 unchanged lines hidden (view full) --- 905 906 malloc_mutex_lock(&arena->lock); 907 908 /* Optimistically update stats. */ 909 if (config_stats) { 910 arena_huge_malloc_stats_update(arena, usize); 911 arena->stats.mapped += usize; 912 } | 910 malloc_mutex_unlock(&arena->lock); 911 } 912 913 return (ret); 914} 915 916void * 917arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, --- 5 unchanged lines hidden (view full) --- 923 924 malloc_mutex_lock(&arena->lock); 925 926 /* Optimistically update stats. */ 927 if (config_stats) { 928 arena_huge_malloc_stats_update(arena, usize); 929 arena->stats.mapped += usize; 930 } |
913 arena->nactive += (usize >> LG_PAGE); | 931 arena_nactive_add(arena, usize >> LG_PAGE); |
914 915 ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment, 916 zero, true); 917 malloc_mutex_unlock(&arena->lock); 918 if (ret == NULL) { 919 ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize, 920 alignment, zero, csize); 921 } 922 | 932 933 ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment, 934 zero, true); 935 malloc_mutex_unlock(&arena->lock); 936 if (ret == NULL) { 937 ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize, 938 alignment, zero, csize); 939 } 940 |
923 if (config_stats && ret != NULL) 924 stats_cactive_add(usize); | |
925 return (ret); 926} 927 928void 929arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize) 930{ 931 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; 932 size_t csize; 933 934 csize = CHUNK_CEILING(usize); 935 malloc_mutex_lock(&arena->lock); 936 if (config_stats) { 937 arena_huge_dalloc_stats_update(arena, usize); 938 arena->stats.mapped -= usize; | 941 return (ret); 942} 943 944void 945arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize) 946{ 947 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; 948 size_t csize; 949 950 csize = CHUNK_CEILING(usize); 951 malloc_mutex_lock(&arena->lock); 952 if (config_stats) { 953 arena_huge_dalloc_stats_update(arena, usize); 954 arena->stats.mapped -= usize; |
939 stats_cactive_sub(usize); | |
940 } | 955 } |
941 arena->nactive -= (usize >> LG_PAGE); | 956 arena_nactive_sub(arena, usize >> LG_PAGE); |
942 943 chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true); 944 malloc_mutex_unlock(&arena->lock); 945} 946 947void 948arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, 949 size_t usize) 950{ 951 952 assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); 953 assert(oldsize != usize); 954 955 malloc_mutex_lock(&arena->lock); 956 if (config_stats) 957 arena_huge_ralloc_stats_update(arena, oldsize, usize); | 957 958 chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true); 959 malloc_mutex_unlock(&arena->lock); 960} 961 962void 963arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, 964 size_t usize) 965{ 966 967 assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); 968 assert(oldsize != usize); 969 970 malloc_mutex_lock(&arena->lock); 971 if (config_stats) 972 arena_huge_ralloc_stats_update(arena, oldsize, usize); |
958 if (oldsize < usize) { 959 size_t udiff = usize - oldsize; 960 arena->nactive += udiff >> LG_PAGE; 961 if (config_stats) 962 stats_cactive_add(udiff); 963 } else { 964 size_t udiff = oldsize - usize; 965 arena->nactive -= udiff >> LG_PAGE; 966 if (config_stats) 967 stats_cactive_sub(udiff); 968 } | 973 if (oldsize < usize) 974 arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE); 975 else 976 arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE); |
969 malloc_mutex_unlock(&arena->lock); 970} 971 972void 973arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, 974 size_t usize) 975{ 976 size_t udiff = oldsize - usize; 977 size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); 978 979 malloc_mutex_lock(&arena->lock); 980 if (config_stats) { 981 arena_huge_ralloc_stats_update(arena, oldsize, usize); | 977 malloc_mutex_unlock(&arena->lock); 978} 979 980void 981arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, 982 size_t usize) 983{ 984 size_t udiff = oldsize - usize; 985 size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); 986 987 malloc_mutex_lock(&arena->lock); 988 if (config_stats) { 989 arena_huge_ralloc_stats_update(arena, oldsize, usize); |
982 if (cdiff != 0) { | 990 if (cdiff != 0) |
983 arena->stats.mapped -= cdiff; | 991 arena->stats.mapped -= cdiff; |
984 stats_cactive_sub(udiff); 985 } | |
986 } | 992 } |
987 arena->nactive -= udiff >> LG_PAGE; | 993 arena_nactive_sub(arena, udiff >> LG_PAGE); |
988 989 if (cdiff != 0) { 990 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; 991 void *nchunk = (void *)((uintptr_t)chunk + 992 CHUNK_CEILING(usize)); 993 994 chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true); 995 } --- 13 unchanged lines hidden (view full) --- 1009 if (err) { 1010 /* Revert optimistic stats updates. */ 1011 malloc_mutex_lock(&arena->lock); 1012 if (config_stats) { 1013 arena_huge_ralloc_stats_update_undo(arena, oldsize, 1014 usize); 1015 arena->stats.mapped -= cdiff; 1016 } | 994 995 if (cdiff != 0) { 996 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; 997 void *nchunk = (void *)((uintptr_t)chunk + 998 CHUNK_CEILING(usize)); 999 1000 chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true); 1001 } --- 13 unchanged lines hidden (view full) --- 1015 if (err) { 1016 /* Revert optimistic stats updates. */ 1017 malloc_mutex_lock(&arena->lock); 1018 if (config_stats) { 1019 arena_huge_ralloc_stats_update_undo(arena, oldsize, 1020 usize); 1021 arena->stats.mapped -= cdiff; 1022 } |
1017 arena->nactive -= (udiff >> LG_PAGE); | 1023 arena_nactive_sub(arena, udiff >> LG_PAGE); |
1018 malloc_mutex_unlock(&arena->lock); 1019 } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, 1020 cdiff, true, arena->ind)) { 1021 chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero, 1022 true); 1023 err = true; 1024 } 1025 return (err); --- 11 unchanged lines hidden (view full) --- 1037 1038 malloc_mutex_lock(&arena->lock); 1039 1040 /* Optimistically update stats. */ 1041 if (config_stats) { 1042 arena_huge_ralloc_stats_update(arena, oldsize, usize); 1043 arena->stats.mapped += cdiff; 1044 } | 1024 malloc_mutex_unlock(&arena->lock); 1025 } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, 1026 cdiff, true, arena->ind)) { 1027 chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero, 1028 true); 1029 err = true; 1030 } 1031 return (err); --- 11 unchanged lines hidden (view full) --- 1043 1044 malloc_mutex_lock(&arena->lock); 1045 1046 /* Optimistically update stats. */ 1047 if (config_stats) { 1048 arena_huge_ralloc_stats_update(arena, oldsize, usize); 1049 arena->stats.mapped += cdiff; 1050 } |
1045 arena->nactive += (udiff >> LG_PAGE); | 1051 arena_nactive_add(arena, udiff >> LG_PAGE); |
1046 1047 err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff, 1048 chunksize, zero, true) == NULL); 1049 malloc_mutex_unlock(&arena->lock); 1050 if (err) { 1051 err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks, 1052 chunk, oldsize, usize, zero, nchunk, udiff, 1053 cdiff); 1054 } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, 1055 cdiff, true, arena->ind)) { 1056 chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero, 1057 true); 1058 err = true; 1059 } 1060 | 1052 1053 err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff, 1054 chunksize, zero, true) == NULL); 1055 malloc_mutex_unlock(&arena->lock); 1056 if (err) { 1057 err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks, 1058 chunk, oldsize, usize, zero, nchunk, udiff, 1059 cdiff); 1060 } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, 1061 cdiff, true, arena->ind)) { 1062 chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero, 1063 true); 1064 err = true; 1065 } 1066 |
1061 if (config_stats && !err) 1062 stats_cactive_add(udiff); | |
1063 return (err); 1064} 1065 1066/* 1067 * Do first-best-fit run selection, i.e. select the lowest run that best fits. | 1067 return (err); 1068} 1069 1070/* 1071 * Do first-best-fit run selection, i.e. select the lowest run that best fits. |
1068 * Run sizes are quantized, so not all candidate runs are necessarily exactly 1069 * the same size. | 1072 * Run sizes are indexed, so not all candidate runs are necessarily exactly the 1073 * same size. |
1070 */ 1071static arena_run_t * 1072arena_run_first_best_fit(arena_t *arena, size_t size) 1073{ | 1074 */ 1075static arena_run_t * 1076arena_run_first_best_fit(arena_t *arena, size_t size) 1077{ |
1074 size_t search_size = run_quantize_first(size); 1075 arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size); 1076 arena_chunk_map_misc_t *miscelm = 1077 arena_avail_tree_nsearch(&arena->runs_avail, key); 1078 if (miscelm == NULL) 1079 return (NULL); 1080 return (&miscelm->run); | 1078 szind_t ind, i; 1079 1080 ind = size2index(run_quantize_ceil(size)); 1081 for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) { 1082 arena_chunk_map_misc_t *miscelm = arena_run_tree_first( 1083 arena_runs_avail_get(arena, i)); 1084 if (miscelm != NULL) 1085 return (&miscelm->run); 1086 } 1087 1088 return (NULL); |
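
In the old revision a single `runs_avail` tree was ordered by quantized size then address and searched with an encoded key (`arena_miscelm_key_create()`, `arena_avail_tree_nsearch()`); the new revision keeps one address-ordered tree per size class (`arena_runs_avail_get()`), and `arena_run_first_best_fit()` scans classes upward from `size2index(run_quantize_ceil(size))`, returning the lowest run in the first non-empty tree. A rough sketch of that shape, with hypothetical `demo_*` names and a linked list standing in for the red-black tree:

```c
#include <stddef.h>

#define DEMO_NCLASSES 8

typedef struct demo_run_s {
	void			*addr;	/* run base address */
	struct demo_run_s	*next;	/* next run, ascending address */
} demo_run_t;

/* One address-ordered free list per size class (index = size class). */
static demo_run_t *demo_avail[DEMO_NCLASSES];

static demo_run_t *
demo_first_best_fit(unsigned ind)
{
	unsigned i;

	/* Scan from the smallest class that can satisfy the request upward. */
	for (i = ind; i < DEMO_NCLASSES; i++) {
		if (demo_avail[i] != NULL)
			return (demo_avail[i]);	/* lowest-address run in class i */
	}
	return (NULL);
}
```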
1081} 1082 1083static arena_run_t * 1084arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) 1085{ 1086 arena_run_t *run = arena_run_first_best_fit(arena, s2u(size)); 1087 if (run != NULL) { 1088 if (arena_run_split_large(arena, run, size, zero)) --- 110 unchanged lines hidden (view full) --- 1199 malloc_mutex_lock(&arena->lock); 1200 arena->lg_dirty_mult = lg_dirty_mult; 1201 arena_maybe_purge(arena); 1202 malloc_mutex_unlock(&arena->lock); 1203 1204 return (false); 1205} 1206 | 1089} 1090 1091static arena_run_t * 1092arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) 1093{ 1094 arena_run_t *run = arena_run_first_best_fit(arena, s2u(size)); 1095 if (run != NULL) { 1096 if (arena_run_split_large(arena, run, size, zero)) --- 110 unchanged lines hidden (view full) --- 1207 malloc_mutex_lock(&arena->lock); 1208 arena->lg_dirty_mult = lg_dirty_mult; 1209 arena_maybe_purge(arena); 1210 malloc_mutex_unlock(&arena->lock); 1211 1212 return (false); 1213} 1214 |
1207void 1208arena_maybe_purge(arena_t *arena) | 1215static void 1216arena_decay_deadline_init(arena_t *arena) |
1209{ 1210 | 1217{ 1218 |
1219 assert(opt_purge == purge_mode_decay); 1220 1221 /* 1222 * Generate a new deadline that is uniformly random within the next 1223 * epoch after the current one. 1224 */ 1225 nstime_copy(&arena->decay_deadline, &arena->decay_epoch); 1226 nstime_add(&arena->decay_deadline, &arena->decay_interval); 1227 if (arena->decay_time > 0) { 1228 nstime_t jitter; 1229 1230 nstime_init(&jitter, prng_range(&arena->decay_jitter_state, 1231 nstime_ns(&arena->decay_interval))); 1232 nstime_add(&arena->decay_deadline, &jitter); 1233 } 1234} 1235 1236static bool 1237arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time) 1238{ 1239 1240 assert(opt_purge == purge_mode_decay); 1241 1242 return (nstime_compare(&arena->decay_deadline, time) <= 0); 1243} 1244 1245static size_t 1246arena_decay_backlog_npages_limit(const arena_t *arena) 1247{ 1248 static const uint64_t h_steps[] = { 1249#define STEP(step, h, x, y) \ 1250 h, 1251 SMOOTHSTEP 1252#undef STEP 1253 }; 1254 uint64_t sum; 1255 size_t npages_limit_backlog; 1256 unsigned i; 1257 1258 assert(opt_purge == purge_mode_decay); 1259 1260 /* 1261 * For each element of decay_backlog, multiply by the corresponding 1262 * fixed-point smoothstep decay factor. Sum the products, then divide 1263 * to round down to the nearest whole number of pages. 1264 */ 1265 sum = 0; 1266 for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) 1267 sum += arena->decay_backlog[i] * h_steps[i]; 1268 npages_limit_backlog = (sum >> SMOOTHSTEP_BFP); 1269 1270 return (npages_limit_backlog); 1271} 1272 1273static void 1274arena_decay_epoch_advance(arena_t *arena, const nstime_t *time) 1275{ 1276 uint64_t nadvance; 1277 nstime_t delta; 1278 size_t ndirty_delta; 1279 1280 assert(opt_purge == purge_mode_decay); 1281 assert(arena_decay_deadline_reached(arena, time)); 1282 1283 nstime_copy(&delta, time); 1284 nstime_subtract(&delta, &arena->decay_epoch); 1285 nadvance = nstime_divide(&delta, &arena->decay_interval); 1286 assert(nadvance > 0); 1287 1288 /* Add nadvance decay intervals to epoch. */ 1289 nstime_copy(&delta, &arena->decay_interval); 1290 nstime_imultiply(&delta, nadvance); 1291 nstime_add(&arena->decay_epoch, &delta); 1292 1293 /* Set a new deadline. */ 1294 arena_decay_deadline_init(arena); 1295 1296 /* Update the backlog. */ 1297 if (nadvance >= SMOOTHSTEP_NSTEPS) { 1298 memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) * 1299 sizeof(size_t)); 1300 } else { 1301 memmove(arena->decay_backlog, &arena->decay_backlog[nadvance], 1302 (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t)); 1303 if (nadvance > 1) { 1304 memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS - 1305 nadvance], 0, (nadvance-1) * sizeof(size_t)); 1306 } 1307 } 1308 ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty - 1309 arena->decay_ndirty : 0; 1310 arena->decay_ndirty = arena->ndirty; 1311 arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta; 1312 arena->decay_backlog_npages_limit = 1313 arena_decay_backlog_npages_limit(arena); 1314} 1315 1316static size_t 1317arena_decay_npages_limit(arena_t *arena) 1318{ 1319 size_t npages_limit; 1320 1321 assert(opt_purge == purge_mode_decay); 1322 1323 npages_limit = arena->decay_backlog_npages_limit; 1324 1325 /* Add in any dirty pages created during the current epoch. */ 1326 if (arena->ndirty > arena->decay_ndirty) 1327 npages_limit += arena->ndirty - arena->decay_ndirty; 1328 1329 return (npages_limit); 1330} 1331 1332static void 1333arena_decay_init(arena_t *arena, ssize_t decay_time) 1334{ 1335 1336 arena->decay_time = decay_time; 1337 if (decay_time > 0) { 1338 nstime_init2(&arena->decay_interval, decay_time, 0); 1339 nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS); 1340 } 1341 1342 nstime_init(&arena->decay_epoch, 0); 1343 nstime_update(&arena->decay_epoch); 1344 arena->decay_jitter_state = (uint64_t)(uintptr_t)arena; 1345 arena_decay_deadline_init(arena); 1346 arena->decay_ndirty = arena->ndirty; 1347 arena->decay_backlog_npages_limit = 0; 1348 memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); 1349} 1350 1351static bool 1352arena_decay_time_valid(ssize_t decay_time) 1353{ 1354 1355 return (decay_time >= -1 && decay_time <= NSTIME_SEC_MAX); 1356} 1357 1358ssize_t 1359arena_decay_time_get(arena_t *arena) 1360{ 1361 ssize_t decay_time; 1362 1363 malloc_mutex_lock(&arena->lock); 1364 decay_time = arena->decay_time; 1365 malloc_mutex_unlock(&arena->lock); 1366 1367 return (decay_time); 1368} 1369 1370bool 1371arena_decay_time_set(arena_t *arena, ssize_t decay_time) 1372{ 1373 1374 if (!arena_decay_time_valid(decay_time)) 1375 return (true); 1376 1377 malloc_mutex_lock(&arena->lock); 1378 /* 1379 * Restart decay backlog from scratch, which may cause many dirty pages 1380 * to be immediately purged. It would conceptually be possible to map 1381 * the old backlog onto the new backlog, but there is no justification 1382 * for such complexity since decay_time changes are intended to be 1383 * infrequent, either between the {-1, 0, >0} states, or a one-time 1384 * arbitrary change during initial arena configuration. 1385 */ 1386 arena_decay_init(arena, decay_time); 1387 arena_maybe_purge(arena); 1388 malloc_mutex_unlock(&arena->lock); 1389 1390 return (false); 1391} 1392 1393static void 1394arena_maybe_purge_ratio(arena_t *arena) 1395{ 1396 1397 assert(opt_purge == purge_mode_ratio); 1398 |
|
1211 /* Don't purge if the option is disabled. */ 1212 if (arena->lg_dirty_mult < 0) 1213 return; | 1399 /* Don't purge if the option is disabled. */ 1400 if (arena->lg_dirty_mult < 0) 1401 return; |
1214 /* Don't recursively purge. */ 1215 if (arena->purging) 1216 return; | 1402 |
1217 /* 1218 * Iterate, since preventing recursive purging could otherwise leave too 1219 * many dirty pages. 1220 */ 1221 while (true) { 1222 size_t threshold = (arena->nactive >> arena->lg_dirty_mult); 1223 if (threshold < chunk_npages) 1224 threshold = chunk_npages; 1225 /* 1226 * Don't purge unless the number of purgeable pages exceeds the 1227 * threshold. 1228 */ 1229 if (arena->ndirty <= threshold) 1230 return; | 1403 /* 1404 * Iterate, since preventing recursive purging could otherwise leave too 1405 * many dirty pages. 1406 */ 1407 while (true) { 1408 size_t threshold = (arena->nactive >> arena->lg_dirty_mult); 1409 if (threshold < chunk_npages) 1410 threshold = chunk_npages; 1411 /* 1412 * Don't purge unless the number of purgeable pages exceeds the 1413 * threshold. 1414 */ 1415 if (arena->ndirty <= threshold) 1416 return; |
1231 arena_purge(arena, false); | 1417 arena_purge_to_limit(arena, threshold); |
1232 } 1233} 1234 | 1418 } 1419} 1420 |
1421static void 1422arena_maybe_purge_decay(arena_t *arena) 1423{ 1424 nstime_t time; 1425 size_t ndirty_limit; 1426 1427 assert(opt_purge == purge_mode_decay); 1428 1429 /* Purge all or nothing if the option is disabled. */ 1430 if (arena->decay_time <= 0) { 1431 if (arena->decay_time == 0) 1432 arena_purge_to_limit(arena, 0); 1433 return; 1434 } 1435 1436 nstime_copy(&time, &arena->decay_epoch); 1437 if (unlikely(nstime_update(&time))) { 1438 /* Time went backwards. Force an epoch advance. */ 1439 nstime_copy(&time, &arena->decay_deadline); 1440 } 1441 1442 if (arena_decay_deadline_reached(arena, &time)) 1443 arena_decay_epoch_advance(arena, &time); 1444 1445 ndirty_limit = arena_decay_npages_limit(arena); 1446 1447 /* 1448 * Don't try to purge unless the number of purgeable pages exceeds the 1449 * current limit. 1450 */ 1451 if (arena->ndirty <= ndirty_limit) 1452 return; 1453 arena_purge_to_limit(arena, ndirty_limit); 1454} 1455 1456void 1457arena_maybe_purge(arena_t *arena) 1458{ 1459 1460 /* Don't recursively purge. */ 1461 if (arena->purging) 1462 return; 1463 1464 if (opt_purge == purge_mode_ratio) 1465 arena_maybe_purge_ratio(arena); 1466 else 1467 arena_maybe_purge_decay(arena); 1468} 1469 |
|
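
The new decay purge mode (see `arena_maybe_purge_decay()` above) buckets dirty-page creation into `SMOOTHSTEP_NSTEPS` epochs and derives the allowed dirty-page count as a fixed-point weighted sum over that backlog, so pages dirtied recently still count toward the limit while older ones progressively stop counting. Below is a compact sketch of that weighted-sum step; `DEMO_NSTEPS`, `DEMO_BFP`, and the weight values are hypothetical stand-ins for `SMOOTHSTEP_NSTEPS`, `SMOOTHSTEP_BFP`, and the generated `h_steps[]` table.

```c
#include <stddef.h>
#include <stdint.h>

#define DEMO_NSTEPS	4
#define DEMO_BFP	24	/* fractional bits of the fixed-point weights */

/* backlog[0] is the oldest epoch, backlog[DEMO_NSTEPS-1] the newest. */
static const uint64_t demo_h_steps[DEMO_NSTEPS] = {
	(uint64_t)1 << 22,	/* oldest: ~0.25 of its pages may stay dirty */
	(uint64_t)1 << 23,	/* ~0.5 */
	(uint64_t)3 << 22,	/* ~0.75 */
	(uint64_t)1 << 24	/* newest: all of its pages may stay dirty */
};

static size_t
demo_backlog_npages_limit(const size_t backlog[DEMO_NSTEPS])
{
	uint64_t sum = 0;
	unsigned i;

	/* Weight each epoch's dirty-page count, then truncate to whole pages. */
	for (i = 0; i < DEMO_NSTEPS; i++)
		sum += (uint64_t)backlog[i] * demo_h_steps[i];
	return ((size_t)(sum >> DEMO_BFP));
}
```

Each epoch advance in the diff shifts the backlog down and appends the pages dirtied during the epoch that just ended, so the resulting limit decays smoothly toward zero over `decay_time`.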
1235static size_t 1236arena_dirty_count(arena_t *arena) 1237{ 1238 size_t ndirty = 0; 1239 arena_runs_dirty_link_t *rdelm; 1240 extent_node_t *chunkselm; 1241 1242 for (rdelm = qr_next(&arena->runs_dirty, rd_link), --- 19 unchanged lines hidden (view full) --- 1262 } 1263 ndirty += npages; 1264 } 1265 1266 return (ndirty); 1267} 1268 1269static size_t | 1470static size_t 1471arena_dirty_count(arena_t *arena) 1472{ 1473 size_t ndirty = 0; 1474 arena_runs_dirty_link_t *rdelm; 1475 extent_node_t *chunkselm; 1476 1477 for (rdelm = qr_next(&arena->runs_dirty, rd_link), --- 19 unchanged lines hidden (view full) --- 1497 } 1498 ndirty += npages; 1499 } 1500 1501 return (ndirty); 1502} 1503 1504static size_t |
1270arena_compute_npurge(arena_t *arena, bool all) 1271{ 1272 size_t npurge; 1273 1274 /* 1275 * Compute the minimum number of pages that this thread should try to 1276 * purge. 1277 */ 1278 if (!all) { 1279 size_t threshold = (arena->nactive >> arena->lg_dirty_mult); 1280 threshold = threshold < chunk_npages ? chunk_npages : threshold; 1281 1282 npurge = arena->ndirty - threshold; 1283 } else 1284 npurge = arena->ndirty; 1285 1286 return (npurge); 1287} 1288 1289static size_t 1290arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, 1291 size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel, | 1505arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, 1506 size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel, |
1292 extent_node_t *purge_chunks_sentinel) 1293{ 1294 arena_runs_dirty_link_t *rdelm, *rdelm_next; 1295 extent_node_t *chunkselm; 1296 size_t nstashed = 0; 1297 | 1507 extent_node_t *purge_chunks_sentinel) 1508{ 1509 arena_runs_dirty_link_t *rdelm, *rdelm_next; 1510 extent_node_t *chunkselm; 1511 size_t nstashed = 0; 1512 |
1298 /* Stash at least npurge pages. */ | 1513 /* Stash runs/chunks according to ndirty_limit. */ |
1299 for (rdelm = qr_next(&arena->runs_dirty, rd_link), 1300 chunkselm = qr_next(&arena->chunks_cache, cc_link); 1301 rdelm != &arena->runs_dirty; rdelm = rdelm_next) { 1302 size_t npages; 1303 rdelm_next = qr_next(rdelm, rd_link); 1304 1305 if (rdelm == &chunkselm->rd) { 1306 extent_node_t *chunkselm_next; 1307 bool zero; 1308 UNUSED void *chunk; 1309 | 1514 for (rdelm = qr_next(&arena->runs_dirty, rd_link), 1515 chunkselm = qr_next(&arena->chunks_cache, cc_link); 1516 rdelm != &arena->runs_dirty; rdelm = rdelm_next) { 1517 size_t npages; 1518 rdelm_next = qr_next(rdelm, rd_link); 1519 1520 if (rdelm == &chunkselm->rd) { 1521 extent_node_t *chunkselm_next; 1522 bool zero; 1523 UNUSED void *chunk; 1524 |
1525 npages = extent_node_size_get(chunkselm) >> LG_PAGE; 1526 if (opt_purge == purge_mode_decay && arena->ndirty - 1527 (nstashed + npages) < ndirty_limit) 1528 break; 1529 |
|
1310 chunkselm_next = qr_next(chunkselm, cc_link); 1311 /* 1312 * Allocate. chunkselm remains valid due to the 1313 * dalloc_node=false argument to chunk_alloc_cache(). 1314 */ 1315 zero = false; 1316 chunk = chunk_alloc_cache(arena, chunk_hooks, 1317 extent_node_addr_get(chunkselm), 1318 extent_node_size_get(chunkselm), chunksize, &zero, 1319 false); 1320 assert(chunk == extent_node_addr_get(chunkselm)); 1321 assert(zero == extent_node_zeroed_get(chunkselm)); 1322 extent_node_dirty_insert(chunkselm, purge_runs_sentinel, 1323 purge_chunks_sentinel); | 1530 chunkselm_next = qr_next(chunkselm, cc_link); 1531 /* 1532 * Allocate. chunkselm remains valid due to the 1533 * dalloc_node=false argument to chunk_alloc_cache(). 1534 */ 1535 zero = false; 1536 chunk = chunk_alloc_cache(arena, chunk_hooks, 1537 extent_node_addr_get(chunkselm), 1538 extent_node_size_get(chunkselm), chunksize, &zero, 1539 false); 1540 assert(chunk == extent_node_addr_get(chunkselm)); 1541 assert(zero == extent_node_zeroed_get(chunkselm)); 1542 extent_node_dirty_insert(chunkselm, purge_runs_sentinel, 1543 purge_chunks_sentinel); |
1324 npages = extent_node_size_get(chunkselm) >> LG_PAGE; | 1544 assert(npages == (extent_node_size_get(chunkselm) >> 1545 LG_PAGE)); |
1325 chunkselm = chunkselm_next; 1326 } else { 1327 arena_chunk_t *chunk = 1328 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); 1329 arena_chunk_map_misc_t *miscelm = 1330 arena_rd_to_miscelm(rdelm); 1331 size_t pageind = arena_miscelm_to_pageind(miscelm); 1332 arena_run_t *run = &miscelm->run; 1333 size_t run_size = 1334 arena_mapbits_unallocated_size_get(chunk, pageind); 1335 1336 npages = run_size >> LG_PAGE; | 1546 chunkselm = chunkselm_next; 1547 } else { 1548 arena_chunk_t *chunk = 1549 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); 1550 arena_chunk_map_misc_t *miscelm = 1551 arena_rd_to_miscelm(rdelm); 1552 size_t pageind = arena_miscelm_to_pageind(miscelm); 1553 arena_run_t *run = &miscelm->run; 1554 size_t run_size = 1555 arena_mapbits_unallocated_size_get(chunk, pageind); 1556 1557 npages = run_size >> LG_PAGE; |
1558 if (opt_purge == purge_mode_decay && arena->ndirty - 1559 (nstashed + npages) < ndirty_limit) 1560 break; |
|
1337 1338 assert(pageind + npages <= chunk_npages); 1339 assert(arena_mapbits_dirty_get(chunk, pageind) == 1340 arena_mapbits_dirty_get(chunk, pageind+npages-1)); 1341 1342 /* 1343 * If purging the spare chunk's run, make it available 1344 * prior to allocation. --- 9 unchanged lines hidden (view full) --- 1354 else { 1355 assert(qr_next(rdelm, rd_link) == rdelm); 1356 assert(qr_prev(rdelm, rd_link) == rdelm); 1357 } 1358 qr_meld(purge_runs_sentinel, rdelm, rd_link); 1359 } 1360 1361 nstashed += npages; | 1561 1562 assert(pageind + npages <= chunk_npages); 1563 assert(arena_mapbits_dirty_get(chunk, pageind) == 1564 arena_mapbits_dirty_get(chunk, pageind+npages-1)); 1565 1566 /* 1567 * If purging the spare chunk's run, make it available 1568 * prior to allocation. --- 9 unchanged lines hidden (view full) --- 1578 else { 1579 assert(qr_next(rdelm, rd_link) == rdelm); 1580 assert(qr_prev(rdelm, rd_link) == rdelm); 1581 } 1582 qr_meld(purge_runs_sentinel, rdelm, rd_link); 1583 } 1584 1585 nstashed += npages; |
1362 if (!all && nstashed >= npurge) | 1586 if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <= 1587 ndirty_limit) |
1363 break; 1364 } 1365 1366 return (nstashed); 1367} 1368 1369static size_t 1370arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, --- 123 unchanged lines hidden (view full) --- 1494 pageind) != 0); 1495 arena_run_t *run = &miscelm->run; 1496 qr_remove(rdelm, rd_link); 1497 arena_run_dalloc(arena, run, false, true, decommitted); 1498 } 1499 } 1500} 1501 | 1588 break; 1589 } 1590 1591 return (nstashed); 1592} 1593 1594static size_t 1595arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, --- 123 unchanged lines hidden (view full) --- 1719 pageind) != 0); 1720 arena_run_t *run = &miscelm->run; 1721 qr_remove(rdelm, rd_link); 1722 arena_run_dalloc(arena, run, false, true, decommitted); 1723 } 1724 } 1725} 1726 |
1727/* 1728 * NB: ndirty_limit is interpreted differently depending on opt_purge: 1729 * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the 1730 * desired state: 1731 * (arena->ndirty <= ndirty_limit) 1732 * - purge_mode_decay: Purge as many dirty runs/chunks as possible without 1733 * violating the invariant: 1734 * (arena->ndirty >= ndirty_limit) 1735 */ |
|
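
The NB comment above pins down how `ndirty_limit` is read by `arena_purge_to_limit()`, and the matching loop bounds appear in `arena_stash_dirty()` earlier in the diff. A minimal restatement of those two conditions (hypothetical `demo_*` helpers; `ndirty` is the arena's current dirty-page count):

```c
#include <stdbool.h>
#include <stddef.h>

/*
 * purge_mode_decay: evaluated before stashing the next candidate of
 * `npages` pages -- never let purging drop ndirty below ndirty_limit.
 */
static bool
demo_decay_skip_candidate(size_t ndirty, size_t nstashed, size_t npages,
    size_t ndirty_limit)
{

	return (ndirty - (nstashed + npages) < ndirty_limit);
}

/*
 * purge_mode_ratio: evaluated after stashing -- purge as little as
 * possible, stopping once ndirty would reach ndirty_limit.
 */
static bool
demo_ratio_done(size_t ndirty, size_t nstashed, size_t ndirty_limit)
{

	return (ndirty - nstashed <= ndirty_limit);
}
```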
1502static void | 1736static void |
1503arena_purge(arena_t *arena, bool all) | 1737arena_purge_to_limit(arena_t *arena, size_t ndirty_limit) |
1504{ 1505 chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); | 1738{ 1739 chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); |
1506 size_t npurge, npurgeable, npurged; | 1740 size_t npurge, npurged; |
1507 arena_runs_dirty_link_t purge_runs_sentinel; 1508 extent_node_t purge_chunks_sentinel; 1509 1510 arena->purging = true; 1511 1512 /* 1513 * Calls to arena_dirty_count() are disabled even for debug builds 1514 * because overhead grows nonlinearly as memory usage increases. 1515 */ 1516 if (false && config_debug) { 1517 size_t ndirty = arena_dirty_count(arena); 1518 assert(ndirty == arena->ndirty); 1519 } | 1741 arena_runs_dirty_link_t purge_runs_sentinel; 1742 extent_node_t purge_chunks_sentinel; 1743 1744 arena->purging = true; 1745 1746 /* 1747 * Calls to arena_dirty_count() are disabled even for debug builds 1748 * because overhead grows nonlinearly as memory usage increases. 1749 */ 1750 if (false && config_debug) { 1751 size_t ndirty = arena_dirty_count(arena); 1752 assert(ndirty == arena->ndirty); 1753 } |
1520 assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all); | 1754 assert(opt_purge != purge_mode_ratio || (arena->nactive >> 1755 arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0); |
1521 | 1756 |
1522 if (config_stats) 1523 arena->stats.npurge++; 1524 1525 npurge = arena_compute_npurge(arena, all); | |
1526 qr_new(&purge_runs_sentinel, rd_link); 1527 extent_node_dirty_linkage_init(&purge_chunks_sentinel); 1528 | 1757 qr_new(&purge_runs_sentinel, rd_link); 1758 extent_node_dirty_linkage_init(&purge_chunks_sentinel); 1759 |
1529 npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge, | 1760 npurge = arena_stash_dirty(arena, &chunk_hooks, ndirty_limit, |
1530 &purge_runs_sentinel, &purge_chunks_sentinel); | 1761 &purge_runs_sentinel, &purge_chunks_sentinel); |
1531 assert(npurgeable >= npurge); | 1762 if (npurge == 0) 1763 goto label_return; |
1532 npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel, 1533 &purge_chunks_sentinel); | 1764 npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel, 1765 &purge_chunks_sentinel); |
1534 assert(npurged == npurgeable); | 1766 assert(npurged == npurge); |
1535 arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel, 1536 &purge_chunks_sentinel); 1537 | 1767 arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel, 1768 &purge_chunks_sentinel); 1769 |
1770 if (config_stats) 1771 arena->stats.npurge++; 1772 1773label_return: |
|
1538 arena->purging = false; 1539} 1540 1541void | 1774 arena->purging = false; 1775} 1776 1777void |
1542arena_purge_all(arena_t *arena) | 1778arena_purge(arena_t *arena, bool all) |
1543{ 1544 1545 malloc_mutex_lock(&arena->lock); | 1779{ 1780 1781 malloc_mutex_lock(&arena->lock); |
1546 arena_purge(arena, true); | 1782 if (all) 1783 arena_purge_to_limit(arena, 0); 1784 else 1785 arena_maybe_purge(arena); |
1547 malloc_mutex_unlock(&arena->lock); 1548} 1549 1550static void 1551arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, 1552 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, 1553 size_t flag_decommitted) 1554{ --- 100 unchanged lines hidden (view full) --- 1655 } else { 1656 arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; 1657 size = bin_info->run_size; 1658 } 1659 1660 return (size); 1661} 1662 | 1786 malloc_mutex_unlock(&arena->lock); 1787} 1788 1789static void 1790arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, 1791 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, 1792 size_t flag_decommitted) 1793{ --- 100 unchanged lines hidden (view full) --- 1894 } else { 1895 arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; 1896 size = bin_info->run_size; 1897 } 1898 1899 return (size); 1900} 1901 |
1663static bool 1664arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run) 1665{ 1666 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1667 size_t run_ind = arena_miscelm_to_pageind(miscelm); 1668 size_t offset = run_ind << LG_PAGE; 1669 size_t length = arena_run_size_get(arena, chunk, run, run_ind); 1670 1671 return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length, 1672 arena->ind)); 1673} 1674 | |
1675static void 1676arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, 1677 bool decommitted) 1678{ 1679 arena_chunk_t *chunk; 1680 arena_chunk_map_misc_t *miscelm; 1681 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; 1682 1683 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1684 miscelm = arena_run_to_miscelm(run); 1685 run_ind = arena_miscelm_to_pageind(miscelm); 1686 assert(run_ind >= map_bias); 1687 assert(run_ind < chunk_npages); 1688 size = arena_run_size_get(arena, chunk, run, run_ind); 1689 run_pages = (size >> LG_PAGE); | 1902static void 1903arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, 1904 bool decommitted) 1905{ 1906 arena_chunk_t *chunk; 1907 arena_chunk_map_misc_t *miscelm; 1908 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; 1909 1910 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1911 miscelm = arena_run_to_miscelm(run); 1912 run_ind = arena_miscelm_to_pageind(miscelm); 1913 assert(run_ind >= map_bias); 1914 assert(run_ind < chunk_npages); 1915 size = arena_run_size_get(arena, chunk, run, run_ind); 1916 run_pages = (size >> LG_PAGE); |
1690 arena_cactive_update(arena, 0, run_pages); 1691 arena->nactive -= run_pages; | 1917 arena_nactive_sub(arena, run_pages); |
1692 1693 /* 1694 * The run is dirty if the caller claims to have dirtied it, as well as 1695 * if it was already dirty before being allocated and the caller 1696 * doesn't claim to have cleaned it. 1697 */ 1698 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1699 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); --- 45 unchanged lines hidden (view full) --- 1745 * allows for an old spare to be fully deallocated, thus decreasing the 1746 * chances of spuriously crossing the dirty page purging threshold. 1747 */ 1748 if (dirty) 1749 arena_maybe_purge(arena); 1750} 1751 1752static void | 1918 1919 /* 1920 * The run is dirty if the caller claims to have dirtied it, as well as 1921 * if it was already dirty before being allocated and the caller 1922 * doesn't claim to have cleaned it. 1923 */ 1924 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1925 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); --- 45 unchanged lines hidden (view full) --- 1971 * allows for an old spare to be fully deallocated, thus decreasing the 1972 * chances of spuriously crossing the dirty page purging threshold. 1973 */ 1974 if (dirty) 1975 arena_maybe_purge(arena); 1976} 1977 1978static void |
1753arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk, 1754 arena_run_t *run) 1755{ 1756 bool committed = arena_run_decommit(arena, chunk, run); 1757 1758 arena_run_dalloc(arena, run, committed, false, !committed); 1759} 1760 1761static void | |
1762arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1763 size_t oldsize, size_t newsize) 1764{ 1765 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1766 size_t pageind = arena_miscelm_to_pageind(miscelm); 1767 size_t head_npages = (oldsize - newsize) >> LG_PAGE; 1768 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1769 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); --- 211 unchanged lines hidden (view full) --- 1981 bin->runcur = run; 1982 1983 assert(bin->runcur->nfree > 0); 1984 1985 return (arena_run_reg_alloc(bin->runcur, bin_info)); 1986} 1987 1988void | 1979arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1980 size_t oldsize, size_t newsize) 1981{ 1982 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1983 size_t pageind = arena_miscelm_to_pageind(miscelm); 1984 size_t head_npages = (oldsize - newsize) >> LG_PAGE; 1985 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1986 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); --- 211 unchanged lines hidden (view full) --- 2198 bin->runcur = run; 2199 2200 assert(bin->runcur->nfree > 0); 2201 2202 return (arena_run_reg_alloc(bin->runcur, bin_info)); 2203} 2204 2205void |
1989arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, 1990 uint64_t prof_accumbytes) | 2206arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin, 2207 szind_t binind, uint64_t prof_accumbytes) |
1991{ 1992 unsigned i, nfill; 1993 arena_bin_t *bin; 1994 1995 assert(tbin->ncached == 0); 1996 1997 if (config_prof && arena_prof_accum(arena, prof_accumbytes)) 1998 prof_idump(); --- 6 unchanged lines hidden (view full) --- 2005 if ((run = bin->runcur) != NULL && run->nfree > 0) 2006 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); 2007 else 2008 ptr = arena_bin_malloc_hard(arena, bin); 2009 if (ptr == NULL) { 2010 /* 2011 * OOM. tbin->avail isn't yet filled down to its first 2012 * element, so the successful allocations (if any) must | 2208{ 2209 unsigned i, nfill; 2210 arena_bin_t *bin; 2211 2212 assert(tbin->ncached == 0); 2213 2214 if (config_prof && arena_prof_accum(arena, prof_accumbytes)) 2215 prof_idump(); --- 6 unchanged lines hidden (view full) --- 2222 if ((run = bin->runcur) != NULL && run->nfree > 0) 2223 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); 2224 else 2225 ptr = arena_bin_malloc_hard(arena, bin); 2226 if (ptr == NULL) { 2227 /* 2228 * OOM. tbin->avail isn't yet filled down to its first 2229 * element, so the successful allocations (if any) must |
2013 * be moved to the base of tbin->avail before bailing 2014 * out. | 2230 * be moved just before tbin->avail before bailing out. |
2015 */ 2016 if (i > 0) { | 2231 */ 2232 if (i > 0) { |
2017 memmove(tbin->avail, &tbin->avail[nfill - i], | 2233 memmove(tbin->avail - i, tbin->avail - nfill, |
2018 i * sizeof(void *)); 2019 } 2020 break; 2021 } 2022 if (config_fill && unlikely(opt_junk_alloc)) { 2023 arena_alloc_junk_small(ptr, &arena_bin_info[binind], 2024 true); 2025 } 2026 /* Insert such that low regions get used first. */ | 2234 i * sizeof(void *)); 2235 } 2236 break; 2237 } 2238 if (config_fill && unlikely(opt_junk_alloc)) { 2239 arena_alloc_junk_small(ptr, &arena_bin_info[binind], 2240 true); 2241 } 2242 /* Insert such that low regions get used first. */ |
2027 tbin->avail[nfill - 1 - i] = ptr; | 2243 *(tbin->avail - nfill + i) = ptr; |
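The refill loop now addresses the tcache bin's cache through negative offsets from `tbin->avail`, which in the newer revision points just past the usable slots; both the OOM-path `memmove` above and this per-iteration store are expressed relative to that end pointer. Below is a minimal standalone sketch of filling and compacting such a downward-addressed array; the names (`slots`, `avail`, `backing`) are hypothetical and only loosely model `tcache_bin_t`.

```c
#include <stdio.h>
#include <string.h>

/*
 * Illustrative only: a bin whose "avail" pointer references one element past
 * the slot array, so cached pointers live at avail[-1], avail[-2], ...
 * Hypothetical layout, loosely modeled on the newer tcache addressing.
 */
#define NSLOTS 8

int
main(void)
{
	void *slots[NSLOTS];
	void **avail = &slots[NSLOTS];	/* one past the end */
	unsigned nfill = 6, i, filled;
	int backing[NSLOTS];

	/*
	 * Fill so that low objects get used first: iteration i writes
	 * *(avail - nfill + i), i.e. the deepest slot is written first.
	 */
	for (filled = 0; filled < nfill; filled++) {
		if (filled == 4)
			break;		/* simulate OOM partway through */
		*(avail - nfill + filled) = &backing[filled];
	}
	if (filled < nfill && filled > 0) {
		/*
		 * Compact the successful allocations so they sit immediately
		 * below avail, mirroring the memmove in the OOM path.
		 */
		memmove(avail - filled, avail - nfill,
		    filled * sizeof(void *));
	}
	for (i = 0; i < filled; i++) {
		printf("cached[%u] = backing[%td]\n", i,
		    (int *)*(avail - 1 - i) - backing);
	}
	return (0);
}
```

The compaction step is only needed when the fill loop stops early, which is exactly the OOM case described by the reworded comment in this hunk.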
2028 } 2029 if (config_stats) { 2030 bin->stats.nmalloc += i; 2031 bin->stats.nrequests += tbin->tstats.nrequests; 2032 bin->stats.curregs += i; 2033 bin->stats.nfills++; 2034 tbin->tstats.nrequests = 0; 2035 } 2036 malloc_mutex_unlock(&bin->lock); 2037 tbin->ncached = i; | 2244 } 2245 if (config_stats) { 2246 bin->stats.nmalloc += i; 2247 bin->stats.nrequests += tbin->tstats.nrequests; 2248 bin->stats.curregs += i; 2249 bin->stats.nfills++; 2250 tbin->tstats.nrequests = 0; 2251 } 2252 malloc_mutex_unlock(&bin->lock); 2253 tbin->ncached = i; |
2254 arena_decay_tick(tsd, arena); |
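The newer revision threads `tsd_t *` through the allocation and deallocation paths so each operation can end with `arena_decay_tick()`; under the new decay purge mode this periodically gives the arena a chance to purge dirty pages as time passes, rather than only when a dirty/active ratio is crossed. The sketch below shows the general ticker-driven shape of such a hook; `ticker_t` and `maybe_purge()` here are hypothetical stand-ins, not jemalloc's actual decay machinery.

```c
#include <stdio.h>

/*
 * Hypothetical stand-in for a per-thread/per-arena event ticker: every
 * nticks calls to ticker_tick() fire the associated slow-path work once.
 */
typedef struct {
	int nticks;	/* period */
	int tick;	/* countdown */
} ticker_t;

static void
ticker_init(ticker_t *t, int nticks)
{
	t->nticks = nticks;
	t->tick = nticks;
}

static int
ticker_tick(ticker_t *t)
{
	if (--t->tick == 0) {
		t->tick = t->nticks;
		return (1);	/* period elapsed */
	}
	return (0);
}

static void
maybe_purge(void)
{
	/* Placeholder for time-based purging of dirty pages. */
	printf("purge check\n");
}

int
main(void)
{
	ticker_t decay_ticker;
	int i;

	ticker_init(&decay_ticker, 100);
	/* Imagine one tick per malloc/free on this arena. */
	for (i = 0; i < 350; i++) {
		if (ticker_tick(&decay_ticker))
			maybe_purge();
	}
	return (0);
}
```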
|
2038} 2039 2040void 2041arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) 2042{ 2043 2044 if (zero) { 2045 size_t redzone_size = bin_info->redzone_size; --- 93 unchanged lines hidden (view full) --- 2139 assert(opt_quarantine); 2140 assert(usize <= SMALL_MAXCLASS); 2141 2142 binind = size2index(usize); 2143 bin_info = &arena_bin_info[binind]; 2144 arena_redzones_validate(ptr, bin_info, true); 2145} 2146 | 2255} 2256 2257void 2258arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) 2259{ 2260 2261 if (zero) { 2262 size_t redzone_size = bin_info->redzone_size; --- 93 unchanged lines hidden (view full) --- 2356 assert(opt_quarantine); 2357 assert(usize <= SMALL_MAXCLASS); 2358 2359 binind = size2index(usize); 2360 bin_info = &arena_bin_info[binind]; 2361 arena_redzones_validate(ptr, bin_info, true); 2362} 2363 |
2147void * 2148arena_malloc_small(arena_t *arena, size_t size, bool zero) | 2364static void * 2365arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) |
2149{ 2150 void *ret; 2151 arena_bin_t *bin; | 2366{ 2367 void *ret; 2368 arena_bin_t *bin; |
2369 size_t usize; |
|
2152 arena_run_t *run; | 2370 arena_run_t *run; |
2153 szind_t binind; | |
2154 | 2371 |
2155 binind = size2index(size); | |
2156 assert(binind < NBINS); 2157 bin = &arena->bins[binind]; | 2372 assert(binind < NBINS); 2373 bin = &arena->bins[binind]; |
2158 size = index2size(binind); | 2374 usize = index2size(binind); |
2159 2160 malloc_mutex_lock(&bin->lock); 2161 if ((run = bin->runcur) != NULL && run->nfree > 0) 2162 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); 2163 else 2164 ret = arena_bin_malloc_hard(arena, bin); 2165 2166 if (ret == NULL) { 2167 malloc_mutex_unlock(&bin->lock); 2168 return (NULL); 2169 } 2170 2171 if (config_stats) { 2172 bin->stats.nmalloc++; 2173 bin->stats.nrequests++; 2174 bin->stats.curregs++; 2175 } 2176 malloc_mutex_unlock(&bin->lock); | 2375 2376 malloc_mutex_lock(&bin->lock); 2377 if ((run = bin->runcur) != NULL && run->nfree > 0) 2378 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); 2379 else 2380 ret = arena_bin_malloc_hard(arena, bin); 2381 2382 if (ret == NULL) { 2383 malloc_mutex_unlock(&bin->lock); 2384 return (NULL); 2385 } 2386 2387 if (config_stats) { 2388 bin->stats.nmalloc++; 2389 bin->stats.nrequests++; 2390 bin->stats.curregs++; 2391 } 2392 malloc_mutex_unlock(&bin->lock); |
2177 if (config_prof && !isthreaded && arena_prof_accum(arena, size)) | 2393 if (config_prof && !isthreaded && arena_prof_accum(arena, usize)) |
2178 prof_idump(); 2179 2180 if (!zero) { 2181 if (config_fill) { 2182 if (unlikely(opt_junk_alloc)) { 2183 arena_alloc_junk_small(ret, 2184 &arena_bin_info[binind], false); 2185 } else if (unlikely(opt_zero)) | 2394 prof_idump(); 2395 2396 if (!zero) { 2397 if (config_fill) { 2398 if (unlikely(opt_junk_alloc)) { 2399 arena_alloc_junk_small(ret, 2400 &arena_bin_info[binind], false); 2401 } else if (unlikely(opt_zero)) |
2186 memset(ret, 0, size); | 2402 memset(ret, 0, usize); |
2187 } | 2403 } |
2188 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); | 2404 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); |
2189 } else { 2190 if (config_fill && unlikely(opt_junk_alloc)) { 2191 arena_alloc_junk_small(ret, &arena_bin_info[binind], 2192 true); 2193 } | 2405 } else { 2406 if (config_fill && unlikely(opt_junk_alloc)) { 2407 arena_alloc_junk_small(ret, &arena_bin_info[binind], 2408 true); 2409 } |
2194 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 2195 memset(ret, 0, size); | 2410 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); 2411 memset(ret, 0, usize); |
2196 } 2197 | 2412 } 2413 |
2414 arena_decay_tick(tsd, arena); |
|
2198 return (ret); 2199} 2200 2201void * | 2415 return (ret); 2416} 2417 2418void * |
2202arena_malloc_large(arena_t *arena, size_t size, bool zero) | 2419arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) |
2203{ 2204 void *ret; 2205 size_t usize; 2206 uintptr_t random_offset; 2207 arena_run_t *run; 2208 arena_chunk_map_misc_t *miscelm; 2209 UNUSED bool idump; 2210 2211 /* Large allocation. */ | 2420{ 2421 void *ret; 2422 size_t usize; 2423 uintptr_t random_offset; 2424 arena_run_t *run; 2425 arena_chunk_map_misc_t *miscelm; 2426 UNUSED bool idump; 2427 2428 /* Large allocation. */ |
2212 usize = s2u(size); | 2429 usize = index2size(binind); |
2213 malloc_mutex_lock(&arena->lock); 2214 if (config_cache_oblivious) { 2215 uint64_t r; 2216 2217 /* 2218 * Compute a uniformly distributed offset within the first page 2219 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 2220 * for 4 KiB pages and 64-byte cachelines. 2221 */ | 2430 malloc_mutex_lock(&arena->lock); 2431 if (config_cache_oblivious) { 2432 uint64_t r; 2433 2434 /* 2435 * Compute a uniformly distributed offset within the first page 2436 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 2437 * for 4 KiB pages and 64-byte cachelines. 2438 */ |
2222 prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state, 2223 UINT64_C(6364136223846793009), 2224 UINT64_C(1442695040888963409)); | 2439 r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE); |
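The open-coded `prng64()` invocation is replaced by `prng_lg_range()`, which hides the LCG constants and yields a value in `[0, 2^lg_range)`; shifting that by `LG_CACHELINE` produces a cacheline-aligned offset strictly within one page. The standalone sketch below shows the general shape of such a helper; the Knuth-style LCG constants and the choice of taking the high state bits are illustrative assumptions, not necessarily jemalloc's exact implementation.

```c
#include <stdint.h>
#include <stdio.h>

#define LG_PAGE		12	/* 4 KiB pages (assumed) */
#define LG_CACHELINE	6	/* 64-byte cache lines (assumed) */

/*
 * Illustrative 64-bit linear congruential generator returning the top
 * lg_range bits of the state, so the result is uniform in [0, 2^lg_range).
 */
static uint64_t
prng_lg_range_sketch(uint64_t *state, unsigned lg_range)
{
	*state = *state * 6364136223846793005ULL + 1442695040888963407ULL;
	return (*state >> (64 - lg_range));
}

int
main(void)
{
	uint64_t offset_state = 42;
	unsigned i;

	for (i = 0; i < 4; i++) {
		uint64_t r = prng_lg_range_sketch(&offset_state,
		    LG_PAGE - LG_CACHELINE);
		uintptr_t random_offset = ((uintptr_t)r) << LG_CACHELINE;

		/* Offset is a cacheline multiple strictly within one page. */
		printf("offset = %zu\n", (size_t)random_offset);
	}
	return (0);
}
```

Taking the high bits of a power-of-two-modulus LCG is the usual choice because its low bits have very short periods.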
2225 random_offset = ((uintptr_t)r) << LG_CACHELINE; 2226 } else 2227 random_offset = 0; 2228 run = arena_run_alloc_large(arena, usize + large_pad, zero); 2229 if (run == NULL) { 2230 malloc_mutex_unlock(&arena->lock); 2231 return (NULL); 2232 } 2233 miscelm = arena_run_to_miscelm(run); 2234 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + 2235 random_offset); 2236 if (config_stats) { | 2440 random_offset = ((uintptr_t)r) << LG_CACHELINE; 2441 } else 2442 random_offset = 0; 2443 run = arena_run_alloc_large(arena, usize + large_pad, zero); 2444 if (run == NULL) { 2445 malloc_mutex_unlock(&arena->lock); 2446 return (NULL); 2447 } 2448 miscelm = arena_run_to_miscelm(run); 2449 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + 2450 random_offset); 2451 if (config_stats) { |
2237 szind_t index = size2index(usize) - NBINS; | 2452 szind_t index = binind - NBINS; |
2238 2239 arena->stats.nmalloc_large++; 2240 arena->stats.nrequests_large++; 2241 arena->stats.allocated_large += usize; 2242 arena->stats.lstats[index].nmalloc++; 2243 arena->stats.lstats[index].nrequests++; 2244 arena->stats.lstats[index].curruns++; 2245 } --- 7 unchanged lines hidden (view full) --- 2253 if (config_fill) { 2254 if (unlikely(opt_junk_alloc)) 2255 memset(ret, 0xa5, usize); 2256 else if (unlikely(opt_zero)) 2257 memset(ret, 0, usize); 2258 } 2259 } 2260 | 2453 2454 arena->stats.nmalloc_large++; 2455 arena->stats.nrequests_large++; 2456 arena->stats.allocated_large += usize; 2457 arena->stats.lstats[index].nmalloc++; 2458 arena->stats.lstats[index].nrequests++; 2459 arena->stats.lstats[index].curruns++; 2460 } --- 7 unchanged lines hidden (view full) --- 2468 if (config_fill) { 2469 if (unlikely(opt_junk_alloc)) 2470 memset(ret, 0xa5, usize); 2471 else if (unlikely(opt_zero)) 2472 memset(ret, 0, usize); 2473 } 2474 } 2475 |
2476 arena_decay_tick(tsd, arena); |
|
2261 return (ret); 2262} 2263 | 2477 return (ret); 2478} 2479 |
2480void * 2481arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, 2482 bool zero, tcache_t *tcache) 2483{ 2484 2485 arena = arena_choose(tsd, arena); 2486 if (unlikely(arena == NULL)) 2487 return (NULL); 2488 2489 if (likely(size <= SMALL_MAXCLASS)) 2490 return (arena_malloc_small(tsd, arena, ind, zero)); 2491 if (likely(size <= large_maxclass)) 2492 return (arena_malloc_large(tsd, arena, ind, zero)); 2493 return (huge_malloc(tsd, arena, index2size(ind), zero, tcache)); 2494} 2495 |
|
2264/* Only handles large allocations that require more than page alignment. */ 2265static void * 2266arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, 2267 bool zero) 2268{ 2269 void *ret; 2270 size_t alloc_size, leadsize, trailsize; 2271 arena_run_t *run; --- 67 unchanged lines hidden (view full) --- 2339 malloc_mutex_unlock(&arena->lock); 2340 2341 if (config_fill && !zero) { 2342 if (unlikely(opt_junk_alloc)) 2343 memset(ret, 0xa5, usize); 2344 else if (unlikely(opt_zero)) 2345 memset(ret, 0, usize); 2346 } | 2496/* Only handles large allocations that require more than page alignment. */ 2497static void * 2498arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, 2499 bool zero) 2500{ 2501 void *ret; 2502 size_t alloc_size, leadsize, trailsize; 2503 arena_run_t *run; --- 67 unchanged lines hidden (view full) --- 2571 malloc_mutex_unlock(&arena->lock); 2572 2573 if (config_fill && !zero) { 2574 if (unlikely(opt_junk_alloc)) 2575 memset(ret, 0xa5, usize); 2576 else if (unlikely(opt_zero)) 2577 memset(ret, 0, usize); 2578 } |
2579 arena_decay_tick(tsd, arena); |
|
2347 return (ret); 2348} 2349 2350void * 2351arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, 2352 bool zero, tcache_t *tcache) 2353{ 2354 void *ret; 2355 2356 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE 2357 && (usize & PAGE_MASK) == 0))) { 2358 /* Small; alignment doesn't require special run placement. */ | 2580 return (ret); 2581} 2582 2583void * 2584arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, 2585 bool zero, tcache_t *tcache) 2586{ 2587 void *ret; 2588 2589 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE 2590 && (usize & PAGE_MASK) == 0))) { 2591 /* Small; alignment doesn't require special run placement. */ |
2359 ret = arena_malloc(tsd, arena, usize, zero, tcache); | 2592 ret = arena_malloc(tsd, arena, usize, size2index(usize), zero, 2593 tcache, true); |
2360 } else if (usize <= large_maxclass && alignment <= PAGE) { 2361 /* 2362 * Large; alignment doesn't require special run placement. 2363 * However, the cached pointer may be at a random offset from 2364 * the base of the run, so do some bit manipulation to retrieve 2365 * the base. 2366 */ | 2594 } else if (usize <= large_maxclass && alignment <= PAGE) { 2595 /* 2596 * Large; alignment doesn't require special run placement. 2597 * However, the cached pointer may be at a random offset from 2598 * the base of the run, so do some bit manipulation to retrieve 2599 * the base. 2600 */ |
2367 ret = arena_malloc(tsd, arena, usize, zero, tcache); | 2601 ret = arena_malloc(tsd, arena, usize, size2index(usize), zero, 2602 tcache, true); |
2368 if (config_cache_oblivious) 2369 ret = (void *)((uintptr_t)ret & ~PAGE_MASK); 2370 } else { 2371 if (likely(usize <= large_maxclass)) { 2372 ret = arena_palloc_large(tsd, arena, usize, alignment, 2373 zero); 2374 } else if (likely(alignment <= chunksize)) 2375 ret = huge_malloc(tsd, arena, usize, zero, tcache); --- 60 unchanged lines hidden (view full) --- 2436 2437 assert(run != bin->runcur); 2438 assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == 2439 NULL); 2440 2441 malloc_mutex_unlock(&bin->lock); 2442 /******************************/ 2443 malloc_mutex_lock(&arena->lock); | 2603 if (config_cache_oblivious) 2604 ret = (void *)((uintptr_t)ret & ~PAGE_MASK); 2605 } else { 2606 if (likely(usize <= large_maxclass)) { 2607 ret = arena_palloc_large(tsd, arena, usize, alignment, 2608 zero); 2609 } else if (likely(alignment <= chunksize)) 2610 ret = huge_malloc(tsd, arena, usize, zero, tcache); --- 60 unchanged lines hidden (view full) --- 2671 2672 assert(run != bin->runcur); 2673 assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == 2674 NULL); 2675 2676 malloc_mutex_unlock(&bin->lock); 2677 /******************************/ 2678 malloc_mutex_lock(&arena->lock); |
2444 arena_run_dalloc_decommit(arena, chunk, run); | 2679 arena_run_dalloc(arena, run, true, false, false); |
2445 malloc_mutex_unlock(&arena->lock); 2446 /****************************/ 2447 malloc_mutex_lock(&bin->lock); 2448 if (config_stats) 2449 bin->stats.curruns--; 2450} 2451 2452static void --- 70 unchanged lines hidden (view full) --- 2523 run = &arena_miscelm_get(chunk, rpages_ind)->run; 2524 bin = &arena->bins[run->binind]; 2525 malloc_mutex_lock(&bin->lock); 2526 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); 2527 malloc_mutex_unlock(&bin->lock); 2528} 2529 2530void | 2680 malloc_mutex_unlock(&arena->lock); 2681 /****************************/ 2682 malloc_mutex_lock(&bin->lock); 2683 if (config_stats) 2684 bin->stats.curruns--; 2685} 2686 2687static void --- 70 unchanged lines hidden (view full) --- 2758 run = &arena_miscelm_get(chunk, rpages_ind)->run; 2759 bin = &arena->bins[run->binind]; 2760 malloc_mutex_lock(&bin->lock); 2761 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); 2762 malloc_mutex_unlock(&bin->lock); 2763} 2764 2765void |
2531arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, | 2766arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr, |
2532 size_t pageind) 2533{ 2534 arena_chunk_map_bits_t *bitselm; 2535 2536 if (config_debug) { 2537 /* arena_ptr_small_binind_get() does extra sanity checking. */ 2538 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, 2539 pageind)) != BININD_INVALID); 2540 } 2541 bitselm = arena_bitselm_get(chunk, pageind); 2542 arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); | 2767 size_t pageind) 2768{ 2769 arena_chunk_map_bits_t *bitselm; 2770 2771 if (config_debug) { 2772 /* arena_ptr_small_binind_get() does extra sanity checking. */ 2773 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, 2774 pageind)) != BININD_INVALID); 2775 } 2776 bitselm = arena_bitselm_get(chunk, pageind); 2777 arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); |
2778 arena_decay_tick(tsd, arena); |
|
2543} 2544 2545#ifdef JEMALLOC_JET 2546#undef arena_dalloc_junk_large 2547#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) 2548#endif 2549void 2550arena_dalloc_junk_large(void *ptr, size_t usize) --- 28 unchanged lines hidden (view full) --- 2579 2580 arena->stats.ndalloc_large++; 2581 arena->stats.allocated_large -= usize; 2582 arena->stats.lstats[index].ndalloc++; 2583 arena->stats.lstats[index].curruns--; 2584 } 2585 } 2586 | 2779} 2780 2781#ifdef JEMALLOC_JET 2782#undef arena_dalloc_junk_large 2783#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) 2784#endif 2785void 2786arena_dalloc_junk_large(void *ptr, size_t usize) --- 28 unchanged lines hidden (view full) --- 2815 2816 arena->stats.ndalloc_large++; 2817 arena->stats.allocated_large -= usize; 2818 arena->stats.lstats[index].ndalloc++; 2819 arena->stats.lstats[index].curruns--; 2820 } 2821 } 2822 |
2587 arena_run_dalloc_decommit(arena, chunk, run); | 2823 arena_run_dalloc(arena, run, true, false, false); |
2588} 2589 2590void 2591arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, 2592 void *ptr) 2593{ 2594 2595 arena_dalloc_large_locked_impl(arena, chunk, ptr, true); 2596} 2597 2598void | 2824} 2825 2826void 2827arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, 2828 void *ptr) 2829{ 2830 2831 arena_dalloc_large_locked_impl(arena, chunk, ptr, true); 2832} 2833 2834void |
2599arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) | 2835arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr) |
2600{ 2601 2602 malloc_mutex_lock(&arena->lock); 2603 arena_dalloc_large_locked_impl(arena, chunk, ptr, false); 2604 malloc_mutex_unlock(&arena->lock); | 2836{ 2837 2838 malloc_mutex_lock(&arena->lock); 2839 arena_dalloc_large_locked_impl(arena, chunk, ptr, false); 2840 malloc_mutex_unlock(&arena->lock); |
2841 arena_decay_tick(tsd, arena); |
|
2605} 2606 2607static void 2608arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2609 size_t oldsize, size_t size) 2610{ 2611 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2612 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); --- 184 unchanged lines hidden (view full) --- 2797 assert(oldsize > usize_max); 2798 /* Fill before shrinking in order avoid a race. */ 2799 arena_ralloc_junk_large(ptr, oldsize, usize_max); 2800 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max); 2801 return (false); 2802} 2803 2804bool | 2842} 2843 2844static void 2845arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2846 size_t oldsize, size_t size) 2847{ 2848 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2849 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); --- 184 unchanged lines hidden (view full) --- 3034 assert(oldsize > usize_max); 3035 /* Fill before shrinking in order avoid a race. */ 3036 arena_ralloc_junk_large(ptr, oldsize, usize_max); 3037 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max); 3038 return (false); 3039} 3040 3041bool |
2805arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, 2806 bool zero) | 3042arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, 3043 size_t extra, bool zero) |
2807{ 2808 size_t usize_min, usize_max; 2809 | 3044{ 3045 size_t usize_min, usize_max; 3046 |
3047 /* Calls with non-zero extra had to clamp extra. */ 3048 assert(extra == 0 || size + extra <= HUGE_MAXCLASS); 3049 3050 if (unlikely(size > HUGE_MAXCLASS)) 3051 return (true); 3052 |
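`arena_ralloc_no_move()` now fails fast for requests above `HUGE_MAXCLASS` and asserts that callers passing nonzero `extra` have already clamped it so `size + extra` cannot overflow; previously such overflow had to be caught indirectly (for example via `s2u()` returning 0). Below is a hedged sketch of the clamp a caller would presumably perform; `HUGE_MAXCLASS_SKETCH` and `clamp_extra()` are made-up names for illustration only.

```c
#include <assert.h>
#include <stddef.h>

/* Illustrative bound standing in for jemalloc's HUGE_MAXCLASS. */
#define HUGE_MAXCLASS_SKETCH	((size_t)1 << 30)

/*
 * Hypothetical caller-side clamp: shrink extra so that size + extra can
 * neither exceed the largest size class nor wrap around, matching the
 * assertion the resize path now makes about its callers.
 */
static size_t
clamp_extra(size_t size, size_t extra)
{
	if (size > HUGE_MAXCLASS_SKETCH)
		return (0);	/* the request will be rejected outright */
	if (extra > HUGE_MAXCLASS_SKETCH - size)
		extra = HUGE_MAXCLASS_SKETCH - size;
	assert(extra == 0 || size + extra <= HUGE_MAXCLASS_SKETCH);
	return (extra);
}

int
main(void)
{
	size_t size = ((size_t)1 << 30) - 4096;
	size_t extra = clamp_extra(size, (size_t)1 << 20);

	return (size + extra <= HUGE_MAXCLASS_SKETCH ? 0 : 1);
}
```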
|
2810 usize_min = s2u(size); 2811 usize_max = s2u(size + extra); 2812 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { | 3053 usize_min = s2u(size); 3054 usize_max = s2u(size + extra); 3055 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { |
3056 arena_chunk_t *chunk; 3057 |
|
2813 /* 2814 * Avoid moving the allocation if the size class can be left the 2815 * same. 2816 */ 2817 if (oldsize <= SMALL_MAXCLASS) { 2818 assert(arena_bin_info[size2index(oldsize)].reg_size == 2819 oldsize); | 3058 /* 3059 * Avoid moving the allocation if the size class can be left the 3060 * same. 3061 */ 3062 if (oldsize <= SMALL_MAXCLASS) { 3063 assert(arena_bin_info[size2index(oldsize)].reg_size == 3064 oldsize); |
2820 if ((usize_max <= SMALL_MAXCLASS && 2821 size2index(usize_max) == size2index(oldsize)) || 2822 (size <= oldsize && usize_max >= oldsize)) 2823 return (false); | 3065 if ((usize_max > SMALL_MAXCLASS || 3066 size2index(usize_max) != size2index(oldsize)) && 3067 (size > oldsize || usize_max < oldsize)) 3068 return (true); |
2824 } else { | 3069 } else { |
2825 if (usize_max > SMALL_MAXCLASS) { 2826 if (!arena_ralloc_large(ptr, oldsize, usize_min, 2827 usize_max, zero)) 2828 return (false); 2829 } | 3070 if (usize_max <= SMALL_MAXCLASS) 3071 return (true); 3072 if (arena_ralloc_large(ptr, oldsize, usize_min, 3073 usize_max, zero)) 3074 return (true); |
2830 } 2831 | 3075 } 3076 |
2832 /* Reallocation would require a move. */ 2833 return (true); | 3077 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 3078 arena_decay_tick(tsd, extent_node_arena_get(&chunk->node)); 3079 return (false); |
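This hunk inverts the small/large in-place control flow: the old code returned `false` (resized in place) as soon as the size class could stay the same, while the new code returns `true` (must move) when it cannot, then falls through to a shared tail that records a decay tick before reporting success. The two conditions are De Morgan complements, which the short check below verifies exhaustively; the predicate names are abstractions of the real expressions, not jemalloc identifiers.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-ins for the real subexpressions; any truth assignment works for
 * checking that the old success condition and the new failure condition
 * are exact complements of each other.
 */
static bool
old_can_stay(bool same_class, bool not_growing, bool not_shrinking_past)
{
	/* old: succeed if class unchanged, or oldsize lies in [size, max] */
	return (same_class || (not_growing && not_shrinking_past));
}

static bool
new_must_move(bool same_class, bool not_growing, bool not_shrinking_past)
{
	/* new: fail if class changed and the size range excludes oldsize */
	return (!same_class && (!not_growing || !not_shrinking_past));
}

int
main(void)
{
	int a, b, c;

	for (a = 0; a < 2; a++) {
		for (b = 0; b < 2; b++) {
			for (c = 0; c < 2; c++) {
				assert(old_can_stay(a, b, c) ==
				    !new_must_move(a, b, c));
			}
		}
	}
	printf("predicates are complementary\n");
	return (0);
}
```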
2834 } else { | 3080 } else { |
2835 return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max, 2836 zero)); | 3081 return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min, 3082 usize_max, zero)); |
2837 } 2838} 2839 2840static void * 2841arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, 2842 size_t alignment, bool zero, tcache_t *tcache) 2843{ 2844 2845 if (alignment == 0) | 3083 } 3084} 3085 3086static void * 3087arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, 3088 size_t alignment, bool zero, tcache_t *tcache) 3089{ 3090 3091 if (alignment == 0) |
2846 return (arena_malloc(tsd, arena, usize, zero, tcache)); | 3092 return (arena_malloc(tsd, arena, usize, size2index(usize), zero, 3093 tcache, true)); |
2847 usize = sa2u(usize, alignment); | 3094 usize = sa2u(usize, alignment); |
2848 if (usize == 0) | 3095 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) |
2849 return (NULL); 2850 return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); 2851} 2852 2853void * 2854arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, 2855 size_t alignment, bool zero, tcache_t *tcache) 2856{ 2857 void *ret; 2858 size_t usize; 2859 2860 usize = s2u(size); | 3096 return (NULL); 3097 return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); 3098} 3099 3100void * 3101arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, 3102 size_t alignment, bool zero, tcache_t *tcache) 3103{ 3104 void *ret; 3105 size_t usize; 3106 3107 usize = s2u(size); |
2861 if (usize == 0) | 3108 if (unlikely(usize == 0 || size > HUGE_MAXCLASS)) |
2862 return (NULL); 2863 2864 if (likely(usize <= large_maxclass)) { 2865 size_t copysize; 2866 2867 /* Try to avoid moving the allocation. */ | 3109 return (NULL); 3110 3111 if (likely(usize <= large_maxclass)) { 3112 size_t copysize; 3113 3114 /* Try to avoid moving the allocation. */ |
2868 if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero)) | 3115 if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero)) |
2869 return (ptr); 2870 2871 /* 2872 * size and oldsize are different enough that we need to move 2873 * the object. In that case, fall back to allocating new space 2874 * and copying. 2875 */ 2876 ret = arena_ralloc_move_helper(tsd, arena, usize, alignment, --- 46 unchanged lines hidden (view full) --- 2923 2924 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); 2925} 2926 2927bool 2928arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) 2929{ 2930 | 3116 return (ptr); 3117 3118 /* 3119 * size and oldsize are different enough that we need to move 3120 * the object. In that case, fall back to allocating new space 3121 * and copying. 3122 */ 3123 ret = arena_ralloc_move_helper(tsd, arena, usize, alignment, --- 46 unchanged lines hidden (view full) --- 3170 3171 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); 3172} 3173 3174bool 3175arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) 3176{ 3177 |
3178 if (opt_purge != purge_mode_ratio) 3179 return (true); |
|
2931 if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) 2932 return (true); 2933 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); 2934 return (false); 2935} 2936 | 3180 if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) 3181 return (true); 3182 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); 3183 return (false); 3184} 3185 |
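With the introduction of the decay purge mode, each default-setting knob is now gated on the active mode: `arena_lg_dirty_mult_default_set()` fails unless ratio-based purging is selected, and the new `arena_decay_time_default_set()` in the rows just below fails unless decay-based purging is selected. Here is a small self-contained mirror of that pattern, using the convention visible in the diff that setters return `true` on failure; all `_sketch` names are hypothetical.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative mirror of the two purging modes and their guarded defaults. */
typedef enum {
	purge_mode_ratio_sketch,
	purge_mode_decay_sketch
} purge_mode_sketch_t;

static purge_mode_sketch_t opt_purge_sketch = purge_mode_ratio_sketch;
static long lg_dirty_mult_default_sketch = 3;
static long decay_time_default_sketch = 10;

/* Each knob only accepts a new default when its purge mode is active. */
static bool
lg_dirty_mult_default_set_sketch(long v)
{
	if (opt_purge_sketch != purge_mode_ratio_sketch)
		return (true);	/* failure */
	lg_dirty_mult_default_sketch = v;
	return (false);
}

static bool
decay_time_default_set_sketch(long v)
{
	if (opt_purge_sketch != purge_mode_decay_sketch)
		return (true);	/* failure */
	decay_time_default_sketch = v;
	return (false);
}

int
main(void)
{
	printf("set lg_dirty_mult: %s\n",
	    lg_dirty_mult_default_set_sketch(4) ? "rejected" : "ok");
	printf("set decay_time: %s\n",
	    decay_time_default_set_sketch(30) ? "rejected" : "ok");
	return (0);
}
```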
2937void 2938arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, 2939 size_t *nactive, size_t *ndirty, arena_stats_t *astats, 2940 malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, 2941 malloc_huge_stats_t *hstats) | 3186ssize_t 3187arena_decay_time_default_get(void) |
2942{ | 3188{ |
2943 unsigned i; | |
2944 | 3189 |
2945 malloc_mutex_lock(&arena->lock); | 3190 return ((ssize_t)atomic_read_z((size_t *)&decay_time_default)); 3191} 3192 3193bool 3194arena_decay_time_default_set(ssize_t decay_time) 3195{ 3196 3197 if (opt_purge != purge_mode_decay) 3198 return (true); 3199 if (!arena_decay_time_valid(decay_time)) 3200 return (true); 3201 atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time); 3202 return (false); 3203} 3204 3205static void 3206arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads, 3207 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, 3208 size_t *nactive, size_t *ndirty) 3209{ 3210 3211 *nthreads += arena_nthreads_get(arena); |
2946 *dss = dss_prec_names[arena->dss_prec]; 2947 *lg_dirty_mult = arena->lg_dirty_mult; | 3212 *dss = dss_prec_names[arena->dss_prec]; 3213 *lg_dirty_mult = arena->lg_dirty_mult; |
3214 *decay_time = arena->decay_time; |
|
2948 *nactive += arena->nactive; 2949 *ndirty += arena->ndirty; | 3215 *nactive += arena->nactive; 3216 *ndirty += arena->ndirty; |
3217} |
|
2950 | 3218 |
3219void 3220arena_basic_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss, 3221 ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, 3222 size_t *ndirty) 3223{ 3224 3225 malloc_mutex_lock(&arena->lock); 3226 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, 3227 decay_time, nactive, ndirty); 3228 malloc_mutex_unlock(&arena->lock); 3229} 3230 3231void 3232arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss, 3233 ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, 3234 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, 3235 malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats) 3236{ 3237 unsigned i; 3238 3239 cassert(config_stats); 3240 3241 malloc_mutex_lock(&arena->lock); 3242 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, 3243 decay_time, nactive, ndirty); 3244 |
|
2951 astats->mapped += arena->stats.mapped; 2952 astats->npurge += arena->stats.npurge; 2953 astats->nmadvise += arena->stats.nmadvise; 2954 astats->purged += arena->stats.purged; 2955 astats->metadata_mapped += arena->stats.metadata_mapped; 2956 astats->metadata_allocated += arena_metadata_allocated_get(arena); 2957 astats->allocated_large += arena->stats.allocated_large; 2958 astats->nmalloc_large += arena->stats.nmalloc_large; --- 31 unchanged lines hidden (view full) --- 2990 } 2991 bstats[i].nruns += bin->stats.nruns; 2992 bstats[i].reruns += bin->stats.reruns; 2993 bstats[i].curruns += bin->stats.curruns; 2994 malloc_mutex_unlock(&bin->lock); 2995 } 2996} 2997 | 3245 astats->mapped += arena->stats.mapped; 3246 astats->npurge += arena->stats.npurge; 3247 astats->nmadvise += arena->stats.nmadvise; 3248 astats->purged += arena->stats.purged; 3249 astats->metadata_mapped += arena->stats.metadata_mapped; 3250 astats->metadata_allocated += arena_metadata_allocated_get(arena); 3251 astats->allocated_large += arena->stats.allocated_large; 3252 astats->nmalloc_large += arena->stats.nmalloc_large; --- 31 unchanged lines hidden (view full) --- 3284 } 3285 bstats[i].nruns += bin->stats.nruns; 3286 bstats[i].reruns += bin->stats.reruns; 3287 bstats[i].curruns += bin->stats.curruns; 3288 malloc_mutex_unlock(&bin->lock); 3289 } 3290} 3291 |
3292unsigned 3293arena_nthreads_get(arena_t *arena) 3294{ 3295 3296 return (atomic_read_u(&arena->nthreads)); 3297} 3298 3299void 3300arena_nthreads_inc(arena_t *arena) 3301{ 3302 3303 atomic_add_u(&arena->nthreads, 1); 3304} 3305 3306void 3307arena_nthreads_dec(arena_t *arena) 3308{ 3309 3310 atomic_sub_u(&arena->nthreads, 1); 3311} 3312 |
|
2998arena_t * 2999arena_new(unsigned ind) 3000{ 3001 arena_t *arena; | 3313arena_t * 3314arena_new(unsigned ind) 3315{ 3316 arena_t *arena; |
3317 size_t arena_size; |
|
3002 unsigned i; 3003 arena_bin_t *bin; 3004 | 3318 unsigned i; 3319 arena_bin_t *bin; 3320 |
3321 /* Compute arena size to incorporate sufficient runs_avail elements. */ 3322 arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_tree_t) * 3323 runs_avail_nclasses); |
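`arena_new()` now computes the header size at runtime because `runs_avail` has become a trailing array with one tree per quantized run size class (initialized in the loop further down), so the allocation must cover `offsetof(arena_t, runs_avail)` plus `runs_avail_nclasses` elements. Below is a minimal standalone example of this trailing-array allocation pattern; the types and names are generic stand-ins, not jemalloc's.

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for one runs_avail tree. */
typedef struct {
	void *root;
} tree_t;

/*
 * Header whose last member is a flexible array; the number of elements is
 * only known at boot time, so the allocation size is computed with offsetof.
 */
typedef struct {
	unsigned ind;
	size_t nclasses;
	tree_t runs_avail[];	/* flexible array member */
} arena_like_t;

static arena_like_t *
arena_like_new(unsigned ind, size_t nclasses)
{
	size_t size = offsetof(arena_like_t, runs_avail) +
	    sizeof(tree_t) * nclasses;
	arena_like_t *a = malloc(size);

	if (a == NULL)
		return (NULL);
	memset(a, 0, size);
	a->ind = ind;
	a->nclasses = nclasses;
	return (a);
}

int
main(void)
{
	arena_like_t *a = arena_like_new(0, 16);

	if (a == NULL)
		return (1);
	printf("allocated %zu trees\n", a->nclasses);
	free(a);
	return (0);
}
```

The same computed `arena_size` then replaces `sizeof(arena_t)` in the `CACHELINE_CEILING`/`QUANTUM_CEILING` offsets used to carve out the stats arrays in the rows that follow.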
|
3005 /* 3006 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly 3007 * because there is no way to clean up if base_alloc() OOMs. 3008 */ 3009 if (config_stats) { | 3324 /* 3325 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly 3326 * because there is no way to clean up if base_alloc() OOMs. 3327 */ 3328 if (config_stats) { |
3010 arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t)) 3011 + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + | 3329 arena = (arena_t *)base_alloc(CACHELINE_CEILING(arena_size) + 3330 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + |
3012 nhclasses) * sizeof(malloc_huge_stats_t)); 3013 } else | 3331 nhclasses) * sizeof(malloc_huge_stats_t)); 3332 } else |
3014 arena = (arena_t *)base_alloc(sizeof(arena_t)); | 3333 arena = (arena_t *)base_alloc(arena_size); |
3015 if (arena == NULL) 3016 return (NULL); 3017 3018 arena->ind = ind; 3019 arena->nthreads = 0; 3020 if (malloc_mutex_init(&arena->lock)) 3021 return (NULL); 3022 3023 if (config_stats) { 3024 memset(&arena->stats, 0, sizeof(arena_stats_t)); 3025 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena | 3334 if (arena == NULL) 3335 return (NULL); 3336 3337 arena->ind = ind; 3338 arena->nthreads = 0; 3339 if (malloc_mutex_init(&arena->lock)) 3340 return (NULL); 3341 3342 if (config_stats) { 3343 memset(&arena->stats, 0, sizeof(arena_stats_t)); 3344 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena |
3026 + CACHELINE_CEILING(sizeof(arena_t))); | 3345 + CACHELINE_CEILING(arena_size)); |
3027 memset(arena->stats.lstats, 0, nlclasses * 3028 sizeof(malloc_large_stats_t)); 3029 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena | 3346 memset(arena->stats.lstats, 0, nlclasses * 3347 sizeof(malloc_large_stats_t)); 3348 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena |
3030 + CACHELINE_CEILING(sizeof(arena_t)) + | 3349 + CACHELINE_CEILING(arena_size) + |
3031 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); 3032 memset(arena->stats.hstats, 0, nhclasses * 3033 sizeof(malloc_huge_stats_t)); 3034 if (config_tcache) 3035 ql_new(&arena->tcache_ql); 3036 } 3037 3038 if (config_prof) --- 15 unchanged lines hidden (view full) --- 3054 3055 arena->spare = NULL; 3056 3057 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); 3058 arena->purging = false; 3059 arena->nactive = 0; 3060 arena->ndirty = 0; 3061 | 3350 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); 3351 memset(arena->stats.hstats, 0, nhclasses * 3352 sizeof(malloc_huge_stats_t)); 3353 if (config_tcache) 3354 ql_new(&arena->tcache_ql); 3355 } 3356 3357 if (config_prof) --- 15 unchanged lines hidden (view full) --- 3373 3374 arena->spare = NULL; 3375 3376 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); 3377 arena->purging = false; 3378 arena->nactive = 0; 3379 arena->ndirty = 0; 3380 |
3062 arena_avail_tree_new(&arena->runs_avail); | 3381 for(i = 0; i < runs_avail_nclasses; i++) 3382 arena_run_tree_new(&arena->runs_avail[i]); |
3063 qr_new(&arena->runs_dirty, rd_link); 3064 qr_new(&arena->chunks_cache, cc_link); 3065 | 3383 qr_new(&arena->runs_dirty, rd_link); 3384 qr_new(&arena->chunks_cache, cc_link); 3385 |
3386 if (opt_purge == purge_mode_decay) 3387 arena_decay_init(arena, arena_decay_time_default_get()); 3388 |
|
3066 ql_new(&arena->huge); 3067 if (malloc_mutex_init(&arena->huge_mtx)) 3068 return (NULL); 3069 3070 extent_tree_szad_new(&arena->chunks_szad_cached); 3071 extent_tree_ad_new(&arena->chunks_ad_cached); 3072 extent_tree_szad_new(&arena->chunks_szad_retained); 3073 extent_tree_ad_new(&arena->chunks_ad_retained); --- 38 unchanged lines hidden (view full) --- 3112 /* 3113 * Determine redzone size based on minimum alignment and minimum 3114 * redzone size. Add padding to the end of the run if it is needed to 3115 * align the regions. The padding allows each redzone to be half the 3116 * minimum alignment; without the padding, each redzone would have to 3117 * be twice as large in order to maintain alignment. 3118 */ 3119 if (config_fill && unlikely(opt_redzone)) { | 3389 ql_new(&arena->huge); 3390 if (malloc_mutex_init(&arena->huge_mtx)) 3391 return (NULL); 3392 3393 extent_tree_szad_new(&arena->chunks_szad_cached); 3394 extent_tree_ad_new(&arena->chunks_ad_cached); 3395 extent_tree_szad_new(&arena->chunks_szad_retained); 3396 extent_tree_ad_new(&arena->chunks_ad_retained); --- 38 unchanged lines hidden (view full) --- 3435 /* 3436 * Determine redzone size based on minimum alignment and minimum 3437 * redzone size. Add padding to the end of the run if it is needed to 3438 * align the regions. The padding allows each redzone to be half the 3439 * minimum alignment; without the padding, each redzone would have to 3440 * be twice as large in order to maintain alignment. 3441 */ 3442 if (config_fill && unlikely(opt_redzone)) { |
3120 size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - 3121 1); | 3443 size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1); |
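`jemalloc_ffs()` (which operates on `int`) is replaced here by `ffs_zu()` so the find-first-set matches the width of `reg_size`; the result feeds the `ZU(1) << (ffs - 1)` idiom that derives a region's minimum natural alignment from its lowest set size bit. A portable sketch of that idiom with a naive find-first-set follows; `ffs_zu_sketch` is a made-up name and the real helper's implementation may differ.

```c
#include <stdio.h>

/*
 * Illustrative find-first-set over size_t (1-based, 0 when no bits are set),
 * standing in for a width-matched ffs helper.
 */
static unsigned
ffs_zu_sketch(size_t x)
{
	unsigned bit;

	if (x == 0)
		return (0);
	for (bit = 1; (x & 1) == 0; bit++)
		x >>= 1;
	return (bit);
}

int
main(void)
{
	size_t reg_size = 96;	/* example small-class region size */
	size_t align_min = (size_t)1 << (ffs_zu_sketch(reg_size) - 1);

	/* 96 = 32 * 3, so its guaranteed alignment is 32 bytes. */
	printf("align_min for %zu is %zu\n", reg_size, align_min);
	return (0);
}
```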
3122 if (align_min <= REDZONE_MINSIZE) { 3123 bin_info->redzone_size = REDZONE_MINSIZE; 3124 pad_size = 0; 3125 } else { 3126 bin_info->redzone_size = align_min >> 1; 3127 pad_size = bin_info->redzone_size; 3128 } 3129 } else { 3130 bin_info->redzone_size = 0; 3131 pad_size = 0; 3132 } 3133 bin_info->reg_interval = bin_info->reg_size + 3134 (bin_info->redzone_size << 1); 3135 3136 /* 3137 * Compute run size under ideal conditions (no redzones, no limit on run 3138 * size). 3139 */ 3140 try_run_size = PAGE; | 3444 if (align_min <= REDZONE_MINSIZE) { 3445 bin_info->redzone_size = REDZONE_MINSIZE; 3446 pad_size = 0; 3447 } else { 3448 bin_info->redzone_size = align_min >> 1; 3449 pad_size = bin_info->redzone_size; 3450 } 3451 } else { 3452 bin_info->redzone_size = 0; 3453 pad_size = 0; 3454 } 3455 bin_info->reg_interval = bin_info->reg_size + 3456 (bin_info->redzone_size << 1); 3457 3458 /* 3459 * Compute run size under ideal conditions (no redzones, no limit on run 3460 * size). 3461 */ 3462 try_run_size = PAGE; |
3141 try_nregs = try_run_size / bin_info->reg_size; | 3463 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); |
3142 do { 3143 perfect_run_size = try_run_size; 3144 perfect_nregs = try_nregs; 3145 3146 try_run_size += PAGE; | 3464 do { 3465 perfect_run_size = try_run_size; 3466 perfect_nregs = try_nregs; 3467 3468 try_run_size += PAGE; |
3147 try_nregs = try_run_size / bin_info->reg_size; | 3469 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); |
3148 } while (perfect_run_size != perfect_nregs * bin_info->reg_size); 3149 assert(perfect_nregs <= RUN_MAXREGS); 3150 3151 actual_run_size = perfect_run_size; | 3470 } while (perfect_run_size != perfect_nregs * bin_info->reg_size); 3471 assert(perfect_nregs <= RUN_MAXREGS); 3472 3473 actual_run_size = perfect_run_size; |
3152 actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval; | 3474 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3475 bin_info->reg_interval); |
3153 3154 /* 3155 * Redzones can require enough padding that not even a single region can 3156 * fit within the number of pages that would normally be dedicated to a 3157 * run for this size class. Increase the run size until at least one 3158 * region fits. 3159 */ 3160 while (actual_nregs == 0) { 3161 assert(config_fill && unlikely(opt_redzone)); 3162 3163 actual_run_size += PAGE; | 3476 3477 /* 3478 * Redzones can require enough padding that not even a single region can 3479 * fit within the number of pages that would normally be dedicated to a 3480 * run for this size class. Increase the run size until at least one 3481 * region fits. 3482 */ 3483 while (actual_nregs == 0) { 3484 assert(config_fill && unlikely(opt_redzone)); 3485 3486 actual_run_size += PAGE; |
3164 actual_nregs = (actual_run_size - pad_size) / 3165 bin_info->reg_interval; | 3487 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3488 bin_info->reg_interval); |
3166 } 3167 3168 /* 3169 * Make sure that the run will fit within an arena chunk. 3170 */ 3171 while (actual_run_size > arena_maxrun) { 3172 actual_run_size -= PAGE; | 3489 } 3490 3491 /* 3492 * Make sure that the run will fit within an arena chunk. 3493 */ 3494 while (actual_run_size > arena_maxrun) { 3495 actual_run_size -= PAGE; |
3173 actual_nregs = (actual_run_size - pad_size) / 3174 bin_info->reg_interval; | 3496 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3497 bin_info->reg_interval); |
3175 } 3176 assert(actual_nregs > 0); 3177 assert(actual_run_size == s2u(actual_run_size)); 3178 3179 /* Copy final settings. */ 3180 bin_info->run_size = actual_run_size; 3181 bin_info->nregs = actual_nregs; | 3498 } 3499 assert(actual_nregs > 0); 3500 assert(actual_run_size == s2u(actual_run_size)); 3501 3502 /* Copy final settings. */ 3503 bin_info->run_size = actual_run_size; 3504 bin_info->nregs = actual_nregs; |
3182 bin_info->reg0_offset = actual_run_size - (actual_nregs * 3183 bin_info->reg_interval) - pad_size + bin_info->redzone_size; | 3505 bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs * 3506 bin_info->reg_interval) - pad_size + bin_info->redzone_size); |
3184 3185 if (actual_run_size > small_maxrun) 3186 small_maxrun = actual_run_size; 3187 3188 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs 3189 * bin_info->reg_interval) + pad_size == bin_info->run_size); 3190} 3191 --- 37 unchanged lines hidden (view full) --- 3229 SIZE_CLASSES 3230#undef TAB_INIT_bin_yes 3231#undef TAB_INIT_bin_no 3232#undef SC 3233 3234 return (false); 3235} 3236 | 3507 3508 if (actual_run_size > small_maxrun) 3509 small_maxrun = actual_run_size; 3510 3511 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs 3512 * bin_info->reg_interval) + pad_size == bin_info->run_size); 3513} 3514 --- 37 unchanged lines hidden (view full) --- 3552 SIZE_CLASSES 3553#undef TAB_INIT_bin_yes 3554#undef TAB_INIT_bin_no 3555#undef SC 3556 3557 return (false); 3558} 3559 |
3560static bool 3561run_quantize_init(void) 3562{ 3563 unsigned i; 3564 3565 run_quantize_max = chunksize + large_pad; 3566 3567 run_quantize_floor_tab = (size_t *)base_alloc(sizeof(size_t) * 3568 (run_quantize_max >> LG_PAGE)); 3569 if (run_quantize_floor_tab == NULL) 3570 return (true); 3571 3572 run_quantize_ceil_tab = (size_t *)base_alloc(sizeof(size_t) * 3573 (run_quantize_max >> LG_PAGE)); 3574 if (run_quantize_ceil_tab == NULL) 3575 return (true); 3576 3577 for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) { 3578 size_t run_size = i << LG_PAGE; 3579 3580 run_quantize_floor_tab[i-1] = 3581 run_quantize_floor_compute(run_size); 3582 run_quantize_ceil_tab[i-1] = 3583 run_quantize_ceil_compute(run_size); 3584 } 3585 3586 return (false); 3587} 3588 |
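The new `run_quantize_init()` memoizes `run_quantize_floor()`/`run_quantize_ceil()` for every page multiple up to `chunksize + large_pad`, so later quantization presumably reduces to a table lookup at index `(size >> LG_PAGE) - 1`, matching how the tables are populated above. Below is a generic sketch of the same page-granular memoization; `quantize_compute()` merely rounds down to a power-of-two page count and stands in for the real size-class-aware computation.

```c
#include <stdio.h>
#include <stdlib.h>

#define LG_PAGE	12
#define PAGE	((size_t)1 << LG_PAGE)

static size_t *quantize_tab;
static size_t quantize_max;

/*
 * Placeholder: round a run size down to a power-of-two number of pages.
 * The real computation rounds to jemalloc's size-class spacing instead.
 */
static size_t
quantize_compute(size_t run_size)
{
	size_t npages = run_size >> LG_PAGE;

	while (npages & (npages - 1))
		npages &= npages - 1;	/* clear lowest set bit */
	return (npages << LG_PAGE);
}

static int
quantize_init(size_t max)
{
	size_t i;

	quantize_max = max;
	quantize_tab = malloc(sizeof(size_t) * (max >> LG_PAGE));
	if (quantize_tab == NULL)
		return (1);
	for (i = 1; i <= max >> LG_PAGE; i++)
		quantize_tab[i-1] = quantize_compute(i << LG_PAGE);
	return (0);
}

static size_t
quantize_floor_lookup(size_t run_size)
{
	/* run_size must be a nonzero page multiple <= quantize_max. */
	return (quantize_tab[(run_size >> LG_PAGE) - 1]);
}

int
main(void)
{
	if (quantize_init(64 * PAGE))
		return (1);
	printf("%zu -> %zu\n", 7 * PAGE, quantize_floor_lookup(7 * PAGE));
	free(quantize_tab);
	return (0);
}
```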
|
3237bool 3238arena_boot(void) 3239{ 3240 unsigned i; 3241 3242 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); | 3589bool 3590arena_boot(void) 3591{ 3592 unsigned i; 3593 3594 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); |
3595 arena_decay_time_default_set(opt_decay_time); |
|
3243 3244 /* 3245 * Compute the header size such that it is large enough to contain the 3246 * page map. The page map is biased to omit entries for the header 3247 * itself, so some iteration is necessary to compute the map bias. 3248 * 3249 * 1) Compute safe header_size and map_bias values that include enough 3250 * space for an unbiased page map. --- 25 unchanged lines hidden (view full) --- 3276 */ 3277 large_maxclass = arena_maxrun; 3278 } 3279 assert(large_maxclass > 0); 3280 nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS); 3281 nhclasses = NSIZES - nlclasses - NBINS; 3282 3283 bin_info_init(); | 3596 3597 /* 3598 * Compute the header size such that it is large enough to contain the 3599 * page map. The page map is biased to omit entries for the header 3600 * itself, so some iteration is necessary to compute the map bias. 3601 * 3602 * 1) Compute safe header_size and map_bias values that include enough 3603 * space for an unbiased page map. --- 25 unchanged lines hidden (view full) --- 3629 */ 3630 large_maxclass = arena_maxrun; 3631 } 3632 assert(large_maxclass > 0); 3633 nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS); 3634 nhclasses = NSIZES - nlclasses - NBINS; 3635 3636 bin_info_init(); |
3284 return (small_run_size_init()); | 3637 if (small_run_size_init()) 3638 return (true); 3639 if (run_quantize_init()) 3640 return (true); 3641 3642 runs_avail_bias = size2index(PAGE); 3643 runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias; 3644 3645 return (false); |
3285} 3286 3287void 3288arena_prefork(arena_t *arena) 3289{ 3290 unsigned i; 3291 3292 malloc_mutex_prefork(&arena->lock); --- 32 unchanged lines hidden --- | 3646} 3647 3648void 3649arena_prefork(arena_t *arena) 3650{ 3651 unsigned i; 3652 3653 malloc_mutex_prefork(&arena->lock); --- 32 unchanged lines hidden --- |