#define	JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

rtree_t		extents_rtree;

static void	*extent_alloc_default(extent_hooks_t *extent_hooks,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind);
static bool	extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static bool	extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool	extent_decommit_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool	extent_purge_lazy_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
#ifdef PAGES_CAN_PURGE_FORCED
static bool	extent_purge_forced_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
#ifdef JEMALLOC_MAPS_COALESCE
static bool	extent_split_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t size_a, size_t size_b, bool committed,
    unsigned arena_ind);
static bool	extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
    size_t size_a, void *addr_b, size_t size_b, bool committed,
    unsigned arena_ind);
#endif

const extent_hooks_t	extent_hooks_default = {
	extent_alloc_default,
	extent_dalloc_default,
	extent_commit_default,
	extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
	,
	extent_purge_lazy_default
#else
	,
	NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	,
	extent_purge_forced_default
#else
	,
	NULL
#endif
#ifdef JEMALLOC_MAPS_COALESCE
	,
	extent_split_default,
	extent_merge_default
#endif
};

/* Used exclusively for gdump triggering. */
static size_t	curpages;
static size_t	highpages;
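
/*
 * Note (editorial, describing the code below): curpages tracks the number of
 * pages in all active extents, and highpages is the running high-water mark.
 * extent_gprof_add() raises both and may trigger a gdump; extent_gprof_sub()
 * only lowers curpages, so highpages is monotonically nondecreasing.
 */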

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_heap_t extent_heaps[NPSIZES+1],
    bool cache, extent_t *extent);

/******************************************************************************/

extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena)
{
	extent_t *extent;

	malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
	extent = ql_last(&arena->extent_cache, ql_link);
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
		return (base_alloc(tsdn, arena->base, sizeof(extent_t),
		    QUANTUM));
	}
	ql_tail_remove(&arena->extent_cache, extent_t, ql_link);
	malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
	return (extent);
}

void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
{
	malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
	ql_elm_new(extent, ql_link);
	ql_tail_insert(&arena->extent_cache, extent, ql_link);
	malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
}

extent_hooks_t *
extent_hooks_get(arena_t *arena)
{
	return (base_extent_hooks_get(arena->base));
}

extent_hooks_t *
extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks)
{
	return (base_extent_hooks_set(arena->base, extent_hooks));
}

static void
extent_hooks_assure_initialized(arena_t *arena,
    extent_hooks_t **r_extent_hooks)
{
	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER)
		*r_extent_hooks = extent_hooks_get(arena);
}

#ifdef JEMALLOC_JET
#undef extent_size_quantize_floor
#define	extent_size_quantize_floor JEMALLOC_N(n_extent_size_quantize_floor)
#endif
size_t
extent_size_quantize_floor(size_t size)
{
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert((size & PAGE_MASK) == 0);

	pind = psz2ind(size - large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow.  This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return (size);
	}
	ret = pind2sz(pind - 1) + large_pad;
	assert(ret <= size);
	return (ret);
}
#ifdef JEMALLOC_JET
#undef extent_size_quantize_floor
#define	extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor)
extent_size_quantize_t *extent_size_quantize_floor =
    JEMALLOC_N(n_extent_size_quantize_floor);
#endif

#ifdef JEMALLOC_JET
#undef extent_size_quantize_ceil
#define	extent_size_quantize_ceil JEMALLOC_N(n_extent_size_quantize_ceil)
#endif
size_t
extent_size_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size - large_pad <= LARGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = extent_size_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large extent,
		 * because under-sized extents may be mixed in.  This only
		 * happens when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
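		/*
		 * Worked example (illustrative; assumes 4 KiB pages and
		 * large_pad == 0): when size falls strictly between two
		 * page-size classes, the floor above picked the class below
		 * size, so psz2ind(ret + 1) names the next class up, i.e. the
		 * smallest class >= size.  In all cases
		 * floor(size) <= size <= ceil(size) holds.
		 */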
		ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
	}
	return (ret);
}
#ifdef JEMALLOC_JET
#undef extent_size_quantize_ceil
#define	extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil)
extent_size_quantize_t *extent_size_quantize_ceil =
    JEMALLOC_N(n_extent_size_quantize_ceil);
#endif

/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)

static void
extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1],
    extent_t *extent)
{
	size_t psz = extent_size_quantize_floor(extent_size_get(extent));
	pszind_t pind = psz2ind(psz);

	malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);

	extent_heap_insert(&extent_heaps[pind], extent);
}

static void
extent_heaps_remove(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1],
    extent_t *extent)
{
	size_t psz = extent_size_quantize_floor(extent_size_get(extent));
	pszind_t pind = psz2ind(psz);

	malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);

	extent_heap_remove(&extent_heaps[pind], extent);
}

static bool
extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    const extent_t *extent, bool dependent, bool init_missing,
    rtree_elm_t **r_elm_a, rtree_elm_t **r_elm_b)
{
	*r_elm_a = rtree_elm_acquire(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_a == NULL)
		return (true);
	assert(*r_elm_a != NULL);

	if (extent_size_get(extent) > PAGE) {
		*r_elm_b = rtree_elm_acquire(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_last_get(extent), dependent,
		    init_missing);
		if (!dependent && *r_elm_b == NULL) {
			rtree_elm_release(tsdn, &extents_rtree, *r_elm_a);
			return (true);
		}
		assert(*r_elm_b != NULL);
	} else
		*r_elm_b = NULL;

	return (false);
}

static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_elm_t *elm_a,
    rtree_elm_t *elm_b, const extent_t *extent)
{
	rtree_elm_write_acquired(tsdn, &extents_rtree, elm_a, extent);
	if (elm_b != NULL)
		rtree_elm_write_acquired(tsdn, &extents_rtree, elm_b, extent);
}

static void
extent_rtree_release(tsdn_t *tsdn, rtree_elm_t *elm_a, rtree_elm_t *elm_b)
{
	rtree_elm_release(tsdn, &extents_rtree, elm_a);
	if (elm_b != NULL)
		rtree_elm_release(tsdn, &extents_rtree, elm_b);
}

static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    const extent_t *extent)
{
	size_t i;

	assert(extent_slab_get(extent));

	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_write(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE), extent);
	}
}

static void
extent_gprof_add(tsdn_t *tsdn, const extent_t *extent)
{
	cassert(config_prof);

	if (opt_prof && extent_active_get(extent)) {
		size_t nadd = extent_size_get(extent) >> LG_PAGE;
		size_t cur = atomic_add_zu(&curpages, nadd);
		size_t high = atomic_read_zu(&highpages);
		while (cur > high && atomic_cas_zu(&highpages, high, cur)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highpages update race.
			 */
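			/*
			 * Note (editorial): atomic_cas_zu() returns true on
			 * failure, so this loop keeps retrying until either
			 * the CAS installs cur, or a racing thread has pushed
			 * highpages above cur.
			 */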
			high = atomic_read_zu(&highpages);
		}
		if (cur > high && prof_gdump_get_unlocked())
			prof_gdump(tsdn);
	}
}

static void
extent_gprof_sub(tsdn_t *tsdn, const extent_t *extent)
{
	cassert(config_prof);

	if (opt_prof && extent_active_get(extent)) {
		size_t nsub = extent_size_get(extent) >> LG_PAGE;
		assert(atomic_read_zu(&curpages) >= nsub);
		atomic_sub_zu(&curpages, nsub);
	}
}

static bool
extent_register(tsdn_t *tsdn, const extent_t *extent)
{
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_elm_t *elm_a, *elm_b;

	if (extent_rtree_acquire(tsdn, rtree_ctx, extent, false, true, &elm_a,
	    &elm_b))
		return (true);
	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent);
	if (extent_slab_get(extent))
		extent_interior_register(tsdn, rtree_ctx, extent);
	extent_rtree_release(tsdn, elm_a, elm_b);

	if (config_prof)
		extent_gprof_add(tsdn, extent);

	return (false);
}

static void
extent_reregister(tsdn_t *tsdn, const extent_t *extent)
{
	bool err = extent_register(tsdn, extent);
	assert(!err);
}

static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    const extent_t *extent)
{
	size_t i;

	assert(extent_slab_get(extent));

	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE));
	}
}

static void
extent_deregister(tsdn_t *tsdn, extent_t *extent)
{
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_elm_t *elm_a, *elm_b;

	extent_rtree_acquire(tsdn, rtree_ctx, extent, true, false, &elm_a,
	    &elm_b);
	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}
	extent_rtree_release(tsdn, elm_a, elm_b);

	if (config_prof)
		extent_gprof_sub(tsdn, extent);
}

/*
 * Do first-best-fit extent selection, i.e. select the oldest/lowest extent
 * that best fits.
 */
static extent_t *
extent_first_best_fit(tsdn_t *tsdn, arena_t *arena,
    extent_heap_t extent_heaps[NPSIZES+1], size_t size)
{
	pszind_t pind, i;

	malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);

	pind = psz2ind(extent_size_quantize_ceil(size));
	for (i = pind; i < NPSIZES+1; i++) {
		extent_t *extent = extent_heap_first(&extent_heaps[i]);
		if (extent != NULL)
			return (extent);
	}

	return (NULL);
}

static void
extent_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    bool cache, extent_t *extent)
{
	/*
	 * Leak extent after making sure its pages have already been purged,
	 * so that this is only a virtual memory leak.
	 */
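	/*
	 * Note (editorial): lazy purging is attempted first, with forced
	 * purging as the fallback.  Even if both fail, the extent has already
	 * been deregistered at the call sites, so at worst resident pages are
	 * stranded along with the address range.
	 */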
	if (cache) {
		if (extent_purge_lazy_wrapper(tsdn, arena, r_extent_hooks,
		    extent, 0, extent_size_get(extent))) {
			extent_purge_forced_wrapper(tsdn, arena,
			    r_extent_hooks, extent, 0,
			    extent_size_get(extent));
		}
	}
	extent_dalloc(tsdn, arena, extent);
}

static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extent_heap_t extent_heaps[NPSIZES+1], bool locked, bool cache,
    void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
    bool *commit, bool slab)
{
	extent_t *extent;
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	size_t size, alloc_size, leadsize, trailsize;

	if (locked)
		malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
	assert(new_addr == NULL || !slab);
	assert(pad == 0 || !slab);
	assert(alignment > 0);
	if (config_debug && new_addr != NULL) {
		extent_t *prev;

		/*
		 * Non-NULL new_addr has two use cases:
		 *
		 *   1) Recycle a known-extant extent, e.g. during purging.
		 *   2) Perform in-place expanding reallocation.
		 *
		 * Regardless of use case, new_addr must either refer to a
		 * non-existing extent, or to the base of an extant extent,
		 * since only active slabs support interior lookups (which of
		 * course cannot be recycled).
		 */
		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
		assert(pad == 0);
		assert(alignment <= PAGE);
		prev = extent_lookup(tsdn, (void *)((uintptr_t)new_addr - PAGE),
		    false);
		assert(prev == NULL || extent_past_get(prev) == new_addr);
	}

	size = usize + pad;
	alloc_size = size + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < usize)
		return (NULL);
	if (!locked)
		malloc_mutex_lock(tsdn, &arena->extents_mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);
	if (new_addr != NULL) {
		rtree_elm_t *elm;

		elm = rtree_elm_acquire(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)new_addr, false, false);
		if (elm != NULL) {
			extent = rtree_elm_read_acquired(tsdn, &extents_rtree,
			    elm);
			if (extent != NULL) {
				assert(extent_base_get(extent) == new_addr);
				if (extent_arena_get(extent) != arena ||
				    extent_size_get(extent) < size ||
				    extent_active_get(extent) ||
				    extent_retained_get(extent) == cache)
					extent = NULL;
			}
			rtree_elm_release(tsdn, &extents_rtree, elm);
		} else
			extent = NULL;
	} else {
		extent = extent_first_best_fit(tsdn, arena, extent_heaps,
		    alloc_size);
	}
	if (extent == NULL) {
		if (!locked)
			malloc_mutex_unlock(tsdn, &arena->extents_mtx);
		return (NULL);
	}
	extent_heaps_remove(tsdn, extent_heaps, extent);
	arena_extent_cache_maybe_remove(tsdn, arena, extent, cache);

	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
	assert(new_addr == NULL || leadsize == 0);
	assert(extent_size_get(extent) >= leadsize + size);
	trailsize = extent_size_get(extent) - leadsize - size;
	if (extent_zeroed_get(extent))
		*zero = true;
	if (extent_committed_get(extent))
		*commit = true;
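
	/*
	 * Illustration (editorial) of how the recycled extent is carved up:
	 *
	 *	| leadsize | size = usize + pad | trailsize |
	 *	^
	 *	extent_base_get(extent); the returned extent begins at the
	 *	first suitably aligned address past the lead.
	 *
	 * Nonzero lead/trail portions are split off below and reinserted
	 * into extent_heaps.
	 */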
	/* Split the lead. */
	if (leadsize != 0) {
		extent_t *lead = extent;
		extent = extent_split_wrapper(tsdn, arena, r_extent_hooks,
		    lead, leadsize, leadsize, size + trailsize, usize +
		    trailsize);
		if (extent == NULL) {
			extent_deregister(tsdn, lead);
			extent_leak(tsdn, arena, r_extent_hooks, cache, lead);
			if (!locked)
				malloc_mutex_unlock(tsdn, &arena->extents_mtx);
			return (NULL);
		}
		extent_heaps_insert(tsdn, extent_heaps, lead);
		arena_extent_cache_maybe_insert(tsdn, arena, lead, cache);
	}

	/* Split the trail. */
	if (trailsize != 0) {
		extent_t *trail = extent_split_wrapper(tsdn, arena,
		    r_extent_hooks, extent, size, usize, trailsize, trailsize);
		if (trail == NULL) {
			extent_deregister(tsdn, extent);
			extent_leak(tsdn, arena, r_extent_hooks, cache,
			    extent);
			if (!locked)
				malloc_mutex_unlock(tsdn, &arena->extents_mtx);
			return (NULL);
		}
		extent_heaps_insert(tsdn, extent_heaps, trail);
		arena_extent_cache_maybe_insert(tsdn, arena, trail, cache);
	} else if (leadsize == 0) {
		/*
		 * Splitting causes usize to be set as a side effect, but no
		 * splitting occurred.
		 */
		extent_usize_set(extent, usize);
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_wrapper(tsdn, arena, r_extent_hooks, extent,
		    0, extent_size_get(extent))) {
			if (!locked)
				malloc_mutex_unlock(tsdn, &arena->extents_mtx);
			extent_record(tsdn, arena, r_extent_hooks, extent_heaps,
			    cache, extent);
			return (NULL);
		}
		extent_zeroed_set(extent, true);
	}

	if (pad != 0)
		extent_addr_randomize(tsdn, extent, alignment);
	extent_active_set(extent, true);
	if (slab) {
		extent_slab_set(extent, slab);
		extent_interior_register(tsdn, rtree_ctx, extent);
	}

	if (!locked)
		malloc_mutex_unlock(tsdn, &arena->extents_mtx);

	if (*zero) {
		if (!extent_zeroed_get(extent)) {
			memset(extent_addr_get(extent), 0,
			    extent_usize_get(extent));
		} else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)
			    extent_addr_get(extent);

			for (i = 0; i < usize / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (extent);
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
 * advantage of this to avoid demanding zeroed extents, but taking advantage of
 * them if they are returned.
 */
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert(alignment != 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL)
		return (ret);
	/* mmap. */
	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
	    != NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL)
		return (ret);

	/* All strategies for allocation failed. */
	return (NULL);
}
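
/*
 * Illustration (editorial) of the *zero contract described above: a caller
 * that can tolerate non-zeroed memory passes zero = false; if the returned
 * range happens to be zeroed anyway (e.g. freshly mapped pages), *zero is
 * flipped to true so the caller can skip its own memset().
 */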

static extent_t *
extent_alloc_cache_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, bool locked, void *new_addr, size_t usize,
    size_t pad, size_t alignment, bool *zero, bool *commit, bool slab)
{
	extent_t *extent;

	assert(usize + pad != 0);
	assert(alignment != 0);

	extent = extent_recycle(tsdn, arena, r_extent_hooks,
	    arena->extents_cached, locked, true, new_addr, usize, pad,
	    alignment, zero, commit, slab);
	return (extent);
}

extent_t *
extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
    size_t alignment, bool *zero, bool *commit, bool slab)
{
	malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);

	return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, true,
	    new_addr, usize, pad, alignment, zero, commit, slab));
}

extent_t *
extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
    size_t alignment, bool *zero, bool *commit, bool slab)
{
	return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, false,
	    new_addr, usize, pad, alignment, zero, commit, slab));
}

static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;

	ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
	    commit, arena->dss_prec);
	return (ret);
}

static void *
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind)
{
	tsdn_t *tsdn;
	arena_t *arena;

	assert(extent_hooks == &extent_hooks_default);

	tsdn = tsdn_fetch();
	arena = arena_get(tsdn, arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);

	return (extent_alloc_default_impl(tsdn, arena, new_addr, size,
	    alignment, zero, commit));
}

static void
extent_retain(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extent_t *extent)
{
	if (config_stats)
		arena->stats.retained += extent_size_get(extent);
	extent_record(tsdn, arena, r_extent_hooks, arena->extents_retained,
	    false, extent);
}

/*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
 * virtual memory ranges retained by each arena.
 */
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
    size_t alignment, bool *zero, bool *commit, bool slab)
{
	extent_t *extent;
	void *ptr;
	size_t size, alloc_size, alloc_size_min, leadsize, trailsize;
	bool zeroed, committed;

	/*
	 * Check whether the next extent size in the series would be large
	 * enough to satisfy this request.  If no, just bail, so that e.g. a
	 * series of unsatisfiable allocation requests doesn't cause unused
	 * extent creation as a side effect.
	 */
	size = usize + pad;
	alloc_size = pind2sz(arena->extent_grow_next);
	alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
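	/*
	 * Illustrative overflow case (editorial): with usize near SIZE_MAX
	 * and a large alignment, size + PAGE_CEILING(alignment) - PAGE can
	 * wrap past zero; the wrapped alloc_size_min is then smaller than
	 * usize, so the check below correctly refuses the request.
	 */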
	if (alloc_size_min < usize)
		return (NULL);
	if (alloc_size < alloc_size_min)
		return (NULL);
	extent = extent_alloc(tsdn, arena);
	if (extent == NULL)
		return (NULL);
	zeroed = false;
	committed = false;
	ptr = extent_alloc_core(tsdn, arena, new_addr, alloc_size, PAGE,
	    &zeroed, &committed, arena->dss_prec);
	extent_init(extent, arena, ptr, alloc_size, alloc_size,
	    arena_extent_sn_next(arena), false, zeroed, committed, false);
	if (ptr == NULL || extent_register(tsdn, extent)) {
		extent_dalloc(tsdn, arena, extent);
		return (NULL);
	}
	/*
	 * Set the extent as active *after* registration so that no
	 * gprof-related accounting occurs during registration.
	 */
	extent_active_set(extent, true);

	leadsize = ALIGNMENT_CEILING((uintptr_t)ptr, PAGE_CEILING(alignment)) -
	    (uintptr_t)ptr;
	assert(new_addr == NULL || leadsize == 0);
	assert(alloc_size >= leadsize + size);
	trailsize = alloc_size - leadsize - size;
	if (extent_zeroed_get(extent))
		*zero = true;
	if (extent_committed_get(extent))
		*commit = true;

	/* Split the lead. */
	if (leadsize != 0) {
		extent_t *lead = extent;
		extent = extent_split_wrapper(tsdn, arena, r_extent_hooks, lead,
		    leadsize, leadsize, size + trailsize, usize + trailsize);
		if (extent == NULL) {
			extent_deregister(tsdn, lead);
			extent_leak(tsdn, arena, r_extent_hooks, false, lead);
			return (NULL);
		}
		extent_retain(tsdn, arena, r_extent_hooks, lead);
	}

	/* Split the trail. */
	if (trailsize != 0) {
		extent_t *trail = extent_split_wrapper(tsdn, arena,
		    r_extent_hooks, extent, size, usize, trailsize, trailsize);
		if (trail == NULL) {
			extent_deregister(tsdn, extent);
			extent_leak(tsdn, arena, r_extent_hooks, false, extent);
			return (NULL);
		}
		extent_retain(tsdn, arena, r_extent_hooks, trail);
	} else if (leadsize == 0) {
		/*
		 * Splitting causes usize to be set as a side effect, but no
		 * splitting occurred.
		 */
		extent_usize_set(extent, usize);
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_wrapper(tsdn, arena, r_extent_hooks, extent,
		    0, extent_size_get(extent))) {
			extent_retain(tsdn, arena, r_extent_hooks, extent);
			return (NULL);
		}
		extent_zeroed_set(extent, true);
	}

	if (config_prof) {
		/* Adjust gprof stats now that extent is final size. */
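		/*
		 * Note (editorial): extent_register() skipped this accounting
		 * because the extent was not yet active, and the trimmed
		 * lead/trail were deactivated by extent_retain(), so the
		 * pages of this extent are counted here exactly once.
		 */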
		extent_gprof_add(tsdn, extent);
	}
	if (pad != 0)
		extent_addr_randomize(tsdn, extent, alignment);
	if (slab) {
		rtree_ctx_t rtree_ctx_fallback;
		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback);

		extent_slab_set(extent, true);
		extent_interior_register(tsdn, rtree_ctx, extent);
	}
	if (*zero && !extent_zeroed_get(extent))
		memset(extent_addr_get(extent), 0, extent_usize_get(extent));
	if (arena->extent_grow_next + 1 < NPSIZES)
		arena->extent_grow_next++;
	return (extent);
}

static extent_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
    size_t alignment, bool *zero, bool *commit, bool slab)
{
	extent_t *extent;

	assert(usize != 0);
	assert(alignment != 0);

	extent = extent_recycle(tsdn, arena, r_extent_hooks,
	    arena->extents_retained, false, false, new_addr, usize, pad,
	    alignment, zero, commit, slab);
	if (extent != NULL) {
		if (config_stats) {
			size_t size = usize + pad;
			arena->stats.retained -= size;
		}
		if (config_prof)
			extent_gprof_add(tsdn, extent);
	}
	if (!config_munmap && extent == NULL) {
		extent = extent_grow_retained(tsdn, arena, r_extent_hooks,
		    new_addr, usize, pad, alignment, zero, commit, slab);
	}

	return (extent);
}

static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
    size_t alignment, bool *zero, bool *commit, bool slab)
{
	extent_t *extent;
	size_t size;
	void *addr;

	size = usize + pad;
	extent = extent_alloc(tsdn, arena);
	if (extent == NULL)
		return (NULL);
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
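		/*
		 * Note (editorial): routing through the hook table would land
		 * in extent_alloc_default(), which re-fetches tsdn via
		 * tsdn_fetch(); calling the impl directly reuses the caller's
		 * tsdn.
		 */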
		addr = extent_alloc_default_impl(tsdn, arena, new_addr, size,
		    alignment, zero, commit);
	} else {
		addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, size,
		    alignment, zero, commit, arena_ind_get(arena));
	}
	if (addr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		return (NULL);
	}
	extent_init(extent, arena, addr, size, usize,
	    arena_extent_sn_next(arena), true, *zero, *commit, slab);
	if (pad != 0)
		extent_addr_randomize(tsdn, extent, alignment);
	if (extent_register(tsdn, extent)) {
		extent_leak(tsdn, arena, r_extent_hooks, false, extent);
		return (NULL);
	}

	return (extent);
}

extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
    size_t alignment, bool *zero, bool *commit, bool slab)
{
	extent_t *extent;

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	extent = extent_alloc_retained(tsdn, arena, r_extent_hooks, new_addr,
	    usize, pad, alignment, zero, commit, slab);
	if (extent == NULL) {
		extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
		    new_addr, usize, pad, alignment, zero, commit, slab);
	}

	return (extent);
}

static bool
extent_can_coalesce(const extent_t *a, const extent_t *b)
{
	if (extent_arena_get(a) != extent_arena_get(b))
		return (false);
	if (extent_active_get(a) != extent_active_get(b))
		return (false);
	if (extent_committed_get(a) != extent_committed_get(b))
		return (false);
	if (extent_retained_get(a) != extent_retained_get(b))
		return (false);

	return (true);
}

static void
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    extent_heap_t extent_heaps[NPSIZES+1], bool cache)
{
	if (!extent_can_coalesce(a, b))
		return;

	extent_heaps_remove(tsdn, extent_heaps, a);
	extent_heaps_remove(tsdn, extent_heaps, b);

	arena_extent_cache_maybe_remove(tsdn, extent_arena_get(a), a, cache);
	arena_extent_cache_maybe_remove(tsdn, extent_arena_get(b), b, cache);

	if (extent_merge_wrapper(tsdn, arena, r_extent_hooks, a, b)) {
		extent_heaps_insert(tsdn, extent_heaps, a);
		extent_heaps_insert(tsdn, extent_heaps, b);
		arena_extent_cache_maybe_insert(tsdn, extent_arena_get(a), a,
		    cache);
		arena_extent_cache_maybe_insert(tsdn, extent_arena_get(b), b,
		    cache);
		return;
	}

	extent_heaps_insert(tsdn, extent_heaps, a);
	arena_extent_cache_maybe_insert(tsdn, extent_arena_get(a), a, cache);
}

static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extent_heap_t extent_heaps[NPSIZES+1], bool cache, extent_t *extent)
{
	extent_t *prev, *next;
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	assert(!cache || !extent_zeroed_get(extent));

	malloc_mutex_lock(tsdn, &arena->extents_mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);

	extent_usize_set(extent, 0);
	extent_active_set(extent, false);
	extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	assert(extent_lookup(tsdn, extent_base_get(extent), true) == extent);
	extent_heaps_insert(tsdn, extent_heaps, extent);
	arena_extent_cache_maybe_insert(tsdn, arena, extent, cache);
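
	/*
	 * Note (editorial): neighbors are located via rtree lookups at
	 * extent_past_get() and extent_before_get().  For the backward case
	 * the neighbor is passed as a, since extent_merge_wrapper() assumes
	 * that a precedes b in address order.
	 */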
	/* Try to coalesce forward. */
	next = rtree_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_past_get(extent), false);
	if (next != NULL) {
		extent_try_coalesce(tsdn, arena, r_extent_hooks, extent, next,
		    extent_heaps, cache);
	}

	/* Try to coalesce backward. */
	prev = rtree_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_before_get(extent), false);
	if (prev != NULL) {
		extent_try_coalesce(tsdn, arena, r_extent_hooks, prev, extent,
		    extent_heaps, cache);
	}

	malloc_mutex_unlock(tsdn, &arena->extents_mtx);
}

void
extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
{
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	if (extent_register(tsdn, extent)) {
		extent_leak(tsdn, arena, &extent_hooks, false, extent);
		return;
	}
	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
}

void
extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent)
{
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);

	extent_addr_set(extent, extent_base_get(extent));
	extent_zeroed_set(extent, false);

	extent_record(tsdn, arena, r_extent_hooks, arena->extents_cached, true,
	    extent);
}

static bool
extent_dalloc_default_impl(void *addr, size_t size)
{
	if (!have_dss || !extent_in_dss(addr))
		return (extent_dalloc_mmap(addr, size));
	return (true);
}

static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind)
{
	assert(extent_hooks == &extent_hooks_default);

	return (extent_dalloc_default_impl(addr, size));
}

bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent)
{
	bool err;

	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/*
	 * Try to deallocate.  Deregister first to avoid a race with other
	 * allocating threads, and reregister if deallocation fails.
	 */
	extent_deregister(tsdn, extent);
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_dalloc_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else {
		err = ((*r_extent_hooks)->dalloc == NULL ||
		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena)));
	}

	if (!err)
		extent_dalloc(tsdn, arena, extent);

	return (err);
}

void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent)
{
	bool zeroed;

	if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent))
		return;

	extent_reregister(tsdn, extent);
	/* Try to decommit; purge if that fails. */
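	/*
	 * Note (editorial): zeroed records what a future reader would
	 * observe.  Decommitted pages fault back in zero-filled; a lazy
	 * purge (e.g. MADV_FREE) may still expose old contents, hence
	 * zeroed = false; a forced purge (e.g. MADV_DONTNEED on Linux)
	 * guarantees zero-filled pages.
	 */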
	if (!extent_committed_get(extent))
		zeroed = true;
	else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
	    0, extent_size_get(extent)))
		zeroed = true;
	else if ((*r_extent_hooks)->purge_lazy != NULL &&
	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena)))
		zeroed = false;
	else if ((*r_extent_hooks)->purge_forced != NULL &&
	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena)))
		zeroed = true;
	else
		zeroed = false;
	extent_zeroed_set(extent, zeroed);

	if (config_stats)
		arena->stats.retained += extent_size_get(extent);
	if (config_prof)
		extent_gprof_sub(tsdn, extent);

	extent_record(tsdn, arena, r_extent_hooks, arena->extents_retained,
	    false, extent);
}

static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind)
{
	assert(extent_hooks == &extent_hooks_default);

	return (pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length));
}

bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length)
{
	bool err;

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	err = ((*r_extent_hooks)->commit == NULL ||
	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
	extent_committed_set(extent, extent_committed_get(extent) || !err);
	return (err);
}

static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind)
{
	assert(extent_hooks == &extent_hooks_default);

	return (pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length));
}

bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length)
{
	bool err;

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	err = ((*r_extent_hooks)->decommit == NULL ||
	    (*r_extent_hooks)->decommit(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
	extent_committed_set(extent, extent_committed_get(extent) && err);
	return (err);
}

#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind)
{
	assert(extent_hooks == &extent_hooks_default);
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return (pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length));
}
#endif

bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length)
{
	extent_hooks_assure_initialized(arena, r_extent_hooks);
	return ((*r_extent_hooks)->purge_lazy == NULL ||
	    (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
}

#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind)
{
	assert(extent_hooks == &extent_hooks_default);
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return (pages_purge_forced((void *)((uintptr_t)addr +
	    (uintptr_t)offset), length));
}
#endif

bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length)
{
	extent_hooks_assure_initialized(arena, r_extent_hooks);
	return ((*r_extent_hooks)->purge_forced == NULL ||
	    (*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
}

#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind)
{
	assert(extent_hooks == &extent_hooks_default);

	if (!maps_coalesce)
		return (true);
	return (false);
}
#endif

extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    size_t usize_a, size_t size_b, size_t usize_b)
{
	extent_t *trail;
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;

	assert(extent_size_get(extent) == size_a + size_b);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->split == NULL)
		return (NULL);

	trail = extent_alloc(tsdn, arena);
	if (trail == NULL)
		goto label_error_a;

	{
		extent_t lead;

		extent_init(&lead, arena, extent_addr_get(extent), size_a,
		    usize_a, extent_sn_get(extent), extent_active_get(extent),
		    extent_zeroed_get(extent), extent_committed_get(extent),
		    extent_slab_get(extent));

		if (extent_rtree_acquire(tsdn, rtree_ctx, &lead, false, true,
		    &lead_elm_a, &lead_elm_b))
			goto label_error_b;
	}

	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
	    size_a), size_b, usize_b, extent_sn_get(extent),
	    extent_active_get(extent), extent_zeroed_get(extent),
	    extent_committed_get(extent), extent_slab_get(extent));
	if (extent_rtree_acquire(tsdn, rtree_ctx, trail, false, true,
	    &trail_elm_a, &trail_elm_b))
		goto label_error_c;

	if ((*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
	    size_a + size_b, size_a, size_b, extent_committed_get(extent),
	    arena_ind_get(arena)))
		goto label_error_d;

	extent_size_set(extent, size_a);
	extent_usize_set(extent, usize_a);

	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent);
	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail);

	extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
	extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);

	return (trail);
label_error_d:
	extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
label_error_c:
	extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
label_error_b:
	extent_dalloc(tsdn, arena, trail);
label_error_a:
	return (NULL);
}

static bool
extent_merge_default_impl(void *addr_a, void *addr_b)
{
	if (!maps_coalesce)
		return (true);
	if (have_dss && !extent_dss_mergeable(addr_a, addr_b))
		return (true);

	return (false);
}

#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind)
{
	assert(extent_hooks == &extent_hooks_default);

	return (extent_merge_default_impl(addr_a, addr_b));
}
#endif

bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b)
{
	bool err;
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->merge == NULL)
		return (true);

	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_merge_default_impl(extent_base_get(a),
		    extent_base_get(b));
	} else {
		err = (*r_extent_hooks)->merge(*r_extent_hooks,
		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
		    extent_size_get(b), extent_committed_get(a),
		    arena_ind_get(arena));
	}

	if (err)
		return (true);

	/*
	 * The rtree writes must happen while all the relevant elements are
	 * owned, so the following code uses decomposed helper functions rather
	 * than extent_{,de}register() to do things in the right order.
	 */
	extent_rtree_acquire(tsdn, rtree_ctx, a, true, false, &a_elm_a,
	    &a_elm_b);
	extent_rtree_acquire(tsdn, rtree_ctx, b, true, false, &b_elm_a,
	    &b_elm_b);

	if (a_elm_b != NULL) {
		rtree_elm_write_acquired(tsdn, &extents_rtree, a_elm_b, NULL);
		rtree_elm_release(tsdn, &extents_rtree, a_elm_b);
	}
	if (b_elm_b != NULL) {
		rtree_elm_write_acquired(tsdn, &extents_rtree, b_elm_a, NULL);
		rtree_elm_release(tsdn, &extents_rtree, b_elm_a);
	} else
		b_elm_b = b_elm_a;

	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
	extent_usize_set(a, extent_usize_get(a) + extent_usize_get(b));
	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
	    extent_sn_get(a) : extent_sn_get(b));
	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));

	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
	extent_rtree_release(tsdn, a_elm_a, b_elm_b);

	extent_dalloc(tsdn, extent_arena_get(b), b);

	return (false);
}

bool
extent_boot(void)
{
	if (rtree_new(&extents_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
	    LG_PAGE)))
		return (true);

	if (have_dss)
		extent_dss_boot();

	return (false);
}