#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

/******************************************************************************/
/* Data. */

rtree_t		extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t	extent_mutex_pool;

size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;

static const bitmap_info_t extents_bitmap_info =
    BITMAP_INFO_INITIALIZER(NPSIZES+1);

static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t size_a, size_t size_b, bool committed,
    unsigned arena_ind);
#endif
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
    size_t size_a, void *addr_b, size_t size_b, bool committed,
    unsigned arena_ind);
#endif
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained);

const extent_hooks_t	extent_hooks_default = {
	extent_alloc_default,
	extent_dalloc_default,
	extent_destroy_default,
	extent_commit_default,
	extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
	,
	extent_purge_lazy_default
#else
	,
	NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	,
	extent_purge_forced_default
#else
	,
	NULL
#endif
#ifdef JEMALLOC_MAPS_COALESCE
	,
	extent_split_default,
	extent_merge_default
#endif
};
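/*
 * The slots above follow the public extent_hooks_t field order: alloc, dalloc,
 * destroy, commit, decommit, purge_lazy, purge_forced, split, and merge.
 * Trailing slots omitted from the initializer (e.g. split/merge when
 * JEMALLOC_MAPS_COALESCE is undefined) are zero-initialized, i.e. NULL, which
 * tells jemalloc the operation is unsupported so it must not rely on it.
 */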

/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
    bool growing_retained);

/******************************************************************************/

ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
    extent_esnead_comp)

typedef enum {
	lock_result_success,
	lock_result_failure,
	lock_result_no_extent
} lock_result_t;

static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
    extent_t **result) {
	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
	    elm, true);

	if (extent1 == NULL) {
		return lock_result_no_extent;
	}
	/*
	 * It's possible that the extent changed out from under us, and with it
	 * the leaf->extent mapping.  We have to recheck while holding the lock.
	 */
	extent_lock(tsdn, extent1);
	extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
	    &extents_rtree, elm, true);

	if (extent1 == extent2) {
		*result = extent1;
		return lock_result_success;
	} else {
		extent_unlock(tsdn, extent1);
		return lock_result_failure;
	}
}

/*
 * Returns a pool-locked extent_t * if there's one associated with the given
 * address, and NULL otherwise.
 */
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
	extent_t *ret = NULL;
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
	    rtree_ctx, (uintptr_t)addr, false, false);
	if (elm == NULL) {
		return NULL;
	}
	lock_result_t lock_result;
	do {
		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
	} while (lock_result == lock_result_failure);
	return ret;
}
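/*
 * The retry loop above spins only on lock_result_failure, i.e. when the rtree
 * leaf was remapped to a different extent between the unlocked read and
 * acquiring the pool lock (e.g. because of a concurrent split or merge);
 * lock_result_no_extent terminates the loop with ret still NULL.
 */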

extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_t *extent = extent_avail_first(&arena->extent_avail);
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
		return base_alloc_extent(tsdn, arena->base);
	}
	extent_avail_remove(&arena->extent_avail, extent);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
	return extent;
}

void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_avail_insert(&arena->extent_avail, extent);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}
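/*
 * extent_alloc() and extent_dalloc() above manage only extent_t metadata
 * structures (recycled via the per-arena extent_avail heap, with
 * base-allocated backing as a fallback); they never map or unmap pages.
 */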

extent_hooks_t *
extent_hooks_get(arena_t *arena) {
	return base_extent_hooks_get(arena->base);
}

extent_hooks_t *
extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
	background_thread_info_t *info;
	if (have_background_thread) {
		info = arena_background_thread_info_get(arena);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	}
	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
	if (have_background_thread) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	}

	return ret;
}

static void
extent_hooks_assure_initialized(arena_t *arena,
    extent_hooks_t **r_extent_hooks) {
	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
		*r_extent_hooks = extent_hooks_get(arena);
	}
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert((size & PAGE_MASK) == 0);

	pind = sz_psz2ind(size - sz_large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow.  This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return size;
	}
	ret = sz_pind2sz(pind - 1) + sz_large_pad;
	assert(ret <= size);
	return ret;
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_ceil(size_t size) {
	size_t ret;

	assert(size > 0);
	assert(size - sz_large_pad <= LARGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = extent_size_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large extent,
		 * because under-sized extents may be mixed in.  This only
		 * happens when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
		    sz_large_pad;
	}
	return ret;
}
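/*
 * Worked example for the quantization helpers above (assuming 4 KiB pages,
 * sz_large_pad == 0, and the default page-size class spacing): 36 KiB falls
 * between the 32 KiB and 40 KiB classes, so
 * extent_size_quantize_floor(36 KiB) == 32 KiB and
 * extent_size_quantize_ceil(36 KiB) == 40 KiB.  A size that is itself a size
 * class quantizes to itself in both directions.
 */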

/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)

bool
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce) {
	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	for (unsigned i = 0; i < NPSIZES+1; i++) {
		extent_heap_new(&extents->heaps[i]);
	}
	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
	extent_list_init(&extents->lru);
	atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
	extents->state = state;
	extents->delay_coalesce = delay_coalesce;
	return false;
}

extent_state_t
extents_state_get(const extents_t *extents) {
	return extents->state;
}

size_t
extents_npages_get(extents_t *extents) {
	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}

static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_unset(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_heap_insert(&extents->heaps[pind], extent);
	extent_list_append(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * All modifications to npages hold the mutex (as asserted above), so we
	 * don't need an atomic fetch-add; we can get by with a load followed by
	 * a store.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	atomic_store_zu(&extents->npages, cur_extents_npages + npages,
	    ATOMIC_RELAXED);
}

static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	extent_heap_remove(&extents->heaps[pind], extent);
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_set(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_list_remove(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * As in extents_insert_locked, we hold extents->mtx and so don't need
	 * atomic operations for updating extents->npages.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	assert(cur_extents_npages >= npages);
	atomic_store_zu(&extents->npages,
	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}

/*
 * Find an extent with size [min_size, max_size) to satisfy the alignment
 * requirement.  For each size, try only the first extent in the heap.
 */
static extent_t *
extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
    size_t alignment) {
	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
	pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));

	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind); i < pind_max; i =
	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i+1)) {
		assert(i < NPSIZES);
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		uintptr_t base = (uintptr_t)extent_base_get(extent);
		size_t candidate_size = extent_size_get(extent);
		assert(candidate_size >= min_size);

		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
		    PAGE_CEILING(alignment));
		if (base > next_align || base + candidate_size <= next_align) {
			/* Overflow or not crossing the next alignment. */
			continue;
		}

		size_t leadsize = next_align - base;
		if (candidate_size - leadsize >= min_size) {
			return extent;
		}
	}

	return NULL;
}

/* Do any-best-fit extent selection, i.e. select any extent that best fits. */
static extent_t *
extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
	pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)pind);
	if (i < NPSIZES+1) {
		/*
		 * In order to reduce fragmentation, avoid reusing and splitting
		 * large extents for much smaller sizes.
		 */
		if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
			return NULL;
		}
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		return extent;
	}

	return NULL;
}

/*
 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
 * large enough.
 */
static extent_t *
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	extent_t *ret = NULL;

	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i+1)) {
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
			ret = extent;
		}
		if (i == NPSIZES) {
			break;
		}
		assert(i < NPSIZES);
	}

	return ret;
}

/*
 * Do {best,first}-fit extent selection, where the selection policy choice is
 * based on extents->delay_coalesce.  Best-fit selection requires less
 * searching, but its layout policy is less stable and may cause higher virtual
 * memory fragmentation as a side effect.
 */
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t esize, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);

	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (max_size < esize) {
		return NULL;
	}

	extent_t *extent = extents->delay_coalesce ?
	    extents_best_fit_locked(tsdn, arena, extents, max_size) :
	    extents_first_fit_locked(tsdn, arena, extents, max_size);

	if (alignment > PAGE && extent == NULL) {
		/*
		 * max_size guarantees the alignment requirement but is rather
		 * pessimistic.  Next we try to satisfy the aligned allocation
		 * with sizes in [esize, max_size).
		 */
		extent = extents_fit_alignment(extents, esize, max_size,
		    alignment);
	}

	return extent;
}
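/*
 * Example for extents_fit_locked() above: with 4 KiB pages, esize == 16 KiB
 * and alignment == 8 KiB give max_size == 16 KiB + 8 KiB - 4 KiB == 20 KiB;
 * any extent of at least that size necessarily contains an 8 KiB-aligned
 * 16 KiB range.  The extents_fit_alignment() fallback then settles for a
 * smaller extent in [esize, max_size) whose first suitably aligned address
 * still leaves esize bytes within the extent.
 */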

static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent) {
	extent_state_set(extent, extent_state_active);
	bool coalesced;
	extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, extent, &coalesced, false);
	extent_state_set(extent, extents_state_get(extents));

	if (!coalesced) {
		return true;
	}
	extents_insert_locked(tsdn, extents, extent);
	return false;
}

extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size + pad != 0);
	assert(alignment != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
	    new_addr, size, pad, alignment, slab, szind, zero, commit, false);
	assert(extent == NULL || extent_dumpable_get(extent));
	return extent;
}

void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));
	extent_zeroed_set(extent, false);

	extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
}

extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, size_t npages_min) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	malloc_mutex_lock(tsdn, &extents->mtx);

	/*
	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
	 * the loop will iterate until the LRU extent is fully coalesced.
	 */
	extent_t *extent;
	while (true) {
		/* Get the LRU extent, if any. */
		extent = extent_list_first(&extents->lru);
		if (extent == NULL) {
			goto label_return;
		}
		/* Check the eviction limit. */
		size_t extents_npages = atomic_load_zu(&extents->npages,
		    ATOMIC_RELAXED);
		if (extents_npages <= npages_min) {
			extent = NULL;
			goto label_return;
		}
		extents_remove_locked(tsdn, extents, extent);
		if (!extents->delay_coalesce) {
			break;
		}
		/* Try to coalesce. */
		if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent)) {
			break;
		}
		/*
		 * The LRU extent was just coalesced and the result placed in
		 * the LRU at its neighbor's position.  Start over.
		 */
	}

	/*
	 * Either mark the extent active or deregister it to protect against
	 * concurrent operations.
	 */
	switch (extents_state_get(extents)) {
	case extent_state_active:
		not_reached();
	case extent_state_dirty:
	case extent_state_muzzy:
		extent_state_set(extent, extent_state_active);
		break;
	case extent_state_retained:
		extent_deregister(tsdn, extent);
		break;
	default:
		not_reached();
	}

label_return:
	malloc_mutex_unlock(tsdn, &extents->mtx);
	return extent;
}

static void
extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
	/*
	 * Leak extent after making sure its pages have already been purged, so
	 * that this is only a virtual memory leak.
	 */
	if (extents_state_get(extents) == extent_state_dirty) {
		if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
		    extent, 0, extent_size_get(extent), growing_retained)) {
			extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
			    extent, 0, extent_size_get(extent),
			    growing_retained);
		}
	}
	extent_dalloc(tsdn, arena, extent);
}

void
extents_prefork(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_prefork(tsdn, &extents->mtx);
}

void
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_parent(tsdn, &extents->mtx);
}

void
extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_child(tsdn, &extents->mtx);
}

static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extent_state_active);

	extent_state_set(extent, extents_state_get(extents));
	extents_insert_locked(tsdn, extents, extent);
}

static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_deactivate_locked(tsdn, arena, extents, extent);
	malloc_mutex_unlock(tsdn, &extents->mtx);
}

static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extents_state_get(extents));

	extents_remove_locked(tsdn, extents, extent);
	extent_state_set(extent, extent_state_active);
}

static bool
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    const extent_t *extent, bool dependent, bool init_missing,
    rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_a == NULL) {
		return true;
	}
	assert(*r_elm_a != NULL);

	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_last_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_b == NULL) {
		return true;
	}
	assert(*r_elm_b != NULL);

	return false;
}

static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
    rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
	if (elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
		    slab);
	}
}

static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
    szind_t szind) {
	assert(extent_slab_get(extent));

	/* Register interior. */
	for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_write(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE), extent, szind, true);
	}
}

static void
extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);
	/* prof_gdump() requirement. */
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nadd = extent_size_get(extent) >> LG_PAGE;
		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
		    ATOMIC_RELAXED) + nadd;
		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
		while (cur > high && !atomic_compare_exchange_weak_zu(
		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highpages update race.
			 * Note that high is updated in case of CAS failure.
			 */
		}
		if (cur > high && prof_gdump_get_unlocked()) {
			prof_gdump(tsdn);
		}
	}
}

static void
extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nsub = extent_size_get(extent) >> LG_PAGE;
		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
	}
}

static bool
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;

	/*
	 * We need to hold the lock to protect against a concurrent coalesce
	 * operation that sees us in a partial state.
	 */
	extent_lock(tsdn, extent);

	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
	    &elm_a, &elm_b)) {
		extent_unlock(tsdn, extent);
		return true;
	}

	szind_t szind = extent_szind_get_maybe_invalid(extent);
	bool slab = extent_slab_get(extent);
	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
	if (slab) {
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	extent_unlock(tsdn, extent);

	if (config_prof && gdump_add) {
		extent_gdump_add(tsdn, extent);
	}

	return false;
}

static bool
extent_register(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, true);
}

static bool
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, false);
}

static void
extent_reregister(tsdn_t *tsdn, extent_t *extent) {
	bool err = extent_register(tsdn, extent);
	assert(!err);
}

/*
 * Removes all pointers to the given extent from the global rtree indices for
 * its interior.  This is relevant for slab extents, for which we need to do
 * metadata lookups at places other than the head of the extent.  We deregister
 * on the interior, then, when an extent moves from being an active slab to an
 * inactive state.
 */
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    extent_t *extent) {
	size_t i;

	assert(extent_slab_get(extent));

	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE));
	}
}

/*
 * Removes all pointers to the given extent from the global rtree.
 */
static void
extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
	    &elm_a, &elm_b);

	extent_lock(tsdn, extent);

	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	extent_unlock(tsdn, extent);

	if (config_prof && gdump) {
		extent_gdump_sub(tsdn, extent);
	}
}

static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
	extent_deregister_impl(tsdn, extent, true);
}

static void
extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
	extent_deregister_impl(tsdn, extent, false);
}

/*
 * Tries to find and remove an extent from extents that can be used for the
 * given allocation request.
 */
static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(alignment > 0);
	if (config_debug && new_addr != NULL) {
		/*
		 * Non-NULL new_addr has two use cases:
		 *
		 *   1) Recycle a known-extant extent, e.g. during purging.
		 *   2) Perform in-place expanding reallocation.
		 *
		 * Regardless of use case, new_addr must either refer to a
		 * non-existing extent, or to the base of an extant extent,
		 * since only active slabs support interior lookups (which of
		 * course cannot be recycled).
		 */
		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
		assert(pad == 0);
		assert(alignment <= PAGE);
	}

	size_t esize = size + pad;
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);
	extent_t *extent;
	if (new_addr != NULL) {
		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
		if (extent != NULL) {
			/*
			 * We might null-out extent to report an error, but we
			 * still need to unlock the associated mutex after.
			 */
			extent_t *unlock_extent = extent;
			assert(extent_base_get(extent) == new_addr);
			if (extent_arena_get(extent) != arena ||
			    extent_size_get(extent) < esize ||
			    extent_state_get(extent) !=
			    extents_state_get(extents)) {
				extent = NULL;
			}
			extent_unlock(tsdn, unlock_extent);
		}
	} else {
		extent = extents_fit_locked(tsdn, arena, extents, esize,
		    alignment);
	}
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &extents->mtx);
		return NULL;
	}

	extent_activate_locked(tsdn, arena, extents, extent);
	malloc_mutex_unlock(tsdn, &extents->mtx);

	return extent;
}

/*
 * Given an allocation request and an extent guaranteed to be able to satisfy
 * it, this splits off lead and trail extents, leaving extent pointing to an
 * extent satisfying the allocation.
 * This function doesn't put lead or trail into any extents_t; it's the caller's
 * job to ensure that they can be reused.
 */
typedef enum {
	/*
	 * Split successfully.  lead, extent, and trail, are modified to extents
	 * describing the ranges before, in, and after the given allocation.
	 */
	extent_split_interior_ok,
	/*
	 * The extent can't satisfy the given allocation request.  None of the
	 * input extent_t *s are touched.
	 */
	extent_split_interior_cant_alloc,
	/*
	 * In a potentially invalid state.  Must leak (if *to_leak is non-NULL),
	 * and salvage what's still salvageable (if *to_salvage is non-NULL).
	 * None of lead, extent, or trail are valid.
	 */
	extent_split_interior_error
} extent_split_interior_result_t;
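
/*
 * On success, extent_split_interior() carves the original range up as follows
 * (either side may be empty):
 *
 *   |---- *lead ----|---- *extent (size + pad) ----|---- *trail ----|
 *
 * where *extent begins at the first address at or above the original base
 * that satisfies the requested alignment.
 */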

static extent_split_interior_result_t
extent_split_interior(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
    /* The result of splitting, in case of success. */
    extent_t **extent, extent_t **lead, extent_t **trail,
    /* The mess to clean up, in case of error. */
    extent_t **to_leak, extent_t **to_salvage,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    szind_t szind, bool growing_retained) {
	size_t esize = size + pad;
	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
	assert(new_addr == NULL || leadsize == 0);
	if (extent_size_get(*extent) < leadsize + esize) {
		return extent_split_interior_cant_alloc;
	}
	size_t trailsize = extent_size_get(*extent) - leadsize - esize;

	*lead = NULL;
	*trail = NULL;
	*to_leak = NULL;
	*to_salvage = NULL;

	/* Split the lead. */
	if (leadsize != 0) {
		*lead = *extent;
		*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
		    *lead, leadsize, NSIZES, false, esize + trailsize, szind,
		    slab, growing_retained);
		if (*extent == NULL) {
			*to_leak = *lead;
			*lead = NULL;
			return extent_split_interior_error;
		}
	}

	/* Split the trail. */
	if (trailsize != 0) {
		*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
		    esize, szind, slab, trailsize, NSIZES, false,
		    growing_retained);
		if (*trail == NULL) {
			*to_leak = *extent;
			*to_salvage = *lead;
			*lead = NULL;
			*extent = NULL;
			return extent_split_interior_error;
		}
	}

	if (leadsize == 0 && trailsize == 0) {
		/*
		 * Splitting causes szind to be set as a side effect, but no
		 * splitting occurred.
		 */
		extent_szind_set(*extent, szind);
		if (szind != NSIZES) {
			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
			    (uintptr_t)extent_addr_get(*extent), szind, slab);
			if (slab && extent_size_get(*extent) > PAGE) {
				rtree_szind_slab_update(tsdn, &extents_rtree,
				    rtree_ctx,
				    (uintptr_t)extent_past_get(*extent) -
				    (uintptr_t)PAGE, szind, slab);
			}
		}
	}

	return extent_split_interior_ok;
}

/*
 * This fulfills the indicated allocation request out of the given extent (which
 * the caller should have ensured was big enough).  If there's any unused space
 * before or after the resulting allocation, that space is given its own extent
 * and put back into extents.
 */
static extent_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    szind_t szind, extent_t *extent, bool growing_retained) {
	extent_t *lead;
	extent_t *trail;
	extent_t *to_leak;
	extent_t *to_salvage;

	extent_split_interior_result_t result = extent_split_interior(
	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
	    &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
	    growing_retained);

	if (result == extent_split_interior_ok) {
		if (lead != NULL) {
			extent_deactivate(tsdn, arena, extents, lead);
		}
		if (trail != NULL) {
			extent_deactivate(tsdn, arena, extents, trail);
		}
		return extent;
	} else {
		/*
		 * We should have picked an extent that was large enough to
		 * fulfill our allocation request.
		 */
		assert(result == extent_split_interior_error);
		if (to_salvage != NULL) {
			extent_deregister(tsdn, to_salvage);
		}
		if (to_leak != NULL) {
			void *leak = extent_base_get(to_leak);
			extent_deregister_no_gdump_sub(tsdn, to_leak);
			extents_leak(tsdn, arena, r_extent_hooks, extents,
			    to_leak, growing_retained);
			assert(extent_lock_from_addr(tsdn, rtree_ctx, leak)
			    == NULL);
		}
		return NULL;
	}
	unreachable();
}

/*
 * Tries to satisfy the given allocation request by reusing one of the extents
 * in the given extents_t.
 */
static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(new_addr == NULL || !slab);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
	    rtree_ctx, extents, new_addr, size, pad, alignment, slab,
	    growing_retained);
	if (extent == NULL) {
		return NULL;
	}

	extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, new_addr, size, pad, alignment, slab, szind, extent,
	    growing_retained);
	if (extent == NULL) {
		return NULL;
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
		    0, extent_size_get(extent), growing_retained)) {
			extent_record(tsdn, arena, r_extent_hooks, extents,
			    extent, growing_retained);
			return NULL;
		}
		extent_zeroed_set(extent, true);
	}

	if (extent_committed_get(extent)) {
		*commit = true;
	}
	if (extent_zeroed_get(extent)) {
		*zero = true;
	}

	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	assert(extent_state_get(extent) == extent_state_active);
	if (slab) {
		extent_slab_set(extent, slab);
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	if (*zero) {
		void *addr = extent_base_get(extent);
		size_t sz = extent_size_get(extent);
		if (!extent_zeroed_get(extent)) {
			if (pages_purge_forced(addr, sz)) {
				memset(addr, 0, sz);
			}
		} else if (config_debug) {
			size_t *p = (size_t *)(uintptr_t)addr;
			for (size_t i = 0; i < sz / sizeof(size_t); i++) {
				assert(p[i] == 0);
			}
		}
	}
	return extent;
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
 * advantage of this to avoid demanding zeroed extents, but taking advantage of
 * them if they are returned.
 */
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
	void *ret;

	assert(size != 0);
	assert(alignment != 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}
	/* mmap. */
	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
	    != NULL) {
		return ret;
	}
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}

	/* All strategies for allocation failed. */
	return NULL;
}

static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit) {
	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
	    zero, commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
	    ATOMIC_RELAXED));
	if (have_madvise_huge && ret) {
		pages_set_thp_state(ret, size);
	}
	return ret;
}

static void *
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	tsdn_t *tsdn;
	arena_t *arena;

	tsdn = tsdn_fetch();
	arena = arena_get(tsdn, arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);

	return extent_alloc_default_impl(tsdn, arena, new_addr, size,
	    alignment, zero, commit);
}

static void
extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
	if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
		/*
		 * The only legitimate case of customized extent hooks for a0 is
		 * hooks with no allocation activities.  One such example is to
		 * place metadata on pre-allocated resources such as huge pages.
		 * In that case, rely on reentrancy_level checks to catch
		 * infinite recursions.
		 */
		pre_reentrancy(tsd, NULL);
	} else {
		pre_reentrancy(tsd, arena);
	}
}

static void
extent_hook_post_reentrancy(tsdn_t *tsdn) {
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
	post_reentrancy(tsd);
}

/*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
 * virtual memory ranges retained by each arena.
 */
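/*
 * The growth series is indexed by extent_grow_next into the page-size-class
 * table, so with the default class spacing each successful growth step
 * requests an extent roughly 25% larger than the previous one, capped by
 * retain_grow_limit.
 */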
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
    bool slab, szind_t szind, bool *zero, bool *commit) {
	malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	size_t esize = size + pad;
	size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size_min < esize) {
		goto label_err;
	}
	/*
	 * Find the next extent size in the series that would be large enough to
	 * satisfy this request.
	 */
	pszind_t egn_skip = 0;
	size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	while (alloc_size < alloc_size_min) {
		egn_skip++;
		if (arena->extent_grow_next + egn_skip == NPSIZES) {
			/* Outside legal range. */
			goto label_err;
		}
		assert(arena->extent_grow_next + egn_skip < NPSIZES);
		alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	}

	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		goto label_err;
	}
	bool zeroed = false;
	bool committed = false;

	void *ptr;
	if (*r_extent_hooks == &extent_hooks_default) {
		ptr = extent_alloc_default_impl(tsdn, arena, NULL,
		    alloc_size, PAGE, &zeroed, &committed);
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
		    alloc_size, PAGE, &zeroed, &committed,
		    arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
	    arena_extent_sn_next(arena), extent_state_active, zeroed,
	    committed, true);
	if (ptr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		goto label_err;
	}

	if (extent_register_no_gdump_add(tsdn, extent)) {
		extents_leak(tsdn, arena, r_extent_hooks,
		    &arena->extents_retained, extent, true);
		goto label_err;
	}

	if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
		*zero = true;
	}
	if (extent_committed_get(extent)) {
		*commit = true;
	}

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *lead;
	extent_t *trail;
	extent_t *to_leak;
	extent_t *to_salvage;
	extent_split_interior_result_t result = extent_split_interior(
	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
	    &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
	    true);

	if (result == extent_split_interior_ok) {
		if (lead != NULL) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, lead, true);
		}
		if (trail != NULL) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, trail, true);
		}
	} else {
		/*
		 * We should have allocated a sufficiently large extent; the
		 * cant_alloc case should not occur.
		 */
		assert(result == extent_split_interior_error);
		if (to_salvage != NULL) {
			if (config_prof) {
				extent_gdump_add(tsdn, to_salvage);
			}
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, to_salvage, true);
		}
		if (to_leak != NULL) {
			extent_deregister_no_gdump_sub(tsdn, to_leak);
			extents_leak(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, to_leak, true);
		}
		goto label_err;
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
		    extent_size_get(extent), true)) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, extent, true);
			goto label_err;
		}
		extent_zeroed_set(extent, true);
	}

	/*
	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
	 * range.
	 */
	if (arena->extent_grow_next + egn_skip + 1 <=
	    arena->retain_grow_limit) {
		arena->extent_grow_next += egn_skip + 1;
	} else {
		arena->extent_grow_next = arena->retain_grow_limit;
	}
	/* All opportunities for failure are past. */
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);

	if (config_prof) {
		/* Adjust gdump stats now that extent is final size. */
		extent_gdump_add(tsdn, extent);
	}
	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	if (slab) {
		rtree_ctx_t rtree_ctx_fallback1;
		rtree_ctx_t *rtree_ctx1 = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback1);

		extent_slab_set(extent, true);
		extent_interior_register(tsdn, rtree_ctx1, extent, szind);
	}
	if (*zero && !extent_zeroed_get(extent)) {
		void *addr = extent_base_get(extent);
		size_t sz = extent_size_get(extent);
		if (pages_purge_forced(addr, sz)) {
			memset(addr, 0, sz);
		}
	}

	return extent;
label_err:
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
	return NULL;
}

static extent_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size != 0);
	assert(alignment != 0);

	malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);

	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
	    &arena->extents_retained, new_addr, size, pad, alignment, slab,
	    szind, zero, commit, true);
	if (extent != NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
		if (config_prof) {
			extent_gdump_add(tsdn, extent);
		}
	} else if (opt_retain && new_addr == NULL) {
		extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
		    pad, alignment, slab, szind, zero, commit);
		/* extent_grow_retained() always releases extent_grow_mtx. */
	} else {
		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
	}
	malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);

	return extent;
}

static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	size_t esize = size + pad;
	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		return NULL;
	}
	void *addr;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
		    alignment, zero, commit);
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
		    esize, alignment, zero, commit, arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}
	if (addr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		return NULL;
	}
	extent_init(extent, arena, addr, esize, slab, szind,
	    arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
	    true);
	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	if (extent_register(tsdn, extent)) {
		extents_leak(tsdn, arena, r_extent_hooks,
		    &arena->extents_retained, extent, false);
		return NULL;
	}

	return extent;
}

extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
	    new_addr, size, pad, alignment, slab, szind, zero, commit);
	if (extent == NULL) {
		if (opt_retain && new_addr != NULL) {
			/*
			 * When retain is enabled and new_addr is set, we do not
			 * attempt extent_alloc_wrapper_hard which does mmap
			 * that is very unlikely to succeed (unless it happens
			 * to be at the end).
			 */
			return NULL;
		}
		extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
		    new_addr, size, pad, alignment, slab, szind, zero, commit);
	}

	assert(extent == NULL || extent_dumpable_get(extent));
	return extent;
}

static bool
extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
    const extent_t *outer) {
	assert(extent_arena_get(inner) == arena);
	if (extent_arena_get(outer) != arena) {
		return false;
	}

	assert(extent_state_get(inner) == extent_state_active);
	if (extent_state_get(outer) != extents->state) {
		return false;
	}

	if (extent_committed_get(inner) != extent_committed_get(outer)) {
		return false;
	}

	return true;
}
1506
1507static bool
1508extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1509    extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
1510    bool growing_retained) {
1511	assert(extent_can_coalesce(arena, extents, inner, outer));
1512
1513	extent_activate_locked(tsdn, arena, extents, outer);
1514
1515	malloc_mutex_unlock(tsdn, &extents->mtx);
1516	bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
1517	    forward ? inner : outer, forward ? outer : inner, growing_retained);
1518	malloc_mutex_lock(tsdn, &extents->mtx);
1519
1520	if (err) {
1521		extent_deactivate_locked(tsdn, arena, extents, outer);
1522	}
1523
1524	return err;
1525}
1526
1527static extent_t *
1528extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
1529    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1530    extent_t *extent, bool *coalesced, bool growing_retained) {
1531	/*
1532	 * Continue attempting to coalesce until failure, to protect against
1533	 * races with other threads that are thwarted by this one.
1534	 */
1535	bool again;
1536	do {
1537		again = false;
1538
1539		/* Try to coalesce forward. */
1540		extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
1541		    extent_past_get(extent));
1542		if (next != NULL) {
1543			/*
1544			 * extents->mtx only protects against races for
1545			 * like-state extents, so call extent_can_coalesce()
1546			 * before releasing next's pool lock.
1547			 */
1548			bool can_coalesce = extent_can_coalesce(arena, extents,
1549			    extent, next);
1550
1551			extent_unlock(tsdn, next);
1552
1553			if (can_coalesce && !extent_coalesce(tsdn, arena,
1554			    r_extent_hooks, extents, extent, next, true,
1555			    growing_retained)) {
1556				if (extents->delay_coalesce) {
1557					/* Do minimal coalescing. */
1558					*coalesced = true;
1559					return extent;
1560				}
1561				again = true;
1562			}
1563		}
1564
1565		/* Try to coalesce backward. */
1566		extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
1567		    extent_before_get(extent));
1568		if (prev != NULL) {
1569			bool can_coalesce = extent_can_coalesce(arena, extents,
1570			    extent, prev);
1571			extent_unlock(tsdn, prev);
1572
1573			if (can_coalesce && !extent_coalesce(tsdn, arena,
1574			    r_extent_hooks, extents, extent, prev, false,
1575			    growing_retained)) {
1576				extent = prev;
1577				if (extents->delay_coalesce) {
1578					/* Do minimal coalescing. */
1579					*coalesced = true;
1580					return extent;
1581				}
1582				again = true;
1583			}
1584		}
1585	} while (again);
1586
1587	if (extents->delay_coalesce) {
1588		*coalesced = false;
1589	}
1590	return extent;
1591}
1592
1593/*
1594 * Does the metadata management portions of putting an unused extent into the
1595 * given extents_t (coalesces, deregisters slab interiors, the heap operations).
1596 */
1597static void
1598extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1599    extents_t *extents, extent_t *extent, bool growing_retained) {
1600	rtree_ctx_t rtree_ctx_fallback;
1601	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1602
1603	assert((extents_state_get(extents) != extent_state_dirty &&
1604	    extents_state_get(extents) != extent_state_muzzy) ||
1605	    !extent_zeroed_get(extent));
1606
1607	malloc_mutex_lock(tsdn, &extents->mtx);
1608	extent_hooks_assure_initialized(arena, r_extent_hooks);
1609
1610	extent_szind_set(extent, NSIZES);
1611	if (extent_slab_get(extent)) {
1612		extent_interior_deregister(tsdn, rtree_ctx, extent);
1613		extent_slab_set(extent, false);
1614	}
1615
1616	assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1617	    (uintptr_t)extent_base_get(extent), true) == extent);
1618
1619	if (!extents->delay_coalesce) {
1620		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
1621		    rtree_ctx, extents, extent, NULL, growing_retained);
1622	} else if (extent_size_get(extent) >= LARGE_MINCLASS) {
1623		/* Always coalesce large extents eagerly. */
1624		bool coalesced;
1625		size_t prev_size;
1626		do {
1627			prev_size = extent_size_get(extent);
1628			assert(extent_state_get(extent) == extent_state_active);
1629			extent = extent_try_coalesce(tsdn, arena,
1630			    r_extent_hooks, rtree_ctx, extents, extent,
1631			    &coalesced, growing_retained);
1632		} while (coalesced &&
1633		    extent_size_get(extent) >= prev_size + LARGE_MINCLASS);
1634	}
1635	extent_deactivate_locked(tsdn, arena, extents, extent);
1636
1637	malloc_mutex_unlock(tsdn, &extents->mtx);
1638}
1639
1640void
1641extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
1642	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
1643
1644	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1645	    WITNESS_RANK_CORE, 0);
1646
1647	if (extent_register(tsdn, extent)) {
1648		extents_leak(tsdn, arena, &extent_hooks,
1649		    &arena->extents_retained, extent, false);
1650		return;
1651	}
1652	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
1653}
1654
1655static bool
1656extent_dalloc_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		return extent_dalloc_mmap(addr, size);
	}
	return true;
}

static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	return extent_dalloc_default_impl(addr, size);
}

static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	bool err;

	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to deallocate. */
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_dalloc_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		err = ((*r_extent_hooks)->dalloc == NULL ||
		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena)));
		extent_hook_post_reentrancy(tsdn);
	}

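	/* On success, also return the extent_t metadata to the arena. */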
	if (!err) {
		extent_dalloc(tsdn, arena, extent);
	}

	return err;
}

void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/*
	 * Deregister first to avoid a race with other allocating threads, and
	 * reregister if deallocation fails.
	 */
	extent_deregister(tsdn, extent);
	if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
		return;
	}

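	/*
	 * Deallocation failed; reregister the extent, decommit or purge its
	 * pages as far as possible, and retain it rather than leak the mapping.
	 */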
	extent_reregister(tsdn, extent);
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	/* Try to decommit; purge if that fails. */
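	/*
	 * Decommit and forced purge both guarantee that the pages read as
	 * zeros the next time they are touched, so they leave the extent
	 * zeroed; lazy purge (and the muzzy state) makes no such guarantee.
	 */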
	bool zeroed;
	if (!extent_committed_get(extent)) {
		zeroed = true;
	} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
	    0, extent_size_get(extent))) {
		zeroed = true;
	} else if ((*r_extent_hooks)->purge_forced != NULL &&
	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena))) {
		zeroed = true;
	} else if (extent_state_get(extent) == extent_state_muzzy ||
	    ((*r_extent_hooks)->purge_lazy != NULL &&
	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena)))) {
		zeroed = false;
	} else {
		zeroed = false;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_zeroed_set(extent, zeroed);

	if (config_prof) {
		extent_gdump_sub(tsdn, extent);
	}

	extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
	    extent, false);
}

static void
extent_destroy_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		pages_unmap(addr, size);
	}
}

static void
extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	extent_destroy_default_impl(addr, size);
}

void
extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Deregister first to avoid a race with other allocating threads. */
	extent_deregister(tsdn, extent);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to destroy; silently fail otherwise. */
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		extent_destroy_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else if ((*r_extent_hooks)->destroy != NULL) {
		extent_hook_pre_reentrancy(tsdn, arena);
		(*r_extent_hooks)->destroy(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	extent_dalloc(tsdn, arena, extent);
}

static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

static bool
extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = ((*r_extent_hooks)->commit == NULL ||
	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
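	/* A successful commit (err == false) marks the extent as committed. */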
	extent_committed_set(extent, extent_committed_get(extent) || !err);
	return err;
}

bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
	    length, false);
}

static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = ((*r_extent_hooks)->decommit == NULL ||
	    (*r_extent_hooks)->decommit(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
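	/* A successful decommit (err == false) clears the committed flag. */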
	extent_committed_set(extent, extent_committed_get(extent) && err);
	return err;
}

#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}
#endif

static bool
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

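	/* Without a lazy-purge hook, report failure and leave the pages as-is. */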
	if ((*r_extent_hooks)->purge_lazy == NULL) {
		return true;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}

	return err;
}

bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
	    offset, length, false);
}

#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_forced((void *)((uintptr_t)addr +
	    (uintptr_t)offset), length);
}
#endif

static bool
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->purge_forced == NULL) {
		return true;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	return err;
}

bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
	    offset, length, false);
}

#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	return !maps_coalesce;
}
#endif

/*
 * Accepts the extent to split, and the characteristics of each side of the
 * split.  The 'a' parameters go with the 'lead' of the resulting pair of
 * extents (the lower addressed portion of the split), and the 'b' parameters go
 * with the trail (the higher addressed portion).  This makes 'extent' the lead,
 * and returns the trail (except in case of error).
 */
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained) {
	assert(extent_size_get(extent) == size_a + size_b);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->split == NULL) {
		return NULL;
	}

	extent_t *trail = extent_alloc(tsdn, arena);
	if (trail == NULL) {
		goto label_error_a;
	}

	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
	    extent_state_get(extent), extent_zeroed_get(extent),
	    extent_committed_get(extent), extent_dumpable_get(extent));

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
	{
		extent_t lead;

		extent_init(&lead, arena, extent_addr_get(extent), size_a,
		    slab_a, szind_a, extent_sn_get(extent),
		    extent_state_get(extent), extent_zeroed_get(extent),
		    extent_committed_get(extent), extent_dumpable_get(extent));

		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
		    true, &lead_elm_a, &lead_elm_b);
	}
	rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
	    &trail_elm_a, &trail_elm_b);

	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
	    || trail_elm_b == NULL) {
		goto label_error_b;
	}

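	/*
	 * Lock both the lead and the trail so that concurrent operations that
	 * lock extents observe the split as a single atomic transition.
	 */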
	extent_lock2(tsdn, extent, trail);

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->split(*r_extent_hooks,
	    extent_base_get(extent), size_a + size_b, size_a, size_b,
	    extent_committed_get(extent), arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	if (err) {
		goto label_error_c;
	}

	extent_size_set(extent, size_a);
	extent_szind_set(extent, szind_a);

	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
	    szind_a, slab_a);
	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
	    szind_b, slab_b);

	extent_unlock2(tsdn, extent, trail);

	return trail;
label_error_c:
	extent_unlock2(tsdn, extent, trail);
label_error_b:
	extent_dalloc(tsdn, arena, trail);
label_error_a:
	return NULL;
}

extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
	return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
	    szind_a, slab_a, size_b, szind_b, slab_b, false);
}

static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
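	/*
	 * Merging fails if the VM map cannot coalesce adjacent mappings, or if
	 * the DSS makes the two regions non-mergeable.
	 */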
	if (!maps_coalesce) {
		return true;
	}
	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
		return true;
	}

	return false;
}

#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	return extent_merge_default_impl(addr_a, addr_b);
}
#endif

static bool
extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->merge == NULL) {
		return true;
	}

	bool err;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_merge_default_impl(extent_base_get(a),
		    extent_base_get(b));
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		err = (*r_extent_hooks)->merge(*r_extent_hooks,
		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
		    extent_size_get(b), extent_committed_get(a),
		    arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	if (err) {
		return true;
	}

	/*
	 * The rtree writes must happen while all the relevant elements are
	 * owned, so the following code uses decomposed helper functions rather
	 * than extent_{,de}register() to do things in the right order.
	 */
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
	    &a_elm_b);
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
	    &b_elm_b);

	extent_lock2(tsdn, a, b);

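	/*
	 * Clear a's last-page element and, when b spans more than one rtree
	 * leaf element, b's first-page element; the remaining boundary
	 * elements are rewritten below to map to the merged extent.
	 */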
	if (a_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
		    NSIZES, false);
	}
	if (b_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
		    NSIZES, false);
	} else {
		b_elm_b = b_elm_a;
	}

	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
	extent_szind_set(a, NSIZES);
	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
	    extent_sn_get(a) : extent_sn_get(b));
	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));

	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);

	extent_unlock2(tsdn, a, b);

	extent_dalloc(tsdn, extent_arena_get(b), b);

	return false;
}

bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
	return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
}

bool
extent_boot(void) {
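	/*
	 * Initialize the global extent bookkeeping: the address-keyed extent
	 * rtree, the mutex pool protecting individual extent_t's, and DSS
	 * state when the DSS is supported.
	 */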
	if (rtree_new(&extents_rtree, true)) {
		return true;
	}

	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
	    WITNESS_RANK_EXTENT_POOL)) {
		return true;
	}

	if (have_dss) {
		extent_dss_boot();
	}

	return false;
}
