#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/util.h"

JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS

/******************************************************************************/
/* Data. */

/*
 * Define names for both uninitialized and initialized phases, so that
 * options and mallctl processing are straightforward.
 */
const char *percpu_arena_mode_names[] = {
	"percpu",
	"phycpu",
	"disabled",
	"percpu",
	"phycpu"
};
percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;

ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;

static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;

const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y)			\
		h,
		SMOOTHSTEP
#undef STEP
};
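
/*
 * Illustrative note (assumes the default "smoother" smoothstep variant with
 * SMOOTHSTEP_NSTEPS == 200 and SMOOTHSTEP_BFP == 24): h_steps[i] is roughly
 * 2^24 * y(x) at x = (i+1)/200, where y(x) = x^3 * (x * (6*x - 15) + 10)
 * rises smoothly from 0 to 1.  In the decay backlog below, newer epochs thus
 * get weights near 1 (pages may remain) and older epochs weights near 0
 * (pages must be purged).
 */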

static div_info_t arena_binind_div_info[SC_NBINS];

size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
static unsigned huge_arena_ind;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
    size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
    bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);

/******************************************************************************/

void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena_dss_prec_get(arena)];
	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
	*ndirty += extents_npages_get(&arena->extents_dirty);
	*nmuzzy += extents_npages_get(&arena->extents_muzzy);
}

void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    bin_stats_t *bstats, arena_stats_large_t *lstats,
    arena_stats_extents_t *estats) {
	cassert(config_stats);

	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
	    muzzy_decay_ms, nactive, ndirty, nmuzzy);

	size_t base_allocated, base_resident, base_mapped, metadata_thp;
	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
	    &base_mapped, &metadata_thp);

	arena_stats_lock(tsdn, &arena->stats);

	arena_stats_accum_zu(&astats->mapped, base_mapped
	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
	arena_stats_accum_zu(&astats->retained,
	    extents_npages_get(&arena->extents_retained) << LG_PAGE);

	atomic_store_zu(&astats->extent_avail,
	    atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
	    ATOMIC_RELAXED);

	arena_stats_accum_u64(&astats->decay_dirty.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.npurge));
	arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.nmadvise));
	arena_stats_accum_u64(&astats->decay_dirty.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.purged));

	arena_stats_accum_u64(&astats->decay_muzzy.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.npurge));
	arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.nmadvise));
	arena_stats_accum_u64(&astats->decay_muzzy.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.purged));

	arena_stats_accum_zu(&astats->base, base_allocated);
	arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
	arena_stats_accum_zu(&astats->resident, base_resident +
	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
	    extents_npages_get(&arena->extents_dirty) +
	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
	arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
	    &arena->stats.abandoned_vm, ATOMIC_RELAXED));

	for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
		uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nmalloc);
		arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
		arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);

		uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].ndalloc);
		arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
		arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);

		uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nrequests);
		arena_stats_accum_u64(&lstats[i].nrequests,
		    nmalloc + nrequests);
		arena_stats_accum_u64(&astats->nrequests_large,
		    nmalloc + nrequests);

		/* nfill == nmalloc for large currently. */
		arena_stats_accum_u64(&lstats[i].nfills, nmalloc);
		arena_stats_accum_u64(&astats->nfills_large, nmalloc);

		uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nflushes);
		arena_stats_accum_u64(&lstats[i].nflushes, nflush);
		arena_stats_accum_u64(&astats->nflushes_large, nflush);

		assert(nmalloc >= ndalloc);
		assert(nmalloc - ndalloc <= SIZE_T_MAX);
		size_t curlextents = (size_t)(nmalloc - ndalloc);
		lstats[i].curlextents += curlextents;
		arena_stats_accum_zu(&astats->allocated_large,
		    curlextents * sz_index2size(SC_NBINS + i));
	}

	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
		    retained_bytes;
		dirty = extents_nextents_get(&arena->extents_dirty, i);
		muzzy = extents_nextents_get(&arena->extents_muzzy, i);
		retained = extents_nextents_get(&arena->extents_retained, i);
		dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i);
		muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i);
		retained_bytes =
		    extents_nbytes_get(&arena->extents_retained, i);

		atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes,
		    ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes,
		    ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].retained_bytes, retained_bytes,
		    ATOMIC_RELAXED);
	}

	arena_stats_unlock(tsdn, &arena->stats);

	/* tcache_bytes counts currently cached bytes. */
	atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
	cache_bin_array_descriptor_t *descriptor;
	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
		szind_t i = 0;
		for (; i < SC_NBINS; i++) {
			cache_bin_t *tbin = &descriptor->bins_small[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
		for (; i < nhbins; i++) {
			cache_bin_t *tbin = &descriptor->bins_large[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
	}
	malloc_mutex_prof_read(tsdn,
	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
	    &arena->tcache_ql_mtx);
	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);

#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
    malloc_mutex_lock(tsdn, &arena->mtx);				\
    malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],		\
        &arena->mtx);							\
    malloc_mutex_unlock(tsdn, &arena->mtx);

	/* Gather per arena mutex profiling data. */
	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
	    arena_prof_mutex_extent_avail)
	READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
	    arena_prof_mutex_extents_dirty)
	READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
	    arena_prof_mutex_extents_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
	    arena_prof_mutex_extents_retained)
	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
	    arena_prof_mutex_decay_dirty)
	READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
	    arena_prof_mutex_decay_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
	    arena_prof_mutex_base)
#undef READ_ARENA_MUTEX_PROF_DATA

	nstime_copy(&astats->uptime, &arena->create_time);
	nstime_update(&astats->uptime);
	nstime_subtract(&astats->uptime, &arena->create_time);

	for (szind_t i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_stats_merge(tsdn, &bstats[i],
			    &arena->bins[i].bin_shards[j]);
		}
	}
}

void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
	    extent);
	if (arena_dirty_decay_ms_get(arena) == 0) {
		arena_decay_dirty(tsdn, arena, false, true);
	} else {
		arena_background_thread_inactivity_check(tsdn, arena, false);
	}
}

static void *
arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
	void *ret;
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	size_t regind;

	assert(extent_nfree_get(slab) > 0);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)extent_addr_get(slab) +
	    (uintptr_t)(bin_info->reg_size * regind));
	extent_nfree_dec(slab);
	return ret;
}

static void
arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
			   unsigned cnt, void** ptrs) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);

	assert(extent_nfree_get(slab) >= cnt);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
	for (unsigned i = 0; i < cnt; i++) {
		size_t regind = bitmap_sfu(slab_data->bitmap,
					   &bin_info->bitmap_info);
		*(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
		    (uintptr_t)(bin_info->reg_size * regind));
	}
#else
	unsigned group = 0;
	bitmap_t g = slab_data->bitmap[group];
	unsigned i = 0;
	while (i < cnt) {
		while (g == 0) {
			g = slab_data->bitmap[++group];
		}
		size_t shift = group << LG_BITMAP_GROUP_NBITS;
		size_t pop = popcount_lu(g);
		if (pop > (cnt - i)) {
			pop = cnt - i;
		}

		/*
		 * Load from memory locations only once, outside the
		 * hot loop below.
		 */
		uintptr_t base = (uintptr_t)extent_addr_get(slab);
		uintptr_t regsize = (uintptr_t)bin_info->reg_size;
		while (pop--) {
			size_t bit = cfs_lu(&g);
			size_t regind = shift + bit;
			*(ptrs + i) = (void *)(base + regsize * regind);

			i++;
		}
		slab_data->bitmap[group] = g;
	}
#endif
	extent_nfree_sub(slab, cnt);
}
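
/*
 * Worked example for the popcount path above (illustrative): if a bitmap
 * group g == 0x5 (binary 101), regions 0 and 2 of that group are free.
 * popcount_lu(g) == 2 bounds how many regions the group can supply; each
 * cfs_lu(&g) ("clear first set") call yields the lowest set bit (0, then 2)
 * and clears it, and shift + bit recovers the region index within the slab.
 */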

#ifndef JEMALLOC_JET
static
#endif
size_t
arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
	size_t diff, regind;

	/* Freeing a pointer outside the slab can cause assertion failure. */
	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
	    (uintptr_t)bin_infos[binind].reg_size == 0);

	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));

	/* Avoid doing division with a variable divisor. */
	regind = div_compute(&arena_binind_div_info[binind], diff);

	assert(regind < bin_infos[binind].nregs);

	return regind;
}
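
/*
 * Worked example (illustrative): for a bin with reg_size == 48, a pointer
 * 4800 bytes past the slab base gives diff == 4800, so regind == 4800 / 48 ==
 * 100.  div_compute() performs the division via a precomputed
 * multiply-and-shift (see div.h), which is cheaper than a hardware divide
 * with a variable divisor.
 */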

static void
arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
	szind_t binind = extent_szind_get(slab);
	const bin_info_t *bin_info = &bin_infos[binind];
	size_t regind = arena_slab_regind(slab, binind, ptr);

	assert(extent_nfree_get(slab) < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
	extent_nfree_inc(slab);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages) {
	atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
	assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
	atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
}

static void
arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < SC_LARGE_MINCLASS) {
		usize = SC_LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].nmalloc, 1);
}

static void
arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < SC_LARGE_MINCLASS) {
		usize = SC_LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].ndalloc, 1);
}

static void
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
    size_t usize) {
	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
	arena_large_malloc_stats_update(tsdn, arena, usize);
}

static bool
arena_may_have_muzzy(arena_t *arena) {
	return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0));
}

extent_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	szind_t szind = sz_size2index(usize);
	size_t mapped_add;
	bool commit = true;
	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
	    szind, zero, &commit);
	if (extent == NULL && arena_may_have_muzzy(arena)) {
		extent = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
		    false, szind, zero, &commit);
	}
	size_t size = usize + sz_large_pad;
	if (extent == NULL) {
		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
		    usize, sz_large_pad, alignment, false, szind, zero,
		    &commit);
		if (config_stats) {
			/*
			 * extent may be NULL on OOM, but in that case
			 * mapped_add isn't used below, so there's no need to
			 * conditionally set it to 0 here.
			 */
			mapped_add = size;
		}
	} else if (config_stats) {
		mapped_add = 0;
	}

	if (extent != NULL) {
		if (config_stats) {
			arena_stats_lock(tsdn, &arena->stats);
			arena_large_malloc_stats_update(tsdn, arena, usize);
			if (mapped_add != 0) {
				arena_stats_add_zu(tsdn, &arena->stats,
				    &arena->stats.mapped, mapped_add);
			}
			arena_stats_unlock(tsdn, &arena->stats);
		}
		arena_nactive_add(arena, size >> LG_PAGE);
	}

	return extent;
}

void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_dalloc_stats_update(tsdn, arena,
		    extent_usize_get(extent));
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}

void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = oldusize - usize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);
}

void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = usize - oldusize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);
}

static ssize_t
arena_decay_ms_read(arena_decay_t *decay) {
	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}

static void
arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
}

static void
arena_decay_deadline_init(arena_decay_t *decay) {
	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&decay->deadline, &decay->epoch);
	nstime_add(&decay->deadline, &decay->interval);
	if (arena_decay_ms_read(decay) > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
		    nstime_ns(&decay->interval)));
		nstime_add(&decay->deadline, &jitter);
	}
}
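
/*
 * Illustrative sketch of the resulting schedule: the deadline ends up at
 * epoch + interval + jitter, with jitter drawn uniformly from [0, interval)
 * while decay is active; presumably this keeps independent decay instances
 * from advancing their epochs, and thus purging, in lockstep.
 */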

static bool
arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
	return (nstime_compare(&decay->deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return npages_limit_backlog;
}
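
/*
 * Worked example (illustrative): if each of the SMOOTHSTEP_NSTEPS epochs
 * contributed a constant backlog of D pages, then sum == D * SUM(h_steps[i]),
 * and since the smoothstep weights average about 1/2 over the table, the
 * resulting limit is roughly D * SMOOTHSTEP_NSTEPS / 2 unpurged pages.
 */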

static void
arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
	size_t npages_delta = (current_npages > decay->nunpurged) ?
	    current_npages - decay->nunpurged : 0;
	decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;

	if (config_debug) {
		if (current_npages > decay->ceil_npages) {
			decay->ceil_npages = current_npages;
		}
		size_t npages_limit = arena_decay_backlog_npages_limit(decay);
		assert(decay->ceil_npages >= npages_limit);
		if (decay->ceil_npages > npages_limit) {
			decay->ceil_npages = npages_limit;
		}
	}
}

static void
arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
    size_t current_npages) {
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(decay->backlog, &decay->backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}

	arena_decay_backlog_update_last(decay, current_npages);
}

static void
arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, size_t current_npages, size_t npages_limit,
    bool is_background_thread) {
	if (current_npages > npages_limit) {
		arena_decay_to_limit(tsdn, arena, decay, extents, false,
		    npages_limit, current_npages - npages_limit,
		    is_background_thread);
	}
}

static void
arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
    size_t current_npages) {
	assert(arena_decay_deadline_reached(decay, time));

	nstime_t delta;
	nstime_copy(&delta, time);
	nstime_subtract(&delta, &decay->epoch);

	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &decay->interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&decay->epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(decay);

	/* Update the backlog. */
	arena_decay_backlog_update(decay, nadvance_u64, current_npages);
}
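
/*
 * Example (illustrative): with interval == 50ms, if 175ms have elapsed since
 * decay->epoch then nadvance_u64 == 175 / 50 == 3; the epoch advances by
 * exactly 3 intervals (150ms) and the remaining 25ms carry toward the next
 * deadline.
 */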

static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, const nstime_t *time, bool is_background_thread) {
	size_t current_npages = extents_npages_get(extents);
	arena_decay_epoch_advance_helper(decay, time, current_npages);

	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
	/* arena_decay_try_purge() may drop decay->mtx; finish logging first. */
	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
	    current_npages;

	if (!background_thread_enabled() || is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    current_npages, npages_limit, is_background_thread);
	}
}

static void
arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
	arena_decay_ms_write(decay, decay_ms);
	if (decay_ms > 0) {
		nstime_init(&decay->interval, (uint64_t)decay_ms *
		    KQU(1000000));
		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&decay->epoch, 0);
	nstime_update(&decay->epoch);
	decay->jitter_state = (uint64_t)(uintptr_t)decay;
	arena_decay_deadline_init(decay);
	decay->nunpurged = 0;
	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}
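
/*
 * Example (illustrative, assuming SMOOTHSTEP_NSTEPS == 200): decay_ms ==
 * 10000 yields an interval of 10000ms / 200 == 50ms, i.e. the decay curve is
 * sampled in 200 epochs spread across the configured 10 second horizon.
 */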

static bool
arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
    arena_stats_decay_t *stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
			assert(((char *)decay)[i] == 0);
		}
		decay->ceil_npages = 0;
	}
	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	decay->purging = false;
	arena_decay_reinit(decay, decay_ms);
	/* Memory is zeroed, so there is no need to clear stats. */
	if (config_stats) {
		decay->stats = stats;
	}
	return false;
}

static bool
arena_decay_ms_valid(ssize_t decay_ms) {
	if (decay_ms < -1) {
		return false;
	}
	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
	    KQU(1000)) {
		return true;
	}
	return false;
}
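
/*
 * For reference (summary of the checks above): -1 disables decay, 0 requests
 * immediate purging, and any positive decay_ms is accepted as long as the
 * equivalent duration still fits in nstime_t (the NSTIME_SEC_MAX bound).
 */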

static bool
arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread) {
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	/* Purge all or nothing if the option is disabled. */
	ssize_t decay_ms = arena_decay_ms_read(decay);
	if (decay_ms <= 0) {
		if (decay_ms == 0) {
			arena_decay_to_limit(tsdn, arena, decay, extents, false,
			    0, extents_npages_get(extents),
			    is_background_thread);
		}
		return false;
	}

	nstime_t time;
	nstime_init(&time, 0);
	nstime_update(&time);
	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
	    > 0)) {
		/*
		 * Time went backwards.  Move the epoch back in time and
		 * generate a new deadline, with the expectation that time
		 * typically flows forward for long enough periods of time that
		 * epochs complete.  Unfortunately, this strategy is susceptible
		 * to clock jitter triggering premature epoch advances, but
		 * clock jitter estimation and compensation isn't feasible here
		 * because calls into this code are event-driven.
		 */
		nstime_copy(&decay->epoch, &time);
		arena_decay_deadline_init(decay);
	} else {
		/* Verify that time does not go backwards. */
		assert(nstime_compare(&decay->epoch, &time) <= 0);
	}

	/*
	 * If the deadline has been reached, advance to the current epoch and
	 * purge to the new limit if necessary.  Note that dirty pages created
	 * during the current epoch are not subject to purge until a future
	 * epoch, so purging only happens during epoch advances or when
	 * triggered by a background thread (as a scheduled event).
	 */
	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
	if (advance_epoch) {
		arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
		    is_background_thread);
	} else if (is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    extents_npages_get(extents),
		    arena_decay_backlog_npages_limit(decay),
		    is_background_thread);
	}

	return advance_epoch;
}

static ssize_t
arena_decay_ms_get(arena_decay_t *decay) {
	return arena_decay_ms_read(decay);
}

ssize_t
arena_dirty_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_dirty);
}

ssize_t
arena_muzzy_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_muzzy);
}

static bool
arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_ms changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_reinit(decay, decay_ms);
	arena_maybe_decay(tsdn, arena, decay, extents, false);
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return false;
}

bool
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, decay_ms);
}

bool
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, decay_ms);
}

static size_t
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
	size_t npages_decay_max, extent_list_t *decay_extents) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Stash extents according to npages_limit. */
	size_t nstashed = 0;
	extent_t *extent;
	while (nstashed < npages_decay_max &&
	    (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
	    npages_limit)) != NULL) {
		extent_list_append(decay_extents, extent);
		nstashed += extent_size_get(extent) >> LG_PAGE;
	}
	return nstashed;
}

static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
    bool all, extent_list_t *decay_extents, bool is_background_thread) {
	size_t nmadvise, nunmapped;
	size_t npurged;

	if (config_stats) {
		nmadvise = 0;
		nunmapped = 0;
	}
	npurged = 0;

	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	for (extent_t *extent = extent_list_first(decay_extents); extent !=
	    NULL; extent = extent_list_first(decay_extents)) {
		if (config_stats) {
			nmadvise++;
		}
		size_t npages = extent_size_get(extent) >> LG_PAGE;
		npurged += npages;
		extent_list_remove(decay_extents, extent);
		switch (extents_state_get(extents)) {
		case extent_state_active:
			not_reached();
		case extent_state_dirty:
			if (!all && muzzy_decay_ms != 0 &&
			    !extent_purge_lazy_wrapper(tsdn, arena,
			    r_extent_hooks, extent, 0,
			    extent_size_get(extent))) {
				extents_dalloc(tsdn, arena, r_extent_hooks,
				    &arena->extents_muzzy, extent);
				arena_background_thread_inactivity_check(tsdn,
				    arena, is_background_thread);
				break;
			}
			/* Fall through. */
		case extent_state_muzzy:
			extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
			    extent);
			if (config_stats) {
				nunmapped += npages;
			}
			break;
		case extent_state_retained:
		default:
			not_reached();
		}
	}

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
		    1);
		arena_stats_add_u64(tsdn, &arena->stats,
		    &decay->stats->nmadvise, nmadvise);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
		    npurged);
		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
		    nunmapped << LG_PAGE);
		arena_stats_unlock(tsdn, &arena->stats);
	}

	return npurged;
}

/*
 * npages_limit: Purge until at most npages_limit pages remain, i.e. preserve
 * the invariant (extents_npages_get(extents) >= npages_limit).
 * npages_decay_max: Decay at most npages_decay_max pages per call.  This upper
 * bound is needed to prevent unbounded work (namely in the stashing phase),
 * since otherwise new pages could be added to extents during the current decay
 * run faster than they are purged, and the purging thread would never finish.
 */
static void
arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
    bool is_background_thread) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 1);
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	if (decay->purging) {
		return;
	}
	decay->purging = true;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	extent_hooks_t *extent_hooks = extent_hooks_get(arena);

	extent_list_t decay_extents;
	extent_list_init(&decay_extents);

	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
	    npages_limit, npages_decay_max, &decay_extents);
	if (npurge != 0) {
		size_t npurged = arena_decay_stashed(tsdn, arena,
		    &extent_hooks, decay, extents, all, &decay_extents,
		    is_background_thread);
		assert(npurged == npurge);
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	decay->purging = false;
}

static bool
arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread, bool all) {
	if (all) {
		malloc_mutex_lock(tsdn, &decay->mtx);
		arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
		    extents_npages_get(extents), is_background_thread);
		malloc_mutex_unlock(tsdn, &decay->mtx);

		return false;
	}

	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* No need to wait if another thread is in progress. */
		return true;
	}

	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
	    is_background_thread);
	size_t npages_new;
	if (epoch_advanced) {
		/* Backlog is updated on epoch advance. */
		npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
	}
	malloc_mutex_unlock(tsdn, &decay->mtx);

	if (have_background_thread && background_thread_enabled() &&
	    epoch_advanced && !is_background_thread) {
		background_thread_interval_check(tsdn, arena, decay,
		    npages_new);
	}

	return false;
}

static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, is_background_thread, all);
}

static bool
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, is_background_thread, all);
}

void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
		return;
	}
	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}

static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
}

static void
arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) > 0);
	extent_heap_insert(&bin->slabs_nonfull, slab);
	if (config_stats) {
		bin->stats.nonfull_slabs++;
	}
}

static void
arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
	extent_heap_remove(&bin->slabs_nonfull, slab);
	if (config_stats) {
		bin->stats.nonfull_slabs--;
	}
}

static extent_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
	if (slab == NULL) {
		return NULL;
	}
	if (config_stats) {
		bin->stats.reslabs++;
		bin->stats.nonfull_slabs--;
	}
	return slab;
}

static void
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) == 0);
	/*
	 *  Tracking extents is required by arena_reset, which is not allowed
	 *  for auto arenas.  Bypass this step to avoid touching the extent
	 *  linkage (often results in cache misses) for auto arenas.
	 */
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_append(&bin->slabs_full, slab);
}

static void
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_remove(&bin->slabs_full, slab);
}

static void
arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
	extent_t *slab;

	malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	if (bin->slabcur != NULL) {
		slab = bin->slabcur;
		bin->slabcur = NULL;
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	}
	while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	}
	for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
	     slab = extent_list_first(&bin->slabs_full)) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	}
	if (config_stats) {
		bin->stats.curregs = 0;
		bin->stats.curslabs = 0;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}

void
arena_reset(tsd_t *tsd, arena_t *arena) {
	/*
	 * Locking in this function is unintuitive.  The caller guarantees that
	 * no concurrent operations are happening in this arena, but there are
	 * still reasons that some locking is necessary:
	 *
	 * - Some of the functions in the transitive closure of calls assume
	 *   appropriate locks are held, and in some cases these locks are
	 *   temporarily dropped to avoid lock order reversal or deadlock due to
	 *   reentry.
	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
	 *   strictly speaking this is a "concurrent operation", disallowing
	 *   stats refreshes would impose an inconvenient burden.
	 */

	/* Large allocations. */
	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);

	for (extent_t *extent = extent_list_first(&arena->large); extent !=
	    NULL; extent = extent_list_first(&arena->large)) {
		void *ptr = extent_base_get(extent);
		size_t usize;

		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
		alloc_ctx_t alloc_ctx;
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
		assert(alloc_ctx.szind != SC_NSIZES);

		if (config_stats || (config_prof && opt_prof)) {
			usize = sz_index2size(alloc_ctx.szind);
			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		}
		/* Remove large allocation from prof sample set. */
		if (config_prof && opt_prof) {
			prof_free(tsd, ptr, usize, &alloc_ctx);
		}
		large_dalloc(tsd_tsdn(tsd), extent);
		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);

	/* Bins. */
	for (unsigned i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			arena_bin_reset(tsd, arena,
			    &arena->bins[i].bin_shards[j]);
		}
	}

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
}

static void
arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
	/*
	 * Iterate over the retained extents and destroy them.  This gives the
	 * extent allocator underlying the extent hooks an opportunity to unmap
	 * all retained memory without having to keep its own metadata
	 * structures.  In practice, virtual memory for dss-allocated extents is
	 * leaked here, so best practice is to avoid dss for arenas to be
	 * destroyed, or provide custom extent hooks that track retained
	 * dss-based extents for later reuse.
	 */
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	extent_t *extent;
	while ((extent = extents_evict(tsdn, arena, &extent_hooks,
	    &arena->extents_retained, 0)) != NULL) {
		extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
	}
}

void
arena_destroy(tsd_t *tsd, arena_t *arena) {
	assert(base_ind_get(arena->base) >= narenas_auto);
	assert(arena_nthreads_get(arena, false) == 0);
	assert(arena_nthreads_get(arena, true) == 0);

	/*
	 * No allocations have occurred since arena_reset() was called.
	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
	 * extents, so only retained extents may remain.
	 */
	assert(extents_npages_get(&arena->extents_dirty) == 0);
	assert(extents_npages_get(&arena->extents_muzzy) == 0);

	/* Deallocate retained memory. */
	arena_destroy_retained(tsd_tsdn(tsd), arena);

	/*
	 * Remove the arena pointer from the arenas array.  We rely on the fact
	 * that there is no way for the application to get a dirty read from the
	 * arenas array unless there is an inherent race in the application
	 * involving access of an arena being concurrently destroyed.  The
	 * application must synchronize knowledge of the arena's validity, so as
	 * long as we use an atomic write to update the arenas array, the
	 * application will get a clean read any time after it synchronizes
	 * knowledge that the arena is no longer valid.
	 */
	arena_set(base_ind_get(arena->base), NULL);

	/*
	 * Destroy the base allocator, which manages all metadata ever mapped by
	 * this arena.
	 */
	base_delete(tsd_tsdn(tsd), arena->base);
}

static extent_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
    szind_t szind) {
	extent_t *slab;
	bool zero, commit;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	zero = false;
	commit = true;
	slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
	    bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);

	if (config_stats && slab != NULL) {
		arena_stats_mapped_add(tsdn, &arena->stats,
		    bin_info->slab_size);
	}

	return slab;
}

static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
    const bin_info_t *bin_info) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	szind_t szind = sz_size2index(bin_info->reg_size);
	bool zero = false;
	bool commit = true;
	extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
	    binind, &zero, &commit);
	if (slab == NULL && arena_may_have_muzzy(arena)) {
		slab = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
		    true, binind, &zero, &commit);
	}
	if (slab == NULL) {
		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
		    bin_info, szind);
		if (slab == NULL) {
			return NULL;
		}
	}
	assert(extent_slab_get(slab));

	/* Initialize slab internals. */
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);

	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);

	return slab;
}

static extent_t *
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind, unsigned binshard) {
	extent_t *slab;
	const bin_info_t *bin_info;

	/* Look for a usable slab. */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}
	/* No existing slabs have any space available. */

	bin_info = &bin_infos[binind];

	/* Allocate a new slab. */
	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info);
	/********************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (slab != NULL) {
		if (config_stats) {
			bin->stats.nslabs++;
			bin->stats.curslabs++;
		}
		return slab;
	}

	/*
	 * arena_slab_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}

	return NULL;
}

/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind, unsigned binshard) {
	const bin_info_t *bin_info;
	extent_t *slab;

	bin_info = &bin_infos[binind];
	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}
	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard);
	if (bin->slabcur != NULL) {
		/*
		 * Another thread updated slabcur while this one ran without the
		 * bin lock in arena_bin_nonfull_slab_get().
		 */
		if (extent_nfree_get(bin->slabcur) > 0) {
			void *ret = arena_slab_reg_alloc(bin->slabcur,
			    bin_info);
			if (slab != NULL) {
				/*
				 * arena_slab_alloc() may have allocated slab,
				 * or it may have been pulled from
				 * slabs_nonfull.  Therefore it is unsafe to
				 * make any assumptions about how slab has
				 * previously been used, and
				 * arena_bin_lower_slab() must be called, as if
				 * a region were just deallocated from the slab.
				 */
				if (extent_nfree_get(slab) == bin_info->nregs) {
					arena_dalloc_bin_slab(tsdn, arena, slab,
					    bin);
				} else {
					arena_bin_lower_slab(tsdn, arena, slab,
					    bin);
				}
			}
			return ret;
		}

		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}

	if (slab == NULL) {
		return NULL;
	}
	bin->slabcur = slab;

	assert(extent_nfree_get(bin->slabcur) > 0);

	return arena_slab_reg_alloc(slab, bin_info);
}

/* Choose a bin shard and return the locked bin. */
bin_t *
arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    unsigned *binshard) {
	bin_t *bin;
	if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
		*binshard = 0;
	} else {
		*binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
	}
	assert(*binshard < bin_infos[binind].n_shards);
	bin = &arena->bins[binind].bin_shards[*binshard];
	malloc_mutex_lock(tsdn, &bin->lock);

	return bin;
}

void
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
	unsigned i, nfill, cnt;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
		prof_idump(tsdn);
	}

	unsigned binshard;
	bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);

	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
		extent_t *slab;
		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
		    0) {
			unsigned tofill = nfill - i;
			cnt = tofill < extent_nfree_get(slab) ?
				tofill : extent_nfree_get(slab);
			arena_slab_reg_alloc_batch(
			   slab, &bin_infos[binind], cnt,
			   tbin->avail - nfill + i);
		} else {
			cnt = 1;
			void *ptr = arena_bin_malloc_hard(tsdn, arena, bin,
			    binind, binshard);
			/*
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved just before tbin->avail before bailing out.
			 */
			if (ptr == NULL) {
				if (i > 0) {
					memmove(tbin->avail - i,
						tbin->avail - nfill,
						i * sizeof(void *));
				}
				break;
			}
			/* Insert such that low regions get used first. */
			*(tbin->avail - nfill + i) = ptr;
		}
		if (config_fill && unlikely(opt_junk_alloc)) {
			for (unsigned j = 0; j < cnt; j++) {
				void* ptr = *(tbin->avail - nfill + i + j);
				arena_alloc_junk_small(ptr, &bin_infos[binind],
							true);
			}
		}
	}
	if (config_stats) {
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.curregs += i;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	tbin->ncached = i;
	arena_decay_tick(tsdn, arena);
}
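
/*
 * Layout sketch for the fill loop above (illustrative): tbin->avail points
 * just past the cache slots and slots are filled bottom-up, so element k of a
 * run of nfill lives at tbin->avail - nfill + k.  On OOM after i successes,
 * the i valid pointers are slid to occupy tbin->avail - i through
 * tbin->avail - 1, matching the final tbin->ncached == i.
 */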

void
arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
	if (!zero) {
		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
	}
}

static void
arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
    arena_dalloc_junk_small_impl;

static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
	void *ret;
	bin_t *bin;
	size_t usize;
	extent_t *slab;

	assert(binind < SC_NBINS);
	usize = sz_index2size(binind);
	unsigned binshard;
	bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);

	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
		ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
	} else {
		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
	}

	if (ret == NULL) {
		malloc_mutex_unlock(tsdn, &bin->lock);
		return NULL;
	}

	if (config_stats) {
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &bin_infos[binind], false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &bin_infos[binind],
			    true);
		}
		memset(ret, 0, usize);
	}

	arena_decay_tick(tsdn, arena);
	return ret;
}

void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero) {
	assert(!tsdn_null(tsdn) || arena != NULL);

	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size);
	}
	if (unlikely(arena == NULL)) {
		return NULL;
	}

	if (likely(size <= SC_SMALL_MAXCLASS)) {
		return arena_malloc_small(tsdn, arena, ind, zero);
	}
	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
}

void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache) {
	void *ret;

	if (usize <= SC_SMALL_MAXCLASS
	    && (alignment < PAGE
	    || (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special slab placement. */
		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	} else {
		if (likely(alignment <= CACHELINE)) {
			ret = large_malloc(tsdn, arena, usize, zero);
		} else {
			ret = large_palloc(tsdn, arena, usize, alignment, zero);
		}
	}
	return ret;
}

void
arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
	assert(usize <= SC_SMALL_MAXCLASS);

	if (config_opt_safety_checks) {
		safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
	}

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	arena_t *arena = extent_arena_get(extent);

	szind_t szind = sz_size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    szind, false);

	prof_accum_cancel(tsdn, &arena->prof_accum, usize);

	assert(isalloc(tsdn, ptr) == usize);
}
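
/*
 * Summary sketch (illustrative): a sampled small allocation is carved from an
 * SC_LARGE_MINCLASS extent, and promotion above rewrites the extent's szind
 * and the rtree entry to the small usize, so isalloc() reports usize from
 * then on.  arena_prof_demote() below restores SC_NBINS (the first large
 * index) before the extent is deallocated as large.
 */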
1579
1580static size_t
1581arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
1582	cassert(config_prof);
1583	assert(ptr != NULL);
1584
1585	extent_szind_set(extent, SC_NBINS);
1586	rtree_ctx_t rtree_ctx_fallback;
1587	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1588	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
1589	    SC_NBINS, false);
1590
1591	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
1592
1593	return SC_LARGE_MINCLASS;
1594}
1595
1596void
1597arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
1598    bool slow_path) {
1599	cassert(config_prof);
1600	assert(opt_prof);
1601
1602	extent_t *extent = iealloc(tsdn, ptr);
1603	size_t usize = extent_usize_get(extent);
1604	size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
1605	if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
1606		/*
1607		 * Currently, we only do redzoning for small sampled
1608		 * allocations.
1609		 */
1610		assert(bumped_usize == SC_LARGE_MINCLASS);
1611		safety_check_verify_redzone(ptr, usize, bumped_usize);
1612	}
1613	if (bumped_usize <= tcache_maxclass && tcache != NULL) {
1614		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
1615		    sz_size2index(bumped_usize), slow_path);
1616	} else {
1617		large_dalloc(tsdn, extent);
1618	}
1619}

static void
arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
	/* Dissociate slab from bin. */
	if (slab == bin->slabcur) {
		bin->slabcur = NULL;
	} else {
		szind_t binind = extent_szind_get(slab);
		const bin_info_t *bin_info = &bin_infos[binind];

		/*
		 * The following block's conditional is necessary because if the
		 * slab only contains one region, then it never gets inserted
		 * into the non-full slabs heap.
		 */
		if (bin_info->nregs == 1) {
			arena_bin_slabs_full_remove(arena, bin, slab);
		} else {
			arena_bin_slabs_nonfull_remove(bin, slab);
		}
	}
}

static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(slab != bin->slabcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	arena_slab_dalloc(tsdn, arena, slab);
	/******************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats) {
		bin->stats.curslabs--;
	}
}

static void
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(extent_nfree_get(slab) > 0);

	/*
	 * Make sure that if bin->slabcur is non-NULL, it refers to the
	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
	 * than proactively keeping it pointing at the oldest/lowest non-full
	 * slab.
	 */
	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
		/* Switch slabcur. */
		if (extent_nfree_get(bin->slabcur) > 0) {
			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
		} else {
			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		}
		bin->slabcur = slab;
		if (config_stats) {
			bin->stats.reslabs++;
		}
	} else {
		arena_bin_slabs_nonfull_insert(bin, slab);
	}
}

static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind, extent_t *slab, void *ptr, bool junked) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	const bin_info_t *bin_info = &bin_infos[binind];

	if (!junked && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, bin_info);
	}

	arena_slab_reg_dalloc(slab, slab_data, ptr);
	unsigned nfree = extent_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dissociate_bin_slab(arena, slab, bin);
		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
	} else if (nfree == 1 && slab != bin->slabcur) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		arena_bin_lower_slab(tsdn, arena, slab, bin);
	}

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}
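
/*
 * Summary of the slab transitions performed above (nregs = regions per
 * slab, nfree = free regions after this deallocation):
 *
 *	nfree == nregs:			slab is now empty; dissociate it
 *					from the bin and return it to the
 *					extent layer.
 *	nfree == 1 && slab != slabcur:	slab just left the full set; move
 *					it to the non-full heap (or make it
 *					slabcur) via arena_bin_lower_slab().
 *	otherwise:			no structural change.
 */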

void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind, extent_t *extent, void *ptr) {
	arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
	    true);
}

static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
	szind_t binind = extent_szind_get(extent);
	unsigned binshard = extent_binshard_get(extent);
	bin_t *bin = &arena->bins[binind].bin_shards[binshard];

	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
	    false);
	malloc_mutex_unlock(tsdn, &bin->lock);
}

void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
	extent_t *extent = iealloc(tsdn, ptr);
	arena_t *arena = extent_arena_get(extent);

	arena_dalloc_bin(tsdn, arena, extent, ptr);
	arena_decay_tick(tsdn, arena);
}

bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero, size_t *newsize) {
	bool ret;
	/* Callers with non-zero extra must already have clamped extra. */
	assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);

	extent_t *extent = iealloc(tsdn, ptr);
	if (unlikely(size > SC_LARGE_MAXCLASS)) {
		ret = true;
		goto done;
	}

	size_t usize_min = sz_s2u(size);
	size_t usize_max = sz_s2u(size + extra);
	if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min
	    <= SC_SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
		    oldsize);
		if ((usize_max > SC_SMALL_MAXCLASS
		    || sz_size2index(usize_max) != sz_size2index(oldsize))
		    && (size > oldsize || usize_max < oldsize)) {
			ret = true;
			goto done;
		}

		arena_decay_tick(tsdn, extent_arena_get(extent));
		ret = false;
	} else if (oldsize >= SC_LARGE_MINCLASS
	    && usize_max >= SC_LARGE_MINCLASS) {
		ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
		    zero);
	} else {
		ret = true;
	}
done:
	assert(extent == iealloc(tsdn, ptr));
	*newsize = extent_usize_get(extent);

	return ret;
}
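
/*
 * Illustrative caller sketch (hypothetical; the real caller below is
 * arena_ralloc()).  A false return means the resize happened in place:
 *
 *	size_t newsize;
 *	if (!arena_ralloc_no_move(tsdn, ptr, oldsize, size, 0, false,
 *	    &newsize)) {
 *		// ptr is still valid; its usable size is now newsize.
 *	} else {
 *		// In-place resize impossible; allocate-copy-deallocate.
 *	}
 */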

static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	if (alignment == 0) {
		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	}
	usize = sz_sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
		return NULL;
	}
	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}

void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache,
    hook_ralloc_args_t *hook_args) {
	size_t usize = sz_s2u(size);
	if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
		return NULL;
	}

	if (likely(usize <= SC_SMALL_MAXCLASS)) {
		/* Try to avoid moving the allocation. */
		UNUSED size_t newsize;
		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero,
		    &newsize)) {
			hook_invoke_expand(hook_args->is_realloc
			    ? hook_expand_realloc : hook_expand_rallocx,
			    ptr, oldsize, usize, (uintptr_t)ptr,
			    hook_args->args);
			return ptr;
		}
	}

	if (oldsize >= SC_LARGE_MINCLASS
	    && usize >= SC_LARGE_MINCLASS) {
		return large_ralloc(tsdn, arena, ptr, usize,
		    alignment, zero, tcache, hook_args);
	}

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and copying.
	 */
	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero, tcache);
	if (ret == NULL) {
		return NULL;
	}

	hook_invoke_alloc(hook_args->is_realloc
	    ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
	    hook_args->args);
	hook_invoke_dalloc(hook_args->is_realloc
	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);

	/*
	 * Junk/zero-filling were already done by
	 * ipalloc()/arena_malloc().
	 */
	size_t copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return ret;
}

dss_prec_t
arena_dss_prec_get(arena_t *arena) {
	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}

bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}
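
/*
 * Example (documented interfaces): dss precedence is normally driven via
 * malloc_conf at startup or mallctl at runtime, e.g.:
 *
 *	MALLOC_CONF="dss:primary" ./app
 *
 *	const char *dss = "secondary";
 *	mallctl("arena.0.dss", NULL, NULL, (void *)&dss, sizeof(dss));
 *
 * On builds without sbrk(2) support (!have_dss), any setting other than
 * "disabled" fails, hence the early return above.
 */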

ssize_t
arena_dirty_decay_ms_default_get(void) {
	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

ssize_t
arena_muzzy_decay_ms_default_get(void) {
	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}
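
/*
 * Example (documented interface): the defaults above seed newly created
 * arenas and can be changed at runtime, e.g. to purge dirty pages more
 * aggressively in arenas created from this point on:
 *
 *	ssize_t decay_ms = 1000;
 *	mallctl("arenas.dirty_decay_ms", NULL, NULL, (void *)&decay_ms,
 *	    sizeof(decay_ms));
 *
 * arena_decay_ms_valid() accepts -1 (never decay), 0 (purge immediately),
 * and positive millisecond values up to an internal cap.
 */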

bool
arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
    size_t *new_limit) {
	assert(opt_retain);

	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
	if (new_limit != NULL) {
		size_t limit = *new_limit;
		/* Grow no more than the new limit. */
		if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
			return true;
		}
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
	if (old_limit != NULL) {
		*old_limit = sz_pind2sz(arena->retain_grow_limit);
	}
	if (new_limit != NULL) {
		arena->retain_grow_limit = new_ind;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);

	return false;
}
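
/*
 * Example (documented interface): this function backs the
 * "arena.<i>.retain_grow_limit" mallctl.  A sketch capping arena 0's
 * extent growth at 32 MiB:
 *
 *	size_t old_limit, new_limit = (size_t)32 << 20;
 *	size_t sz = sizeof(old_limit);
 *	mallctl("arena.0.retain_grow_limit", (void *)&old_limit, &sz,
 *	    (void *)&new_limit, sizeof(new_limit));
 *
 * The stored limit is rounded down to a page-size class (the
 * sz_psz2ind(limit + 1) - 1 computation above), so reading it back may
 * yield a smaller value than was written.
 */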

unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}

void
arena_nthreads_inc(arena_t *arena, bool internal) {
	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

void
arena_nthreads_dec(arena_t *arena, bool internal) {
	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

size_t
arena_extent_sn_next(arena_t *arena) {
	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}

arena_t *
arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;
	base_t *base;
	unsigned i;

	if (ind == 0) {
		base = b0get();
	} else {
		base = base_new(tsdn, ind, extent_hooks);
		if (base == NULL) {
			return NULL;
		}
	}

	unsigned nbins_total = 0;
	for (i = 0; i < SC_NBINS; i++) {
		nbins_total += bin_infos[i].n_shards;
	}
	size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
	arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
	if (arena == NULL) {
		goto label_error;
	}

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	arena->last_thd = NULL;

	if (config_stats) {
		if (arena_stats_init(tsdn, &arena->stats)) {
			goto label_error;
		}

		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
	}

	if (config_prof) {
		if (prof_accum_init(tsdn, &arena->prof_accum)) {
			goto label_error;
		}
	}

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		atomic_store_zu(&arena->offset_state, config_debug ? ind :
		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
	}

	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);

	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
	    ATOMIC_RELAXED);

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);

	extent_list_init(&arena->large);
	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/*
	 * Delay coalescing for dirty extents despite the disruptive effect on
	 * memory layout for best-fit extent allocation, since cached extents
	 * are likely to be reused soon after deallocation, and the cost of
	 * merging/splitting extents is non-trivial.
	 */
	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
	    true)) {
		goto label_error;
	}
	/*
	 * Coalesce muzzy extents immediately, because operations on them are in
	 * the critical path much less often than for dirty extents.
	 */
	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
	    false)) {
		goto label_error;
	}
	/*
	 * Coalesce retained extents immediately, in part because they will
	 * never be evicted (and therefore there's no opportunity for delayed
	 * coalescing), but also because operations on retained extents are not
	 * in the critical path.
	 */
	if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
	    false)) {
		goto label_error;
	}

	if (arena_decay_init(&arena->decay_dirty,
	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
		goto label_error;
	}
	if (arena_decay_init(&arena->decay_muzzy,
	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
		goto label_error;
	}

	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
	arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS);
	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	extent_avail_new(&arena->extent_avail);
	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
	    WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/* Initialize bins. */
	uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
	atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
	for (i = 0; i < SC_NBINS; i++) {
		unsigned nshards = bin_infos[i].n_shards;
		arena->bins[i].bin_shards = (bin_t *)bin_addr;
		bin_addr += nshards * sizeof(bin_t);
		for (unsigned j = 0; j < nshards; j++) {
			bool err = bin_init(&arena->bins[i].bin_shards[j]);
			if (err) {
				goto label_error;
			}
		}
	}
	assert(bin_addr == (uintptr_t)arena + arena_size);

	arena->base = base;
	/* Set arena before creating background threads. */
	arena_set(ind, arena);

	nstime_init(&arena->create_time, 0);
	nstime_update(&arena->create_time);

	/* We don't support reentrancy for arena 0 bootstrapping. */
	if (ind != 0) {
		/*
		 * If we're here, then arena 0 already exists, so bootstrapping
		 * is done enough that we should have tsd.
		 */
		assert(!tsdn_null(tsdn));
		pre_reentrancy(tsdn_tsd(tsdn), arena);
		if (test_hooks_arena_new_hook) {
			test_hooks_arena_new_hook();
		}
		post_reentrancy(tsdn_tsd(tsdn));
	}

	return arena;
label_error:
	if (ind != 0) {
		base_delete(tsdn, base);
	}
	return NULL;
}
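
/*
 * Layout note (illustrative): arena_new() carves the arena header and all
 * bin shards out of a single base allocation:
 *
 *	[ arena_t | bin 0 shards (n_shards x bin_t) | bin 1 shards | ... ]
 *
 * Each arena->bins[i].bin_shards pointer aims into this trailing array,
 * and the assert on bin_addr verifies that exactly
 * sizeof(arena_t) + sizeof(bin_t) * nbins_total bytes were consumed.
 */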

arena_t *
arena_choose_huge(tsd_t *tsd) {
	/* huge_arena_ind can be 0 during init (will use a0). */
	if (huge_arena_ind == 0) {
		assert(!malloc_initialized());
	}

	arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false);
	if (huge_arena == NULL) {
		/* Create the huge arena on demand. */
		assert(huge_arena_ind != 0);
		huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true);
		if (huge_arena == NULL) {
			return NULL;
		}
		/*
		 * Purge eagerly for huge allocations, because: 1) the number
		 * of huge allocations is usually small, which means
		 * ticker-based decay is not reliable; and 2) less immediate
		 * reuse is expected for huge allocations.
		 */
		if (arena_dirty_decay_ms_default_get() > 0) {
			arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
		}
		if (arena_muzzy_decay_ms_default_get() > 0) {
			arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
		}
	}

	return huge_arena;
}

bool
arena_init_huge(void) {
	bool huge_enabled;

	/* The threshold should be a large size class. */
	if (opt_oversize_threshold > SC_LARGE_MAXCLASS ||
	    opt_oversize_threshold < SC_LARGE_MINCLASS) {
		opt_oversize_threshold = 0;
		oversize_threshold = SC_LARGE_MAXCLASS + PAGE;
		huge_enabled = false;
	} else {
		/* Reserve the index for the huge arena. */
		huge_arena_ind = narenas_total_get();
		oversize_threshold = opt_oversize_threshold;
		huge_enabled = true;
	}

	return huge_enabled;
}
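
/*
 * Example (documented interface): the huge arena machinery is enabled by
 * choosing an in-range threshold, e.g. routing all allocations of 4 MiB
 * and larger to the dedicated arena:
 *
 *	MALLOC_CONF="oversize_threshold:4194304" ./app
 *
 * An out-of-range value disables the feature; oversize_threshold is then
 * parked past SC_LARGE_MAXCLASS so the fast-path size comparison can
 * never trip.
 */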

bool
arena_is_huge(unsigned arena_ind) {
	if (huge_arena_ind == 0) {
		return false;
	}
	return (arena_ind == huge_arena_ind);
}

void
arena_boot(sc_data_t *sc_data) {
	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
	for (unsigned i = 0; i < SC_NBINS; i++) {
		sc_t *sc = &sc_data->sc[i];
		div_init(&arena_binind_div_info[i],
		    (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
	}
}
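
/*
 * A sketch of how the arena_binind_div_info table is consumed (this
 * mirrors the region-index computation elsewhere in this file; diff and
 * regind are illustrative names): div_init() precomputes a fixed-point
 * reciprocal of each bin's region size, so mapping a pointer to its
 * region index needs only a multiply and a shift instead of a divide:
 *
 *	size_t diff = (uintptr_t)ptr - (uintptr_t)extent_addr_get(slab);
 *	size_t regind = div_compute(&arena_binind_div_info[binind], diff);
 */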

void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
	if (config_stats) {
		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
	extents_prefork(tsdn, &arena->extents_dirty);
	extents_prefork(tsdn, &arena->extents_muzzy);
	extents_prefork(tsdn, &arena->extents_retained);
}

void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
}

void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
	base_prefork(tsdn, arena->base);
}

void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->large_mtx);
}

void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
	for (unsigned i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_prefork(tsdn, &arena->bins[i].bin_shards[j]);
		}
	}
}
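
/*
 * Sketch (simplified) of how the fork wrappers in jemalloc.c drive the
 * numbered hooks above: each prefork stage runs across all arenas before
 * the next stage begins, so every mutex is acquired in one fixed global
 * order:
 *
 *	for (stage = 0; stage < 8; stage++) {
 *		for (each initialized arena a) {
 *			arena_prefork<stage>(tsdn, a);
 *		}
 *	}
 *	// fork(); afterwards arena_postfork_parent() or
 *	// arena_postfork_child() releases/reinitializes the same mutexes
 *	// in reverse acquisition order.
 */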

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_postfork_parent(tsdn,
			    &arena->bins[i].bin_shards[j]);
		}
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
	extents_postfork_parent(tsdn, &arena->extents_dirty);
	extents_postfork_parent(tsdn, &arena->extents_muzzy);
	extents_postfork_parent(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, false);
	}
	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, true);
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
		if (tcache != NULL && tcache->arena == arena) {
			ql_elm_new(tcache, link);
			ql_tail_insert(&arena->tcache_ql, tcache, link);
			cache_bin_array_descriptor_init(
			    &tcache->cache_bin_array_descriptor,
			    tcache->bins_small, tcache->bins_large);
			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
			    &tcache->cache_bin_array_descriptor, link);
		}
	}

	for (i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]);
		}
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);
	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
	extents_postfork_child(tsdn, &arena->extents_dirty);
	extents_postfork_child(tsdn, &arena->extents_muzzy);
	extents_postfork_child(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
	}
}