#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t	arena_bin_info[NBINS];

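/*
 * Lookup table mapping small request sizes to bin indices.  Each
 * S2B_<delta>(bin) expansion below emits delta/8 copies of the bin index,
 * i.e. one table entry per 8 bytes of size class delta, so a small size can
 * be converted to its bin index with a simple table lookup.
 */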
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t	small_size2bin[] = {
#define	S2B_8(i)	i,
#define	S2B_16(i)	S2B_8(i) S2B_8(i)
#define	S2B_32(i)	S2B_16(i) S2B_16(i)
#define	S2B_64(i)	S2B_32(i) S2B_32(i)
#define	S2B_128(i)	S2B_64(i) S2B_64(i)
#define	S2B_256(i)	S2B_128(i) S2B_128(i)
#define	S2B_512(i)	S2B_256(i) S2B_256(i)
#define	S2B_1024(i)	S2B_512(i) S2B_512(i)
#define	S2B_2048(i)	S2B_1024(i) S2B_1024(i)
#define	S2B_4096(i)	S2B_2048(i) S2B_2048(i)
#define	S2B_8192(i)	S2B_4096(i) S2B_4096(i)
#define	SIZE_CLASS(bin, delta, size)					\
	S2B_##delta(bin)
	SIZE_CLASSES
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
#undef S2B_512
#undef S2B_1024
#undef S2B_2048
#undef S2B_4096
#undef S2B_8192
#undef SIZE_CLASS
};

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
    size_t pageind, size_t npages, bool maybe_adjac_pred,
    bool maybe_adjac_succ);
static void	arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
    size_t pageind, size_t npages, bool maybe_adjac_pred,
    bool maybe_adjac_succ);
static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, size_t binind, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t	*arena_run_alloc_helper(arena_t *arena, size_t size,
    bool large, size_t binind, bool zero);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
    size_t binind, bool zero);
static arena_chunk_t	*chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
    arena_chunk_t *chunk, void *arg);
static void	arena_purge(arena_t *arena, bool all);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void	arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t	*arena_bin_runs_first(arena_bin_t *bin);
static void	arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
static void	arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void	*arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void	arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin);
static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size);
static bool	arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool	arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
static size_t	bin_info_run_size_calc(arena_bin_info_t *bin_info,
    size_t min_run_size);
static void	bin_info_init(void);

/******************************************************************************/

static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
    u.rb_link, arena_run_comp)

static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	int ret;
	size_t a_size = a->bits & ~PAGE_MASK;
	size_t b_size = b->bits & ~PAGE_MASK;

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		uintptr_t a_mapelm, b_mapelm;

		if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
			a_mapelm = (uintptr_t)a;
		else {
			/*
			 * Treat keys as though they are lower than anything
			 * else.
			 */
			a_mapelm = 0;
		}
		b_mapelm = (uintptr_t)b;

		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
	}

	return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
    u.rb_link, arena_avail_comp)

static inline int
arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
{

	assert(a != NULL);
	assert(b != NULL);

	/*
	 * Short-circuit for self comparison.  The following comparison code
	 * would come to the same result, but at the cost of executing the slow
	 * path.
	 */
	if (a == b)
		return (0);

	/*
	 * Order such that chunks with higher fragmentation are "less than"
	 * those with lower fragmentation -- purging order is from "least" to
	 * "greatest".  Fragmentation is measured as:
	 *
	 *     mean current avail run size
	 *   --------------------------------
	 *   mean defragmented avail run size
	 *
	 *            navail
	 *         -----------
	 *         nruns_avail           nruns_avail-nruns_adjac
	 * = ========================= = -----------------------
	 *            navail                  nruns_avail
	 *    -----------------------
	 *    nruns_avail-nruns_adjac
	 *
	 * The following code multiplies away the denominator prior to
	 * comparison, in order to avoid division.
	 *
	 */
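	/*
	 * Illustrative (hypothetical) numbers: for a with nruns_avail=4,
	 * nruns_adjac=3 and b with nruns_avail=4, nruns_adjac=1, the code
	 * below computes a_val = (4-3)*4 = 4 and b_val = (4-1)*4 = 12;
	 * comparing a_val against b_val is then equivalent to comparing the
	 * ratios 1/4 and 3/4 without performing any division.
	 */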
	{
		size_t a_val = (a->nruns_avail - a->nruns_adjac) *
		    b->nruns_avail;
		size_t b_val = (b->nruns_avail - b->nruns_adjac) *
		    a->nruns_avail;

		if (a_val < b_val)
			return (1);
		if (a_val > b_val)
			return (-1);
	}
	/*
	 * Break ties by chunk address.  For fragmented chunks, report lower
	 * addresses as "lower", so that fragmentation reduction happens first
	 * at lower addresses.  However, use the opposite ordering for
	 * unfragmented chunks, in order to increase the chances of
	 * re-allocating dirty runs.
	 */
	{
		uintptr_t a_chunk = (uintptr_t)a;
		uintptr_t b_chunk = (uintptr_t)b;
		int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
		if (a->nruns_adjac == 0) {
			assert(b->nruns_adjac == 0);
			ret = -ret;
		}
		return (ret);
	}
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
    dirty_link, arena_chunk_dirty_comp)

static inline bool
arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
{
	bool ret;

	if (pageind-1 < map_bias)
		ret = false;
	else {
		ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
		assert(ret == false || arena_mapbits_dirty_get(chunk,
		    pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
	}
	return (ret);
}

static inline bool
arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
{
	bool ret;

	if (pageind+npages == chunk_npages)
		ret = false;
	else {
		assert(pageind+npages < chunk_npages);
		ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
		assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
		    != arena_mapbits_dirty_get(chunk, pageind+npages));
	}
	return (ret);
}

static inline bool
arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
{

	return (arena_avail_adjac_pred(chunk, pageind) ||
	    arena_avail_adjac_succ(chunk, pageind, npages));
}
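
/*
 * Note that an available run only counts as "adjacent" when it abuts another
 * unallocated run whose dirty state differs (the asserts in the predicates
 * above check this); neighbors with matching dirty state are coalesced by
 * arena_run_dalloc(), so nruns_adjac effectively counts clean/dirty
 * fragmentation boundaries within a chunk.
 */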

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));

	/*
	 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
	 * removed and reinserted even if the run to be inserted is clean.
	 */
	if (chunk->ndirty != 0)
		arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

	if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
		chunk->nruns_adjac++;
	if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
		chunk->nruns_adjac++;
	chunk->nruns_avail++;
	assert(chunk->nruns_avail > chunk->nruns_adjac);

	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
		arena->ndirty += npages;
		chunk->ndirty += npages;
	}
	if (chunk->ndirty != 0)
		arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

	arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
	    pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));

	/*
	 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
	 * removed and reinserted even if the run to be removed is clean.
	 */
	if (chunk->ndirty != 0)
		arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

	if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
		chunk->nruns_adjac--;
	if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
		chunk->nruns_adjac--;
	chunk->nruns_avail--;
	assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
	    == 0 && chunk->nruns_adjac == 0));

	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
		arena->ndirty -= npages;
		chunk->ndirty -= npages;
	}
	if (chunk->ndirty != 0)
		arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

	arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
	    pageind));
}

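/*
 * Allocate a region from run: find the first free region via the run's
 * bitmap, mark it allocated, and compute its address from reg0_offset and
 * reg_interval.
 */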
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	unsigned regind;
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree > 0);
	assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);

	regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	if (regind == run->nextind)
		run->nextind++;
	assert(regind < run->nextind);
	return (ret);
}

static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	unsigned regind = arena_run_regind(run, bin_info, ptr);
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - ((uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

static inline void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
	    LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

static inline void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
	    LG_PAGE)), PAGE);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
    size_t binind, bool zero)
{
	arena_chunk_t *chunk;
	size_t run_ind, total_pages, need_pages, rem_pages, i;
	size_t flag_dirty;

	assert((large && binind == BININD_INVALID) || (large == false && binind
	    != BININD_INVALID));

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
	if (config_stats) {
		/*
		 * Update stats_cactive if nactive is crossing a chunk
		 * multiple.
		 */
		size_t cactive_diff = CHUNK_CEILING((arena->nactive +
		    need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_add(cactive_diff);
	}
	arena->nactive += need_pages;

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		if (flag_dirty != 0) {
			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
			    (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    CHUNK_MAP_DIRTY);
		} else {
			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
			    (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+need_pages));
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+total_pages-1));
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
		    false, true);
	}

	/*
	 * Update the page map separately for large vs. small runs, since it is
	 * possible to avoid iteration for large mallocs.
	 */
	if (large) {
		if (zero) {
			if (flag_dirty == 0) {
				/*
				 * The run is clean, so some pages may be
				 * zeroed (i.e. never before touched).
				 */
				for (i = 0; i < need_pages; i++) {
					if (arena_mapbits_unzeroed_get(chunk,
					    run_ind+i) != 0) {
						arena_run_zero(chunk, run_ind+i,
						    1);
					} else if (config_debug) {
						arena_run_page_validate_zeroed(
						    chunk, run_ind+i);
					}
				}
			} else {
				/*
				 * The run is dirty, so all pages must be
				 * zeroed.
				 */
				arena_run_zero(chunk, run_ind, need_pages);
			}
		}

		/*
		 * Set the last element first, in case the run only contains one
		 * page (i.e. both statements set the same element).
		 */
		arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
		    flag_dirty);
		arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
	} else {
		assert(zero == false);
		/*
		 * Propagate the dirty and unzeroed flags to the allocated
		 * small run, so that arena_dalloc_bin_run() has the ability to
		 * conditionally trim clean pages.
		 */
		arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
		/*
		 * The first page will always be dirtied during small run
		 * initialization, so a validation failure here would not
		 * actually cause an observable failure.
		 */
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
			arena_run_page_validate_zeroed(chunk, run_ind);
		for (i = 1; i < need_pages - 1; i++) {
			arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
			if (config_debug && flag_dirty == 0 &&
			    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) {
				arena_run_page_validate_zeroed(chunk,
				    run_ind+i);
			}
		}
		arena_mapbits_small_set(chunk, run_ind+need_pages-1,
		    need_pages-1, binind, flag_dirty);
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
		    0) {
			arena_run_page_validate_zeroed(chunk,
			    run_ind+need_pages-1);
		}
	}
	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
	    LG_PAGE)), (need_pages << LG_PAGE));
}

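/*
 * Obtain a chunk for run allocation: reuse the cached spare if one exists;
 * otherwise allocate a new chunk (temporarily dropping arena->lock around
 * chunk_alloc()) and initialize its page map as a single maximal unallocated
 * run.
 */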
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;
	size_t i;

	if (arena->spare != NULL) {
		chunk = arena->spare;
		arena->spare = NULL;

		assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
		assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
		assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
		    arena_maxclass);
		assert(arena_mapbits_unallocated_size_get(chunk,
		    chunk_npages-1) == arena_maxclass);
		assert(arena_mapbits_dirty_get(chunk, map_bias) ==
		    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	} else {
		bool zero;
		size_t unzeroed;

		zero = false;
		malloc_mutex_unlock(&arena->lock);
		chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
		    false, &zero, arena->dss_prec);
		malloc_mutex_lock(&arena->lock);
		if (chunk == NULL)
			return (NULL);
		if (config_stats)
			arena->stats.mapped += chunksize;

		chunk->arena = arena;

		/*
		 * Claim that no pages are in use, since the header is merely
		 * overhead.
		 */
		chunk->ndirty = 0;

		chunk->nruns_avail = 0;
		chunk->nruns_adjac = 0;

		/*
		 * Initialize the map to contain one maximal free untouched run.
		 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
		 * chunk.
		 */
		unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
		arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
		    unzeroed);
		/*
		 * There is no need to initialize the internal page map entries
		 * unless the chunk is not zeroed.
		 */
		if (zero == false) {
			for (i = map_bias+1; i < chunk_npages-1; i++)
				arena_mapbits_unzeroed_set(chunk, i, unzeroed);
		} else if (config_debug) {
			VALGRIND_MAKE_MEM_DEFINED(
			    (void *)arena_mapp_get(chunk, map_bias+1),
			    (void *)((uintptr_t)
			    arena_mapp_get(chunk, chunk_npages-1)
			    - (uintptr_t)arena_mapp_get(chunk, map_bias+1)));
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    unzeroed);
			}
		}
		arena_mapbits_unallocated_set(chunk, chunk_npages-1,
		    arena_maxclass, unzeroed);
	}

	/* Insert the run into the runs_avail tree. */
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
	    false, false);

	return (chunk);
}

static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxclass);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxclass);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	/*
	 * Remove run from the runs_avail tree, so that the arena does not use
	 * it.
	 */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
	    false, false);

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;

		arena->spare = chunk;
		malloc_mutex_unlock(&arena->lock);
		chunk_dealloc((void *)spare, chunksize, true);
		malloc_mutex_lock(&arena->lock);
		if (config_stats)
			arena->stats.mapped -= chunksize;
	} else
		arena->spare = chunk;
}

static arena_run_t *
arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
    bool zero)
{
	arena_run_t *run;
	arena_chunk_map_t *mapelm, key;

	key.bits = size | CHUNK_MAP_KEY;
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split(arena, run, size, large, binind, zero);
		return (run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
    bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxclass);
	assert((size & PAGE_MASK) == 0);
	assert((large && binind == BININD_INVALID) || (large == false && binind
	    != BININD_INVALID));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_helper(arena, size, large, binind, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
		arena_run_split(arena, run, size, large, binind, zero);
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_helper(arena, size, large, binind, zero));
}

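/*
 * Purge dirty pages only once the number of purgeable pages exceeds
 * nactive >> opt_lg_dirty_mult; for example, if opt_lg_dirty_mult is 3,
 * purging starts once purgeable dirty pages outnumber one eighth of the
 * active pages.
 */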
static inline void
arena_maybe_purge(arena_t *arena)
{
	size_t npurgeable, threshold;

	/* Don't purge if the option is disabled. */
	if (opt_lg_dirty_mult < 0)
		return;
	/* Don't purge if all dirty pages are already being purged. */
	if (arena->ndirty <= arena->npurgatory)
		return;
	npurgeable = arena->ndirty - arena->npurgatory;
	threshold = (arena->nactive >> opt_lg_dirty_mult);
	/*
	 * Don't purge unless the number of purgeable pages exceeds the
	 * threshold.
	 */
	if (npurgeable <= threshold)
		return;

	arena_purge(arena, false);
}

static inline size_t
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{
	size_t npurged;
	ql_head(arena_chunk_map_t) mapelms;
	arena_chunk_map_t *mapelm;
	size_t pageind, npages;
	size_t nmadvise;

	ql_new(&mapelms);

	/*
	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
	 * run is reinserted into runs_avail, and 2) so that it cannot be
	 * completely discarded by another thread while arena->lock is dropped
	 * by this thread.  Note that the arena_run_dalloc() call will
	 * implicitly deallocate the chunk, so no explicit action is required
	 * in this function to deallocate the chunk.
	 *
	 * Note that once a chunk contains dirty pages, it cannot again contain
	 * a single run unless 1) it is a dirty run, or 2) this function purges
	 * dirty pages and causes the transition to a single clean run.  Thus
	 * (chunk == arena->spare) is possible, but it is not possible for
	 * this function to be called on the spare unless it contains a dirty
	 * run.
	 */
	if (chunk == arena->spare) {
		assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
		assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);

		arena_chunk_alloc(arena);
	}

	if (config_stats)
		arena->stats.purged += chunk->ndirty;

	/*
	 * Operate on all dirty runs if there is no clean/dirty run
	 * fragmentation.
	 */
	if (chunk->nruns_adjac == 0)
		all = true;

	/*
	 * Temporarily allocate free dirty runs within chunk.  If all is false,
	 * only operate on dirty runs that are fragments; otherwise operate on
	 * all dirty runs.
	 */
	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
		mapelm = arena_mapp_get(chunk, pageind);
		if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
			    (all || arena_avail_adjac(chunk, pageind,
			    npages))) {
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				arena_run_split(arena, run, run_size, true,
				    BININD_INVALID, false);
				/* Append to list for later processing. */
				ql_elm_new(mapelm, u.ql_link);
				ql_tail_insert(&mapelms, mapelm, u.ql_link);
			}
		} else {
			/* Skip run. */
			if (arena_mapbits_large_get(chunk, pageind) != 0) {
				npages = arena_mapbits_large_size_get(chunk,
				    pageind) >> LG_PAGE;
			} else {
				size_t binind;
				arena_bin_info_t *bin_info;
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				assert(arena_mapbits_small_runind_get(chunk,
				    pageind) == 0);
				binind = arena_bin_index(arena, run->bin);
				bin_info = &arena_bin_info[binind];
				npages = bin_info->run_size >> LG_PAGE;
			}
		}
	}
	assert(pageind == chunk_npages);
	assert(chunk->ndirty == 0 || all == false);
	assert(chunk->nruns_adjac == 0);

	malloc_mutex_unlock(&arena->lock);
	if (config_stats)
		nmadvise = 0;
	npurged = 0;
	ql_foreach(mapelm, &mapelms, u.ql_link) {
		bool unzeroed;
		size_t flag_unzeroed, i;

		pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		npages = arena_mapbits_large_size_get(chunk, pageind) >>
		    LG_PAGE;
		assert(pageind + npages <= chunk_npages);
		unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
		    LG_PAGE)), (npages << LG_PAGE));
		flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
		/*
		 * Set the unzeroed flag for all pages, now that pages_purge()
		 * has returned whether the pages were zeroed as a side effect
		 * of purging.  This chunk map modification is safe even though
		 * the arena mutex isn't currently owned by this thread,
		 * because the run is marked as allocated, thus protecting it
		 * from being modified by any other thread.  As long as these
		 * writes don't perturb the first and last elements'
		 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
		 */
		for (i = 0; i < npages; i++) {
			arena_mapbits_unzeroed_set(chunk, pageind+i,
			    flag_unzeroed);
		}
		npurged += npages;
		if (config_stats)
			nmadvise++;
	}
	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena->stats.nmadvise += nmadvise;

	/* Deallocate runs. */
	for (mapelm = ql_first(&mapelms); mapelm != NULL;
	    mapelm = ql_first(&mapelms)) {
		arena_run_t *run;

		pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
		    LG_PAGE));
		ql_remove(&mapelms, mapelm, u.ql_link);
		arena_run_dalloc(arena, run, false, true);
	}

	return (npurged);
}

static arena_chunk_t *
chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
{
	size_t *ndirty = (size_t *)arg;

	assert(chunk->ndirty != 0);
	*ndirty += chunk->ndirty;
	return (NULL);
}

static void
arena_purge(arena_t *arena, bool all)
{
	arena_chunk_t *chunk;
	size_t npurgatory;
	if (config_debug) {
		size_t ndirty = 0;

		arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
		    chunks_dirty_iter_cb, (void *)&ndirty);
		assert(ndirty == arena->ndirty);
	}
	assert(arena->ndirty > arena->npurgatory || all);
	assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
	    arena->npurgatory) || all);

	if (config_stats)
		arena->stats.npurge++;

	/*
	 * Compute the minimum number of pages that this thread should try to
	 * purge, and add the result to arena->npurgatory.  This will keep
	 * multiple threads from racing to reduce ndirty below the threshold.
	 */
	{
		size_t npurgeable = arena->ndirty - arena->npurgatory;

		if (all == false) {
			size_t threshold = (arena->nactive >>
			    opt_lg_dirty_mult);

			npurgatory = npurgeable - threshold;
		} else
			npurgatory = npurgeable;
	}
	arena->npurgatory += npurgatory;

	while (npurgatory > 0) {
		size_t npurgeable, npurged, nunpurged;

		/* Get next chunk with dirty pages. */
		chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
		if (chunk == NULL) {
			/*
			 * This thread was unable to purge as many pages as
			 * originally intended, due to races with other threads
			 * that either did some of the purging work, or re-used
			 * dirty pages.
			 */
			arena->npurgatory -= npurgatory;
			return;
		}
		npurgeable = chunk->ndirty;
		assert(npurgeable != 0);

		if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
			/*
			 * This thread will purge all the dirty pages in chunk,
			 * so set npurgatory to reflect this thread's intent to
			 * purge the pages.  This tends to reduce the chances
			 * of the following scenario:
			 *
			 * 1) This thread sets arena->npurgatory such that
			 *    (arena->ndirty - arena->npurgatory) is at the
			 *    threshold.
			 * 2) This thread drops arena->lock.
			 * 3) Another thread causes one or more pages to be
			 *    dirtied, and immediately determines that it must
			 *    purge dirty pages.
			 *
			 * If this scenario *does* play out, that's okay,
			 * because all of the purging work being done really
			 * needs to happen.
			 */
			arena->npurgatory += npurgeable - npurgatory;
			npurgatory = npurgeable;
		}

		/*
		 * Keep track of how many pages are purgeable, versus how many
		 * actually get purged, and adjust counters accordingly.
		 */
		arena->npurgatory -= npurgeable;
		npurgatory -= npurgeable;
		npurged = arena_chunk_purge(arena, chunk, all);
		nunpurged = npurgeable - npurged;
		arena->npurgatory += nunpurged;
		npurgatory += nunpurged;
	}
}

void
arena_purge_all(arena_t *arena)
{

	malloc_mutex_lock(&arena->lock);
	arena_purge(arena, true);
	malloc_mutex_unlock(&arena->lock);
}

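/*
 * Return a run to the unallocated state: mark its pages unallocated (dirty
 * if applicable), coalesce with adjacent unallocated runs that have the same
 * dirty state, reinsert the result into runs_avail, deallocate the chunk if
 * it became completely unused, and possibly trigger purging.
 */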
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
	arena_chunk_t *chunk;
	size_t size, run_ind, run_pages, flag_dirty;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);
	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
		size = arena_mapbits_large_size_get(chunk, run_ind);
		assert(size == PAGE ||
		    arena_mapbits_large_size_get(chunk,
		    run_ind+(size>>LG_PAGE)-1) == 0);
	} else {
		size_t binind = arena_bin_index(arena, run->bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];
		size = bin_info->run_size;
	}
	run_pages = (size >> LG_PAGE);
	if (config_stats) {
		/*
		 * Update stats_cactive if nactive is crossing a chunk
		 * multiple.
		 */
		size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_sub(cactive_diff);
	}
	arena->nactive -= run_pages;

	/*
	 * The run is dirty if the caller claims to have dirtied it, as well as
	 * if it was already dirty before being allocated and the caller
	 * doesn't claim to have cleaned it.
	 */
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
		dirty = true;
	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;

	/* Mark pages as unallocated in the chunk map. */
	if (dirty) {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    CHUNK_MAP_DIRTY);
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    CHUNK_MAP_DIRTY);
	} else {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind));
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
	}

	/* Try to coalesce forward. */
	if (run_ind + run_pages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages);
		size_t nrun_pages = nrun_size >> LG_PAGE;

		/*
		 * Remove successor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == nrun_size);
		assert(arena_mapbits_dirty_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
		    false, true);

		size += nrun_size;
		run_pages += nrun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Try to coalesce backward. */
	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
	    == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind-1);
		size_t prun_pages = prun_size >> LG_PAGE;

		run_ind -= prun_pages;

		/*
		 * Remove predecessor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
		    prun_size);
		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
		arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
		    false);

		size += prun_size;
		run_pages += prun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Insert into runs_avail, now that coalescing is complete. */
	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);

	/* Deallocate chunk if it is now completely unused. */
	if (size == arena_maxclass) {
		assert(run_ind == map_bias);
		assert(run_pages == (arena_maxclass >> LG_PAGE));
		arena_chunk_dealloc(arena, chunk);
	}

	/*
	 * It is okay to do dirty page processing here even if the chunk was
	 * deallocated above, since in that case it is the spare.  Waiting
	 * until after possible chunk deallocation to do dirty processing
	 * allows for an old spare to be fully deallocated, thus decreasing the
	 * chances of spuriously crossing the dirty page purging threshold.
	 */
	if (dirty)
		arena_maybe_purge(arena);
}

static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * leading run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);

	if (config_debug) {
		UNUSED size_t tail_npages = newsize >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
	    flag_dirty);

	arena_run_dalloc(arena, run, false, false);
}

static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = newsize >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * trailing run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);

	if (config_debug) {
		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
	    flag_dirty);

	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
	    dirty, false);
}

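/*
 * Return the non-full run with the lowest address in bin->runs, or NULL if
 * the tree is empty; the run tree is keyed by map element address, which
 * corresponds to run address order.
 */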
static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
	arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
	if (mapelm != NULL) {
		arena_chunk_t *chunk;
		size_t pageind;
		arena_run_t *run;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
		pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t))) + map_bias;
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
		    arena_mapbits_small_runind_get(chunk, pageind)) <<
		    LG_PAGE));
		return (run);
	}

	return (NULL);
}

static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

	assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);

	arena_run_tree_insert(&bin->runs, mapelm);
}

static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

	assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);

	arena_run_tree_remove(&bin->runs, mapelm);
}

static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
	arena_run_t *run = arena_bin_runs_first(bin);
	if (run != NULL) {
		arena_bin_runs_remove(bin, run);
		if (config_stats)
			bin->stats.reruns++;
	}
	return (run);
}

static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
	arena_run_t *run;
	size_t binind;
	arena_bin_info_t *bin_info;

	/* Look for a usable run. */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);
	/* No existing runs have any space available. */

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];

	/* Allocate a new run. */
	malloc_mutex_unlock(&bin->lock);
	/******************************/
	malloc_mutex_lock(&arena->lock);
	run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
	if (run != NULL) {
		bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
		    (uintptr_t)bin_info->bitmap_offset);

		/* Initialize run internals. */
		run->bin = bin;
		run->nextind = 0;
		run->nfree = bin_info->nregs;
		bitmap_init(bitmap, &bin_info->bitmap_info);
	}
	malloc_mutex_unlock(&arena->lock);
	/********************************/
	malloc_mutex_lock(&bin->lock);
	if (run != NULL) {
		if (config_stats) {
			bin->stats.nruns++;
			bin->stats.curruns++;
		}
		return (run);
	}

	/*
	 * arena_run_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);

	return (NULL);
}

/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
	void *ret;
	size_t binind;
	arena_bin_info_t *bin_info;
	arena_run_t *run;

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];
	bin->runcur = NULL;
	run = arena_bin_nonfull_run_get(arena, bin);
	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
		/*
		 * Another thread updated runcur while this one ran without the
		 * bin lock in arena_bin_nonfull_run_get().
		 */
		assert(bin->runcur->nfree > 0);
		ret = arena_run_reg_alloc(bin->runcur, bin_info);
		if (run != NULL) {
			arena_chunk_t *chunk;

			/*
			 * arena_run_alloc() may have allocated run, or it may
			 * have pulled run from the bin's run tree.  Therefore
			 * it is unsafe to make any assumptions about how run
			 * has previously been used, and arena_bin_lower_run()
			 * must be called, as if a region were just deallocated
			 * from the run.
			 */
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
			if (run->nfree == bin_info->nregs)
				arena_dalloc_bin_run(arena, chunk, run, bin);
			else
				arena_bin_lower_run(arena, chunk, run, bin);
		}
		return (ret);
	}

	if (run == NULL)
		return (NULL);

	bin->runcur = run;

	assert(bin->runcur->nfree > 0);

	return (arena_run_reg_alloc(bin->runcur, bin_info));
}

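/*
 * Fill a thread cache bin with regions from the arena: allocate up to
 * ncached_max >> lg_fill_div regions from the bin's current run (falling
 * back to arena_bin_malloc_hard()), inserting them so that the
 * lowest-addressed regions are used first.
 */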
void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
    uint64_t prof_accumbytes)
{
	unsigned i, nfill;
	arena_bin_t *bin;
	arena_run_t *run;
	void *ptr;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(arena, prof_accumbytes))
		prof_idump();
	bin = &arena->bins[binind];
	malloc_mutex_lock(&bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tbin->lg_fill_div); i < nfill; i++) {
		if ((run = bin->runcur) != NULL && run->nfree > 0)
			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
		else
			ptr = arena_bin_malloc_hard(arena, bin);
		if (ptr == NULL)
			break;
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
			    true);
		}
		/* Insert such that low regions get used first. */
		tbin->avail[nfill - 1 - i] = ptr;
	}
	if (config_stats) {
		bin->stats.allocated += i * arena_bin_info[binind].reg_size;
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(&bin->lock);
	tbin->ncached = i;
}

void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{

	if (zero) {
		size_t redzone_size = bin_info->redzone_size;
		memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
		    redzone_size);
		memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
		    redzone_size);
	} else {
		memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
		    bin_info->reg_interval);
	}
}

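/*
 * Verify that the 0xa5 redzone bytes written at allocation time are still
 * intact, reporting (and optionally aborting on) any corruption, then
 * junk-fill the freed region (including redzone bytes) with 0x5a.
 */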
1387void
1388arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
1389{
1390	size_t size = bin_info->reg_size;
1391	size_t redzone_size = bin_info->redzone_size;
1392	size_t i;
1393	bool error = false;
1394
1395	for (i = 1; i <= redzone_size; i++) {
1396		unsigned byte;
1397		if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
1398			error = true;
1399			malloc_printf("<jemalloc>: Corrupt redzone "
1400			    "%zu byte%s before %p (size %zu), byte=%#x\n", i,
1401			    (i == 1) ? "" : "s", ptr, size, byte);
1402		}
1403	}
1404	for (i = 0; i < redzone_size; i++) {
1405		unsigned byte;
1406		if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
1407			error = true;
1408			malloc_printf("<jemalloc>: Corrupt redzone "
1409			    "%zu byte%s after end of %p (size %zu), byte=%#x\n",
1410			    i, (i == 1) ? "" : "s", ptr, size, byte);
1411		}
1412	}
1413	if (opt_abort && error)
1414		abort();
1415
1416	memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
1417	    bin_info->reg_interval);
1418}
1419
1420void *
1421arena_malloc_small(arena_t *arena, size_t size, bool zero)
1422{
1423	void *ret;
1424	arena_bin_t *bin;
1425	arena_run_t *run;
1426	size_t binind;
1427
1428	binind = SMALL_SIZE2BIN(size);
1429	assert(binind < NBINS);
1430	bin = &arena->bins[binind];
1431	size = arena_bin_info[binind].reg_size;
1432
1433	malloc_mutex_lock(&bin->lock);
1434	if ((run = bin->runcur) != NULL && run->nfree > 0)
1435		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
1436	else
1437		ret = arena_bin_malloc_hard(arena, bin);
1438
1439	if (ret == NULL) {
1440		malloc_mutex_unlock(&bin->lock);
1441		return (NULL);
1442	}
1443
1444	if (config_stats) {
1445		bin->stats.allocated += size;
1446		bin->stats.nmalloc++;
1447		bin->stats.nrequests++;
1448	}
1449	malloc_mutex_unlock(&bin->lock);
1450	if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
1451		prof_idump();
1452
1453	if (zero == false) {
1454		if (config_fill) {
1455			if (opt_junk) {
1456				arena_alloc_junk_small(ret,
1457				    &arena_bin_info[binind], false);
1458			} else if (opt_zero)
1459				memset(ret, 0, size);
1460		}
1461	} else {
1462		if (config_fill && opt_junk) {
1463			arena_alloc_junk_small(ret, &arena_bin_info[binind],
1464			    true);
1465		}
1466		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
1467		memset(ret, 0, size);
1468	}
1469	VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
1470
1471	return (ret);
1472}
1473
1474void *
1475arena_malloc_large(arena_t *arena, size_t size, bool zero)
1476{
1477	void *ret;
1478	UNUSED bool idump;
1479
1480	/* Large allocation. */
1481	size = PAGE_CEILING(size);
1482	malloc_mutex_lock(&arena->lock);
1483	ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
1484	if (ret == NULL) {
1485		malloc_mutex_unlock(&arena->lock);
1486		return (NULL);
1487	}
1488	if (config_stats) {
1489		arena->stats.nmalloc_large++;
1490		arena->stats.nrequests_large++;
1491		arena->stats.allocated_large += size;
1492		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1493		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1494		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1495	}
1496	if (config_prof)
1497		idump = arena_prof_accum_locked(arena, size);
1498	malloc_mutex_unlock(&arena->lock);
1499	if (config_prof && idump)
1500		prof_idump();
1501
1502	if (zero == false) {
1503		if (config_fill) {
1504			if (opt_junk)
1505				memset(ret, 0xa5, size);
1506			else if (opt_zero)
1507				memset(ret, 0, size);
1508		}
1509	}
1510
1511	return (ret);
1512}
1513
1514/* Only handles large allocations that require more than page alignment. */
1515void *
1516arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
1517{
1518	void *ret;
1519	size_t alloc_size, leadsize, trailsize;
1520	arena_run_t *run;
1521	arena_chunk_t *chunk;
1522
1523	assert((size & PAGE_MASK) == 0);
1524
1525	alignment = PAGE_CEILING(alignment);
1526	alloc_size = size + alignment - PAGE;
1527
1528	malloc_mutex_lock(&arena->lock);
1529	run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
1530	if (run == NULL) {
1531		malloc_mutex_unlock(&arena->lock);
1532		return (NULL);
1533	}
1534	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1535
1536	leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
1537	    (uintptr_t)run;
1538	assert(alloc_size >= leadsize + size);
1539	trailsize = alloc_size - leadsize - size;
1540	ret = (void *)((uintptr_t)run + leadsize);
1541	if (leadsize != 0) {
1542		arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
1543		    leadsize);
1544	}
1545	if (trailsize != 0) {
1546		arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
1547		    false);
1548	}
1549
1550	if (config_stats) {
1551		arena->stats.nmalloc_large++;
1552		arena->stats.nrequests_large++;
1553		arena->stats.allocated_large += size;
1554		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1555		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1556		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1557	}
1558	malloc_mutex_unlock(&arena->lock);
1559
1560	if (config_fill && zero == false) {
1561		if (opt_junk)
1562			memset(ret, 0xa5, size);
1563		else if (opt_zero)
1564			memset(ret, 0, size);
1565	}
1566	return (ret);
1567}
1568
1569void
1570arena_prof_promoted(const void *ptr, size_t size)
1571{
1572	arena_chunk_t *chunk;
1573	size_t pageind, binind;
1574
1575	cassert(config_prof);
1576	assert(ptr != NULL);
1577	assert(CHUNK_ADDR2BASE(ptr) != ptr);
1578	assert(isalloc(ptr, false) == PAGE);
1579	assert(isalloc(ptr, true) == PAGE);
1580	assert(size <= SMALL_MAXCLASS);
1581
1582	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1583	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1584	binind = SMALL_SIZE2BIN(size);
1585	assert(binind < NBINS);
1586	arena_mapbits_large_binind_set(chunk, pageind, binind);
1587
1588	assert(isalloc(ptr, false) == PAGE);
1589	assert(isalloc(ptr, true) == size);
1590}
1591
1592static void
1593arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
1594    arena_bin_t *bin)
1595{
1596
1597	/* Dissociate run from bin. */
1598	if (run == bin->runcur)
1599		bin->runcur = NULL;
1600	else {
1601		size_t binind = arena_bin_index(chunk->arena, bin);
1602		arena_bin_info_t *bin_info = &arena_bin_info[binind];
1603
1604		if (bin_info->nregs != 1) {
1605			/*
1606			 * This block's conditional is necessary because if the
1607			 * run only contains one region, then it never gets
1608			 * inserted into the non-full runs tree.
1609			 */
1610			arena_bin_runs_remove(bin, run);
1611		}
1612	}
1613}
1614
1615static void
1616arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1617    arena_bin_t *bin)
1618{
1619	size_t binind;
1620	arena_bin_info_t *bin_info;
1621	size_t npages, run_ind, past;
1622
1623	assert(run != bin->runcur);
1624	assert(arena_run_tree_search(&bin->runs,
1625	    arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
1626	    == NULL);
1627
1628	binind = arena_bin_index(chunk->arena, run->bin);
1629	bin_info = &arena_bin_info[binind];
1630
1631	malloc_mutex_unlock(&bin->lock);
1632	/******************************/
1633	npages = bin_info->run_size >> LG_PAGE;
1634	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	past = (size_t)(PAGE_CEILING((uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
	    bin_info->reg_interval - bin_info->redzone_size) -
	    (uintptr_t)chunk) >> LG_PAGE);
	malloc_mutex_lock(&arena->lock);

	/*
	 * If the run was originally clean, and some pages were never touched,
	 * trim the clean pages before deallocating the dirty portion of the
	 * run.
	 */
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+npages-1));
	if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
	    npages) {
		/* Trim clean pages.  Convert to large run beforehand. */
		assert(npages > 0);
		arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
		arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
		arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
		    ((past - run_ind) << LG_PAGE), false);
		/* npages = past - run_ind; */
	}
	arena_run_dalloc(arena, run, true, false);
	malloc_mutex_unlock(&arena->lock);
	/****************************/
	malloc_mutex_lock(&bin->lock);
	if (config_stats)
		bin->stats.curruns--;
}

static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

	/*
	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
	 * non-full run.  It is okay to NULL runcur out rather than proactively
	 * keeping it pointing at the lowest non-full run.
	 */
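	/*
	 * Note that if runcur is NULL, the address comparison below is false,
	 * so run is inserted into bin->runs rather than installed as runcur.
	 */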
	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
		/* Switch runcur. */
		if (bin->runcur->nfree > 0)
			arena_bin_runs_insert(bin, bin->runcur);
		bin->runcur = run;
		if (config_stats)
			bin->stats.reruns++;
	} else
		arena_bin_runs_insert(bin, run);
}

void
arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm)
{
	size_t pageind;
	arena_run_t *run;
	arena_bin_t *bin;
	arena_bin_info_t *bin_info;
	size_t size, binind;

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
	bin = run->bin;
	binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
	bin_info = &arena_bin_info[binind];
	if (config_fill || config_stats)
		size = bin_info->reg_size;

	if (config_fill && opt_junk)
		arena_dalloc_junk_small(ptr, bin_info);

	arena_run_reg_dalloc(run, ptr);
	if (run->nfree == bin_info->nregs) {
		arena_dissociate_bin_run(chunk, run, bin);
		arena_dalloc_bin_run(arena, chunk, run, bin);
	} else if (run->nfree == 1 && run != bin->runcur)
		arena_bin_lower_run(arena, chunk, run, bin);

	if (config_stats) {
		bin->stats.allocated -= size;
		bin->stats.ndalloc++;
	}
}

void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_t *mapelm)
{
	arena_run_t *run;
	arena_bin_t *bin;

	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
	bin = run->bin;
	malloc_mutex_lock(&bin->lock);
	arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
	malloc_mutex_unlock(&bin->lock);
}

void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind)
{
	arena_chunk_map_t *mapelm;

	if (config_debug) {
		/* arena_ptr_small_binind_get() does extra sanity checking. */
		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
		    pageind)) != BININD_INVALID);
	}
	mapelm = arena_mapp_get(chunk, pageind);
	arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
}

void
arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

	if (config_fill || config_stats) {
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		size_t size = arena_mapbits_large_size_get(chunk, pageind);

		if (config_fill && opt_junk)
			memset(ptr, 0x5a, size);
		if (config_stats) {
			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= size;
			arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
		}
	}

	arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
}

void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

	malloc_mutex_lock(&arena->lock);
	arena_dalloc_large_locked(arena, chunk, ptr);
	malloc_mutex_unlock(&arena->lock);
}

static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size)
{

	assert(size < oldsize);

	/*
	 * Shrink the run, and make trailing pages available for other
	 * allocations.
	 */
	malloc_mutex_lock(&arena->lock);
	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
	    true);
	if (config_stats) {
		arena->stats.ndalloc_large++;
		arena->stats.allocated_large -= oldsize;
		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
	}
	malloc_mutex_unlock(&arena->lock);
}

static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size, size_t extra, bool zero)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t npages = oldsize >> LG_PAGE;
	size_t followsize;

	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));

	/* Try to extend the run. */
	assert(size + extra > oldsize);
	malloc_mutex_lock(&arena->lock);
	if (pageind + npages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
	    (followsize = arena_mapbits_unallocated_size_get(chunk,
	    pageind+npages)) >= size - oldsize) {
		/*
		 * The next run is available and sufficiently large.  Split the
		 * following run, then merge the first part with the existing
		 * allocation.
		 */
		size_t flag_dirty;
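		/*
		 * splitsize is the smaller of followsize and
		 * (size + extra - oldsize): take the whole following run only
		 * if doing so does not overshoot the size + extra request.
		 */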
		size_t splitsize = (oldsize + followsize <= size + extra)
		    ? followsize : size + extra - oldsize;
		arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
		    ((pageind+npages) << LG_PAGE)), splitsize, true,
		    BININD_INVALID, zero);

		size = oldsize + splitsize;
		npages = size >> LG_PAGE;

		/*
		 * Mark the extended run as dirty if either portion of the run
		 * was dirty before allocation.  This is rather pedantic,
		 * because there's not actually any sequence of events that
		 * could cause the resulting run to be passed to
		 * arena_run_dalloc() with the dirty argument set to false
		 * (which is when dirty flag consistency would really matter).
		 */
		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
		arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);

		if (config_stats) {
			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= oldsize;
			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

			arena->stats.nmalloc_large++;
			arena->stats.nrequests_large++;
			arena->stats.allocated_large += size;
			arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
			arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
		}
		malloc_mutex_unlock(&arena->lock);
		return (false);
	}
	malloc_mutex_unlock(&arena->lock);

	return (true);
}

/*
 * Try to resize a large allocation in place, in order to avoid copying.  This
 * always fails when growing an object if the following run is already in use.
 */
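/*
 * Three cases are handled: the padded request maps to the same number of
 * pages (nothing to do beyond junking any trimmed tail), the request shrinks
 * the allocation (trim trailing pages), or it grows the allocation (attempt
 * to extend into the immediately following run).
 */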
static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{
	size_t psize;

	psize = PAGE_CEILING(size + extra);
	if (psize == oldsize) {
		/* Same size class. */
		if (config_fill && opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
			    size);
		}
		return (false);
	} else {
		arena_chunk_t *chunk;
		arena_t *arena;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = chunk->arena;

		if (psize < oldsize) {
			/* Fill before shrinking in order to avoid a race. */
			if (config_fill && opt_junk) {
				memset((void *)((uintptr_t)ptr + size), 0x5a,
				    oldsize - size);
			}
			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
			    psize);
			return (false);
		} else {
			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
			    oldsize, PAGE_CEILING(size),
			    psize - PAGE_CEILING(size), zero);
			if (config_fill && ret == false && zero == false &&
			    opt_zero) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    size - oldsize);
			}
			return (ret);
		}
	}
}

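/*
 * Resize an allocation in place if possible.  Returns ptr on success, or NULL
 * if satisfying the request would require moving the allocation.
 */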
void *
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize <= arena_maxclass) {
		if (oldsize <= SMALL_MAXCLASS) {
			assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
			    == oldsize);
			if ((size + extra <= SMALL_MAXCLASS &&
			    SMALL_SIZE2BIN(size + extra) ==
			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
			    size + extra >= oldsize)) {
				if (config_fill && opt_junk && size < oldsize) {
					memset((void *)((uintptr_t)ptr + size),
					    0x5a, oldsize - size);
				}
				return (ptr);
			}
		} else {
			assert(size <= arena_maxclass);
			if (size + extra > SMALL_MAXCLASS) {
				if (arena_ralloc_large(ptr, oldsize, size,
				    extra, zero) == false)
					return (ptr);
			}
		}
	}

	/* Reallocation would require a move. */
	return (NULL);
}

void *
arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
    bool try_tcache_dalloc)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and
	 * copying.
	 */
	if (alignment != 0) {
		size_t usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
	} else
		ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment != 0) {
			size_t usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
			    arena);
		} else
			ret = arena_malloc(arena, size, zero, try_tcache_alloc);

		if (ret == NULL)
			return (NULL);
	}

	/* Junk/zero-filling was already done by ipalloc()/arena_malloc(). */

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
	memcpy(ret, ptr, copysize);
	iqallocx(ptr, try_tcache_dalloc);
	return (ret);
}

dss_prec_t
arena_dss_prec_get(arena_t *arena)
{
	dss_prec_t ret;

	malloc_mutex_lock(&arena->lock);
	ret = arena->dss_prec;
	malloc_mutex_unlock(&arena->lock);
	return (ret);
}

void
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
{

	malloc_mutex_lock(&arena->lock);
	arena->dss_prec = dss_prec;
	malloc_mutex_unlock(&arena->lock);
}

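/*
 * Merge this arena's stats into the caller-provided accumulators.  Arena-wide
 * and large-object stats are read under arena->lock; bin stats are then read
 * bin by bin under each bin's own lock, so the combined snapshot is not
 * atomic with respect to concurrent allocation.
 */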
void
arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats)
{
	unsigned i;

	malloc_mutex_lock(&arena->lock);
	*dss = dss_prec_names[arena->dss_prec];
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;

	astats->mapped += arena->stats.mapped;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->allocated_large += arena->stats.allocated_large;
	astats->nmalloc_large += arena->stats.nmalloc_large;
	astats->ndalloc_large += arena->stats.ndalloc_large;
	astats->nrequests_large += arena->stats.nrequests_large;

	for (i = 0; i < nlclasses; i++) {
		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
		lstats[i].curruns += arena->stats.lstats[i].curruns;
	}
	malloc_mutex_unlock(&arena->lock);

	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(&bin->lock);
		bstats[i].allocated += bin->stats.allocated;
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		}
		bstats[i].nruns += bin->stats.nruns;
		bstats[i].reruns += bin->stats.reruns;
		bstats[i].curruns += bin->stats.curruns;
		malloc_mutex_unlock(&bin->lock);
	}
}

bool
arena_new(arena_t *arena, unsigned ind)
{
	unsigned i;
	arena_bin_t *bin;

	arena->ind = ind;
	arena->nthreads = 0;

	if (malloc_mutex_init(&arena->lock))
		return (true);

	if (config_stats) {
		memset(&arena->stats, 0, sizeof(arena_stats_t));
		arena->stats.lstats =
		    (malloc_large_stats_t *)base_alloc(nlclasses *
		    sizeof(malloc_large_stats_t));
		if (arena->stats.lstats == NULL)
			return (true);
		memset(arena->stats.lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		if (config_tcache)
			ql_new(&arena->tcache_ql);
	}

	if (config_prof)
		arena->prof_accumbytes = 0;

	arena->dss_prec = chunk_dss_prec_get();

	/* Initialize chunks. */
	arena_chunk_dirty_new(&arena->chunks_dirty);
	arena->spare = NULL;

	arena->nactive = 0;
	arena->ndirty = 0;
	arena->npurgatory = 0;

	arena_avail_tree_new(&arena->runs_avail);

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
			return (true);
		bin->runcur = NULL;
		arena_run_tree_new(&bin->runs);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (false);
}

/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size >= min_run_size
 *   *) bin_info->run_size <= arena_maxclass
 *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
 * calculated here, since these settings are all interdependent.
 */
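/*
 * Illustrative run layout produced by this calculation:
 *
 *   [arena_run_t][bitmap][ctx0 if prof][slack][rz|reg 0|rz][rz|reg 1|rz]...[pad]
 *
 * Each region is bracketed by redzones of bin_info->redzone_size bytes (zero
 * when redzones are disabled), reg_interval = reg_size + 2 * redzone_size,
 * and reg0_offset points at region 0.
 */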
static size_t
bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
{
	size_t pad_size;
	size_t try_run_size, good_run_size;
	uint32_t try_nregs, good_nregs;
	uint32_t try_hdr_size, good_hdr_size;
	uint32_t try_bitmap_offset, good_bitmap_offset;
	uint32_t try_ctx0_offset, good_ctx0_offset;
	uint32_t try_redzone0_offset, good_redzone0_offset;

	assert(min_run_size >= PAGE);
	assert(min_run_size <= arena_maxclass);

	/*
	 * Determine redzone size based on minimum alignment and minimum
	 * redzone size.  Add padding to the end of the run if it is needed to
	 * align the regions.  The padding allows each redzone to be half the
	 * minimum alignment; without the padding, each redzone would have to
	 * be twice as large in order to maintain alignment.
	 */
	if (config_fill && opt_redzone) {
		size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
		if (align_min <= REDZONE_MINSIZE) {
			bin_info->redzone_size = REDZONE_MINSIZE;
			pad_size = 0;
		} else {
			bin_info->redzone_size = align_min >> 1;
			pad_size = bin_info->redzone_size;
		}
	} else {
		bin_info->redzone_size = 0;
		pad_size = 0;
	}
	bin_info->reg_interval = bin_info->reg_size +
	    (bin_info->redzone_size << 1);
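	/*
	 * Worked example (assuming REDZONE_MINSIZE < 64): for reg_size == 192,
	 * the lowest set bit gives align_min == 64, so redzone_size == 32,
	 * pad_size == 32, and reg_interval == 192 + 64 == 256.
	 */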

	/*
	 * Calculate known-valid settings before entering the run_size
	 * expansion loop, so that the first part of the loop always copies
	 * valid settings.
	 *
	 * The do..while loop iteratively reduces the number of regions until
	 * the run header and the regions no longer overlap.  A closed formula
	 * would be quite messy, since there is an interdependency between the
	 * header's mask length and the number of regions.
	 */
	try_run_size = min_run_size;
	try_nregs = ((try_run_size - sizeof(arena_run_t)) /
	    bin_info->reg_interval)
	    + 1; /* Counter-act try_nregs-- in loop. */
	if (try_nregs > RUN_MAXREGS) {
		try_nregs = RUN_MAXREGS
		    + 1; /* Counter-act try_nregs-- in loop. */
	}
	do {
		try_nregs--;
		try_hdr_size = sizeof(arena_run_t);
		/* Pad to a long boundary. */
		try_hdr_size = LONG_CEILING(try_hdr_size);
		try_bitmap_offset = try_hdr_size;
		/* Add space for bitmap. */
		try_hdr_size += bitmap_size(try_nregs);
		if (config_prof && opt_prof && prof_promote == false) {
			/* Pad to a quantum boundary. */
			try_hdr_size = QUANTUM_CEILING(try_hdr_size);
			try_ctx0_offset = try_hdr_size;
			/* Add space for one (prof_ctx_t *) per region. */
			try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
		} else
			try_ctx0_offset = 0;
		try_redzone0_offset = try_run_size - (try_nregs *
		    bin_info->reg_interval) - pad_size;
	} while (try_hdr_size > try_redzone0_offset);

	/* run_size expansion loop. */
	do {
		/*
		 * Copy valid settings before trying more aggressive settings.
		 */
		good_run_size = try_run_size;
		good_nregs = try_nregs;
		good_hdr_size = try_hdr_size;
		good_bitmap_offset = try_bitmap_offset;
		good_ctx0_offset = try_ctx0_offset;
		good_redzone0_offset = try_redzone0_offset;

		/* Try more aggressive settings. */
		try_run_size += PAGE;
		try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
		    bin_info->reg_interval)
		    + 1; /* Counter-act try_nregs-- in loop. */
		if (try_nregs > RUN_MAXREGS) {
			try_nregs = RUN_MAXREGS
			    + 1; /* Counter-act try_nregs-- in loop. */
		}
		do {
			try_nregs--;
			try_hdr_size = sizeof(arena_run_t);
			/* Pad to a long boundary. */
			try_hdr_size = LONG_CEILING(try_hdr_size);
			try_bitmap_offset = try_hdr_size;
			/* Add space for bitmap. */
			try_hdr_size += bitmap_size(try_nregs);
			if (config_prof && opt_prof && prof_promote == false) {
				/* Pad to a quantum boundary. */
				try_hdr_size = QUANTUM_CEILING(try_hdr_size);
				try_ctx0_offset = try_hdr_size;
				/*
				 * Add space for one (prof_ctx_t *) per region.
				 */
				try_hdr_size += try_nregs *
				    sizeof(prof_ctx_t *);
			}
			try_redzone0_offset = try_run_size - (try_nregs *
			    bin_info->reg_interval) - pad_size;
		} while (try_hdr_size > try_redzone0_offset);
	} while (try_run_size <= arena_maxclass
	    && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
	    RUN_MAX_OVRHD_RELAX
	    && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
	    && try_nregs < RUN_MAXREGS);

	assert(good_hdr_size <= good_redzone0_offset);

	/* Copy final settings. */
	bin_info->run_size = good_run_size;
	bin_info->nregs = good_nregs;
	bin_info->bitmap_offset = good_bitmap_offset;
	bin_info->ctx0_offset = good_ctx0_offset;
	bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;

	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
	    * bin_info->reg_interval) + pad_size == bin_info->run_size);

	return (good_run_size);
}

static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;
	size_t prev_run_size = PAGE;

#define	SIZE_CLASS(bin, delta, size)					\
	bin_info = &arena_bin_info[bin];				\
	bin_info->reg_size = size;					\
	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
	SIZE_CLASSES
#undef SIZE_CLASS
}

void
arena_boot(void)
{
	size_t header_size;
	unsigned i;

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
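	/*
	 * Illustrative convergence (numbers are hypothetical): if an unbiased
	 * page map needs 3.2 pages of header, pass 1 sets map_bias = 4; pass 2
	 * drops 4 map entries, which may shrink the header enough to yield
	 * map_bias = 3; pass 3 recomputes with that value and either confirms
	 * it or bumps it back up to the stable, correct bias.
	 */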
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		header_size = offsetof(arena_chunk_t, map) +
		    (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
		map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
		    != 0);
	}
	assert(map_bias > 0);

	arena_maxclass = chunksize - (map_bias << LG_PAGE);

	bin_info_init();
}

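/*
 * Fork handling: the arena lock is acquired before the per-bin locks in
 * arena_prefork(), and the postfork functions release them in the reverse
 * order, keeping the lock hierarchy consistent across fork().
 */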
void
arena_prefork(arena_t *arena)
{
	unsigned i;

	malloc_mutex_prefork(&arena->lock);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(&arena->bins[i].lock);
}

void
arena_postfork_parent(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(&arena->bins[i].lock);
	malloc_mutex_postfork_parent(&arena->lock);
}

void
arena_postfork_child(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(&arena->bins[i].lock);
	malloc_mutex_postfork_child(&arena->lock);
}
