chunk.c revision 296221
#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char	*opt_dss = DSS_DEFAULT;
size_t		opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t	curchunks;
static size_t	highchunks;

rtree_t		chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;

static void	*chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool	chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind);
static bool	chunk_commit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_decommit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool	chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

const chunk_hooks_t	chunk_hooks_default = {
	chunk_alloc_default,
	chunk_dalloc_default,
	chunk_commit_default,
	chunk_decommit_default,
	chunk_purge_default,
	chunk_split_default,
	chunk_merge_default
};
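
/*
 * Illustrative sketch only (nothing in this file depends on it): a caller with
 * access to an arena can install a replacement hook table via
 * chunk_hooks_set() below; the previous table is returned so it can be
 * restored or chained to.  The my_* names are hypothetical placeholders with
 * the same signatures as the corresponding *_default functions above.
 *
 *	chunk_hooks_t my_hooks = {
 *		my_alloc, my_dalloc, my_commit, my_decommit,
 *		my_purge, my_split, my_merge
 *	};
 *	chunk_hooks_t old_hooks = chunk_hooks_set(arena, &my_hooks);
 */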

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed);

/******************************************************************************/

static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{

	return (arena->chunk_hooks);
}

chunk_hooks_t
chunk_hooks_get(arena_t *arena)
{
	chunk_hooks_t chunk_hooks;

	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks = chunk_hooks_get_locked(arena);
	malloc_mutex_unlock(&arena->chunks_mtx);

	return (chunk_hooks);
}

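/*
 * Install a replacement hook table for the arena and return the previous one.
 * Writers are serialized by chunks_mtx; see the comment below regarding why
 * each pointer is also written atomically for the benefit of unlocked readers.
 */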
chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
	chunk_hooks_t old_chunk_hooks;

	malloc_mutex_lock(&arena->chunks_mtx);
	old_chunk_hooks = arena->chunk_hooks;
	/*
	 * Copy each field atomically so that it is impossible for readers to
	 * see partially updated pointers.  There are places where readers only
	 * need one hook function pointer (therefore no need to copy the
	 * entirety of arena->chunk_hooks), and stale reads do not affect
	 * correctness, so they perform unlocked reads.
	 */
#define	ATOMIC_COPY_HOOK(n) do {					\
	union {								\
		chunk_##n##_t	**n;					\
		void		**v;					\
	} u;								\
	u.n = &arena->chunk_hooks.n;					\
	atomic_write_p(u.v, chunk_hooks->n);				\
} while (0)
	ATOMIC_COPY_HOOK(alloc);
	ATOMIC_COPY_HOOK(dalloc);
	ATOMIC_COPY_HOOK(commit);
	ATOMIC_COPY_HOOK(decommit);
	ATOMIC_COPY_HOOK(purge);
	ATOMIC_COPY_HOOK(split);
	ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
	malloc_mutex_unlock(&arena->chunks_mtx);

	return (old_chunk_hooks);
}

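/*
 * Callers may pass a chunk_hooks_t that still holds the
 * CHUNK_HOOKS_INITIALIZER sentinel; in that case fill it in from the arena's
 * current hooks, using the locked or unlocked getter as requested.
 */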
static void
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool locked)
{
	static const chunk_hooks_t uninitialized_hooks =
	    CHUNK_HOOKS_INITIALIZER;

	if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
	    0) {
		*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
		    chunk_hooks_get(arena);
	}
}

static void
chunk_hooks_assure_initialized_locked(arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
}

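/*
 * Map the chunk's base address to its extent node in chunks_rtree.  When heap
 * profiling is enabled, also update the chunk count and trigger a gdump if a
 * new high-water mark is reached.  Returns true on rtree insertion failure.
 */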
bool
chunk_register(const void *chunk, const extent_node_t *node)
{

	assert(extent_node_addr_get(node) == chunk);

	if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
		return (true);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nadd = (size == 0) ? 1 : size / chunksize;
		size_t cur = atomic_add_z(&curchunks, nadd);
		size_t high = atomic_read_z(&highchunks);
		while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highchunks update race.
			 */
			high = atomic_read_z(&highchunks);
		}
		if (cur > high && prof_gdump_get_unlocked())
			prof_gdump();
	}

	return (false);
}

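/*
 * Remove the chunk's mapping from chunks_rtree and, when heap profiling is
 * enabled, subtract it from the chunk count used for gdump triggering.
 */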
void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
	bool err;

	err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
	assert(!err);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nsub = (size == 0) ? 1 : size / chunksize;
		assert(atomic_read_z(&curchunks) >= nsub);
		atomic_sub_z(&curchunks, nsub);
	}
}

/*
 * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
 * fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size)
{
	extent_node_t key;

	assert(size == CHUNK_CEILING(size));

	extent_node_init(&key, arena, NULL, size, false, false);
	return (extent_tree_szad_nsearch(chunks_szad, &key));
}

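/*
 * Try to satisfy an allocation from the given extent trees (cached or
 * retained chunks).  When new_addr is non-NULL, only an extent beginning at
 * exactly that address is acceptable; otherwise the lowest best-fit extent is
 * chosen.  The selected extent is carved up as
 *
 *	|<-- leadsize -->|<------ size ------>|<-- trailsize -->|
 *	node addr        ret (returned)
 *
 * and any non-empty lead/trail is reinserted into the trees.  The result is
 * committed if necessary and zero-filled when the caller requires zeroed
 * memory that was not already zeroed.
 */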
static void *
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
    bool dalloc_node)
{
	void *ret;
	extent_node_t *node;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed, committed;

	assert(new_addr == NULL || alignment == chunksize);
	/*
	 * Cached chunks use the node linkage embedded in their headers, in
	 * which case dalloc_node is true, and new_addr is non-NULL because
	 * we're operating on a specific chunk.
	 */
	assert(dalloc_node || new_addr != NULL);

	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
	if (new_addr != NULL) {
		extent_node_t key;
		extent_node_init(&key, arena, new_addr, alloc_size, false,
		    false);
		node = extent_tree_ad_search(chunks_ad, &key);
	} else {
		node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
		    alloc_size);
	}
	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
	    size)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
	    alignment) - (uintptr_t)extent_node_addr_get(node);
	assert(new_addr == NULL || leadsize == 0);
	assert(extent_node_size_get(node) >= leadsize + size);
	trailsize = extent_node_size_get(node) - leadsize - size;
	ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
	zeroed = extent_node_zeroed_get(node);
	if (zeroed)
		*zero = true;
	committed = extent_node_committed_get(node);
	if (committed)
		*commit = true;
	/* Split the lead. */
	if (leadsize != 0 &&
	    chunk_hooks->split(extent_node_addr_get(node),
	    extent_node_size_get(node), leadsize, size, false, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	arena_chunk_cache_maybe_remove(arena, node, cache);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		extent_node_size_set(node, leadsize);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Split the trail. */
		if (chunk_hooks->split(ret, size + trailsize, size,
		    trailsize, false, arena->ind)) {
			if (dalloc_node && node != NULL)
				arena_node_dalloc(arena, node);
			malloc_mutex_unlock(&arena->chunks_mtx);
			chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
			    cache, ret, size + trailsize, zeroed, committed);
			return (NULL);
		}
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			node = arena_node_alloc(arena);
			if (node == NULL) {
				malloc_mutex_unlock(&arena->chunks_mtx);
				chunk_record(arena, chunk_hooks, chunks_szad,
				    chunks_ad, cache, ret, size + trailsize,
				    zeroed, committed);
				return (NULL);
			}
		}
		extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
		    trailsize, zeroed, committed);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
		    ret, size, zeroed, committed);
		return (NULL);
	}
	malloc_mutex_unlock(&arena->chunks_mtx);

	assert(dalloc_node || node != NULL);
	if (dalloc_node && node != NULL)
		arena_node_dalloc(arena, node);
	if (*zero) {
		if (!zeroed)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);
	/* mmap. */
	if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);

	/* All strategies for allocation failed. */
	return (NULL);
}

void *
chunk_alloc_base(size_t size)
{
	void *ret;
	bool zero, commit;

	/*
	 * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
	 * because it's critical that chunk_alloc_base() return untouched
	 * demand-zeroed virtual memory.
	 */
	zero = true;
	commit = true;
	ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

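/*
 * Allocate from the arena's cache of previously used (dirty) chunks.  The
 * returned memory is always committed; it is zero-filled only when the caller
 * requests zeroed memory.
 */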
void *
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
	void *ret;
	bool commit;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	commit = true;
	ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
	    &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
	    &commit, dalloc_node);
	if (ret == NULL)
		return (NULL);
	assert(commit);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	return (ret);
}

static arena_t *
chunk_arena_get(unsigned arena_ind)
{
	arena_t *arena;

	arena = arena_get(arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);
	return (arena);
}

static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
	void *ret;
	arena_t *arena;

	arena = chunk_arena_get(arena_ind);
	ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
	    commit, arena->dss_prec);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

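/*
 * Allocate from the arena's retained chunks: address ranges that were
 * previously deallocated but whose mappings were kept rather than returned to
 * the operating system.
 */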
static void *
chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained,
	    &arena->chunks_ad_retained, false, new_addr, size, alignment, zero,
	    commit, true));
}

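/*
 * Allocate a chunk, first trying to recycle retained memory and falling back
 * to the arena's alloc hook.
 */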
void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;

	chunk_hooks_assure_initialized(arena, chunk_hooks);

	ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size,
	    alignment, zero, commit);
	if (ret == NULL) {
		ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
		    commit, arena->ind);
		if (ret == NULL)
			return (NULL);
	}

	if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	return (ret);
}

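/*
 * Record a chunk in the given extent trees (cached or retained), coalescing
 * it with the following and/or preceding address range whenever the merge
 * hook allows it and the committed state matches.  If no extent node can be
 * allocated, the chunk is purged (when cached) and leaked as virtual memory.
 */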
static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed)
{
	bool unzeroed;
	extent_node_t *node, *prev;
	extent_node_t key;

	assert(!cache || !zeroed);
	unzeroed = cache || !zeroed;
	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
	    false, false);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && extent_node_addr_get(node) ==
	    extent_node_addr_get(&key) && extent_node_committed_get(node) ==
	    committed && !chunk_hooks->merge(chunk, size,
	    extent_node_addr_get(node), extent_node_size_get(node), false,
	    arena->ind)) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, chunk);
		extent_node_size_set(node, size + extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
		    !unzeroed);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		node = arena_node_alloc(arena);
		if (node == NULL) {
			/*
			 * Node allocation failed, which is an exceedingly
			 * unlikely failure.  Leak chunk after making sure its
			 * pages have already been purged, so that this is only
			 * a virtual memory leak.
			 */
			if (cache) {
				chunk_purge_wrapper(arena, chunk_hooks, chunk,
				    size, 0, size);
			}
			goto label_return;
		}
		extent_node_init(node, arena, chunk, size, !unzeroed,
		    committed);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
	    extent_node_size_get(prev)) == chunk &&
	    extent_node_committed_get(prev) == committed &&
	    !chunk_hooks->merge(extent_node_addr_get(prev),
	    extent_node_size_get(prev), chunk, size, false, arena->ind)) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);
		arena_chunk_cache_maybe_remove(arena, prev, cache);
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, extent_node_addr_get(prev));
		extent_node_size_set(node, extent_node_size_get(prev) +
		    extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
		    extent_node_zeroed_get(node));
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);

		arena_node_dalloc(arena, prev);
	}

label_return:
	malloc_mutex_unlock(&arena->chunks_mtx);
}

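/*
 * Return a dirty chunk to the arena's cache, then possibly purge excess dirty
 * memory via arena_maybe_purge().
 */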
void
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool committed)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
	    &arena->chunks_ad_cached, true, chunk, size, false, committed);
	arena_maybe_purge(arena);
}

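/*
 * Deallocate a chunk via the dalloc hook.  If the hook refuses to unmap the
 * memory, decommit it (or purge it if decommit fails) and retain it in the
 * arena's retained trees for later reuse.
 */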
void
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool zeroed, bool committed)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	/* Try to deallocate. */
	if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
		return;
	/* Try to decommit; purge if that fails. */
	if (committed) {
		committed = chunk_hooks->decommit(chunk, size, 0, size,
		    arena->ind);
	}
	zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
	    arena->ind);
	chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
	    &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
}

static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind)
{

	if (!have_dss || !chunk_in_dss(chunk))
		return (chunk_dalloc_mmap(chunk, size));
	return (true);
}

void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool committed)
{

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	chunk_hooks->dalloc(chunk, size, committed, arena->ind);
	if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
		JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
}

static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

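/*
 * Purge (discard the physical pages backing) a page-aligned range within a
 * chunk while keeping the virtual memory mapping intact.
 */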
bool
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
	    length));
}

bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, size_t offset, size_t length)
{

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}

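/*
 * The default split/merge hooks operate on raw virtual memory, so they only
 * refuse when the system cannot coalesce mappings (!maps_coalesce); the merge
 * hook additionally refuses to join memory that straddles the dss/mmap
 * boundary.
 */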
static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	return (false);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
		return (true);

	return (false);
}

static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{

	return ((rtree_node_elm_t *)base_alloc(nelms *
	    sizeof(rtree_node_elm_t)));
}

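/*
 * One-time initialization: derive chunksize, chunksize_mask, and chunk_npages
 * from opt_lg_chunk, boot dss support if present, and create chunks_rtree
 * with enough levels to cover all possible chunk-aligned addresses.
 */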
bool
chunk_boot(void)
{
#ifdef _WIN32
	SYSTEM_INFO info;
	GetSystemInfo(&info);

	/*
	 * Verify actual page size is equal to or an integral multiple of
	 * configured page size.
	 */
	if (info.dwPageSize & ((1U << LG_PAGE) - 1))
		return (true);

	/*
	 * Configure chunksize (if not set) to match granularity (usually 64K),
	 * so pages_map will always take fast path.
	 */
	if (!opt_lg_chunk) {
		opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
		    - 1;
	}
#else
	if (!opt_lg_chunk)
		opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (have_dss && chunk_dss_boot())
		return (true);
	if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
	    opt_lg_chunk), chunks_rtree_node_alloc, NULL))
		return (true);

	return (false);
}

void
chunk_prefork(void)
{

	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
}