#define	JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t	base_mtx;
static extent_tree_t	base_avail_szad;
static extent_node_t	*base_nodes;
static size_t		base_allocated;
static size_t		base_resident;
static size_t		base_mapped;

/******************************************************************************/

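/*
 * Pop a node off the base_nodes free list, or return NULL if the list is
 * empty.  Each free node stores the link to the next node in its own first
 * bytes, so no separate link structure is needed.
 */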
static extent_node_t *
base_node_try_alloc(tsdn_t *tsdn)
{
	extent_node_t *node;

	malloc_mutex_assert_owner(tsdn, &base_mtx);

	if (base_nodes == NULL)
		return (NULL);
	node = base_nodes;
	base_nodes = *(extent_node_t **)node;
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	return (node);
}

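/*
 * Push a node onto the base_nodes free list so that a future
 * base_node_try_alloc() call can reuse it.
 */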
static void
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
{

	malloc_mutex_assert_owner(tsdn, &base_mtx);

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
}

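/*
 * Map a new chunk large enough to hold at least minsize bytes, and wrap it in
 * an extent node.  If no cached node is available to describe the chunk,
 * carve the node out of the beginning of the chunk itself.
 */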
static extent_node_t *
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
	extent_node_t *node;
	size_t csize, nsize;
	void *addr;

	malloc_mutex_assert_owner(tsdn, &base_mtx);
	assert(minsize != 0);
	node = base_node_try_alloc(tsdn);
	/* Allocate enough space to also carve a node out if necessary. */
	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
	csize = CHUNK_CEILING(minsize + nsize);
	addr = chunk_alloc_base(csize);
	if (addr == NULL) {
		if (node != NULL)
			base_node_dalloc(tsdn, node);
		return (NULL);
	}
	base_mapped += csize;
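	/*
	 * No cached node was available; carve one out of the front of the new
	 * chunk and shrink the usable range accordingly.
	 */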
	if (node == NULL) {
		node = (extent_node_t *)addr;
		addr = (void *)((uintptr_t)addr + nsize);
		csize -= nsize;
		if (config_stats) {
			base_allocated += nsize;
			base_resident += PAGE_CEILING(nsize);
		}
	}
	extent_node_init(node, NULL, addr, csize, true, true);
	return (node);
}

/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
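/*
 * There is no corresponding deallocation function; base memory backs
 * long-lived internal metadata and is only released when the process exits.
 */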
void *
base_alloc(tsdn_t *tsdn, size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);

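	/*
	 * Search for the smallest available extent that can satisfy a usable
	 * size of usize; szad order breaks size ties by lowest address.
	 */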
	usize = s2u(csize);
	extent_node_init(&key, NULL, NULL, usize, false, false);
	malloc_mutex_lock(tsdn, &base_mtx);
	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szad_remove(&base_avail_szad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(tsdn, csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}

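	/*
	 * Carve the allocation out of the front of the extent.  Return any
	 * trailing space to the tree; if the extent is fully consumed, recycle
	 * its node via the free list.
	 */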
	ret = extent_node_addr_get(node);
	if (extent_node_size_get(node) > csize) {
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szad_insert(&base_avail_szad, node);
	} else
		base_node_dalloc(tsdn, node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
		    PAGE_CEILING((uintptr_t)ret);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(tsdn, &base_mtx);
	return (ret);
}

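/*
 * Copy a consistent snapshot of the base allocation statistics into
 * *allocated, *resident, and *mapped.
 */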
void
base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
    size_t *mapped)
{

	malloc_mutex_lock(tsdn, &base_mtx);
	assert(base_allocated <= base_resident);
	assert(base_resident <= base_mapped);
	*allocated = base_allocated;
	*resident = base_resident;
	*mapped = base_mapped;
	malloc_mutex_unlock(tsdn, &base_mtx);
}

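/*
 * Initialize the base allocator's mutex and its (initially empty) tree of
 * available extents.  Returns true on error.
 */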
bool
base_boot(void)
{

	if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
		return (true);
	extent_tree_szad_new(&base_avail_szad);
	base_nodes = NULL;

	return (false);
}

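/*
 * Fork handlers: base_mtx is acquired before fork and released in both the
 * parent and the child afterward, so that the child does not inherit a lock
 * held by a thread that no longer exists.
 */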
void
base_prefork(tsdn_t *tsdn)
{

	malloc_mutex_prefork(tsdn, &base_mtx);
}

void
base_postfork_parent(tsdn_t *tsdn)
{

	malloc_mutex_postfork_parent(tsdn, &base_mtx);
}

void
base_postfork_child(tsdn_t *tsdn)
{

	malloc_mutex_postfork_child(tsdn, &base_mtx);
}