/* base.c, revision 234370. */
#define	JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t	base_mtx;

/*
 * Current pages that are being used for internal memory allocations.  These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void		*base_pages;
static void		*base_next_addr;
static void		*base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t	*base_nodes;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool	base_pages_alloc(size_t minsize);

/******************************************************************************/

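/*
 * Replace the current base pages with a fresh chunk of at least minsize
 * bytes.  Any unused tail of the previous chunk is simply abandoned; base
 * allocations are never freed, so this waste is bounded by one chunk at a
 * time.  Returns true on error (jemalloc's usual boolean error convention).
 */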
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;
	bool zero;

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	/* Zeroed memory is not required for internal metadata. */
	zero = false;
	base_pages = chunk_alloc(csize, chunksize, true, &zero);
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}

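/*
 * Bump-allocate size bytes from the current base pages, extending them via
 * base_pages_alloc() when the request does not fit.  Rounding the request up
 * with CACHELINE_CEILING() keeps distinct allocations on distinct cache
 * lines.  As an illustration, assuming a 64-byte cacheline (a common
 * jemalloc configuration), a 100-byte request consumes
 * CACHELINE_CEILING(100) == 128 bytes, leaving base_next_addr on a fresh
 * cacheline boundary for the next caller.
 *
 * Returns NULL on error.  The returned memory is never freed.
 */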
void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize;

	/* Round size up to nearest multiple of the cacheline size. */
	csize = CACHELINE_CEILING(size);

	malloc_mutex_lock(&base_mtx);
	/* Make sure there's enough space for the allocation. */
	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
		if (base_pages_alloc(csize)) {
			malloc_mutex_unlock(&base_mtx);
			return (NULL);
		}
	}
	/* Allocate. */
	ret = base_next_addr;
	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
	malloc_mutex_unlock(&base_mtx);

	return (ret);
}

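/*
 * Zeroing wrapper around base_alloc().  Note that number * size is not
 * checked for overflow; internal callers are expected to pass sizes that
 * cannot overflow.
 */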
void *
base_calloc(size_t number, size_t size)
{
	void *ret = base_alloc(number * size);

	if (ret != NULL)
		memset(ret, 0, number * size);

	return (ret);
}

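/*
 * Allocate an extent_node_t, preferring to recycle one via an intrusive free
 * list: the first sizeof(void *) bytes of each freed node are reused as the
 * list's "next" pointer, so the list needs no storage of its own.  A minimal
 * sketch of the same technique, with hypothetical names:
 *
 *	void *free_list = NULL;
 *	void push(void *obj) { *(void **)obj = free_list; free_list = obj; }
 *	void *pop(void) {
 *		void *obj = free_list;
 *		if (obj != NULL)
 *			free_list = *(void **)obj;
 *		return (obj);
 *	}
 *
 * When the free list is empty, a node is carved from the base pages instead.
 */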
extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
		/* Pop a recycled node off the intrusive free list. */
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
	}

	return (ret);
}

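/*
 * Push node onto the free list consumed by base_node_alloc().  The node's
 * memory remains part of the base pages; only the object is recycled.
 */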
void
base_node_dealloc(extent_node_t *node)
{

	malloc_mutex_lock(&base_mtx);
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
	malloc_mutex_unlock(&base_mtx);
}

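/*
 * Initialize this module.  Returns true on error.  base_pages and the cursor
 * pointers are left NULL (static zero-initialization), so the first
 * base_alloc() call triggers the initial base_pages_alloc().
 */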
bool
base_boot(void)
{

	base_nodes = NULL;
	if (malloc_mutex_init(&base_mtx))
		return (true);

	return (false);
}

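/*
 * The fork hooks below keep base_mtx consistent across fork(): the mutex is
 * acquired in the pre-fork handler so that no thread can hold it across the
 * fork, then released in the parent and restored to a usable state in the
 * child.
 */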
void
base_prefork(void)
{

	malloc_mutex_prefork(&base_mtx);
}

void
base_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&base_mtx);
}

void
base_postfork_child(void)
{

	malloc_mutex_postfork_child(&base_mtx);
}