chunk_dss.c revision 251300
#define	JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t	dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t	dss_mtx;

/* Base address of the DSS. */
static void		*dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void		*dss_prev;
/* Current upper limit on DSS addresses. */
static void		*dss_max;

/******************************************************************************/

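/*
 * Stub so that this file links on platforms that provide no sbrk(); in that
 * configuration config_dss is presumably false, so this path should never be
 * taken at run time.
 */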
#ifndef JEMALLOC_HAVE_SBRK
static void *
sbrk(intptr_t increment)
{

	not_implemented();

	return (NULL);
}
#endif

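/* Return the current default dss precedence, read under dss_mtx. */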
dss_prec_t
chunk_dss_prec_get(void)
{
	dss_prec_t ret;

	if (config_dss == false)
		return (dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	ret = dss_prec_default;
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}

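/*
 * Set the default dss precedence used for subsequently created arenas.
 * Returns true on error, i.e. when DSS support is compiled out.
 */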
bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

	if (config_dss == false)
		return (true);
	malloc_mutex_lock(&dss_mtx);
	dss_prec_default = dss_prec;
	malloc_mutex_unlock(&dss_mtx);
	return (false);
}

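/*
 * Attempt to extend the DSS far enough to carve out a region of the given
 * size and alignment (each a multiple of chunksize).  Returns NULL if the
 * DSS is exhausted or if the request would wrap around the address space.
 */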
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
	void *ret;

	cassert(config_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		size_t gap_size, cpad_size;
		void *cpad, *dss_next;
		intptr_t incr;

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			/* Get the current end of the DSS. */
			dss_max = sbrk(0);
			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
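			/*
			 * Worked example (the 4 MiB chunksize is an
			 * assumption of this comment, not a requirement):
			 * if sbrk(0) returns 0x5000, then gap_size is
			 * (0x400000 - 0x5000) & 0x3fffff == 0x3fb000, so
			 * dss_max + gap_size is the next 4 MiB boundary.
			 */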
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
			dss_next = (void *)((uintptr_t)ret + size);
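			/*
			 * Another worked example, again assuming 4 MiB chunks,
			 * now with an 8 MiB alignment: with dss_max chunk-
			 * aligned at 0x1400000, gap_size is 0, cpad equals
			 * dss_max, ret is 0x1800000, and cpad_size is
			 * 0x400000; that pad chunk is handed to chunk_unmap()
			 * below so it can be reused.
			 */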
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
			dss_prev = sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				if (cpad_size != 0)
					chunk_unmap(cpad, cpad_size);
				if (*zero) {
					VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
					memset(ret, 0, size);
				}
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}
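
/*
 * Usage sketch (hypothetical caller, not part of this file): a chunk
 * allocator that prefers the DSS might do
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc_dss(chunksize, chunksize, &zero);
 *	if (chunk == NULL)
 *		chunk = ... fall back to mmap()-based allocation ...;
 */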
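
/*
 * Return true iff chunk lies within [dss_base, dss_max), i.e. it was carved
 * out of the DSS.
 */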
bool
chunk_in_dss(void *chunk)
{
	bool ret;

	cassert(config_dss);

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(&dss_mtx);

	return (ret);
}

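/*
 * Initialize DSS bookkeeping from the current program break.  Returns true
 * if mutex initialization fails.
 */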
bool
chunk_dss_boot(void)
{

	cassert(config_dss);

	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;

	return (false);
}

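/*
 * The hooks below keep dss_mtx consistent across fork(): the mutex is
 * acquired before the fork and released afterward in both the parent and
 * the child, mirroring the usual pthread_atfork() acquire/release pattern.
 */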
void
chunk_dss_prefork(void)
{

	if (config_dss)
		malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{

	if (config_dss)
		malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{

	if (config_dss)
		malloc_mutex_postfork_child(&dss_mtx);
}

/******************************************************************************/