chunk_dss.c revision 245868
#define	JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};
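/*
 * The strings above parallel the dss_prec_t enumeration (dss_prec_disabled,
 * dss_prec_primary, dss_prec_secondary, and its limit value), so the array
 * can be indexed directly by dss_prec_t values.
 */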

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t	dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t	dss_mtx;

/* Base address of the DSS. */
static void		*dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void		*dss_prev;
/* Current upper limit on DSS addresses. */
static void		*dss_max;
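
/*
 * Illustration of the bookkeeping above (the DSS grows upward):
 *
 *	dss_base                                      dss_max
 *	   |------------------ DSS -------------------|
 *
 * chunk_in_dss() below treats [dss_base, dss_max) as the DSS, and dss_prev
 * becomes ((void *)-1) once sbrk() fails, marking the DSS as exhausted.
 */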

/******************************************************************************/

#ifndef JEMALLOC_HAVE_SBRK
static void *
sbrk(intptr_t increment)
{

	not_implemented();

	return (NULL);
}
#endif

dss_prec_t
chunk_dss_prec_get(void)
{
	dss_prec_t ret;

	if (config_dss == false)
		return (dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	ret = dss_prec_default;
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}

bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

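	/*
	 * Per jemalloc's usual convention, the return value is true on
	 * failure; setting a precedence fails when DSS support is not
	 * compiled in.
	 */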
	if (config_dss == false)
		return (true);
	malloc_mutex_lock(&dss_mtx);
	dss_prec_default = dss_prec;
	malloc_mutex_unlock(&dss_mtx);
	return (false);
}

void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
	void *ret;

	cassert(config_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);
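	/*
	 * (For example, on an LP64 system a request of 2^63 bytes would
	 * convert to a negative increment that, if passed through, would ask
	 * sbrk() to shrink the DSS.)
	 */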

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		size_t gap_size, cpad_size;
		void *cpad, *dss_next;
		intptr_t incr;

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			/* Get the current end of the DSS. */
			dss_max = sbrk(0);
			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
			dss_next = (void *)((uintptr_t)ret + size);
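			/*
			 * Worked example (illustrative values): with 4 MiB
			 * chunks (chunksize == 0x400000, chunksize_mask ==
			 * 0x3fffff), dss_max == 0x1234000, and alignment ==
			 * 0x800000:
			 *
			 *	gap_size  = (0x400000 - 0x234000) & 0x3fffff
			 *	          = 0x1cc000
			 *	cpad      = 0x1234000 + 0x1cc000 = 0x1400000
			 *	ret       = 0x1234000 rounded up to 0x800000
			 *	          = 0x1800000
			 *	cpad_size = 0x1800000 - 0x1400000 = 0x400000
			 *	dss_next  = ret + size
			 *
			 * The one-chunk cpad region is recycled below via
			 * chunk_unmap() once sbrk() succeeds.
			 */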
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
			dss_prev = sbrk(incr);
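			/*
			 * sbrk() returns the previous break on success.  If a
			 * thread outside malloc grew the DSS between the
			 * sbrk(0) call above and this one, the value returned
			 * here differs from dss_max and the loop retries with
			 * fresh state.
			 */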
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				if (cpad_size != 0)
					chunk_unmap(cpad, cpad_size);
				if (*zero) {
					/*
					 * Tell Valgrind the memory is
					 * addressable (but undefined) before
					 * zeroing it; the memset() then
					 * marks it defined.
					 */
					VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
					memset(ret, 0, size);
				}
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}
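
/*
 * Illustrative (hypothetical) caller: request one zeroed, chunk-aligned
 * chunk, falling back to some other source (e.g. mmap) on failure:
 *
 *	bool zero = true;
 *	void *chunk = chunk_alloc_dss(chunksize, chunksize, &zero);
 *	if (chunk == NULL) {
 *		... fall back to an mmap()-based path ...
 *	}
 *
 * On success the address satisfies the requested alignment, and the memory
 * reads as zeros because zero was set on entry.
 */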

bool
chunk_in_dss(void *chunk)
{
	bool ret;

	cassert(config_dss);

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(&dss_mtx);

	return (ret);
}

bool
chunk_dss_boot(void)
{

	cassert(config_dss);

	if (malloc_mutex_init(&dss_mtx))
		return (true);
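	/* sbrk(0) reports the current break without changing it. */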
	dss_base = sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;

	return (false);
}

void
chunk_dss_prefork(void)
{

	if (config_dss)
		malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{

	if (config_dss)
		malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{

	if (config_dss)
		malloc_mutex_postfork_child(&dss_mtx);
}

/******************************************************************************/
