chunk_mmap.c revision 234543
#define	JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
 * potentially avoid some system calls.
 */
malloc_tsd_data(static, mmap_unaligned, bool, false)
malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
    malloc_tsd_no_cleanup)

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*pages_map(void *addr, size_t size);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
    bool unaligned);

/******************************************************************************/

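/*
 * Map size bytes of anonymous memory, using addr only as a hint (MAP_FIXED is
 * deliberately avoided).  Returns NULL if the mapping fails, or if addr was
 * non-NULL and the kernel placed the mapping elsewhere.
 */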
static void *
pages_map(void *addr, size_t size)
{
	void *ret;

	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}

	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

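/*
 * Unmap a region previously returned by pages_map().  Failure is reported via
 * malloc_printf() (and aborts the process if opt_abort is set) rather than
 * being returned to the caller.
 */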
static void
pages_unmap(void *addr, size_t size)
{

	if (munmap(addr, size) == -1) {
		char buf[BUFERROR_BUF];

		buferror(errno, buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in munmap(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}

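/*
 * Tell the kernel that the pages in [addr, addr+length) no longer need their
 * contents preserved, so that the physical memory backing them can be
 * reclaimed.
 */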
void
pages_purge(void *addr, size_t length)
{

#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
#  define JEMALLOC_MADV_PURGE MADV_DONTNEED
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
#  define JEMALLOC_MADV_PURGE MADV_FREE
#else
#  error "No method defined for purging unused dirty pages."
#endif
	madvise(addr, length, JEMALLOC_MADV_PURGE);
}

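/*
 * Reliable but expensive allocation path: over-allocate by (alignment - PAGE)
 * bytes, then unmap the leading and trailing excess so that the returned
 * region is alignment-aligned.
 */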
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
{
	void *ret, *pages;
	size_t alloc_size, leadsize, trailsize;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	pages = pages_map(NULL, alloc_size);
	if (pages == NULL)
		return (NULL);
	leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
	    (uintptr_t)pages;
	assert(alloc_size >= leadsize + size);
	trailsize = alloc_size - leadsize - size;
	ret = (void *)((uintptr_t)pages + leadsize);
	if (leadsize != 0) {
		/* Note that mmap() returned an unaligned mapping. */
		unaligned = true;
		pages_unmap(pages, leadsize);
	}
	if (trailsize != 0)
		pages_unmap((void *)((uintptr_t)ret + size), trailsize);

	/*
	 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
	 * the next chunk_alloc_mmap() execution tries the fast allocation
	 * method.
	 */
	if (unaligned == false && mmap_unaligned_booted) {
		bool mu = false;
		mmap_unaligned_tsd_set(&mu);
	}

	return (ret);
}

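/*
 * Allocate a size-byte chunk via mmap(), attempting the optimistic fast path
 * unless a previous call on this thread observed an unaligned mapping; see
 * the strategy discussion below.
 */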
void *
chunk_alloc_mmap(size_t size, size_t alignment)
{
	void *ret;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in at least one call to
	 * pages_unmap().
	 *
	 * A more optimistic approach is to try mapping precisely the right
	 * amount, then try to append another mapping if alignment is off.  In
	 * practice, this works out well as long as the application is not
	 * interleaving mappings via direct mmap() calls.  If we do run into a
	 * situation where there is an interleaved mapping and we are unable to
	 * extend an unaligned mapping, our best option is to switch to the
	 * slow method until mmap() returns another aligned mapping.  This will
	 * tend to leave a gap in the memory map that is too small to cause
	 * later problems for the optimistic method.
	 *
	 * Another possible confounding factor is address space layout
	 * randomization (ASLR), which causes mmap(2) to disregard the
	 * requested address.  mmap_unaligned tracks whether the previous
	 * chunk_alloc_mmap() execution received any unaligned or relocated
	 * mappings, and if so, the current execution will immediately fall
	 * back to the slow method.  However, we keep track of whether the fast
	 * method would have succeeded, and if so, we make a note to try the
	 * fast method next time.
	 */

	if (mmap_unaligned_booted && *mmap_unaligned_tsd_get() == false) {
		size_t offset;

		ret = pages_map(NULL, size);
		if (ret == NULL)
			return (NULL);

		offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
		if (offset != 0) {
			bool mu = true;
			mmap_unaligned_tsd_set(&mu);
			/* Try to extend chunk boundary. */
			if (pages_map((void *)((uintptr_t)ret + size),
			    chunksize - offset) == NULL) {
				/*
				 * Extension failed.  Clean up, then revert to
				 * the reliable-but-expensive method.
				 */
				pages_unmap(ret, size);
				ret = chunk_alloc_mmap_slow(size, alignment,
				    true);
			} else {
				/* Clean up unneeded leading space. */
				pages_unmap(ret, chunksize - offset);
				ret = (void *)((uintptr_t)ret + (chunksize -
				    offset));
			}
		}
	} else
		ret = chunk_alloc_mmap_slow(size, alignment, false);

	return (ret);
}

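/*
 * Unmap the chunk unless munmap() is disabled in this configuration; return
 * true if the chunk was left mapped.
 */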
bool
chunk_dealloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (config_munmap == false);
}

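/* Bootstrap the mmap_unaligned thread-specific data; returns true on error. */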
bool
chunk_mmap_boot(void)
{

	/*
	 * XXX For the non-TLS implementation of tsd, the first access from
	 * each thread causes memory allocation.  The result is a bootstrapping
	 * problem for this particular use case, so for now just disable it by
	 * leaving it in an unbooted state.
	 */
#ifdef JEMALLOC_TLS
	if (mmap_unaligned_tsd_boot())
		return (true);
#endif

	return (false);
}
222