chunk_mmap.c (234370) vs. chunk_mmap.c (234543)
1#define JEMALLOC_CHUNK_MMAP_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7/*
8 * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
9 * potentially avoid some system calls.
10 */
11malloc_tsd_data(static, mmap_unaligned, bool, false)
12malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
13 malloc_tsd_no_cleanup)
14
15/******************************************************************************/
16/* Function prototypes for non-inline static functions. */
17
18static void *pages_map(void *addr, size_t size);
19static void pages_unmap(void *addr, size_t size);
20static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
21 bool unaligned);
22
23/******************************************************************************/
24
25static void *
26pages_map(void *addr, size_t size)
27{
28 void *ret;
29
30 /*
31 * We don't use MAP_FIXED here, because it can cause the *replacement*
32 * of existing mappings, and we only want to create new mappings.
33 */
34 ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
35 -1, 0);
36 assert(ret != NULL);
37
38 if (ret == MAP_FAILED)
39 ret = NULL;
40 else if (addr != NULL && ret != addr) {
41 /*
42 * We succeeded in mapping memory, but not in the right place.
43 */
44 if (munmap(ret, size) == -1) {
45 char buf[BUFERROR_BUF];
46
47 buferror(errno, buf, sizeof(buf));
48			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
49 buf);
50 if (opt_abort)
51 abort();
52 }
53 ret = NULL;
54 }
55
56 assert(ret == NULL || (addr == NULL && ret != addr)
57 || (addr != NULL && ret == addr));
58 return (ret);
59}
60
61static void
62pages_unmap(void *addr, size_t size)
63{
64
65 if (munmap(addr, size) == -1) {
66 char buf[BUFERROR_BUF];
67
68 buferror(errno, buf, sizeof(buf));
69 malloc_printf("<jemalloc>: Error in munmap(): %s\n", buf);
70 if (opt_abort)
71 abort();
72 }
73}
74
1#define JEMALLOC_CHUNK_MMAP_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7/*
8 * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
9 * potentially avoid some system calls.
10 */
11malloc_tsd_data(static, mmap_unaligned, bool, false)
12malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
13 malloc_tsd_no_cleanup)
14
15/******************************************************************************/
16/* Function prototypes for non-inline static functions. */
17
18static void *pages_map(void *addr, size_t size);
19static void pages_unmap(void *addr, size_t size);
20static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
21 bool unaligned);
22
23/******************************************************************************/
24
25static void *
26pages_map(void *addr, size_t size)
27{
28 void *ret;
29
30 /*
31 * We don't use MAP_FIXED here, because it can cause the *replacement*
32 * of existing mappings, and we only want to create new mappings.
33 */
34 ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
35 -1, 0);
36 assert(ret != NULL);
37
38 if (ret == MAP_FAILED)
39 ret = NULL;
40 else if (addr != NULL && ret != addr) {
41 /*
42 * We succeeded in mapping memory, but not in the right place.
43 */
44 if (munmap(ret, size) == -1) {
45 char buf[BUFERROR_BUF];
46
47 buferror(errno, buf, sizeof(buf));
48			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
49 buf);
50 if (opt_abort)
51 abort();
52 }
53 ret = NULL;
54 }
55
56 assert(ret == NULL || (addr == NULL && ret != addr)
57 || (addr != NULL && ret == addr));
58 return (ret);
59}
60
61static void
62pages_unmap(void *addr, size_t size)
63{
64
65 if (munmap(addr, size) == -1) {
66 char buf[BUFERROR_BUF];
67
68 buferror(errno, buf, sizeof(buf));
69 malloc_printf("<jemalloc>: Error in munmap(): %s\n", buf);
70 if (opt_abort)
71 abort();
72 }
73}
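
The check `ret != addr` in pages_map() exists because MAP_FIXED is deliberately avoided: a hinted mmap() may be placed elsewhere, but it never clobbers an existing mapping. A small standalone experiment (not jemalloc code, assuming a POSIX system with MAP_ANON) shows the behavior pages_map() has to account for:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

int
main(void)
{
	size_t sz = 1 << 20;
	void *a, *b, *hint;

	/* First mapping: let the kernel pick the address. */
	a = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	if (a == MAP_FAILED)
		return (1);

	/*
	 * Ask for the region immediately after a, but without MAP_FIXED:
	 * the kernel may place it elsewhere, and it will never silently
	 * replace an existing mapping.  This is the case pages_map()
	 * detects with its (ret != addr) comparison and then discards.
	 */
	hint = (void *)((uintptr_t)a + sz);
	b = mmap(hint, sz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	if (b == MAP_FAILED)
		return (1);
	printf("hint honored: %s\n", (b == hint) ? "yes" : "no");

	munmap(b, sz);
	munmap(a, sz);
	return (0);
}
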
74
75void
76pages_purge(void *addr, size_t length)
77{
78
79#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
80# define JEMALLOC_MADV_PURGE MADV_DONTNEED
81#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
82# define JEMALLOC_MADV_PURGE MADV_FREE
83#else
84# error "No method defined for purging unused dirty pages."
85#endif
86 madvise(addr, length, JEMALLOC_MADV_PURGE);
87}
88
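
pages_purge(), added in this revision, is a thin wrapper over madvise(2); which advice flag it uses is fixed at configure time (MADV_DONTNEED or MADV_FREE). A minimal standalone sketch of the same idea, not jemalloc code, assuming MADV_DONTNEED is available and using the illustrative name demo_purge:

#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>

/*
 * Map a region, dirty it, then advise the kernel that its contents are
 * disposable.  Physical pages may be reclaimed, but the virtual mapping
 * stays valid and can be written again later.
 */
static void
demo_purge(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	assert(p != MAP_FAILED);
	((char *)p)[0] = 1;			/* Dirty the first page. */
	madvise(p, len, MADV_DONTNEED);		/* Same role as pages_purge(). */
	munmap(p, len);
}

int
main(void)
{
	demo_purge(4096);
	return (0);
}

On systems where MADV_FREE is preferred (it is lazier, reclaiming pages only under memory pressure), the configure-time #ifdef above selects it instead; the call shape is identical.
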
75static void *
76chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
77{
78 void *ret, *pages;
79 size_t alloc_size, leadsize, trailsize;
80
81 alloc_size = size + alignment - PAGE;
82 /* Beware size_t wrap-around. */
83 if (alloc_size < size)
84 return (NULL);
85 pages = pages_map(NULL, alloc_size);
86 if (pages == NULL)
87 return (NULL);
88 leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
89 (uintptr_t)pages;
90 assert(alloc_size >= leadsize + size);
91 trailsize = alloc_size - leadsize - size;
92 ret = (void *)((uintptr_t)pages + leadsize);
93 if (leadsize != 0) {
94 /* Note that mmap() returned an unaligned mapping. */
95 unaligned = true;
96 pages_unmap(pages, leadsize);
97 }
98 if (trailsize != 0)
99 pages_unmap((void *)((uintptr_t)ret + size), trailsize);
100
101 /*
102 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
103 * the next chunk_alloc_mmap() execution tries the fast allocation
104 * method.
105 */
106 if (unaligned == false && mmap_unaligned_booted) {
107 bool mu = false;
108 mmap_unaligned_tsd_set(&mu);
109 }
110
111 return (ret);
112}
113
114void *
115chunk_alloc_mmap(size_t size, size_t alignment)
116{
117 void *ret;
118
119 /*
120 * Ideally, there would be a way to specify alignment to mmap() (like
121 * NetBSD has), but in the absence of such a feature, we have to work
122 * hard to efficiently create aligned mappings. The reliable, but
123 * slow method is to create a mapping that is over-sized, then trim the
124 * excess. However, that always results in at least one call to
125 * pages_unmap().
126 *
127 * A more optimistic approach is to try mapping precisely the right
128 * amount, then try to append another mapping if alignment is off. In
129 * practice, this works out well as long as the application is not
130 * interleaving mappings via direct mmap() calls. If we do run into a
131 * situation where there is an interleaved mapping and we are unable to
132 * extend an unaligned mapping, our best option is to switch to the
133 * slow method until mmap() returns another aligned mapping. This will
134 * tend to leave a gap in the memory map that is too small to cause
135 * later problems for the optimistic method.
136 *
137 * Another possible confounding factor is address space layout
138 * randomization (ASLR), which causes mmap(2) to disregard the
139 * requested address. mmap_unaligned tracks whether the previous
140 * chunk_alloc_mmap() execution received any unaligned or relocated
141 * mappings, and if so, the current execution will immediately fall
142 * back to the slow method. However, we keep track of whether the fast
143 * method would have succeeded, and if so, we make a note to try the
144 * fast method next time.
145 */
146
147 if (mmap_unaligned_booted && *mmap_unaligned_tsd_get() == false) {
148 size_t offset;
149
150 ret = pages_map(NULL, size);
151 if (ret == NULL)
152 return (NULL);
153
154 offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
155 if (offset != 0) {
156 bool mu = true;
157 mmap_unaligned_tsd_set(&mu);
158 /* Try to extend chunk boundary. */
159 if (pages_map((void *)((uintptr_t)ret + size),
160 chunksize - offset) == NULL) {
161 /*
162 * Extension failed. Clean up, then revert to
163 * the reliable-but-expensive method.
164 */
165 pages_unmap(ret, size);
166 ret = chunk_alloc_mmap_slow(size, alignment,
167 true);
168 } else {
169 /* Clean up unneeded leading space. */
170 pages_unmap(ret, chunksize - offset);
171 ret = (void *)((uintptr_t)ret + (chunksize -
172 offset));
173 }
174 }
175 } else
176 ret = chunk_alloc_mmap_slow(size, alignment, false);
177
178 return (ret);
179}
180
181bool
182chunk_dealloc_mmap(void *chunk, size_t size)
183{
184
185 if (config_munmap)
186 pages_unmap(chunk, size);
187
188 return (config_munmap == false);
189}
190
191bool
192chunk_mmap_boot(void)
193{
194
195 /*
196 * XXX For the non-TLS implementation of tsd, the first access from
197 * each thread causes memory allocation. The result is a bootstrapping
198 * problem for this particular use case, so for now just disable it by
199 * leaving it in an unbooted state.
200 */
201#ifdef JEMALLOC_TLS
202 if (mmap_unaligned_tsd_boot())
203 return (true);
204#endif
205
206 return (false);
207}
89static void *
90chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
91{
92 void *ret, *pages;
93 size_t alloc_size, leadsize, trailsize;
94
95 alloc_size = size + alignment - PAGE;
96 /* Beware size_t wrap-around. */
97 if (alloc_size < size)
98 return (NULL);
99 pages = pages_map(NULL, alloc_size);
100 if (pages == NULL)
101 return (NULL);
102 leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
103 (uintptr_t)pages;
104 assert(alloc_size >= leadsize + size);
105 trailsize = alloc_size - leadsize - size;
106 ret = (void *)((uintptr_t)pages + leadsize);
107 if (leadsize != 0) {
108 /* Note that mmap() returned an unaligned mapping. */
109 unaligned = true;
110 pages_unmap(pages, leadsize);
111 }
112 if (trailsize != 0)
113 pages_unmap((void *)((uintptr_t)ret + size), trailsize);
114
115 /*
116 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
117 * the next chunk_alloc_mmap() execution tries the fast allocation
118 * method.
119 */
120 if (unaligned == false && mmap_unaligned_booted) {
121 bool mu = false;
122 mmap_unaligned_tsd_set(&mu);
123 }
124
125 return (ret);
126}
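
The lead/trail arithmetic in chunk_alloc_mmap_slow() can be checked in isolation. A standalone sketch, not jemalloc code: align_up() stands in for ALIGNMENT_CEILING(), and the base address, chunk size, and alignment are made-up example values:

#include <stdint.h>
#include <stdio.h>

/* Round addr up to the next multiple of alignment (a power of two). */
static uintptr_t
align_up(uintptr_t addr, uintptr_t alignment)
{
	return ((addr + alignment - 1) & ~(alignment - 1));
}

int
main(void)
{
	/* Hypothetical unaligned base returned by the over-sized mmap(). */
	uintptr_t pages = 0x10003000;
	size_t size = 0x400000;			/* 4 MiB chunk. */
	size_t alignment = 0x400000;
	size_t page = 0x1000;
	size_t alloc_size = size + alignment - page;
	uintptr_t ret = align_up(pages, alignment);
	size_t leadsize = ret - pages;
	size_t trailsize = alloc_size - leadsize - size;

	/*
	 * chunk_alloc_mmap_slow() unmaps [pages, pages+leadsize) and
	 * [ret+size, ret+size+trailsize), leaving an exactly size-byte,
	 * alignment-aligned mapping at ret.
	 */
	printf("lead=0x%zx trail=0x%zx aligned=%d\n", leadsize, trailsize,
	    (int)((ret & (alignment - 1)) == 0));
	return (0);
}

Because alloc_size is size + alignment - PAGE rather than size + alignment, the worst-case over-allocation is one page smaller (mmap() already returns page-aligned addresses); the wrap-around check at the top of the function guards that addition.
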
127
128void *
129chunk_alloc_mmap(size_t size, size_t alignment)
130{
131 void *ret;
132
133 /*
134 * Ideally, there would be a way to specify alignment to mmap() (like
135 * NetBSD has), but in the absence of such a feature, we have to work
136 * hard to efficiently create aligned mappings. The reliable, but
137 * slow method is to create a mapping that is over-sized, then trim the
138 * excess. However, that always results in at least one call to
139 * pages_unmap().
140 *
141 * A more optimistic approach is to try mapping precisely the right
142 * amount, then try to append another mapping if alignment is off. In
143 * practice, this works out well as long as the application is not
144 * interleaving mappings via direct mmap() calls. If we do run into a
145 * situation where there is an interleaved mapping and we are unable to
146 * extend an unaligned mapping, our best option is to switch to the
147 * slow method until mmap() returns another aligned mapping. This will
148 * tend to leave a gap in the memory map that is too small to cause
149 * later problems for the optimistic method.
150 *
151 * Another possible confounding factor is address space layout
152 * randomization (ASLR), which causes mmap(2) to disregard the
153 * requested address. mmap_unaligned tracks whether the previous
154 * chunk_alloc_mmap() execution received any unaligned or relocated
155 * mappings, and if so, the current execution will immediately fall
156 * back to the slow method. However, we keep track of whether the fast
157 * method would have succeeded, and if so, we make a note to try the
158 * fast method next time.
159 */
160
161 if (mmap_unaligned_booted && *mmap_unaligned_tsd_get() == false) {
162 size_t offset;
163
164 ret = pages_map(NULL, size);
165 if (ret == NULL)
166 return (NULL);
167
168 offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
169 if (offset != 0) {
170 bool mu = true;
171 mmap_unaligned_tsd_set(&mu);
172 /* Try to extend chunk boundary. */
173 if (pages_map((void *)((uintptr_t)ret + size),
174 chunksize - offset) == NULL) {
175 /*
176 * Extension failed. Clean up, then revert to
177 * the reliable-but-expensive method.
178 */
179 pages_unmap(ret, size);
180 ret = chunk_alloc_mmap_slow(size, alignment,
181 true);
182 } else {
183 /* Clean up unneeded leading space. */
184 pages_unmap(ret, chunksize - offset);
185 ret = (void *)((uintptr_t)ret + (chunksize -
186 offset));
187 }
188 }
189 } else
190 ret = chunk_alloc_mmap_slow(size, alignment, false);
191
192 return (ret);
193}
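
The optimistic path described in the comment above can be demonstrated outside jemalloc. A minimal sketch under simplifying assumptions: a fixed, hypothetical 4 MiB chunk size, no mmap_unaligned bookkeeping, and plain mmap(); try_fast_chunk() is an illustrative name, not a jemalloc function:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

#define	DEMO_CHUNK	((size_t)4 << 20)	/* Hypothetical chunk size. */

/*
 * Map exactly one chunk.  If the kernel hands back an unaligned address,
 * try to extend the mapping so that the next chunk boundary falls inside
 * it, then trim the unaligned leading space.  Returns NULL when the
 * caller should fall back to the over-allocate-and-trim slow path.
 */
static void *
try_fast_chunk(void)
{
	void *ret, *ext, *hint;
	size_t offset;

	ret = mmap(NULL, DEMO_CHUNK, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (ret == MAP_FAILED)
		return (NULL);
	offset = (uintptr_t)ret & (DEMO_CHUNK - 1);
	if (offset == 0)
		return (ret);			/* Already chunk-aligned. */

	/* Try to append the missing tail right after the mapping. */
	hint = (void *)((uintptr_t)ret + DEMO_CHUNK);
	ext = mmap(hint, DEMO_CHUNK - offset, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (ext == MAP_FAILED || ext != hint) {
		/* Extension failed or landed elsewhere; give up. */
		if (ext != MAP_FAILED)
			munmap(ext, DEMO_CHUNK - offset);
		munmap(ret, DEMO_CHUNK);
		return (NULL);
	}

	/* Trim the leading space; the remaining chunk is aligned. */
	munmap(ret, DEMO_CHUNK - offset);
	return ((void *)((uintptr_t)ret + (DEMO_CHUNK - offset)));
}

int
main(void)
{
	void *c = try_fast_chunk();

	if (c != NULL)
		munmap(c, DEMO_CHUNK);
	return (0);
}

The real chunk_alloc_mmap() differs in that its extension goes through pages_map() (which already performs the exact-address check) and in that it records the outcome in mmap_unaligned, so the next call can go straight to whichever path is likely to succeed.
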
194
195bool
196chunk_dealloc_mmap(void *chunk, size_t size)
197{
198
199 if (config_munmap)
200 pages_unmap(chunk, size);
201
202 return (config_munmap == false);
203}
204
205bool
206chunk_mmap_boot(void)
207{
208
209 /*
210 * XXX For the non-TLS implementation of tsd, the first access from
211 * each thread causes memory allocation. The result is a bootstrapping
212 * problem for this particular use case, so for now just disable it by
213 * leaving it in an unbooted state.
214 */
215#ifdef JEMALLOC_TLS
216 if (mmap_unaligned_tsd_boot())
217 return (true);
218#endif
219
220 return (false);
221}
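
The bootstrapping caveat in chunk_mmap_boot() is specific to the emulated-TSD backend, where a thread's first access may itself allocate memory. With compiler-provided TLS there is no such cycle, which is what the JEMALLOC_TLS branch relies on. A minimal sketch with illustrative names, assuming GCC/Clang __thread support (not the actual malloc_tsd machinery):

#include <stdbool.h>

/*
 * With compiler TLS the per-thread flag is ordinary thread-local data:
 * reading it does not involve pthread_getspecific()-style lookups that
 * may allocate on a thread's first access, so the bootstrap cycle
 * described in the XXX comment above does not arise.
 */
static __thread bool demo_mmap_unaligned = false;

bool
demo_mmap_unaligned_get(void)
{
	return (demo_mmap_unaligned);
}

void
demo_mmap_unaligned_set(bool unaligned)
{
	demo_mmap_unaligned = unaligned;
}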