chunk_mmap.c: r235238 → r242844

The only functional change in this diff is to pages_purge(), which now returns whether the purged pages may still be nonzero. The rest of the file is unchanged context.
#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
    bool *zero);

/******************************************************************************/

static void *
pages_map(void *addr, size_t size)
{
    void *ret;

    assert(size != 0);

#ifdef _WIN32
    /*
     * If VirtualAlloc can't allocate at the given address when one is
     * given, it fails and returns NULL.
     */
    ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
        PAGE_READWRITE);
#else
    /*
     * We don't use MAP_FIXED here, because it can cause the *replacement*
     * of existing mappings, and we only want to create new mappings.
     */
    ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
        -1, 0);
    assert(ret != NULL);

    if (ret == MAP_FAILED)
        ret = NULL;
    else if (addr != NULL && ret != addr) {
        /*
         * We succeeded in mapping memory, but not in the right place.
         */
        if (munmap(ret, size) == -1) {
            char buf[BUFERROR_BUF];

            buferror(buf, sizeof(buf));
            malloc_printf("<jemalloc>: Error in munmap(): %s\n",
                buf);
            if (opt_abort)
                abort();
        }
        ret = NULL;
    }
#endif
    assert(ret == NULL || (addr == NULL && ret != addr)
        || (addr != NULL && ret == addr));
    return (ret);
}
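The MAP_FIXED comment above is easy to demonstrate. Below is a minimal standalone sketch, not part of jemalloc, assuming a POSIX system where MAP_ANON is visible (on Linux this may require _DEFAULT_SOURCE; error handling is omitted for brevity). A plain address hint either lands at the requested address or somewhere harmless, whereas MAP_FIXED silently replaces the existing mapping:

    #include <sys/mman.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        char *a = mmap(NULL, page, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);
        strcpy(a, "live data");

        /* Hinted map: a's range is taken, so the kernel picks another. */
        void *b = mmap(a, page, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);
        printf("hint  -> %p (asked for %p), a still holds \"%s\"\n",
            b, (void *)a, a);

        /* MAP_FIXED: replaces the mapping; "live data" is lost and the
         * fresh anonymous pages read back as zeros. */
        mmap(a, page, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
        printf("fixed -> a now holds \"%s\"\n", a);
        return (0);
    }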

static void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
    if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
    if (munmap(addr, size) == -1)
#endif
    {
        char buf[BUFERROR_BUF];

        buferror(buf, sizeof(buf));
        malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
            "VirtualFree"
#else
            "munmap"
#endif
            "(): %s\n", buf);
        if (opt_abort)
            abort();
    }
}

static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
    void *ret = (void *)((uintptr_t)addr + leadsize);

    assert(alloc_size >= leadsize + size);
#ifdef _WIN32
    {
        void *new_addr;

        pages_unmap(addr, alloc_size);
        new_addr = pages_map(ret, size);
        if (new_addr == ret)
            return (ret);
        if (new_addr)
            pages_unmap(new_addr, size);
        return (NULL);
    }
#else
    {
        size_t trailsize = alloc_size - leadsize - size;

        if (leadsize != 0)
            pages_unmap(addr, leadsize);
        if (trailsize != 0)
            pages_unmap((void *)((uintptr_t)ret + size), trailsize);
        return (ret);
    }
#endif
}
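Note that the Windows branch of pages_trim() is inherently racy: VirtualFree(MEM_RELEASE) can only release an entire reservation, never part of one, so the whole allocation is released and the aligned interior immediately re-reserved. Another thread can claim the address range between those two calls, in which case pages_map() fails to land at ret and pages_trim() returns NULL; chunk_alloc_mmap_slow() below compensates by retrying in a loop.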
The changed hunk, condensed to a unified diff (lines marked - are r235238, + are r242844):

 -void
 +bool
  pages_purge(void *addr, size_t length)
  {
 +    bool unzeroed;

  #ifdef _WIN32
      VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
 +    unzeroed = true;
  #else
  # ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
  # define JEMALLOC_MADV_PURGE MADV_DONTNEED
 +# define JEMALLOC_MADV_ZEROS true
  # elif defined(JEMALLOC_PURGE_MADVISE_FREE)
  # define JEMALLOC_MADV_PURGE MADV_FREE
 +# define JEMALLOC_MADV_ZEROS false
  # else
  # error "No method defined for purging unused dirty pages."
  # endif
 -    madvise(addr, length, JEMALLOC_MADV_PURGE);
 +    int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
 +    unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
 +# undef JEMALLOC_MADV_PURGE
 +# undef JEMALLOC_MADV_ZEROS
  #endif
 +    return (unzeroed);
  }
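The motivation for the new return value: MADV_DONTNEED on Linux guarantees that the next access to the purged range observes zero-filled pages, while MADV_FREE (e.g. on FreeBSD) may leave the old contents in place until the kernel actually reclaims the pages, and a failed madvise() leaves them in place unconditionally. A caller that promised zeroed memory therefore has to re-zero by hand when pages_purge() reports true. A hedged sketch, with zero_requested, chunk, and size as illustrative placeholders (not from this file):

    bool unzeroed = pages_purge(chunk, size);
    if (zero_requested && unzeroed) {
        /* The purge primitive gave no zeroing guarantee; zero manually. */
        memset(chunk, 0, size);
    }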

static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
    void *ret, *pages;
    size_t alloc_size, leadsize;

    alloc_size = size + alignment - PAGE;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    do {
        pages = pages_map(NULL, alloc_size);
        if (pages == NULL)
            return (NULL);
        leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
            (uintptr_t)pages;
        ret = pages_trim(pages, alloc_size, leadsize, size);
    } while (ret == NULL);

    assert(ret != NULL);
    *zero = true;
    return (ret);
}
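To make the over-allocate-and-trim arithmetic concrete, a worked example with invented numbers (4 KiB pages, a 4 MiB chunk-aligned request; for a power-of-two alignment, ALIGNMENT_CEILING is effectively the usual (x + a - 1) & ~(a - 1) round-up):

    size       = 0x400000                        (4 MiB)
    alignment  = 0x400000
    alloc_size = size + alignment - PAGE         = 0x7ff000
    pages      = 0x7f0000003000                  (whatever mmap returned)
    aligned    = (pages + 0x3fffff) & ~0x3fffff  = 0x7f0000400000
    leadsize   = aligned - pages                 = 0x3fd000
    trailsize  = alloc_size - leadsize - size    = 0x2000

pages_trim() then unmaps the 0x3fd000-byte lead and the 0x2000-byte tail (POSIX path), leaving a single aligned 4 MiB mapping.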

void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
    void *ret;
    size_t offset;

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings. The reliable, but
     * slow method is to create a mapping that is over-sized, then trim
     * the excess. However, that always results in one or two calls to
     * pages_unmap().
     *
     * Optimistically try mapping precisely the right amount before
     * falling back to the slow method, with the expectation that the
     * optimistic approach works most of the time.
     */

    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = pages_map(NULL, size);
    if (ret == NULL)
        return (NULL);
    offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
    if (offset != 0) {
        pages_unmap(ret, size);
        return (chunk_alloc_mmap_slow(size, alignment, zero));
    }

    assert(ret != NULL);
    *zero = true;
    return (ret);
}
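The optimistic path in numbers (invented addresses, 4 MiB alignment; for a power-of-two alignment, ALIGNMENT_ADDR2OFFSET is effectively ret & (alignment - 1)):

    ret = 0x7f0000400000   offset = 0        -> already aligned, return ret
    ret = 0x7f0000403000   offset = 0x3000   -> pages_unmap(), slow path

Only when the kernel happens to return an unaligned address does the allocation pay for the over-sized mapping and the extra pages_unmap() calls.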

bool
chunk_dealloc_mmap(void *chunk, size_t size)
{

    if (config_munmap)
        pages_unmap(chunk, size);

    return (config_munmap == false);
}
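A caller can use the return value to tell the two outcomes apart: false means the range was actually unmapped, true means munmap() support is compiled out (config_munmap is false) and the chunk is still mapped, so it should be kept for reuse rather than forgotten. A hypothetical sketch, with chunk_record() standing in for whatever recycling hook the caller has (not defined in this file):

    /* Sketch only: retain still-mapped chunks for later reuse. */
    if (chunk_dealloc_mmap(chunk, size))
        chunk_record(chunk, size);    /* hypothetical recycling hook */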