Side-by-side diff (full, compact view) — left column: deleted (old), right column: added (new)
chunk_mmap.c: revision 288090 (old) vs. revision 296221 (new)
1#define JEMALLOC_CHUNK_MMAP_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5
6static void *
7chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
8{
9 void *ret;
10 size_t alloc_size;
11
12 alloc_size = size + alignment - PAGE;
13 /* Beware size_t wrap-around. */
14 if (alloc_size < size)
15 return (NULL);
16 do {
17 void *pages;
18 size_t leadsize;
19 pages = pages_map(NULL, alloc_size);
20 if (pages == NULL)
21 return (NULL);
22 leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
23 (uintptr_t)pages;
24 ret = pages_trim(pages, alloc_size, leadsize, size);
25 } while (ret == NULL);
26
27 assert(ret != NULL);
28 *zero = true;
29 if (!*commit)
30 *commit = pages_decommit(ret, size);
31 return (ret);
32}
33
34void *
1#define JEMALLOC_CHUNK_MMAP_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5
6static void *
7chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
8{
9 void *ret;
10 size_t alloc_size;
11
12 alloc_size = size + alignment - PAGE;
13 /* Beware size_t wrap-around. */
14 if (alloc_size < size)
15 return (NULL);
16 do {
17 void *pages;
18 size_t leadsize;
19 pages = pages_map(NULL, alloc_size);
20 if (pages == NULL)
21 return (NULL);
22 leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
23 (uintptr_t)pages;
24 ret = pages_trim(pages, alloc_size, leadsize, size);
25 } while (ret == NULL);
26
27 assert(ret != NULL);
28 *zero = true;
29 if (!*commit)
30 *commit = pages_decommit(ret, size);
31 return (ret);
32}
33
/*
 * NOTE(review): the lines below are two interleaved columns of a
 * side-by-side diff of chunk_alloc_mmap().  The r288090 version takes
 * (size, alignment, zero, commit); the r296221 version adds a leading
 * new_addr parameter and first tries to map at that address.  Neither
 * column is contiguous here, so the text is kept byte-for-byte intact
 * and only annotated.
 */
34void *
/* Old (r288090) signature. */
35chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
/* New (r296221) signature: adds new_addr as a placement request. */
35chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
36 bool *commit)
/* Old-version body begins here. */
36{
37 void *ret;
38 size_t offset;
39
40 /*
41 * Ideally, there would be a way to specify alignment to mmap() (like
42 * NetBSD has), but in the absence of such a feature, we have to work
43 * hard to efficiently create aligned mappings. The reliable, but
44 * slow method is to create a mapping that is over-sized, then trim the
45 * excess. However, that always results in one or two calls to
46 * pages_unmap().
47 *
48 * Optimistically try mapping precisely the right amount before falling
49 * back to the slow method, with the expectation that the optimistic
50 * approach works most of the time.
51 */
52
53 assert(alignment != 0);
54 assert((alignment & chunksize_mask) == 0);
55
/* New-version body begins here (preamble identical to the old one). */
37{
38 void *ret;
39 size_t offset;
40
41 /*
42 * Ideally, there would be a way to specify alignment to mmap() (like
43 * NetBSD has), but in the absence of such a feature, we have to work
44 * hard to efficiently create aligned mappings. The reliable, but
45 * slow method is to create a mapping that is over-sized, then trim the
46 * excess. However, that always results in one or two calls to
47 * pages_unmap().
48 *
49 * Optimistically try mapping precisely the right amount before falling
50 * back to the slow method, with the expectation that the optimistic
51 * approach works most of the time.
52 */
53
54 assert(alignment != 0);
55 assert((alignment & chunksize_mask) == 0);
56
/* Old version: anonymous mapping only. */
56 ret = pages_map(NULL, size);
57 if (ret == NULL)
58 return (NULL);
/*
 * New version: try new_addr first.  A NULL or exact-match result is
 * returned as-is; the assert records that any other non-NULL result
 * can only occur when new_addr was NULL.
 */
57 ret = pages_map(new_addr, size);
58 if (ret == NULL || ret == new_addr)
59 return (ret);
60 assert(new_addr == NULL);
/* Old-version tail: fall back to the slow path when misaligned. */
59 offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
60 if (offset != 0) {
61 pages_unmap(ret, size);
62 return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
63 }
64
65 assert(ret != NULL);
66 *zero = true;
67 if (!*commit)
68 *commit = pages_decommit(ret, size);
69 return (ret);
70}
71
72bool
73chunk_dalloc_mmap(void *chunk, size_t size)
74{
75
76 if (config_munmap)
77 pages_unmap(chunk, size);
78
79 return (!config_munmap);
80}
/*
 * NOTE(review): tail fragment of the r296221 chunk_alloc_mmap() diff
 * column; its head appears earlier, interleaved with the r288090 copy.
 * Text kept byte-for-byte intact; comments only.
 */
61 offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
/* Misaligned optimistic mapping: discard it and take the slow path. */
62 if (offset != 0) {
63 pages_unmap(ret, size);
64 return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
65 }
66
67 assert(ret != NULL);
68 *zero = true;
69 if (!*commit)
70 *commit = pages_decommit(ret, size);
71 return (ret);
72}
73
74bool
75chunk_dalloc_mmap(void *chunk, size_t size)
76{
77
78 if (config_munmap)
79 pages_unmap(chunk, size);
80
81 return (!config_munmap);
82}