--- chunk_mmap.c	(296221)
+++ chunk_mmap.c	(299587)
@@ -1,38 +1,36 @@
 #define JEMALLOC_CHUNK_MMAP_C_
 #include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
 
 static void *
 chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
 {
 	void *ret;
 	size_t alloc_size;
 
-	alloc_size = size + alignment - PAGE;
+	alloc_size = size + alignment;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
 	do {
 		void *pages;
 		size_t leadsize;
-		pages = pages_map(NULL, alloc_size);
+		pages = pages_map(NULL, alloc_size, commit);
 		if (pages == NULL)
 			return (NULL);
 		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
 		    (uintptr_t)pages;
-		ret = pages_trim(pages, alloc_size, leadsize, size);
+		ret = pages_trim(pages, alloc_size, leadsize, size, commit);
 	} while (ret == NULL);
 
 	assert(ret != NULL);
 	*zero = true;
-	if (!*commit)
-		*commit = pages_decommit(ret, size);
 	return (ret);
 }
 
 void *
 chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
     bool *commit)
 {
 	void *ret;
@@ -49,34 +47,32 @@
 	 * Optimistically try mapping precisely the right amount before falling
 	 * back to the slow method, with the expectation that the optimistic
 	 * approach works most of the time.
 	 */
 
 	assert(alignment != 0);
 	assert((alignment & chunksize_mask) == 0);
 
-	ret = pages_map(new_addr, size);
+	ret = pages_map(new_addr, size, commit);
 	if (ret == NULL || ret == new_addr)
 		return (ret);
 	assert(new_addr == NULL);
 	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
 	if (offset != 0) {
 		pages_unmap(ret, size);
 		return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
 	}
 
 	assert(ret != NULL);
 	*zero = true;
-	if (!*commit)
-		*commit = pages_decommit(ret, size);
 	return (ret);
 }
 
 bool
 chunk_dalloc_mmap(void *chunk, size_t size)
 {
 
 	if (config_munmap)
 		pages_unmap(chunk, size);
 
 	return (!config_munmap);
 }
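In summary, this revision threads the caller's commit state down into the pages layer: every call to pages_map() and pages_trim() gains a commit argument, which makes the explicit "if (!*commit) *commit = pages_decommit(ret, size);" sequences in both chunk_alloc_mmap_slow() and chunk_alloc_mmap() unnecessary, presumably because the pages layer can now establish the requested commit state itself. The slow path also over-allocates by a full alignment rather than alignment - PAGE before trimming.

For context, the slow path's technique is to map an over-sized region, locate the first suitably aligned address inside it, and release the unused lead and trail portions. The sketch below is a minimal standalone illustration of that idea using plain POSIX mmap()/munmap(); the name aligned_map_slow is hypothetical, and it omits jemalloc's commit handling and its retry loop around pages_trim():

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

/*
 * Illustrative sketch only (not jemalloc's pages layer): map size + alignment
 * bytes, round up to the first aligned address within the mapping, and trim
 * the excess.  Assumes alignment is a nonzero power of two that is a multiple
 * of the page size, and minimal error handling.
 */
static void *
aligned_map_slow(size_t size, size_t alignment)
{
	size_t alloc_size = size + alignment;

	/* Beware size_t wrap-around, as in chunk_alloc_mmap_slow(). */
	if (alloc_size < size)
		return (NULL);

	void *pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (pages == MAP_FAILED)
		return (NULL);

	uintptr_t addr = (uintptr_t)pages;
	uintptr_t aligned = (addr + alignment - 1) & ~((uintptr_t)alignment - 1);
	size_t leadsize = (size_t)(aligned - addr);
	size_t trailsize = alloc_size - leadsize - size;

	/* Unmap the unaligned lead and the leftover tail. */
	if (leadsize != 0)
		(void)munmap(pages, leadsize);
	if (trailsize != 0)
		(void)munmap((void *)(aligned + size), trailsize);
	return ((void *)aligned);
}

The do/while loop in chunk_alloc_mmap_slow() exists because pages_trim() can fail and return NULL, in which case the whole oversized mapping is retried; the sketch simply assumes a POSIX system where munmap() on a sub-range of a mapping succeeds.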