--- chunk_mmap.c (288090)
+++ chunk_mmap.c (296221)
#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{

--- 18 unchanged lines hidden ---

    assert(ret != NULL);
    *zero = true;
    if (!*commit)
        *commit = pages_decommit(ret, size);
    return (ret);
}

void *
-chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
+chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool *commit)
{
    void *ret;
    size_t offset;

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings. The reliable, but

--- 4 unchanged lines hidden ---

     * Optimistically try mapping precisely the right amount before falling
     * back to the slow method, with the expectation that the optimistic
     * approach works most of the time.
     */

    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

-    ret = pages_map(NULL, size);
-    if (ret == NULL)
-        return (NULL);
+    ret = pages_map(new_addr, size);
+    if (ret == NULL || ret == new_addr)
+        return (ret);
+    assert(new_addr == NULL);
    offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
    if (offset != 0) {
        pages_unmap(ret, size);
        return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
    }

    assert(ret != NULL);
    *zero = true;

--- 14 unchanged lines hidden ---
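
A note on the new parameter: the change routes a caller-supplied address hint straight into pages_map(), and the early return now covers both outcomes that need no further work, failure (NULL) and a mapping that landed exactly at new_addr. The assert that follows records the contract the code relies on: with a non-NULL hint, pages_map() is expected to either deliver that exact address or fail, never to return a different one. It also means a successful hinted mapping skips the ALIGNMENT_ADDR2OFFSET() retest, the hint presumably being suitably aligned already. The helper below is not jemalloc code; it is a minimal sketch of that hint-or-fail contract built on plain mmap()/munmap(), and the name try_map_at() is invented for the example.

#include <stddef.h>
#include <sys/mman.h>

/*
 * Illustration only: either the mapping ends up exactly at the requested
 * address, or it is torn down and NULL is returned, mirroring the behavior
 * chunk_alloc_mmap() now assumes of pages_map() for a non-NULL new_addr.
 */
static void *
try_map_at(void *addr, size_t size)
{
    void *ret = mmap(addr, size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);

    if (ret == MAP_FAILED)
        return (NULL);
    if (addr != NULL && ret != addr) {
        /* The kernel ignored the hint; undo rather than keep the mapping. */
        munmap(ret, size);
        return (NULL);
    }
    return (ret);
}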
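
The fast path shared by both revisions hinges on ALIGNMENT_ADDR2OFFSET(): keep the optimistic mapping only if it already sits on an alignment boundary, otherwise unmap it and fall back to chunk_alloc_mmap_slow(). The macro's definition and the slow path's body are hidden in this diff, so what follows is a hedged sketch of the usual techniques (a power-of-two mask for the offset, over-allocate-and-trim for the fallback) rather than a copy of jemalloc's internals; addr2offset() and map_aligned_slow() are illustrative names.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/*
 * Offset of addr from the previous alignment boundary.  Assumes alignment
 * is a nonzero power of two.
 */
static size_t
addr2offset(void *addr, size_t alignment)
{
    return ((size_t)((uintptr_t)addr & (alignment - 1)));
}

/*
 * The classic slow fallback: map more than needed so an aligned region of
 * size bytes must exist inside, then trim the head and tail.  Assumes size
 * and alignment are multiples of the page size; overflow checks omitted.
 */
static void *
map_aligned_slow(size_t size, size_t alignment)
{
    size_t alloc_size = size + alignment;
    char *base = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t aligned;
    size_t lead, trail;

    if (base == MAP_FAILED)
        return (NULL);
    aligned = ((uintptr_t)base + alignment - 1) & ~((uintptr_t)alignment - 1);
    lead = (size_t)(aligned - (uintptr_t)base);
    trail = alloc_size - lead - size;
    if (lead != 0)
        munmap(base, lead);
    if (trail != 0)
        munmap((char *)aligned + size, trail);
    return ((void *)aligned);
}

The real chunk_alloc_mmap_slow() also has to honor the zero and commit flags, as its visible tail above shows; the sketch ignores them.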