#define	JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t	dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls. This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t	dss_mtx;

/* Base address of the DSS. */
static void		*dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void		*dss_prev;
/* Current upper limit on DSS addresses. */
static void		*dss_max;

/******************************************************************************/

static void *
chunk_dss_sbrk(intptr_t increment)
{

#ifdef JEMALLOC_DSS
	return (sbrk(increment));
#else
	not_implemented();
	return (NULL);
#endif
}

dss_prec_t
chunk_dss_prec_get(void)
{
	dss_prec_t ret;

	if (!have_dss)
		return (dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	ret = dss_prec_default;
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}

bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	dss_prec_default = dss_prec;
	malloc_mutex_unlock(&dss_mtx);
	return (false);
}

void *
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit)
{
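	/*
	 * Strategy: while holding dss_mtx, extend the DSS with a single
	 * sbrk() call sized to cover both the padding required for chunk
	 * alignment and the request itself, retrying if another thread
	 * moves the break concurrently (see the loop comment below).
	 */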
	void *ret;

	cassert(have_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
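		/*
		 * dss_prev holds (void *)-1 (sbrk()'s error return) once an
		 * extension attempt has failed, after which the DSS is
		 * treated as exhausted and never retried.
		 */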
		size_t gap_size, cpad_size;
		void *cpad, *dss_next;
		intptr_t incr;
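		/*
		 * gap_size: sub-chunk padding needed to chunk-align the
		 * current break; cpad/cpad_size: chunk-aligned pad (if any)
		 * that alignment forces, recycled below on success; incr:
		 * total sbrk() increment for one attempt.
		 */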

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
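			/*
			 * Worked example with illustrative values (the chunk
			 * size is configurable; assume chunksize == 0x200000,
			 * i.e. 2 MiB): if sbrk(0) returns 0x1f0000, then
			 * gap_size == 0x10000 and cpad == ret == 0x200000, so
			 * cpad_size == 0 and incr == 0x10000 + size. With an
			 * alignment larger than chunksize, ret can land past
			 * cpad, leaving a nonzero chunk-aligned pad that is
			 * recycled on success.
			 */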
			/* Avoid an unnecessary system call. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/* Get the current end of the DSS. */
			dss_max = chunk_dss_sbrk(0);

			/* Make sure the earlier condition still holds. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment. This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
			dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
			dss_prev = chunk_dss_sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				if (cpad_size != 0) {
					chunk_hooks_t chunk_hooks =
					    CHUNK_HOOKS_INITIALIZER;
					chunk_dalloc_wrapper(arena,
					    &chunk_hooks, cpad, cpad_size,
					    true);
				}
				if (*zero) {
					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
					    ret, size);
					memset(ret, 0, size);
				}
				if (!*commit)
					*commit = pages_decommit(ret, size);
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}

bool
chunk_in_dss(void *chunk)
{
	bool ret;

	cassert(have_dss);

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(&dss_mtx);

	return (ret);
}

bool
chunk_dss_boot(void)
{

	cassert(have_dss);

	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = chunk_dss_sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;

	return (false);
}

void
chunk_dss_prefork(void)
{

	if (have_dss)
		malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{

	if (have_dss)
		malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{

	if (have_dss)
		malloc_mutex_postfork_child(&dss_mtx);
}

/******************************************************************************/