#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char	*opt_dss = DSS_DEFAULT;
size_t		opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t	curchunks;
static size_t	highchunks;

rtree_t		chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;

static void	*chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool	chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind);
static bool	chunk_commit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_decommit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool	chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

const chunk_hooks_t	chunk_hooks_default = {
	chunk_alloc_default,
	chunk_dalloc_default,
	chunk_commit_default,
	chunk_decommit_default,
	chunk_purge_default,
	chunk_split_default,
	chunk_merge_default
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed);

/******************************************************************************/

static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{

	return (arena->chunk_hooks);
}

chunk_hooks_t
chunk_hooks_get(arena_t *arena)
{
	chunk_hooks_t chunk_hooks;

	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks = chunk_hooks_get_locked(arena);
	malloc_mutex_unlock(&arena->chunks_mtx);

	return (chunk_hooks);
}

chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
	chunk_hooks_t old_chunk_hooks;

	malloc_mutex_lock(&arena->chunks_mtx);
	old_chunk_hooks = arena->chunk_hooks;
	/*
	 * Copy each field atomically so that it is impossible for readers to
	 * see partially updated pointers.  There are places where readers only
	 * need one hook function pointer (therefore no need to copy the
	 * entirety of arena->chunk_hooks), and stale reads do not affect
	 * correctness, so they perform unlocked reads.
	 */
#define	ATOMIC_COPY_HOOK(n) do {					\
	union {								\
		chunk_##n##_t	**n;					\
		void		**v;					\
	} u;								\
	u.n = &arena->chunk_hooks.n;					\
	atomic_write_p(u.v, chunk_hooks->n);				\
} while (0)
	ATOMIC_COPY_HOOK(alloc);
	ATOMIC_COPY_HOOK(dalloc);
	ATOMIC_COPY_HOOK(commit);
	ATOMIC_COPY_HOOK(decommit);
	ATOMIC_COPY_HOOK(purge);
	ATOMIC_COPY_HOOK(split);
	ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
	malloc_mutex_unlock(&arena->chunks_mtx);

	return (old_chunk_hooks);
}
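
/*
 * Illustrative sketch (not part of the library): chunk_hooks_set() is what
 * ultimately services the "arena.<i>.chunk_hooks" mallctl, so an application
 * that wants to intercept chunk management might install hooks roughly as
 * follows.  The my_* callbacks are hypothetical; each must match the
 * corresponding chunk_*_t signature exposed in the public header.
 *
 *	chunk_hooks_t hooks = {
 *		my_alloc, my_dalloc, my_commit, my_decommit,
 *		my_purge, my_split, my_merge
 *	};
 *	chunk_hooks_t old;
 *	size_t old_len = sizeof(chunk_hooks_t);
 *	if (mallctl("arena.0.chunk_hooks", &old, &old_len, &hooks,
 *	    sizeof(hooks)) != 0)
 *		abort();
 *
 * Unlocked readers may briefly observe a mix of old and new pointers, which is
 * why each field is written atomically above.
 */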

static void
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool locked)
{
	static const chunk_hooks_t uninitialized_hooks =
	    CHUNK_HOOKS_INITIALIZER;

	if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
	    0) {
		*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
		    chunk_hooks_get(arena);
	}
}

static void
chunk_hooks_assure_initialized_locked(arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
}

bool
chunk_register(const void *chunk, const extent_node_t *node)
{

	assert(extent_node_addr_get(node) == chunk);

	if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
		return (true);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nadd = (size == 0) ? 1 : size / chunksize;
		size_t cur = atomic_add_z(&curchunks, nadd);
		size_t high = atomic_read_z(&highchunks);
		while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highchunks update race.
			 */
			high = atomic_read_z(&highchunks);
		}
		if (cur > high && prof_gdump_get_unlocked())
			prof_gdump();
	}

	return (false);
}

void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
	bool err;

	err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
	assert(!err);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nsub = (size == 0) ? 1 : size / chunksize;
		assert(atomic_read_z(&curchunks) >= nsub);
		atomic_sub_z(&curchunks, nsub);
	}
}
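
/*
 * Side note (illustrative, not part of the library): the registration above is
 * what makes chunk metadata reachable from a bare pointer.  Looking up a huge
 * allocation's extent node amounts to something like
 *
 *	extent_node_t *node = chunk_lookup(ptr, true);
 *	size_t usize = extent_node_size_get(node);
 *
 * assuming chunk_lookup() is the rtree-backed accessor provided by chunk.h.
 */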

/*
 * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
 * fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size)
{
	extent_node_t key;

	assert(size == CHUNK_CEILING(size));

	extent_node_init(&key, arena, NULL, size, false, false);
	return (extent_tree_szad_nsearch(chunks_szad, &key));
}

static void *
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
    bool dalloc_node)
{
	void *ret;
	extent_node_t *node;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed, committed;

	assert(new_addr == NULL || alignment == chunksize);
	/*
	 * Cached chunks use the node linkage embedded in their headers, in
	 * which case dalloc_node is true, and new_addr is non-NULL because
	 * we're operating on a specific chunk.
	 */
	assert(dalloc_node || new_addr != NULL);

	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
	if (new_addr != NULL) {
		extent_node_t key;
		extent_node_init(&key, arena, new_addr, alloc_size, false,
		    false);
		node = extent_tree_ad_search(chunks_ad, &key);
	} else {
		node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
		    alloc_size);
	}
	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
	    size)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
	    alignment) - (uintptr_t)extent_node_addr_get(node);
	assert(new_addr == NULL || leadsize == 0);
	assert(extent_node_size_get(node) >= leadsize + size);
	trailsize = extent_node_size_get(node) - leadsize - size;
	ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
	zeroed = extent_node_zeroed_get(node);
	if (zeroed)
		*zero = true;
	committed = extent_node_committed_get(node);
	if (committed)
		*commit = true;
	/* Split the lead. */
	if (leadsize != 0 &&
	    chunk_hooks->split(extent_node_addr_get(node),
	    extent_node_size_get(node), leadsize, size, false, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	arena_chunk_cache_maybe_remove(arena, node, cache);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		extent_node_size_set(node, leadsize);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Split the trail. */
		if (chunk_hooks->split(ret, size + trailsize, size,
		    trailsize, false, arena->ind)) {
			if (dalloc_node && node != NULL)
				arena_node_dalloc(arena, node);
			malloc_mutex_unlock(&arena->chunks_mtx);
			chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
			    cache, ret, size + trailsize, zeroed, committed);
			return (NULL);
		}
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			node = arena_node_alloc(arena);
			if (node == NULL) {
				malloc_mutex_unlock(&arena->chunks_mtx);
				chunk_record(arena, chunk_hooks, chunks_szad,
				    chunks_ad, cache, ret, size + trailsize,
				    zeroed, committed);
				return (NULL);
			}
		}
		extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
		    trailsize, zeroed, committed);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
		    ret, size, zeroed, committed);
		return (NULL);
	}
	malloc_mutex_unlock(&arena->chunks_mtx);

	assert(dalloc_node || node != NULL);
	if (dalloc_node && node != NULL)
		arena_node_dalloc(arena, node);
	if (*zero) {
		if (!zeroed)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
}
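
/*
 * Worked example (illustrative only, assuming the default 2 MiB chunksize):
 * recycling size = 4 MiB with alignment = 4 MiB computes
 *
 *	alloc_size = CHUNK_CEILING(s2u(4M + 4M - 2M)) = 6 MiB
 *
 * so any extent of at least 6 MiB can satisfy the request regardless of where
 * it starts.  If the best fit is an 8 MiB extent whose base sits 2 MiB past a
 * 4 MiB boundary, then leadsize = 2 MiB and trailsize = 8M - 2M - 4M = 2 MiB;
 * both fragments are reinserted into the trees as smaller extents and only the
 * aligned middle 4 MiB is returned.
 */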

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);
	/* mmap. */
	if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);

	/* All strategies for allocation failed. */
	return (NULL);
}
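
/*
 * Caller-side sketch (illustrative only): a caller that can tolerate dirty
 * memory passes *zero = false and only pays for zeroing if it turns out to
 * need it, e.g.
 *
 *	bool zero = false, commit = true;
 *	void *chunk = chunk_alloc_core(arena, NULL, chunksize, chunksize,
 *	    &zero, &commit, arena->dss_prec);
 *	if (chunk != NULL && !zero)
 *		memset(chunk, 0, chunksize);
 *
 * If the OS happened to hand back demand-zeroed pages, *zero is flipped to
 * true and the memset can be skipped, which is the optimization described in
 * the comment above chunk_alloc_core().
 */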

void *
chunk_alloc_base(size_t size)
{
	void *ret;
	bool zero, commit;

	/*
	 * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
	 * because it's critical that chunk_alloc_base() return untouched
	 * demand-zeroed virtual memory.
	 */
	zero = true;
	commit = true;
	ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

void *
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
	void *ret;
	bool commit;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	commit = true;
	ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
	    &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
	    &commit, dalloc_node);
	if (ret == NULL)
		return (NULL);
	assert(commit);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	return (ret);
}

static arena_t *
chunk_arena_get(unsigned arena_ind)
{
	arena_t *arena;

	arena = arena_get(arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);
	return (arena);
}

static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
	void *ret;
	arena_t *arena;

	arena = chunk_arena_get(arena_ind);
	ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
	    commit, arena->dss_prec);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

static void *
chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained,
	    &arena->chunks_ad_retained, false, new_addr, size, alignment, zero,
	    commit, true));
}

void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;

	chunk_hooks_assure_initialized(arena, chunk_hooks);

	ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size,
	    alignment, zero, commit);
	if (ret == NULL) {
		ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
		    commit, arena->ind);
		if (ret == NULL)
			return (NULL);
	}

	if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	return (ret);
}

static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed)
{
	bool unzeroed;
	extent_node_t *node, *prev;
	extent_node_t key;

	assert(!cache || !zeroed);
	unzeroed = cache || !zeroed;
	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
	    false, false);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && extent_node_addr_get(node) ==
	    extent_node_addr_get(&key) && extent_node_committed_get(node) ==
	    committed && !chunk_hooks->merge(chunk, size,
	    extent_node_addr_get(node), extent_node_size_get(node), false,
	    arena->ind)) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, chunk);
		extent_node_size_set(node, size + extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
		    !unzeroed);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		node = arena_node_alloc(arena);
		if (node == NULL) {
			/*
			 * Node allocation failed, which is an exceedingly
			 * unlikely failure.  Leak chunk after making sure its
			 * pages have already been purged, so that this is only
			 * a virtual memory leak.
			 */
			if (cache) {
				chunk_purge_wrapper(arena, chunk_hooks, chunk,
				    size, 0, size);
			}
			goto label_return;
		}
		extent_node_init(node, arena, chunk, size, !unzeroed,
		    committed);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
	    extent_node_size_get(prev)) == chunk &&
	    extent_node_committed_get(prev) == committed &&
	    !chunk_hooks->merge(extent_node_addr_get(prev),
	    extent_node_size_get(prev), chunk, size, false, arena->ind)) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);
		arena_chunk_cache_maybe_remove(arena, prev, cache);
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, extent_node_addr_get(prev));
		extent_node_size_set(node, extent_node_size_get(prev) +
		    extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
		    extent_node_zeroed_get(node));
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);

		arena_node_dalloc(arena, prev);
	}

label_return:
	malloc_mutex_unlock(&arena->chunks_mtx);
}

void
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool committed)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
	    &arena->chunks_ad_cached, true, chunk, size, false, committed);
	arena_maybe_purge(arena);
}

void
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool zeroed, bool committed)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	/* Try to deallocate. */
	if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
		return;
	/* Try to decommit; purge if that fails. */
	if (committed) {
		committed = chunk_hooks->decommit(chunk, size, 0, size,
		    arena->ind);
	}
	zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
	    arena->ind);
	chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
	    &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
}

static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind)
{

	if (!have_dss || !chunk_in_dss(chunk))
		return (chunk_dalloc_mmap(chunk, size));
	return (true);
}

void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool committed)
{

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	chunk_hooks->dalloc(chunk, size, committed, arena->ind);
	if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
		JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
}

static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

bool
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}

static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
	    length));
}

bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, size_t offset, size_t length)
{

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}

static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	return (false);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
		return (true);

	return (false);
}

static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{

	return ((rtree_node_elm_t *)base_alloc(nelms *
	    sizeof(rtree_node_elm_t)));
}

bool
chunk_boot(void)
{
#ifdef _WIN32
	SYSTEM_INFO info;
	GetSystemInfo(&info);

	/*
	 * Verify actual page size is equal to or an integral multiple of
	 * configured page size.
	 */
	if (info.dwPageSize & ((1U << LG_PAGE) - 1))
		return (true);

	/*
	 * Configure chunksize (if not set) to match granularity (usually 64K),
	 * so pages_map will always take fast path.
	 */
	if (!opt_lg_chunk) {
		opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
		    - 1;
	}
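	/*
	 * Worked example (illustrative only): with the usual 64 KiB
	 * allocation granularity, dwAllocationGranularity == 0x10000, so
	 * ffs_u(0x10000) == 17 and opt_lg_chunk becomes 16, i.e. a 64 KiB
	 * chunksize.  A 4 KiB dwPageSize passes the LG_PAGE check above when
	 * jemalloc was configured for 4 KiB pages.
	 */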
#else
	if (!opt_lg_chunk)
		opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (have_dss && chunk_dss_boot())
		return (true);
	if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
	    opt_lg_chunk), chunks_rtree_node_alloc, NULL))
		return (true);

	return (false);
}

void
chunk_prefork(void)
{

	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
}