jemalloc.c: diff of revision 235322 (old) against revision 242844 (new). In the hunks below, lines prefixed with '-' appear only in r235322 and lines prefixed with '+' appear only in r242844; unprefixed lines are common to both revisions.
1#define JEMALLOC_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7malloc_tsd_data(, arenas, arena_t *, NULL)
8malloc_tsd_data(, thread_allocated, thread_allocated_t,

--- 23 unchanged lines hidden (view full) ---

32bool opt_xmalloc = false;
33bool opt_zero = false;
34size_t opt_narenas = 0;
35
36unsigned ncpus;
37
38malloc_mutex_t arenas_lock;
39arena_t **arenas;
1#define JEMALLOC_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7malloc_tsd_data(, arenas, arena_t *, NULL)
8malloc_tsd_data(, thread_allocated, thread_allocated_t,

--- 23 unchanged lines hidden (view full) ---

32bool opt_xmalloc = false;
33bool opt_zero = false;
34size_t opt_narenas = 0;
35
36unsigned ncpus;
37
38malloc_mutex_t arenas_lock;
39arena_t **arenas;
-unsigned narenas;
+unsigned narenas_total;
+unsigned narenas_auto;
41
42/* Set to true once the allocator has been initialized. */
43static bool malloc_initialized = false;
44
45#ifdef JEMALLOC_THREADED_INIT
46/* Used to let the initializing thread recursively allocate. */
47# define NO_INITIALIZER ((unsigned long)0)
48# define INITIALIZER pthread_self()

--- 94 unchanged lines hidden (view full) ---

143}
144
145/* Slow path, called only by choose_arena(). */
146arena_t *
147choose_arena_hard(void)
148{
149 arena_t *ret;
150
42
43/* Set to true once the allocator has been initialized. */
44static bool malloc_initialized = false;
45
46#ifdef JEMALLOC_THREADED_INIT
47/* Used to let the initializing thread recursively allocate. */
48# define NO_INITIALIZER ((unsigned long)0)
49# define INITIALIZER pthread_self()

--- 94 unchanged lines hidden (view full) ---

144}
145
146/* Slow path, called only by choose_arena(). */
147arena_t *
148choose_arena_hard(void)
149{
150 arena_t *ret;
151
-	if (narenas > 1) {
+	if (narenas_auto > 1) {
 		unsigned i, choose, first_null;

 		choose = 0;
-		first_null = narenas;
+		first_null = narenas_auto;
 		malloc_mutex_lock(&arenas_lock);
 		assert(arenas[0] != NULL);
-		for (i = 1; i < narenas; i++) {
+		for (i = 1; i < narenas_auto; i++) {
 			if (arenas[i] != NULL) {
 				/*
 				 * Choose the first arena that has the lowest
 				 * number of threads assigned to it.
 				 */
 				if (arenas[i]->nthreads <
 				    arenas[choose]->nthreads)
 					choose = i;
-			} else if (first_null == narenas) {
+			} else if (first_null == narenas_auto) {
 				/*
 				 * Record the index of the first uninitialized
 				 * arena, in case all extant arenas are in use.
 				 *
 				 * NB: It is possible for there to be
 				 * discontinuities in terms of initialized
 				 * versus uninitialized arenas, due to the
 				 * "thread.arena" mallctl.
 				 */
 				first_null = i;
 			}
 		}

-		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
+		if (arenas[choose]->nthreads == 0
+		    || first_null == narenas_auto) {
182 /*
183 * Use an unloaded arena, or the least loaded arena if
184 * all arenas are already initialized.
185 */
186 ret = arenas[choose];
187 } else {
188 /* Initialize a new arena. */
189 ret = arenas_extend(first_null);

--- 12 unchanged lines hidden (view full) ---

202 return (ret);
203}
204
205static void
206stats_print_atexit(void)
207{
208
209 if (config_tcache && config_stats) {
184 /*
185 * Use an unloaded arena, or the least loaded arena if
186 * all arenas are already initialized.
187 */
188 ret = arenas[choose];
189 } else {
190 /* Initialize a new arena. */
191 ret = arenas_extend(first_null);

--- 12 unchanged lines hidden (view full) ---

204 return (ret);
205}
206
207static void
208stats_print_atexit(void)
209{
210
211 if (config_tcache && config_stats) {
-		unsigned i;
+		unsigned narenas, i;
211
212 /*
213 * Merge stats from extant threads. This is racy, since
214 * individual threads do not lock when recording tcache stats
215 * events. As a consequence, the final stats may be slightly
216 * out of date by the time they are reported, if other threads
217 * continue to allocate.
218 */
213
214 /*
215 * Merge stats from extant threads. This is racy, since
216 * individual threads do not lock when recording tcache stats
217 * events. As a consequence, the final stats may be slightly
218 * out of date by the time they are reported, if other threads
219 * continue to allocate.
220 */
-		for (i = 0; i < narenas; i++) {
+		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
220 arena_t *arena = arenas[i];
221 if (arena != NULL) {
222 tcache_t *tcache;
223
224 /*
225 * tcache_stats_merge() locks bins, so if any
226 * code is introduced that acquires both arena
227 * and bin locks in the opposite order,

--- 25 unchanged lines hidden (view full) ---

253 long result;
254
255#ifdef _WIN32
256 SYSTEM_INFO si;
257 GetSystemInfo(&si);
258 result = si.dwNumberOfProcessors;
259#else
260 result = sysconf(_SC_NPROCESSORS_ONLN);
222 arena_t *arena = arenas[i];
223 if (arena != NULL) {
224 tcache_t *tcache;
225
226 /*
227 * tcache_stats_merge() locks bins, so if any
228 * code is introduced that acquires both arena
229 * and bin locks in the opposite order,

--- 25 unchanged lines hidden (view full) ---

255 long result;
256
257#ifdef _WIN32
258 SYSTEM_INFO si;
259 GetSystemInfo(&si);
260 result = si.dwNumberOfProcessors;
261#else
262 result = sysconf(_SC_NPROCESSORS_ONLN);
263#endif
 	if (result == -1) {
 		/* Error. */
 		ret = 1;
-	}
-#endif
-	ret = (unsigned)result;
+	} else {
+		ret = (unsigned)result;
+	}
267
268 return (ret);
269}
270
271void
272arenas_cleanup(void *arg)
273{
274 arena_t *arena = *(arena_t **)arg;

--- 101 unchanged lines hidden (view full) ---

376static void
377malloc_conf_init(void)
378{
379 unsigned i;
380 char buf[PATH_MAX + 1];
381 const char *opts, *k, *v;
382 size_t klen, vlen;
383
270
271 return (ret);
272}
273
274void
275arenas_cleanup(void *arg)
276{
277 arena_t *arena = *(arena_t **)arg;

--- 101 unchanged lines hidden (view full) ---

379static void
380malloc_conf_init(void)
381{
382 unsigned i;
383 char buf[PATH_MAX + 1];
384 const char *opts, *k, *v;
385 size_t klen, vlen;
386
387 /*
388 * Automatically configure valgrind before processing options. The
389 * valgrind option remains in jemalloc 3.x for compatibility reasons.
390 */
391 if (config_valgrind) {
392 opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
393 if (config_fill && opt_valgrind) {
394 opt_junk = false;
395 assert(opt_zero == false);
396 opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
397 opt_redzone = true;
398 }
399 if (config_tcache && opt_valgrind)
400 opt_tcache = false;
401 }
402
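The added block above keys off RUNNING_ON_VALGRIND, the client-request macro from Valgrind's public header, and pre-sets the junk/quarantine/redzone/tcache defaults accordingly. A minimal sketch of the same detection from application code, assuming the Valgrind development header is installed (illustrative only, not part of this diff):

#include <stdio.h>
#include <valgrind/valgrind.h>

int
main(void)
{

	/* Nonzero when the process is running under a Valgrind tool. */
	if (RUNNING_ON_VALGRIND != 0)
		printf("valgrind detected; redzones and quarantine favored\n");
	else
		printf("running natively\n");
	return (0);
}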
384 for (i = 0; i < 3; i++) {
385 /* Get runtime configuration. */
386 switch (i) {
387 case 0:
388 if (je_malloc_conf != NULL) {
389 /*
390 * Use options that were compiled into the
391 * program.

--- 145 unchanged lines hidden (view full) ---

537 * Chunks always require at least one header page, plus
538 * one data page in the absence of redzones, or three
539 * pages in the presence of redzones. In order to
540 * simplify options processing, fix the limit based on
541 * config_fill.
542 */
543 CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
544 (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
403 for (i = 0; i < 3; i++) {
404 /* Get runtime configuration. */
405 switch (i) {
406 case 0:
407 if (je_malloc_conf != NULL) {
408 /*
409 * Use options that were compiled into the
410 * program.

--- 145 unchanged lines hidden (view full) ---

556 * Chunks always require at least one header page, plus
557 * one data page in the absence of redzones, or three
558 * pages in the presence of redzones. In order to
559 * simplify options processing, fix the limit based on
560 * config_fill.
561 */
562 CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
563 (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
564 if (strncmp("dss", k, klen) == 0) {
565 int i;
566 bool match = false;
567 for (i = 0; i < dss_prec_limit; i++) {
568 if (strncmp(dss_prec_names[i], v, vlen)
569 == 0) {
570 if (chunk_dss_prec_set(i)) {
571 malloc_conf_error(
572 "Error setting dss",
573 k, klen, v, vlen);
574 } else {
575 opt_dss =
576 dss_prec_names[i];
577 match = true;
578 break;
579 }
580 }
581 }
582 if (match == false) {
583 malloc_conf_error("Invalid conf value",
584 k, klen, v, vlen);
585 }
586 continue;
587 }
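The new "dss" handler above matches the option value against dss_prec_names[] and applies it with chunk_dss_prec_set(), controlling whether chunk memory prefers sbrk(2) (dss) or mmap(2). A hypothetical application-side way to exercise it is the je_malloc_conf override that case 0 of malloc_conf_init() reads; the value "secondary" is assumed here to be one of the dss_prec_names entries (the canonical list lives in chunk_dss.c):

#include <stdlib.h>

/* Assumed option string; consult dss_prec_names for the exact values. */
const char	*je_malloc_conf = "dss:secondary,narenas:4";

int
main(void)
{
	void	*p = malloc(4096);	/* mmap preferred, sbrk as fallback */

	free(p);
	return (0);
}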
545 CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
546 SIZE_T_MAX)
547 CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
548 -1, (sizeof(size_t) << 3) - 1)
549 CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
550 if (config_fill) {
551 CONF_HANDLE_BOOL(opt_junk, "junk")
552 CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
553 0, SIZE_T_MAX)
554 CONF_HANDLE_BOOL(opt_redzone, "redzone")
555 CONF_HANDLE_BOOL(opt_zero, "zero")
556 }
557 if (config_utrace) {
558 CONF_HANDLE_BOOL(opt_utrace, "utrace")
559 }
560 if (config_valgrind) {
588 CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
589 SIZE_T_MAX)
590 CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
591 -1, (sizeof(size_t) << 3) - 1)
592 CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
593 if (config_fill) {
594 CONF_HANDLE_BOOL(opt_junk, "junk")
595 CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
596 0, SIZE_T_MAX)
597 CONF_HANDLE_BOOL(opt_redzone, "redzone")
598 CONF_HANDLE_BOOL(opt_zero, "zero")
599 }
600 if (config_utrace) {
601 CONF_HANDLE_BOOL(opt_utrace, "utrace")
602 }
603 if (config_valgrind) {
-				bool hit;
-				CONF_HANDLE_BOOL_HIT(opt_valgrind,
-				    "valgrind", hit)
-				if (config_fill && opt_valgrind && hit) {
-					opt_junk = false;
-					opt_zero = false;
-					if (opt_quarantine == 0) {
-						opt_quarantine =
-						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
-					}
-					opt_redzone = true;
-				}
-				if (hit)
-					continue;
+				CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
575 }
576 if (config_xmalloc) {
577 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
578 }
579 if (config_tcache) {
580 CONF_HANDLE_BOOL(opt_tcache, "tcache")
581 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
582 "lg_tcache_max", -1,

--- 112 unchanged lines hidden (view full) ---

695
696 if (malloc_mutex_init(&arenas_lock))
697 return (true);
698
699 /*
700 * Create enough scaffolding to allow recursive allocation in
701 * malloc_ncpus().
702 */
605 }
606 if (config_xmalloc) {
607 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
608 }
609 if (config_tcache) {
610 CONF_HANDLE_BOOL(opt_tcache, "tcache")
611 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
612 "lg_tcache_max", -1,

--- 112 unchanged lines hidden (view full) ---

725
726 if (malloc_mutex_init(&arenas_lock))
727 return (true);
728
729 /*
730 * Create enough scaffolding to allow recursive allocation in
731 * malloc_ncpus().
732 */
-	narenas = 1;
+	narenas_total = narenas_auto = 1;
 	arenas = init_arenas;
-	memset(arenas, 0, sizeof(arena_t *) * narenas);
+	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
706
707 /*
708 * Initialize one arena here. The rest are lazily created in
709 * choose_arena_hard().
710 */
711 arenas_extend(0);
712 if (arenas[0] == NULL) {
713 malloc_mutex_unlock(&init_lock);

--- 41 unchanged lines hidden (view full) ---

755 * For SMP systems, create more than one arena per CPU by
756 * default.
757 */
758 if (ncpus > 1)
759 opt_narenas = ncpus << 2;
760 else
761 opt_narenas = 1;
762 }
736
737 /*
738 * Initialize one arena here. The rest are lazily created in
739 * choose_arena_hard().
740 */
741 arenas_extend(0);
742 if (arenas[0] == NULL) {
743 malloc_mutex_unlock(&init_lock);

--- 41 unchanged lines hidden (view full) ---

785 * For SMP systems, create more than one arena per CPU by
786 * default.
787 */
788 if (ncpus > 1)
789 opt_narenas = ncpus << 2;
790 else
791 opt_narenas = 1;
792 }
-	narenas = opt_narenas;
+	narenas_auto = opt_narenas;
 	/*
 	 * Make sure that the arenas array can be allocated. In practice, this
 	 * limit is enough to allow the allocator to function, but the ctl
 	 * machinery will fail to allocate memory at far lower limits.
 	 */
-	if (narenas > chunksize / sizeof(arena_t *)) {
-		narenas = chunksize / sizeof(arena_t *);
+	if (narenas_auto > chunksize / sizeof(arena_t *)) {
+		narenas_auto = chunksize / sizeof(arena_t *);
 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
-		    narenas);
+		    narenas_auto);
 	}
+	narenas_total = narenas_auto;

 	/* Allocate and initialize arenas. */
-	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
+	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
777 if (arenas == NULL) {
778 malloc_mutex_unlock(&init_lock);
779 return (true);
780 }
781 /*
782 * Zero the array. In practice, this should always be pre-zeroed,
783 * since it was just mmap()ed, but let's be sure.
784 */
808 if (arenas == NULL) {
809 malloc_mutex_unlock(&init_lock);
810 return (true);
811 }
812 /*
813 * Zero the array. In practice, this should always be pre-zeroed,
814 * since it was just mmap()ed, but let's be sure.
815 */
-	memset(arenas, 0, sizeof(arena_t *) * narenas);
+	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
786 /* Copy the pointer to the one arena that was already initialized. */
787 arenas[0] = init_arenas[0];
788
789 malloc_initialized = true;
790 malloc_mutex_unlock(&init_lock);
791 return (false);
792}
793

--- 468 unchanged lines hidden (view full) ---

1262 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1263 * to inconsistently reference libc's malloc(3)-compatible functions
1264 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1265 *
1266 * These definitions interpose hooks in glibc. The functions are actually
1267 * passed an extra argument for the caller return address, which will be
1268 * ignored.
1269 */
817 /* Copy the pointer to the one arena that was already initialized. */
818 arenas[0] = init_arenas[0];
819
820 malloc_initialized = true;
821 malloc_mutex_unlock(&init_lock);
822 return (false);
823}
824

--- 468 unchanged lines hidden (view full) ---

1293 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1294 * to inconsistently reference libc's malloc(3)-compatible functions
1295 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1296 *
1297 * These definitions interpose hooks in glibc. The functions are actually
1298 * passed an extra argument for the caller return address, which will be
1299 * ignored.
1300 */
-JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free;
-JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc;
-JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) =
-    je_realloc;
-JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
+JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
+JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
+JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
+JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
     je_memalign;
1276#endif
1277
1278/*
1279 * End non-standard override functions.
1280 */
1281/******************************************************************************/
1282/*
1283 * Begin non-standard functions.
1284 */
1285
1286size_t
1305 je_memalign;
1306#endif
1307
1308/*
1309 * End non-standard override functions.
1310 */
1311/******************************************************************************/
1312/*
1313 * Begin non-standard functions.
1314 */
1315
1316size_t
-je_malloc_usable_size(const void *ptr)
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
1288{
1289 size_t ret;
1290
1291 assert(malloc_initialized || IS_INITIALIZER);
1292
1293 if (config_ivsalloc)
1294 ret = ivsalloc(ptr, config_prof);
1295 else

--- 47 unchanged lines hidden (view full) ---

1343 */
1344/******************************************************************************/
1345/*
1346 * Begin experimental functions.
1347 */
1348#ifdef JEMALLOC_EXPERIMENTAL
1349
1350JEMALLOC_INLINE void *
1318{
1319 size_t ret;
1320
1321 assert(malloc_initialized || IS_INITIALIZER);
1322
1323 if (config_ivsalloc)
1324 ret = ivsalloc(ptr, config_prof);
1325 else

--- 47 unchanged lines hidden (view full) ---

1373 */
1374/******************************************************************************/
1375/*
1376 * Begin experimental functions.
1377 */
1378#ifdef JEMALLOC_EXPERIMENTAL
1379
1380JEMALLOC_INLINE void *
-iallocm(size_t usize, size_t alignment, bool zero)
+iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
+    arena_t *arena)
 {

 	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
 	    alignment)));

 	if (alignment != 0)
-		return (ipalloc(usize, alignment, zero));
+		return (ipallocx(usize, alignment, zero, try_tcache, arena));
 	else if (zero)
-		return (icalloc(usize));
+		return (icallocx(usize, try_tcache, arena));
 	else
-		return (imalloc(usize));
+		return (imallocx(usize, try_tcache, arena));
1363}

 int
 je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
 {
 	void *p;
 	size_t usize;
 	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
 	    & (SIZE_T_MAX-1));
 	bool zero = flags & ALLOCM_ZERO;
+	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+	arena_t *arena;
+	bool try_tcache;
1373
1374 assert(ptr != NULL);
1375 assert(size != 0);
1376
1377 if (malloc_init())
1378 goto label_oom;
1379
1407
1408 assert(ptr != NULL);
1409 assert(size != 0);
1410
1411 if (malloc_init())
1412 goto label_oom;
1413
1414 if (arena_ind != UINT_MAX) {
1415 arena = arenas[arena_ind];
1416 try_tcache = false;
1417 } else {
1418 arena = NULL;
1419 try_tcache = true;
1420 }
1421
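The new prologue above decodes an optional arena index from the high bits of flags (UINT_MAX means no explicit arena) and disables the thread cache when a specific arena is requested. A hedged sketch of a caller, using the je_allocm() prototype defined in this file; ALLOCM_ARENA() is assumed to be the experimental-API encoding that matches the ((flags >> 8) - 1) decoding above:

#include <stddef.h>

int	je_allocm(void **ptr, size_t *rsize, size_t size, int flags);

#ifndef ALLOCM_ARENA
#define	ALLOCM_ARENA(a)	((int)(((a) + 1) << 8))	/* assumed encoding */
#endif

int
alloc_in_arena(void **ptr, size_t size, unsigned arena_ind)
{

	/* try_tcache ends up false and arenas[arena_ind] is used directly. */
	return (je_allocm(ptr, NULL, size, ALLOCM_ARENA(arena_ind)));
}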
1380 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1381 if (usize == 0)
1382 goto label_oom;
1383
1384 if (config_prof && opt_prof) {
1385 prof_thr_cnt_t *cnt;
1386
1387 PROF_ALLOC_PREP(1, usize, cnt);
1388 if (cnt == NULL)
1389 goto label_oom;
1390 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
1391 SMALL_MAXCLASS) {
1392 size_t usize_promoted = (alignment == 0) ?
1393 s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
1394 alignment);
1395 assert(usize_promoted != 0);
1422 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1423 if (usize == 0)
1424 goto label_oom;
1425
1426 if (config_prof && opt_prof) {
1427 prof_thr_cnt_t *cnt;
1428
1429 PROF_ALLOC_PREP(1, usize, cnt);
1430 if (cnt == NULL)
1431 goto label_oom;
1432 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
1433 SMALL_MAXCLASS) {
1434 size_t usize_promoted = (alignment == 0) ?
1435 s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
1436 alignment);
1437 assert(usize_promoted != 0);
-			p = iallocm(usize_promoted, alignment, zero);
+			p = iallocm(usize_promoted, alignment, zero,
+			    try_tcache, arena);
 			if (p == NULL)
 				goto label_oom;
 			arena_prof_promoted(p, usize);
 		} else {
-			p = iallocm(usize, alignment, zero);
+			p = iallocm(usize, alignment, zero, try_tcache, arena);
 			if (p == NULL)
 				goto label_oom;
 		}
 		prof_malloc(p, usize, cnt);
 	} else {
-		p = iallocm(usize, alignment, zero);
+		p = iallocm(usize, alignment, zero, try_tcache, arena);
1408 if (p == NULL)
1409 goto label_oom;
1410 }
1411 if (rsize != NULL)
1412 *rsize = usize;
1413
1414 *ptr = p;
1415 if (config_stats) {

--- 20 unchanged lines hidden (view full) ---

1436 void *p, *q;
1437 size_t usize;
1438 size_t old_size;
1439 size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1440 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1441 & (SIZE_T_MAX-1));
1442 bool zero = flags & ALLOCM_ZERO;
1443 bool no_move = flags & ALLOCM_NO_MOVE;
1451 if (p == NULL)
1452 goto label_oom;
1453 }
1454 if (rsize != NULL)
1455 *rsize = usize;
1456
1457 *ptr = p;
1458 if (config_stats) {

--- 20 unchanged lines hidden (view full) ---

1479 void *p, *q;
1480 size_t usize;
1481 size_t old_size;
1482 size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1483 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1484 & (SIZE_T_MAX-1));
1485 bool zero = flags & ALLOCM_ZERO;
1486 bool no_move = flags & ALLOCM_NO_MOVE;
1487 unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
1488 bool try_tcache_alloc, try_tcache_dalloc;
1489 arena_t *arena;
1444
1445 assert(ptr != NULL);
1446 assert(*ptr != NULL);
1447 assert(size != 0);
1448 assert(SIZE_T_MAX - size >= extra);
1449 assert(malloc_initialized || IS_INITIALIZER);
1450
1490
1491 assert(ptr != NULL);
1492 assert(*ptr != NULL);
1493 assert(size != 0);
1494 assert(SIZE_T_MAX - size >= extra);
1495 assert(malloc_initialized || IS_INITIALIZER);
1496
1497 if (arena_ind != UINT_MAX) {
1498 arena_chunk_t *chunk;
1499 try_tcache_alloc = true;
1500 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
1501 try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
1502 arenas[arena_ind]);
1503 arena = arenas[arena_ind];
1504 } else {
1505 try_tcache_alloc = true;
1506 try_tcache_dalloc = true;
1507 arena = NULL;
1508 }
1509
1451 p = *ptr;
1452 if (config_prof && opt_prof) {
1453 prof_thr_cnt_t *cnt;
1454
1455 /*
1456 * usize isn't knowable before iralloc() returns when extra is
1457 * non-zero. Therefore, compute its maximum possible value and
1458 * use that in PROF_ALLOC_PREP() to decide whether to capture a

--- 10 unchanged lines hidden (view full) ---

1469 if (cnt == NULL)
1470 goto label_oom;
1471 /*
1472 * Use minimum usize to determine whether promotion may happen.
1473 */
1474 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
1475 && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
1476 <= SMALL_MAXCLASS) {
1510 p = *ptr;
1511 if (config_prof && opt_prof) {
1512 prof_thr_cnt_t *cnt;
1513
1514 /*
1515 * usize isn't knowable before iralloc() returns when extra is
1516 * non-zero. Therefore, compute its maximum possible value and
1517 * use that in PROF_ALLOC_PREP() to decide whether to capture a

--- 10 unchanged lines hidden (view full) ---

1528 if (cnt == NULL)
1529 goto label_oom;
1530 /*
1531 * Use minimum usize to determine whether promotion may happen.
1532 */
1533 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
1534 && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
1535 <= SMALL_MAXCLASS) {
-			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+			q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
 			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
-			    alignment, zero, no_move);
+			    alignment, zero, no_move, try_tcache_alloc,
+			    try_tcache_dalloc, arena);
1480 if (q == NULL)
1481 goto label_err;
1482 if (max_usize < PAGE) {
1483 usize = max_usize;
1484 arena_prof_promoted(q, usize);
1485 } else
1486 usize = isalloc(q, config_prof);
1487 } else {
1540 if (q == NULL)
1541 goto label_err;
1542 if (max_usize < PAGE) {
1543 usize = max_usize;
1544 arena_prof_promoted(q, usize);
1545 } else
1546 usize = isalloc(q, config_prof);
1547 } else {
-			q = iralloc(p, size, extra, alignment, zero, no_move);
+			q = irallocx(p, size, extra, alignment, zero, no_move,
+			    try_tcache_alloc, try_tcache_dalloc, arena);
1489 if (q == NULL)
1490 goto label_err;
1491 usize = isalloc(q, config_prof);
1492 }
1493 prof_realloc(q, usize, cnt, old_size, old_ctx);
1494 if (rsize != NULL)
1495 *rsize = usize;
1496 } else {
1497 if (config_stats) {
1498 old_size = isalloc(p, false);
1499 if (config_valgrind && opt_valgrind)
1500 old_rzsize = u2rz(old_size);
1501 } else if (config_valgrind && opt_valgrind) {
1502 old_size = isalloc(p, false);
1503 old_rzsize = u2rz(old_size);
1504 }
1550 if (q == NULL)
1551 goto label_err;
1552 usize = isalloc(q, config_prof);
1553 }
1554 prof_realloc(q, usize, cnt, old_size, old_ctx);
1555 if (rsize != NULL)
1556 *rsize = usize;
1557 } else {
1558 if (config_stats) {
1559 old_size = isalloc(p, false);
1560 if (config_valgrind && opt_valgrind)
1561 old_rzsize = u2rz(old_size);
1562 } else if (config_valgrind && opt_valgrind) {
1563 old_size = isalloc(p, false);
1564 old_rzsize = u2rz(old_size);
1565 }
-		q = iralloc(p, size, extra, alignment, zero, no_move);
+		q = irallocx(p, size, extra, alignment, zero, no_move,
+		    try_tcache_alloc, try_tcache_dalloc, arena);
1506 if (q == NULL)
1507 goto label_err;
1508 if (config_stats)
1509 usize = isalloc(q, config_prof);
1510 if (rsize != NULL) {
1511 if (config_stats == false)
1512 usize = isalloc(q, config_prof);
1513 *rsize = usize;

--- 44 unchanged lines hidden (view full) ---

1558 return (ALLOCM_SUCCESS);
1559}
1560
1561int
1562je_dallocm(void *ptr, int flags)
1563{
1564 size_t usize;
1565 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1568 if (q == NULL)
1569 goto label_err;
1570 if (config_stats)
1571 usize = isalloc(q, config_prof);
1572 if (rsize != NULL) {
1573 if (config_stats == false)
1574 usize = isalloc(q, config_prof);
1575 *rsize = usize;

--- 44 unchanged lines hidden (view full) ---

1620 return (ALLOCM_SUCCESS);
1621}
1622
1623int
1624je_dallocm(void *ptr, int flags)
1625{
1626 size_t usize;
1627 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1628 unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
1629 bool try_tcache;
1566
1567 assert(ptr != NULL);
1568 assert(malloc_initialized || IS_INITIALIZER);
1569
1630
1631 assert(ptr != NULL);
1632 assert(malloc_initialized || IS_INITIALIZER);
1633
1634 if (arena_ind != UINT_MAX) {
1635 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1636 try_tcache = (chunk == ptr || chunk->arena !=
1637 arenas[arena_ind]);
1638 } else
1639 try_tcache = true;
1640
1570 UTRACE(ptr, 0, 0);
1571 if (config_stats || config_valgrind)
1572 usize = isalloc(ptr, config_prof);
1573 if (config_prof && opt_prof) {
1574 if (config_stats == false && config_valgrind == false)
1575 usize = isalloc(ptr, config_prof);
1576 prof_free(ptr, usize);
1577 }
1578 if (config_stats)
1579 thread_allocated_tsd_get()->deallocated += usize;
1580 if (config_valgrind && opt_valgrind)
1581 rzsize = p2rz(ptr);
1641 UTRACE(ptr, 0, 0);
1642 if (config_stats || config_valgrind)
1643 usize = isalloc(ptr, config_prof);
1644 if (config_prof && opt_prof) {
1645 if (config_stats == false && config_valgrind == false)
1646 usize = isalloc(ptr, config_prof);
1647 prof_free(ptr, usize);
1648 }
1649 if (config_stats)
1650 thread_allocated_tsd_get()->deallocated += usize;
1651 if (config_valgrind && opt_valgrind)
1652 rzsize = p2rz(ptr);
-	iqalloc(ptr);
+	iqallocx(ptr, try_tcache);
1583 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1584
1585 return (ALLOCM_SUCCESS);
1586}
1587
1588int
1589je_nallocm(size_t *rsize, size_t size, int flags)
1590{

--- 20 unchanged lines hidden (view full) ---

1611 * End experimental functions.
1612 */
1613/******************************************************************************/
1614/*
1615 * The following functions are used by threading libraries for protection of
1616 * malloc during fork().
1617 */
1618
1654 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1655
1656 return (ALLOCM_SUCCESS);
1657}
1658
1659int
1660je_nallocm(size_t *rsize, size_t size, int flags)
1661{

--- 20 unchanged lines hidden (view full) ---

1682 * End experimental functions.
1683 */
1684/******************************************************************************/
1685/*
1686 * The following functions are used by threading libraries for protection of
1687 * malloc during fork().
1688 */
1689
1690/*
1691 * If an application creates a thread before doing any allocation in the main
1692 * thread, then calls fork(2) in the main thread followed by memory allocation
1693 * in the child process, a race can occur that results in deadlock within the
1694 * child: the main thread may have forked while the created thread had
1695 * partially initialized the allocator. Ordinarily jemalloc prevents
1696 * fork/malloc races via the following functions it registers during
1697 * initialization using pthread_atfork(), but of course that does no good if
1698 * the allocator isn't fully initialized at fork time. The following library
1699 * constructor is a partial solution to this problem. It may still possible to
1700 * trigger the deadlock described above, but doing so would involve forking via
1701 * a library constructor that runs before jemalloc's runs.
1702 */
1703JEMALLOC_ATTR(constructor)
1704static void
1705jemalloc_constructor(void)
1706{
1707
1708 malloc_init();
1709}
1710
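The comment and constructor above close a window in which fork(2) could run before the pthread_atfork() handlers exist. For reference, the registration those handlers rely on is performed during allocator initialization (outside the hunks shown here); a minimal sketch, assuming the non-JEMALLOC_MUTEX_INIT_CB path and using the prefork/postfork functions defined below:

#include <pthread.h>

void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

static void
register_fork_handlers(void)	/* hypothetical helper */
{

	/* Locks are acquired in prefork and released in both postfork paths. */
	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child);
}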
1619#ifndef JEMALLOC_MUTEX_INIT_CB
1620void
1621jemalloc_prefork(void)
1622#else
1623JEMALLOC_EXPORT void
1624_malloc_prefork(void)
1625#endif
1626{
1627 unsigned i;
1628
1629#ifdef JEMALLOC_MUTEX_INIT_CB
1630 if (malloc_initialized == false)
1631 return;
1632#endif
1633 assert(malloc_initialized);
1634
1635 /* Acquire all mutexes in a safe order. */
1711#ifndef JEMALLOC_MUTEX_INIT_CB
1712void
1713jemalloc_prefork(void)
1714#else
1715JEMALLOC_EXPORT void
1716_malloc_prefork(void)
1717#endif
1718{
1719 unsigned i;
1720
1721#ifdef JEMALLOC_MUTEX_INIT_CB
1722 if (malloc_initialized == false)
1723 return;
1724#endif
1725 assert(malloc_initialized);
1726
1727 /* Acquire all mutexes in a safe order. */
1728 ctl_prefork();
 	malloc_mutex_prefork(&arenas_lock);
-	for (i = 0; i < narenas; i++) {
+	for (i = 0; i < narenas_total; i++) {
 		if (arenas[i] != NULL)
 			arena_prefork(arenas[i]);
 	}
+	prof_prefork();
+	chunk_prefork();
 	base_prefork();
 	huge_prefork();
-	chunk_dss_prefork();
1644}
1645
1646#ifndef JEMALLOC_MUTEX_INIT_CB
1647void
1648jemalloc_postfork_parent(void)
1649#else
1650JEMALLOC_EXPORT void
1651_malloc_postfork(void)
1652#endif
1653{
1654 unsigned i;
1655
1656#ifdef JEMALLOC_MUTEX_INIT_CB
1657 if (malloc_initialized == false)
1658 return;
1659#endif
1660 assert(malloc_initialized);
1661
1662 /* Release all mutexes, now that fork() has completed. */
1738}
1739
1740#ifndef JEMALLOC_MUTEX_INIT_CB
1741void
1742jemalloc_postfork_parent(void)
1743#else
1744JEMALLOC_EXPORT void
1745_malloc_postfork(void)
1746#endif
1747{
1748 unsigned i;
1749
1750#ifdef JEMALLOC_MUTEX_INIT_CB
1751 if (malloc_initialized == false)
1752 return;
1753#endif
1754 assert(malloc_initialized);
1755
1756 /* Release all mutexes, now that fork() has completed. */
-	chunk_dss_postfork_parent();
 	huge_postfork_parent();
 	base_postfork_parent();
-	for (i = 0; i < narenas; i++) {
+	chunk_postfork_parent();
+	prof_postfork_parent();
+	for (i = 0; i < narenas_total; i++) {
 		if (arenas[i] != NULL)
 			arena_postfork_parent(arenas[i]);
 	}
 	malloc_mutex_postfork_parent(&arenas_lock);
+	ctl_postfork_parent();
1671}
1672
1673void
1674jemalloc_postfork_child(void)
1675{
1676 unsigned i;
1677
1678 assert(malloc_initialized);
1679
1680 /* Release all mutexes, now that fork() has completed. */
1767}
1768
1769void
1770jemalloc_postfork_child(void)
1771{
1772 unsigned i;
1773
1774 assert(malloc_initialized);
1775
1776 /* Release all mutexes, now that fork() has completed. */
-	chunk_dss_postfork_child();
 	huge_postfork_child();
 	base_postfork_child();
-	for (i = 0; i < narenas; i++) {
+	chunk_postfork_child();
+	prof_postfork_child();
+	for (i = 0; i < narenas_total; i++) {
 		if (arenas[i] != NULL)
 			arena_postfork_child(arenas[i]);
 	}
 	malloc_mutex_postfork_child(&arenas_lock);
+	ctl_postfork_child();
1689}
1690
1691/******************************************************************************/
1692/*
1693 * The following functions are used for TLS allocation/deallocation in static
1694 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
1695 * is that these avoid accessing TLS variables.
1696 */

--- 47 unchanged lines hidden ---
1787}
1788
1789/******************************************************************************/
1790/*
1791 * The following functions are used for TLS allocation/deallocation in static
1792 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
1793 * is that these avoid accessing TLS variables.
1794 */

--- 47 unchanged lines hidden ---