arena.c (286866) → arena.c (288090)
1#define JEMALLOC_ARENA_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
8static ssize_t lg_dirty_mult_default;
9arena_bin_info_t arena_bin_info[NBINS];
10
11size_t map_bias;
12size_t map_misc_offset;
13size_t arena_maxrun; /* Max run size for arenas. */
1#define JEMALLOC_ARENA_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
8static ssize_t lg_dirty_mult_default;
9arena_bin_info_t arena_bin_info[NBINS];
10
11size_t map_bias;
12size_t map_misc_offset;
13size_t arena_maxrun; /* Max run size for arenas. */
14size_t arena_maxclass; /* Max size class for arenas. */
14size_t large_maxclass; /* Max large size class. */
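For orientation, this rename separates the arena-served "large" classes from the chunk-backed "huge" classes. Judging from how large_maxclass is used later in this diff (arena_palloc, arena_ralloc_no_move, arena_boot), request sizes fall into three regimes; the helper below is purely illustrative and not part of the source:

/* Hypothetical helper summarizing the size regimes implied by this diff. */
static const char *
size_regime(size_t usize)
{

	if (usize <= SMALL_MAXCLASS)
		return ("small");	/* Served from bins. */
	if (usize <= large_maxclass)
		return ("large");	/* Served from page runs within a chunk. */
	return ("huge");		/* Served from dedicated chunks. */
}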
15static size_t small_maxrun; /* Max run size used for small size classes. */
16static bool *small_run_tab; /* Valid small run page multiples. */
17unsigned nlclasses; /* Number of large size classes. */
18unsigned nhclasses; /* Number of huge size classes. */
19
20/******************************************************************************/
21/*
22 * Function prototypes for static functions that are referenced prior to

--- 11 unchanged lines hidden (view full) ---

34/******************************************************************************/
35
36#define CHUNK_MAP_KEY ((uintptr_t)0x1U)
37
38JEMALLOC_INLINE_C arena_chunk_map_misc_t *
39arena_miscelm_key_create(size_t size)
40{
41
15static size_t small_maxrun; /* Max run size used for small size classes. */
16static bool *small_run_tab; /* Valid small run page multiples. */
17unsigned nlclasses; /* Number of large size classes. */
18unsigned nhclasses; /* Number of huge size classes. */
19
20/******************************************************************************/
21/*
22 * Function prototypes for static functions that are referenced prior to

--- 11 unchanged lines hidden (view full) ---

34/******************************************************************************/
35
36#define CHUNK_MAP_KEY ((uintptr_t)0x1U)
37
38JEMALLOC_INLINE_C arena_chunk_map_misc_t *
39arena_miscelm_key_create(size_t size)
40{
41
42 return ((arena_chunk_map_misc_t *)((size << CHUNK_MAP_SIZE_SHIFT) |
42 return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) |
43 CHUNK_MAP_KEY));
44}
45
46JEMALLOC_INLINE_C bool
47arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm)
48{
49
50 return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0);
51}
52
53#undef CHUNK_MAP_KEY
54
55JEMALLOC_INLINE_C size_t
56arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
57{
58
59 assert(arena_miscelm_is_key(miscelm));
60
43 CHUNK_MAP_KEY));
44}
45
46JEMALLOC_INLINE_C bool
47arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm)
48{
49
50 return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0);
51}
52
53#undef CHUNK_MAP_KEY
54
55JEMALLOC_INLINE_C size_t
56arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
57{
58
59 assert(arena_miscelm_is_key(miscelm));
60
61 return (((uintptr_t)miscelm & CHUNK_MAP_SIZE_MASK) >>
62 CHUNK_MAP_SIZE_SHIFT);
61 return (arena_mapbits_size_decode((uintptr_t)miscelm));
63}
64
65JEMALLOC_INLINE_C size_t
66arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
67{
68 arena_chunk_t *chunk;
69 size_t pageind, mapbits;
70
71 assert(!arena_miscelm_is_key(miscelm));
72
73 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
74 pageind = arena_miscelm_to_pageind(miscelm);
75 mapbits = arena_mapbits_get(chunk, pageind);
62}
63
64JEMALLOC_INLINE_C size_t
65arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
66{
67 arena_chunk_t *chunk;
68 size_t pageind, mapbits;
69
70 assert(!arena_miscelm_is_key(miscelm));
71
72 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
73 pageind = arena_miscelm_to_pageind(miscelm);
74 mapbits = arena_mapbits_get(chunk, pageind);
76 return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
75 return (arena_mapbits_size_decode(mapbits));
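The two hunks above replace open-coded shift/mask arithmetic with arena_mapbits_size_encode()/arena_mapbits_size_decode(). Going only by the expressions deleted here, the helpers fold together roughly the following logic; this is a sketch, and the real definitions (in the arena header, not this file) may additionally handle a non-positive CHUNK_MAP_SIZE_SHIFT and carry assertions:

JEMALLOC_INLINE size_t
arena_mapbits_size_encode(size_t size)
{

	/* Pack a run size into the size bits of a chunk map word. */
	return (size << CHUNK_MAP_SIZE_SHIFT);
}

JEMALLOC_INLINE size_t
arena_mapbits_size_decode(size_t mapbits)
{

	/* Recover the size from the size bits of a chunk map word. */
	return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
}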
77}
78
79JEMALLOC_INLINE_C int
80arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
81{
82 uintptr_t a_miscelm = (uintptr_t)a;
83 uintptr_t b_miscelm = (uintptr_t)b;
84

--- 225 unchanged lines hidden (view full) ---

310}
311
312JEMALLOC_INLINE_C void
313arena_run_reg_dalloc(arena_run_t *run, void *ptr)
314{
315 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
316 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
317 size_t mapbits = arena_mapbits_get(chunk, pageind);
76}
77
78JEMALLOC_INLINE_C int
79arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
80{
81 uintptr_t a_miscelm = (uintptr_t)a;
82 uintptr_t b_miscelm = (uintptr_t)b;
83

--- 225 unchanged lines hidden (view full) ---

309}
310
311JEMALLOC_INLINE_C void
312arena_run_reg_dalloc(arena_run_t *run, void *ptr)
313{
314 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
315 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
316 size_t mapbits = arena_mapbits_get(chunk, pageind);
318 index_t binind = arena_ptr_small_binind_get(ptr, mapbits);
317 szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
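This is the first of many hunks below that mechanically rename the size class index type from index_t to szind_t; the later occurrences follow the same pattern. Presumably the underlying typedef in the internal headers is renamed along with it, along the lines of:

/* Presumed typedef rename (not shown in this diff). */
typedef unsigned szind_t;	/* Index into the size class tables. */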
319 arena_bin_info_t *bin_info = &arena_bin_info[binind];
320 unsigned regind = arena_run_regind(run, bin_info, ptr);
321
322 assert(run->nfree < bin_info->nregs);
323 /* Freeing an interior pointer can cause assertion failure. */
324 assert(((uintptr_t)ptr -
325 ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
326 (uintptr_t)bin_info->reg0_offset)) %

--- 94 unchanged lines hidden (view full) ---

421}
422
423static bool
424arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
425 bool remove, bool zero)
426{
427 arena_chunk_t *chunk;
428 arena_chunk_map_misc_t *miscelm;
318 arena_bin_info_t *bin_info = &arena_bin_info[binind];
319 unsigned regind = arena_run_regind(run, bin_info, ptr);
320
321 assert(run->nfree < bin_info->nregs);
322 /* Freeing an interior pointer can cause assertion failure. */
323 assert(((uintptr_t)ptr -
324 ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
325 (uintptr_t)bin_info->reg0_offset)) %

--- 94 unchanged lines hidden (view full) ---

420}
421
422static bool
423arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
424 bool remove, bool zero)
425{
426 arena_chunk_t *chunk;
427 arena_chunk_map_misc_t *miscelm;
429 size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
428 size_t flag_dirty, flag_decommitted, run_ind, need_pages;
430 size_t flag_unzeroed_mask;
431
432 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
433 miscelm = arena_run_to_miscelm(run);
434 run_ind = arena_miscelm_to_pageind(miscelm);
435 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
436 flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
437 need_pages = (size >> LG_PAGE);

--- 17 unchanged lines hidden (view full) ---

455 } else if (flag_dirty != 0) {
456 /* The run is dirty, so all pages must be zeroed. */
457 arena_run_zero(chunk, run_ind, need_pages);
458 } else {
459 /*
460 * The run is clean, so some pages may be zeroed (i.e.
461 * never before touched).
462 */
429 size_t flag_unzeroed_mask;
430
431 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
432 miscelm = arena_run_to_miscelm(run);
433 run_ind = arena_miscelm_to_pageind(miscelm);
434 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
435 flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
436 need_pages = (size >> LG_PAGE);

--- 17 unchanged lines hidden (view full) ---

454 } else if (flag_dirty != 0) {
455 /* The run is dirty, so all pages must be zeroed. */
456 arena_run_zero(chunk, run_ind, need_pages);
457 } else {
458 /*
459 * The run is clean, so some pages may be zeroed (i.e.
460 * never before touched).
461 */
462 size_t i;
463 for (i = 0; i < need_pages; i++) {
464 if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
465 != 0)
466 arena_run_zero(chunk, run_ind+i, 1);
467 else if (config_debug) {
468 arena_run_page_validate_zeroed(chunk,
469 run_ind+i);
470 } else {

--- 32 unchanged lines hidden (view full) ---

503arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
504{
505
506 return (arena_run_split_large_helper(arena, run, size, false, zero));
507}
508
509static bool
510arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
463 for (i = 0; i < need_pages; i++) {
464 if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
465 != 0)
466 arena_run_zero(chunk, run_ind+i, 1);
467 else if (config_debug) {
468 arena_run_page_validate_zeroed(chunk,
469 run_ind+i);
470 } else {

--- 32 unchanged lines hidden (view full) ---

503arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
504{
505
506 return (arena_run_split_large_helper(arena, run, size, false, zero));
507}
508
509static bool
510arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
511 index_t binind)
511 szind_t binind)
512{
513 arena_chunk_t *chunk;
514 arena_chunk_map_misc_t *miscelm;
515 size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
516
517 assert(binind != BININD_INVALID);
518
519 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);

--- 255 unchanged lines hidden (view full) ---

775 }
776 } else
777 arena->spare = chunk;
778}
779
780static void
781arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
782{
512{
513 arena_chunk_t *chunk;
514 arena_chunk_map_misc_t *miscelm;
515 size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
516
517 assert(binind != BININD_INVALID);
518
519 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);

--- 255 unchanged lines hidden (view full) ---

775 }
776 } else
777 arena->spare = chunk;
778}
779
780static void
781arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
782{
783 index_t index = size2index(usize) - nlclasses - NBINS;
783 szind_t index = size2index(usize) - nlclasses - NBINS;
784
785 cassert(config_stats);
786
787 arena->stats.nmalloc_huge++;
788 arena->stats.allocated_huge += usize;
789 arena->stats.hstats[index].nmalloc++;
790 arena->stats.hstats[index].curhchunks++;
791}
792
793static void
794arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
795{
784
785 cassert(config_stats);
786
787 arena->stats.nmalloc_huge++;
788 arena->stats.allocated_huge += usize;
789 arena->stats.hstats[index].nmalloc++;
790 arena->stats.hstats[index].curhchunks++;
791}
792
793static void
794arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
795{
796 index_t index = size2index(usize) - nlclasses - NBINS;
796 szind_t index = size2index(usize) - nlclasses - NBINS;
797
798 cassert(config_stats);
799
800 arena->stats.nmalloc_huge--;
801 arena->stats.allocated_huge -= usize;
802 arena->stats.hstats[index].nmalloc--;
803 arena->stats.hstats[index].curhchunks--;
804}
805
806static void
807arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
808{
797
798 cassert(config_stats);
799
800 arena->stats.nmalloc_huge--;
801 arena->stats.allocated_huge -= usize;
802 arena->stats.hstats[index].nmalloc--;
803 arena->stats.hstats[index].curhchunks--;
804}
805
806static void
807arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
808{
809 index_t index = size2index(usize) - nlclasses - NBINS;
809 szind_t index = size2index(usize) - nlclasses - NBINS;
810
811 cassert(config_stats);
812
813 arena->stats.ndalloc_huge++;
814 arena->stats.allocated_huge -= usize;
815 arena->stats.hstats[index].ndalloc++;
816 arena->stats.hstats[index].curhchunks--;
817}
818
819static void
820arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
821{
810
811 cassert(config_stats);
812
813 arena->stats.ndalloc_huge++;
814 arena->stats.allocated_huge -= usize;
815 arena->stats.hstats[index].ndalloc++;
816 arena->stats.hstats[index].curhchunks--;
817}
818
819static void
820arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
821{
822 index_t index = size2index(usize) - nlclasses - NBINS;
822 szind_t index = size2index(usize) - nlclasses - NBINS;
823
824 cassert(config_stats);
825
826 arena->stats.ndalloc_huge--;
827 arena->stats.allocated_huge += usize;
828 arena->stats.hstats[index].ndalloc--;
829 arena->stats.hstats[index].curhchunks++;
830}

--- 289 unchanged lines hidden (view full) ---

1120 * arena_chunk_alloc() failed, but another thread may have made
1121 * sufficient memory available while this one dropped arena->lock in
1122 * arena_chunk_alloc(), so search one more time.
1123 */
1124 return (arena_run_alloc_large_helper(arena, size, zero));
1125}
1126
1127static arena_run_t *
823
824 cassert(config_stats);
825
826 arena->stats.ndalloc_huge--;
827 arena->stats.allocated_huge += usize;
828 arena->stats.hstats[index].ndalloc--;
829 arena->stats.hstats[index].curhchunks++;
830}

--- 289 unchanged lines hidden (view full) ---

1120 * arena_chunk_alloc() failed, but another thread may have made
1121 * sufficient memory available while this one dropped arena->lock in
1122 * arena_chunk_alloc(), so search one more time.
1123 */
1124 return (arena_run_alloc_large_helper(arena, size, zero));
1125}
1126
1127static arena_run_t *
1128arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
1128arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
1129{
1130 arena_run_t *run = arena_run_first_best_fit(arena, size);
1131 if (run != NULL) {
1132 if (arena_run_split_small(arena, run, size, binind))
1133 run = NULL;
1134 }
1135 return (run);
1136}
1137
1138static arena_run_t *
1129{
1130 arena_run_t *run = arena_run_first_best_fit(arena, size);
1131 if (run != NULL) {
1132 if (arena_run_split_small(arena, run, size, binind))
1133 run = NULL;
1134 }
1135 return (run);
1136}
1137
1138static arena_run_t *
1139arena_run_alloc_small(arena_t *arena, size_t size, index_t binind)
1139arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
1140{
1141 arena_chunk_t *chunk;
1142 arena_run_t *run;
1143
1144 assert(size <= arena_maxrun);
1145 assert(size == PAGE_CEILING(size));
1146 assert(binind != BININD_INVALID);
1147

--- 736 unchanged lines hidden (view full) ---

1884 }
1885 return (run);
1886}
1887
1888static arena_run_t *
1889arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1890{
1891 arena_run_t *run;
1140{
1141 arena_chunk_t *chunk;
1142 arena_run_t *run;
1143
1144 assert(size <= arena_maxrun);
1145 assert(size == PAGE_CEILING(size));
1146 assert(binind != BININD_INVALID);
1147

--- 736 unchanged lines hidden (view full) ---

1884 }
1885 return (run);
1886}
1887
1888static arena_run_t *
1889arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1890{
1891 arena_run_t *run;
1892 index_t binind;
1892 szind_t binind;
1893 arena_bin_info_t *bin_info;
1894
1895 /* Look for a usable run. */
1896 run = arena_bin_nonfull_run_tryget(bin);
1897 if (run != NULL)
1898 return (run);
1899 /* No existing runs have any space available. */
1900

--- 33 unchanged lines hidden (view full) ---

1934
1935 return (NULL);
1936}
1937
1938/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
1939static void *
1940arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1941{
1893 arena_bin_info_t *bin_info;
1894
1895 /* Look for a usable run. */
1896 run = arena_bin_nonfull_run_tryget(bin);
1897 if (run != NULL)
1898 return (run);
1899 /* No existing runs have any space available. */
1900

--- 33 unchanged lines hidden (view full) ---

1934
1935 return (NULL);
1936}
1937
1938/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
1939static void *
1940arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1941{
1942 void *ret;
1943 index_t binind;
1942 szind_t binind;
1944 arena_bin_info_t *bin_info;
1945 arena_run_t *run;
1946
1947 binind = arena_bin_index(arena, bin);
1948 bin_info = &arena_bin_info[binind];
1949 bin->runcur = NULL;
1950 run = arena_bin_nonfull_run_get(arena, bin);
1951 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1952 /*
1953 * Another thread updated runcur while this one ran without the
1954 * bin lock in arena_bin_nonfull_run_get().
1955 */
1943 arena_bin_info_t *bin_info;
1944 arena_run_t *run;
1945
1946 binind = arena_bin_index(arena, bin);
1947 bin_info = &arena_bin_info[binind];
1948 bin->runcur = NULL;
1949 run = arena_bin_nonfull_run_get(arena, bin);
1950 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1951 /*
1952 * Another thread updated runcur while this one ran without the
1953 * bin lock in arena_bin_nonfull_run_get().
1954 */
1955 void *ret;
1956 assert(bin->runcur->nfree > 0);
1957 ret = arena_run_reg_alloc(bin->runcur, bin_info);
1958 if (run != NULL) {
1959 arena_chunk_t *chunk;
1960
1961 /*
1962 * arena_run_alloc_small() may have allocated run, or
1963 * it may have pulled run from the bin's run tree.

--- 17 unchanged lines hidden (view full) ---

1981 bin->runcur = run;
1982
1983 assert(bin->runcur->nfree > 0);
1984
1985 return (arena_run_reg_alloc(bin->runcur, bin_info));
1986}
1987
1988void
1956 assert(bin->runcur->nfree > 0);
1957 ret = arena_run_reg_alloc(bin->runcur, bin_info);
1958 if (run != NULL) {
1959 arena_chunk_t *chunk;
1960
1961 /*
1962 * arena_run_alloc_small() may have allocated run, or
1963 * it may have pulled run from the bin's run tree.

--- 17 unchanged lines hidden (view full) ---

1981 bin->runcur = run;
1982
1983 assert(bin->runcur->nfree > 0);
1984
1985 return (arena_run_reg_alloc(bin->runcur, bin_info));
1986}
1987
1988void
1989arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind,
1989arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
1990 uint64_t prof_accumbytes)
1991{
1992 unsigned i, nfill;
1993 arena_bin_t *bin;
1990 uint64_t prof_accumbytes)
1991{
1992 unsigned i, nfill;
1993 arena_bin_t *bin;
1994 arena_run_t *run;
1995 void *ptr;
1996
1997 assert(tbin->ncached == 0);
1998
1999 if (config_prof && arena_prof_accum(arena, prof_accumbytes))
2000 prof_idump();
2001 bin = &arena->bins[binind];
2002 malloc_mutex_lock(&bin->lock);
2003 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2004 tbin->lg_fill_div); i < nfill; i++) {
1994
1995 assert(tbin->ncached == 0);
1996
1997 if (config_prof && arena_prof_accum(arena, prof_accumbytes))
1998 prof_idump();
1999 bin = &arena->bins[binind];
2000 malloc_mutex_lock(&bin->lock);
2001 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2002 tbin->lg_fill_div); i < nfill; i++) {
2003 arena_run_t *run;
2004 void *ptr;
2005 if ((run = bin->runcur) != NULL && run->nfree > 0)
2006 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2007 else
2008 ptr = arena_bin_malloc_hard(arena, bin);
2009 if (ptr == NULL) {
2010 /*
2011 * OOM. tbin->avail isn't yet filled down to its first
2012 * element, so the successful allocations (if any) must

--- 58 unchanged lines hidden (view full) ---

2071#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2072arena_redzone_corruption_t *arena_redzone_corruption =
2073 JEMALLOC_N(arena_redzone_corruption_impl);
2074#endif
2075
2076static void
2077arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
2078{
2005 if ((run = bin->runcur) != NULL && run->nfree > 0)
2006 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2007 else
2008 ptr = arena_bin_malloc_hard(arena, bin);
2009 if (ptr == NULL) {
2010 /*
2011 * OOM. tbin->avail isn't yet filled down to its first
2012 * element, so the successful allocations (if any) must

--- 58 unchanged lines hidden (view full) ---

2071#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2072arena_redzone_corruption_t *arena_redzone_corruption =
2073 JEMALLOC_N(arena_redzone_corruption_impl);
2074#endif
2075
2076static void
2077arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
2078{
2079 size_t size = bin_info->reg_size;
2080 size_t redzone_size = bin_info->redzone_size;
2081 size_t i;
2082 bool error = false;
2083
2084 if (opt_junk_alloc) {
2079 bool error = false;
2080
2081 if (opt_junk_alloc) {
2082 size_t size = bin_info->reg_size;
2083 size_t redzone_size = bin_info->redzone_size;
2084 size_t i;
2085
2085 for (i = 1; i <= redzone_size; i++) {
2086 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
2087 if (*byte != 0xa5) {
2088 error = true;
2089 arena_redzone_corruption(ptr, size, false, i,
2090 *byte);
2091 if (reset)
2092 *byte = 0xa5;

--- 33 unchanged lines hidden (view full) ---

2126#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2127arena_dalloc_junk_small_t *arena_dalloc_junk_small =
2128 JEMALLOC_N(arena_dalloc_junk_small_impl);
2129#endif
2130
2131void
2132arena_quarantine_junk_small(void *ptr, size_t usize)
2133{
2086 for (i = 1; i <= redzone_size; i++) {
2087 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
2088 if (*byte != 0xa5) {
2089 error = true;
2090 arena_redzone_corruption(ptr, size, false, i,
2091 *byte);
2092 if (reset)
2093 *byte = 0xa5;

--- 33 unchanged lines hidden (view full) ---

2127#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2128arena_dalloc_junk_small_t *arena_dalloc_junk_small =
2129 JEMALLOC_N(arena_dalloc_junk_small_impl);
2130#endif
2131
2132void
2133arena_quarantine_junk_small(void *ptr, size_t usize)
2134{
2134 index_t binind;
2135 szind_t binind;
2135 arena_bin_info_t *bin_info;
2136 cassert(config_fill);
2137 assert(opt_junk_free);
2138 assert(opt_quarantine);
2139 assert(usize <= SMALL_MAXCLASS);
2140
2141 binind = size2index(usize);
2142 bin_info = &arena_bin_info[binind];
2143 arena_redzones_validate(ptr, bin_info, true);
2144}
2145
2146void *
2147arena_malloc_small(arena_t *arena, size_t size, bool zero)
2148{
2149 void *ret;
2150 arena_bin_t *bin;
2151 arena_run_t *run;
2136 arena_bin_info_t *bin_info;
2137 cassert(config_fill);
2138 assert(opt_junk_free);
2139 assert(opt_quarantine);
2140 assert(usize <= SMALL_MAXCLASS);
2141
2142 binind = size2index(usize);
2143 bin_info = &arena_bin_info[binind];
2144 arena_redzones_validate(ptr, bin_info, true);
2145}
2146
2147void *
2148arena_malloc_small(arena_t *arena, size_t size, bool zero)
2149{
2150 void *ret;
2151 arena_bin_t *bin;
2152 arena_run_t *run;
2152 index_t binind;
2153 szind_t binind;
2153
2154 binind = size2index(size);
2155 assert(binind < NBINS);
2156 bin = &arena->bins[binind];
2157 size = index2size(binind);
2158
2159 malloc_mutex_lock(&bin->lock);
2160 if ((run = bin->runcur) != NULL && run->nfree > 0)

--- 67 unchanged lines hidden (view full) ---

2228 if (run == NULL) {
2229 malloc_mutex_unlock(&arena->lock);
2230 return (NULL);
2231 }
2232 miscelm = arena_run_to_miscelm(run);
2233 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2234 random_offset);
2235 if (config_stats) {
2154
2155 binind = size2index(size);
2156 assert(binind < NBINS);
2157 bin = &arena->bins[binind];
2158 size = index2size(binind);
2159
2160 malloc_mutex_lock(&bin->lock);
2161 if ((run = bin->runcur) != NULL && run->nfree > 0)

--- 67 unchanged lines hidden (view full) ---

2229 if (run == NULL) {
2230 malloc_mutex_unlock(&arena->lock);
2231 return (NULL);
2232 }
2233 miscelm = arena_run_to_miscelm(run);
2234 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2235 random_offset);
2236 if (config_stats) {
2236 index_t index = size2index(usize) - NBINS;
2237 szind_t index = size2index(usize) - NBINS;
2237
2238 arena->stats.nmalloc_large++;
2239 arena->stats.nrequests_large++;
2240 arena->stats.allocated_large += usize;
2241 arena->stats.lstats[index].nmalloc++;
2242 arena->stats.lstats[index].nrequests++;
2243 arena->stats.lstats[index].curruns++;
2244 }

--- 76 unchanged lines hidden (view full) ---

2321 assert(decommitted); /* Cause of OOM. */
2322 arena_run_dalloc(arena, run, dirty, false, decommitted);
2323 malloc_mutex_unlock(&arena->lock);
2324 return (NULL);
2325 }
2326 ret = arena_miscelm_to_rpages(miscelm);
2327
2328 if (config_stats) {
2238
2239 arena->stats.nmalloc_large++;
2240 arena->stats.nrequests_large++;
2241 arena->stats.allocated_large += usize;
2242 arena->stats.lstats[index].nmalloc++;
2243 arena->stats.lstats[index].nrequests++;
2244 arena->stats.lstats[index].curruns++;
2245 }

--- 76 unchanged lines hidden (view full) ---

2322 assert(decommitted); /* Cause of OOM. */
2323 arena_run_dalloc(arena, run, dirty, false, decommitted);
2324 malloc_mutex_unlock(&arena->lock);
2325 return (NULL);
2326 }
2327 ret = arena_miscelm_to_rpages(miscelm);
2328
2329 if (config_stats) {
2329 index_t index = size2index(usize) - NBINS;
2330 szind_t index = size2index(usize) - NBINS;
2330
2331 arena->stats.nmalloc_large++;
2332 arena->stats.nrequests_large++;
2333 arena->stats.allocated_large += usize;
2334 arena->stats.lstats[index].nmalloc++;
2335 arena->stats.lstats[index].nrequests++;
2336 arena->stats.lstats[index].curruns++;
2337 }

--- 13 unchanged lines hidden (view full) ---

2351 bool zero, tcache_t *tcache)
2352{
2353 void *ret;
2354
2355 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
2356 && (usize & PAGE_MASK) == 0))) {
2357 /* Small; alignment doesn't require special run placement. */
2358 ret = arena_malloc(tsd, arena, usize, zero, tcache);
2331
2332 arena->stats.nmalloc_large++;
2333 arena->stats.nrequests_large++;
2334 arena->stats.allocated_large += usize;
2335 arena->stats.lstats[index].nmalloc++;
2336 arena->stats.lstats[index].nrequests++;
2337 arena->stats.lstats[index].curruns++;
2338 }

--- 13 unchanged lines hidden (view full) ---

2352 bool zero, tcache_t *tcache)
2353{
2354 void *ret;
2355
2356 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
2357 && (usize & PAGE_MASK) == 0))) {
2358 /* Small; alignment doesn't require special run placement. */
2359 ret = arena_malloc(tsd, arena, usize, zero, tcache);
2359 } else if (usize <= arena_maxclass && alignment <= PAGE) {
2360 } else if (usize <= large_maxclass && alignment <= PAGE) {
2360 /*
2361 * Large; alignment doesn't require special run placement.
2362 * However, the cached pointer may be at a random offset from
2363 * the base of the run, so do some bit manipulation to retrieve
2364 * the base.
2365 */
2366 ret = arena_malloc(tsd, arena, usize, zero, tcache);
2367 if (config_cache_oblivious)
2368 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2369 } else {
2361 /*
2362 * Large; alignment doesn't require special run placement.
2363 * However, the cached pointer may be at a random offset from
2364 * the base of the run, so do some bit manipulation to retrieve
2365 * the base.
2366 */
2367 ret = arena_malloc(tsd, arena, usize, zero, tcache);
2368 if (config_cache_oblivious)
2369 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2370 } else {
2370 if (likely(usize <= arena_maxclass)) {
2371 if (likely(usize <= large_maxclass)) {
2371 ret = arena_palloc_large(tsd, arena, usize, alignment,
2372 zero);
2373 } else if (likely(alignment <= chunksize))
2374 ret = huge_malloc(tsd, arena, usize, zero, tcache);
2375 else {
2376 ret = huge_palloc(tsd, arena, usize, alignment, zero,
2377 tcache);
2378 }
2379 }
2380 return (ret);
2381}
2382
2383void
2384arena_prof_promoted(const void *ptr, size_t size)
2385{
2386 arena_chunk_t *chunk;
2387 size_t pageind;
2372 ret = arena_palloc_large(tsd, arena, usize, alignment,
2373 zero);
2374 } else if (likely(alignment <= chunksize))
2375 ret = huge_malloc(tsd, arena, usize, zero, tcache);
2376 else {
2377 ret = huge_palloc(tsd, arena, usize, alignment, zero,
2378 tcache);
2379 }
2380 }
2381 return (ret);
2382}
2383
2384void
2385arena_prof_promoted(const void *ptr, size_t size)
2386{
2387 arena_chunk_t *chunk;
2388 size_t pageind;
2388 index_t binind;
2389 szind_t binind;
2389
2390 cassert(config_prof);
2391 assert(ptr != NULL);
2392 assert(CHUNK_ADDR2BASE(ptr) != ptr);
2393 assert(isalloc(ptr, false) == LARGE_MINCLASS);
2394 assert(isalloc(ptr, true) == LARGE_MINCLASS);
2395 assert(size <= SMALL_MAXCLASS);
2396

--- 11 unchanged lines hidden (view full) ---

2408arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
2409 arena_bin_t *bin)
2410{
2411
2412 /* Dissociate run from bin. */
2413 if (run == bin->runcur)
2414 bin->runcur = NULL;
2415 else {
2390
2391 cassert(config_prof);
2392 assert(ptr != NULL);
2393 assert(CHUNK_ADDR2BASE(ptr) != ptr);
2394 assert(isalloc(ptr, false) == LARGE_MINCLASS);
2395 assert(isalloc(ptr, true) == LARGE_MINCLASS);
2396 assert(size <= SMALL_MAXCLASS);
2397

--- 11 unchanged lines hidden (view full) ---

2409arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
2410 arena_bin_t *bin)
2411{
2412
2413 /* Dissociate run from bin. */
2414 if (run == bin->runcur)
2415 bin->runcur = NULL;
2416 else {
2416 index_t binind = arena_bin_index(extent_node_arena_get(
2417 szind_t binind = arena_bin_index(extent_node_arena_get(
2417 &chunk->node), bin);
2418 arena_bin_info_t *bin_info = &arena_bin_info[binind];
2419
2420 if (bin_info->nregs != 1) {
2421 /*
2422 * This block's conditional is necessary because if the
2423 * run only contains one region, then it never gets
2424 * inserted into the non-full runs tree.

--- 47 unchanged lines hidden (view full) ---

2472static void
2473arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2474 arena_chunk_map_bits_t *bitselm, bool junked)
2475{
2476 size_t pageind, rpages_ind;
2477 arena_run_t *run;
2478 arena_bin_t *bin;
2479 arena_bin_info_t *bin_info;
2418 &chunk->node), bin);
2419 arena_bin_info_t *bin_info = &arena_bin_info[binind];
2420
2421 if (bin_info->nregs != 1) {
2422 /*
2423 * This block's conditional is necessary because if the
2424 * run only contains one region, then it never gets
2425 * inserted into the non-full runs tree.

--- 47 unchanged lines hidden (view full) ---

2473static void
2474arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2475 arena_chunk_map_bits_t *bitselm, bool junked)
2476{
2477 size_t pageind, rpages_ind;
2478 arena_run_t *run;
2479 arena_bin_t *bin;
2480 arena_bin_info_t *bin_info;
2480 index_t binind;
2481 szind_t binind;
2481
2482 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2483 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2484 run = &arena_miscelm_get(chunk, rpages_ind)->run;
2485 binind = run->binind;
2486 bin = &arena->bins[binind];
2487 bin_info = &arena_bin_info[binind];
2488

--- 65 unchanged lines hidden (view full) ---

2554}
2555#ifdef JEMALLOC_JET
2556#undef arena_dalloc_junk_large
2557#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2558arena_dalloc_junk_large_t *arena_dalloc_junk_large =
2559 JEMALLOC_N(arena_dalloc_junk_large_impl);
2560#endif
2561
2482
2483 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2484 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2485 run = &arena_miscelm_get(chunk, rpages_ind)->run;
2486 binind = run->binind;
2487 bin = &arena->bins[binind];
2488 bin_info = &arena_bin_info[binind];
2489

--- 65 unchanged lines hidden (view full) ---

2555}
2556#ifdef JEMALLOC_JET
2557#undef arena_dalloc_junk_large
2558#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2559arena_dalloc_junk_large_t *arena_dalloc_junk_large =
2560 JEMALLOC_N(arena_dalloc_junk_large_impl);
2561#endif
2562
2562void
2563static void
2563arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
2564 void *ptr, bool junked)
2565{
2566 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2567 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
2568 arena_run_t *run = &miscelm->run;
2569
2570 if (config_fill || config_stats) {
2571 size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
2572 large_pad;
2573
2574 if (!junked)
2575 arena_dalloc_junk_large(ptr, usize);
2576 if (config_stats) {
2564arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
2565 void *ptr, bool junked)
2566{
2567 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2568 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
2569 arena_run_t *run = &miscelm->run;
2570
2571 if (config_fill || config_stats) {
2572 size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
2573 large_pad;
2574
2575 if (!junked)
2576 arena_dalloc_junk_large(ptr, usize);
2577 if (config_stats) {
2577 index_t index = size2index(usize) - NBINS;
2578 szind_t index = size2index(usize) - NBINS;
2578
2579 arena->stats.ndalloc_large++;
2580 arena->stats.allocated_large -= usize;
2581 arena->stats.lstats[index].ndalloc++;
2582 arena->stats.lstats[index].curruns--;
2583 }
2584 }
2585

--- 30 unchanged lines hidden (view full) ---

2616 /*
2617 * Shrink the run, and make trailing pages available for other
2618 * allocations.
2619 */
2620 malloc_mutex_lock(&arena->lock);
2621 arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
2622 large_pad, true);
2623 if (config_stats) {
2579
2580 arena->stats.ndalloc_large++;
2581 arena->stats.allocated_large -= usize;
2582 arena->stats.lstats[index].ndalloc++;
2583 arena->stats.lstats[index].curruns--;
2584 }
2585 }
2586

--- 30 unchanged lines hidden (view full) ---

2617 /*
2618 * Shrink the run, and make trailing pages available for other
2619 * allocations.
2620 */
2621 malloc_mutex_lock(&arena->lock);
2622 arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
2623 large_pad, true);
2624 if (config_stats) {
2624 index_t oldindex = size2index(oldsize) - NBINS;
2625 index_t index = size2index(size) - NBINS;
2625 szind_t oldindex = size2index(oldsize) - NBINS;
2626 szind_t index = size2index(size) - NBINS;
2626
2627 arena->stats.ndalloc_large++;
2628 arena->stats.allocated_large -= oldsize;
2629 arena->stats.lstats[oldindex].ndalloc++;
2630 arena->stats.lstats[oldindex].curruns--;
2631
2632 arena->stats.nmalloc_large++;
2633 arena->stats.nrequests_large++;
2634 arena->stats.allocated_large += size;
2635 arena->stats.lstats[index].nmalloc++;
2636 arena->stats.lstats[index].nrequests++;
2637 arena->stats.lstats[index].curruns++;
2638 }
2639 malloc_mutex_unlock(&arena->lock);
2640}
2641
2642static bool
2643arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2627
2628 arena->stats.ndalloc_large++;
2629 arena->stats.allocated_large -= oldsize;
2630 arena->stats.lstats[oldindex].ndalloc++;
2631 arena->stats.lstats[oldindex].curruns--;
2632
2633 arena->stats.nmalloc_large++;
2634 arena->stats.nrequests_large++;
2635 arena->stats.allocated_large += size;
2636 arena->stats.lstats[index].nmalloc++;
2637 arena->stats.lstats[index].nrequests++;
2638 arena->stats.lstats[index].curruns++;
2639 }
2640 malloc_mutex_unlock(&arena->lock);
2641}
2642
2643static bool
2644arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2644 size_t oldsize, size_t size, size_t extra, bool zero)
2645 size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
2645{
2646 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2647 size_t npages = (oldsize + large_pad) >> LG_PAGE;
2648 size_t followsize;
2646{
2647 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2648 size_t npages = (oldsize + large_pad) >> LG_PAGE;
2649 size_t followsize;
2649 size_t usize_min = s2u(size);
2650
2651 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
2652 large_pad);
2653
2654 /* Try to extend the run. */
2650
2651 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
2652 large_pad);
2653
2654 /* Try to extend the run. */
2655 assert(usize_min > oldsize);
2656 malloc_mutex_lock(&arena->lock);
2655 malloc_mutex_lock(&arena->lock);
2657 if (pageind+npages < chunk_npages &&
2658 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
2659 (followsize = arena_mapbits_unallocated_size_get(chunk,
2660 pageind+npages)) >= usize_min - oldsize) {
2656 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
2657 pageind+npages) != 0)
2658 goto label_fail;
2659 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
2660 if (oldsize + followsize >= usize_min) {
2661 /*
2662 * The next run is available and sufficiently large. Split the
2663 * following run, then merge the first part with the existing
2664 * allocation.
2665 */
2666 arena_run_t *run;
2661 /*
2662 * The next run is available and sufficiently large. Split the
2663 * following run, then merge the first part with the existing
2664 * allocation.
2665 */
2666 arena_run_t *run;
2667 size_t flag_dirty, flag_unzeroed_mask, splitsize, usize;
2667 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
2668
2668
2669 usize = s2u(size + extra);
2669 usize = usize_max;
2670 while (oldsize + followsize < usize)
2671 usize = index2size(size2index(usize)-1);
2672 assert(usize >= usize_min);
2670 while (oldsize + followsize < usize)
2671 usize = index2size(size2index(usize)-1);
2672 assert(usize >= usize_min);
2673 assert(usize >= oldsize);
2673 splitsize = usize - oldsize;
2674 splitsize = usize - oldsize;
2675 if (splitsize == 0)
2676 goto label_fail;
2674
2675 run = &arena_miscelm_get(chunk, pageind+npages)->run;
2677
2678 run = &arena_miscelm_get(chunk, pageind+npages)->run;
2676 if (arena_run_split_large(arena, run, splitsize, zero)) {
2677 malloc_mutex_unlock(&arena->lock);
2678 return (true);
2679 }
2679 if (arena_run_split_large(arena, run, splitsize, zero))
2680 goto label_fail;
2680
2681 size = oldsize + splitsize;
2682 npages = (size + large_pad) >> LG_PAGE;
2683
2684 /*
2685 * Mark the extended run as dirty if either portion of the run
2686 * was dirty before allocation. This is rather pedantic,
2687 * because there's not actually any sequence of events that

--- 7 unchanged lines hidden (view full) ---

2695 arena_mapbits_large_set(chunk, pageind, size + large_pad,
2696 flag_dirty | (flag_unzeroed_mask &
2697 arena_mapbits_unzeroed_get(chunk, pageind)));
2698 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
2699 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2700 pageind+npages-1)));
2701
2702 if (config_stats) {
2681
2682 size = oldsize + splitsize;
2683 npages = (size + large_pad) >> LG_PAGE;
2684
2685 /*
2686 * Mark the extended run as dirty if either portion of the run
2687 * was dirty before allocation. This is rather pedantic,
2688 * because there's not actually any sequence of events that

--- 7 unchanged lines hidden (view full) ---

2696 arena_mapbits_large_set(chunk, pageind, size + large_pad,
2697 flag_dirty | (flag_unzeroed_mask &
2698 arena_mapbits_unzeroed_get(chunk, pageind)));
2699 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
2700 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2701 pageind+npages-1)));
2702
2703 if (config_stats) {
2703 index_t oldindex = size2index(oldsize) - NBINS;
2704 index_t index = size2index(size) - NBINS;
2704 szind_t oldindex = size2index(oldsize) - NBINS;
2705 szind_t index = size2index(size) - NBINS;
2705
2706 arena->stats.ndalloc_large++;
2707 arena->stats.allocated_large -= oldsize;
2708 arena->stats.lstats[oldindex].ndalloc++;
2709 arena->stats.lstats[oldindex].curruns--;
2710
2711 arena->stats.nmalloc_large++;
2712 arena->stats.nrequests_large++;
2713 arena->stats.allocated_large += size;
2714 arena->stats.lstats[index].nmalloc++;
2715 arena->stats.lstats[index].nrequests++;
2716 arena->stats.lstats[index].curruns++;
2717 }
2718 malloc_mutex_unlock(&arena->lock);
2719 return (false);
2720 }
2706
2707 arena->stats.ndalloc_large++;
2708 arena->stats.allocated_large -= oldsize;
2709 arena->stats.lstats[oldindex].ndalloc++;
2710 arena->stats.lstats[oldindex].curruns--;
2711
2712 arena->stats.nmalloc_large++;
2713 arena->stats.nrequests_large++;
2714 arena->stats.allocated_large += size;
2715 arena->stats.lstats[index].nmalloc++;
2716 arena->stats.lstats[index].nrequests++;
2717 arena->stats.lstats[index].curruns++;
2718 }
2719 malloc_mutex_unlock(&arena->lock);
2720 return (false);
2721 }
2722label_fail:
2721 malloc_mutex_unlock(&arena->lock);
2723 malloc_mutex_unlock(&arena->lock);
2722
2723 return (true);
2724}
2725
2726#ifdef JEMALLOC_JET
2727#undef arena_ralloc_junk_large
2728#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
2729#endif
2730static void

--- 12 unchanged lines hidden (view full) ---

2743 JEMALLOC_N(arena_ralloc_junk_large_impl);
2744#endif
2745
2746/*
2747 * Try to resize a large allocation, in order to avoid copying. This will
2748 * always fail if growing an object, and the following run is already in use.
2749 */
2750static bool
2724 return (true);
2725}
2726
2727#ifdef JEMALLOC_JET
2728#undef arena_ralloc_junk_large
2729#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
2730#endif
2731static void

--- 12 unchanged lines hidden (view full) ---

2744 JEMALLOC_N(arena_ralloc_junk_large_impl);
2745#endif
2746
2747/*
2748 * Try to resize a large allocation, in order to avoid copying. This will
2749 * always fail if growing an object, and the following run is already in use.
2750 */
2751static bool
2751arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
2752 bool zero)
2752arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
2753 size_t usize_max, bool zero)
2753{
2754{
2754 size_t usize;
2755 arena_chunk_t *chunk;
2756 arena_t *arena;
2755
2757
2756 /* Make sure extra can't cause size_t overflow. */
2757 if (unlikely(extra >= arena_maxclass))
2758 return (true);
2759
2760 usize = s2u(size + extra);
2761 if (usize == oldsize) {
2762 /* Same size class. */
2758 if (oldsize == usize_max) {
2759 /* Current size class is compatible and maximal. */
2763 return (false);
2760 return (false);
2764 } else {
2765 arena_chunk_t *chunk;
2766 arena_t *arena;
2761 }
2767
2762
2768 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2769 arena = extent_node_arena_get(&chunk->node);
2763 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2764 arena = extent_node_arena_get(&chunk->node);
2770
2765
2771 if (usize < oldsize) {
2772 /* Fill before shrinking in order to avoid a race. */
2773 arena_ralloc_junk_large(ptr, oldsize, usize);
2774 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
2775 usize);
2776 return (false);
2777 } else {
2778 bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
2779 oldsize, size, extra, zero);
2780 if (config_fill && !ret && !zero) {
2781 if (unlikely(opt_junk_alloc)) {
2782 memset((void *)((uintptr_t)ptr +
2783 oldsize), 0xa5, isalloc(ptr,
2784 config_prof) - oldsize);
2785 } else if (unlikely(opt_zero)) {
2786 memset((void *)((uintptr_t)ptr +
2787 oldsize), 0, isalloc(ptr,
2788 config_prof) - oldsize);
2789 }
2766 if (oldsize < usize_max) {
2767 bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
2768 usize_min, usize_max, zero);
2769 if (config_fill && !ret && !zero) {
2770 if (unlikely(opt_junk_alloc)) {
2771 memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
2772 isalloc(ptr, config_prof) - oldsize);
2773 } else if (unlikely(opt_zero)) {
2774 memset((void *)((uintptr_t)ptr + oldsize), 0,
2775 isalloc(ptr, config_prof) - oldsize);
2790 }
2776 }
2791 return (ret);
2792 }
2777 }
2778 return (ret);
2793 }
2779 }
2780
2781 assert(oldsize > usize_max);
2782 /* Fill before shrinking in order to avoid a race. */
2783 arena_ralloc_junk_large(ptr, oldsize, usize_max);
2784 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
2785 return (false);
2794}
2795
2796bool
2797arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
2798 bool zero)
2799{
2786}
2787
2788bool
2789arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
2790 bool zero)
2791{
2792 size_t usize_min, usize_max;
2800
2793
2801 if (likely(size <= arena_maxclass)) {
2794 usize_min = s2u(size);
2795 usize_max = s2u(size + extra);
2796 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
2802 /*
2803 * Avoid moving the allocation if the size class can be left the
2804 * same.
2805 */
2797 /*
2798 * Avoid moving the allocation if the size class can be left the
2799 * same.
2800 */
2806 if (likely(oldsize <= arena_maxclass)) {
2807 if (oldsize <= SMALL_MAXCLASS) {
2808 assert(
2809 arena_bin_info[size2index(oldsize)].reg_size
2810 == oldsize);
2811 if ((size + extra <= SMALL_MAXCLASS &&
2812 size2index(size + extra) ==
2813 size2index(oldsize)) || (size <= oldsize &&
2814 size + extra >= oldsize))
2801 if (oldsize <= SMALL_MAXCLASS) {
2802 assert(arena_bin_info[size2index(oldsize)].reg_size ==
2803 oldsize);
2804 if ((usize_max <= SMALL_MAXCLASS &&
2805 size2index(usize_max) == size2index(oldsize)) ||
2806 (size <= oldsize && usize_max >= oldsize))
2807 return (false);
2808 } else {
2809 if (usize_max > SMALL_MAXCLASS) {
2810 if (!arena_ralloc_large(ptr, oldsize, usize_min,
2811 usize_max, zero))
2815 return (false);
2812 return (false);
2816 } else {
2817 assert(size <= arena_maxclass);
2818 if (size + extra > SMALL_MAXCLASS) {
2819 if (!arena_ralloc_large(ptr, oldsize,
2820 size, extra, zero))
2821 return (false);
2822 }
2823 }
2824 }
2825
2826 /* Reallocation would require a move. */
2827 return (true);
2813 }
2814 }
2815
2816 /* Reallocation would require a move. */
2817 return (true);
2828 } else
2829 return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
2818 } else {
2819 return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max,
2820 zero));
2821 }
2830}
2831
2822}
2823
2824static void *
2825arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
2826 size_t alignment, bool zero, tcache_t *tcache)
2827{
2828
2829 if (alignment == 0)
2830 return (arena_malloc(tsd, arena, usize, zero, tcache));
2831 usize = sa2u(usize, alignment);
2832 if (usize == 0)
2833 return (NULL);
2834 return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
2835}
2836
2832void *
2833arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
2837void *
2838arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
2834 size_t extra, size_t alignment, bool zero, tcache_t *tcache)
2839 size_t alignment, bool zero, tcache_t *tcache)
2835{
2836 void *ret;
2840{
2841 void *ret;
2842 size_t usize;
2837
2843
2838 if (likely(size <= arena_maxclass)) {
2844 usize = s2u(size);
2845 if (usize == 0)
2846 return (NULL);
2847
2848 if (likely(usize <= large_maxclass)) {
2839 size_t copysize;
2840
2841 /* Try to avoid moving the allocation. */
2849 size_t copysize;
2850
2851 /* Try to avoid moving the allocation. */
2842 if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
2852 if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero))
2843 return (ptr);
2844
2845 /*
2846 * size and oldsize are different enough that we need to move
2847 * the object. In that case, fall back to allocating new space
2848 * and copying.
2849 */
2853 return (ptr);
2854
2855 /*
2856 * size and oldsize are different enough that we need to move
2857 * the object. In that case, fall back to allocating new space
2858 * and copying.
2859 */
2850 if (alignment != 0) {
2851 size_t usize = sa2u(size + extra, alignment);
2852 if (usize == 0)
2853 return (NULL);
2854 ret = ipalloct(tsd, usize, alignment, zero, tcache,
2855 arena);
2856 } else {
2857 ret = arena_malloc(tsd, arena, size + extra, zero,
2858 tcache);
2859 }
2860 ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
2861 zero, tcache);
2862 if (ret == NULL)
2863 return (NULL);
2860
2864
2861 if (ret == NULL) {
2862 if (extra == 0)
2863 return (NULL);
2864 /* Try again, this time without extra. */
2865 if (alignment != 0) {
2866 size_t usize = sa2u(size, alignment);
2867 if (usize == 0)
2868 return (NULL);
2869 ret = ipalloct(tsd, usize, alignment, zero,
2870 tcache, arena);
2871 } else {
2872 ret = arena_malloc(tsd, arena, size, zero,
2873 tcache);
2874 }
2875
2876 if (ret == NULL)
2877 return (NULL);
2878 }
2879
2880 /*
2881 * Junk/zero-filling were already done by
2882 * ipalloc()/arena_malloc().
2883 */
2884
2865 /*
2866 * Junk/zero-filling were already done by
2867 * ipalloc()/arena_malloc().
2868 */
2869
2885 /*
2886 * Copy at most size bytes (not size+extra), since the caller
2887 * has no expectation that the extra bytes will be reliably
2888 * preserved.
2889 */
2890 copysize = (size < oldsize) ? size : oldsize;
2870 copysize = (usize < oldsize) ? usize : oldsize;
2891 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
2892 memcpy(ret, ptr, copysize);
2893 isqalloc(tsd, ptr, oldsize, tcache);
2894 } else {
2871 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
2872 memcpy(ret, ptr, copysize);
2873 isqalloc(tsd, ptr, oldsize, tcache);
2874 } else {
2895 ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra,
2896 alignment, zero, tcache);
2875 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
2876 zero, tcache);
2897 }
2898 return (ret);
2899}
2900
2901dss_prec_t
2902arena_dss_prec_get(arena_t *arena)
2903{
2904 dss_prec_t ret;

--- 331 unchanged lines hidden (view full) ---

3236#undef SC
3237
3238 return (false);
3239}
3240
3241bool
3242arena_boot(void)
3243{
2877 }
2878 return (ret);
2879}
2880
2881dss_prec_t
2882arena_dss_prec_get(arena_t *arena)
2883{
2884 dss_prec_t ret;

--- 331 unchanged lines hidden (view full) ---

3216#undef SC
3217
3218 return (false);
3219}
3220
3221bool
3222arena_boot(void)
3223{
3244 size_t header_size;
3245 unsigned i;
3246
3247 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
3248
3249 /*
3250 * Compute the header size such that it is large enough to contain the
3251 * page map. The page map is biased to omit entries for the header
3252 * itself, so some iteration is necessary to compute the map bias.
3253 *
3254 * 1) Compute safe header_size and map_bias values that include enough
3255 * space for an unbiased page map.
3256 * 2) Refine map_bias based on (1) to omit the header pages in the page
3257 * map. The resulting map_bias may be one too small.
3258 * 3) Refine map_bias based on (2). The result will be >= the result
3259 * from (2), and will always be correct.
3260 */
3261 map_bias = 0;
3262 for (i = 0; i < 3; i++) {
3224 unsigned i;
3225
3226 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
3227
3228 /*
3229 * Compute the header size such that it is large enough to contain the
3230 * page map. The page map is biased to omit entries for the header
3231 * itself, so some iteration is necessary to compute the map bias.
3232 *
3233 * 1) Compute safe header_size and map_bias values that include enough
3234 * space for an unbiased page map.
3235 * 2) Refine map_bias based on (1) to omit the header pages in the page
3236 * map. The resulting map_bias may be one too small.
3237 * 3) Refine map_bias based on (2). The result will be >= the result
3238 * from (2), and will always be correct.
3239 */
3240 map_bias = 0;
3241 for (i = 0; i < 3; i++) {
3263 header_size = offsetof(arena_chunk_t, map_bits) +
3242 size_t header_size = offsetof(arena_chunk_t, map_bits) +
3264 ((sizeof(arena_chunk_map_bits_t) +
3265 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
3266 map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
3267 }
3268 assert(map_bias > 0);
3269
3270 map_misc_offset = offsetof(arena_chunk_t, map_bits) +
3271 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
3272
3273 arena_maxrun = chunksize - (map_bias << LG_PAGE);
3274 assert(arena_maxrun > 0);
3243 ((sizeof(arena_chunk_map_bits_t) +
3244 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
3245 map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
3246 }
3247 assert(map_bias > 0);
3248
3249 map_misc_offset = offsetof(arena_chunk_t, map_bits) +
3250 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
3251
3252 arena_maxrun = chunksize - (map_bias << LG_PAGE);
3253 assert(arena_maxrun > 0);
3275 arena_maxclass = index2size(size2index(chunksize)-1);
3276 if (arena_maxclass > arena_maxrun) {
3254 large_maxclass = index2size(size2index(chunksize)-1);
3255 if (large_maxclass > arena_maxrun) {
3277 /*
3278 * For small chunk sizes it's possible for there to be fewer
3279 * non-header pages available than are necessary to serve the
3280 * size classes just below chunksize.
3281 */
3256 /*
3257 * For small chunk sizes it's possible for there to be fewer
3258 * non-header pages available than are necessary to serve the
3259 * size classes just below chunksize.
3260 */
3282 arena_maxclass = arena_maxrun;
3261 large_maxclass = arena_maxrun;
3283 }
3262 }
3284 assert(arena_maxclass > 0);
3285 nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS);
3263 assert(large_maxclass > 0);
3264 nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
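As a quick sanity check of the renamed computation above, under a common default configuration (4 KiB pages, 2 MiB chunks; other configurations differ):

/*
 * chunksize      = 2 MiB
 * large_maxclass = index2size(size2index(chunksize) - 1) = 1792 KiB
 * arena_maxrun   = chunksize minus the map_bias header pages,
 *                  i.e. slightly under 2 MiB
 * Here large_maxclass < arena_maxrun, so the clamp to arena_maxrun only
 * matters for unusually small chunk sizes, as the comment above notes.
 */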
3286 nhclasses = NSIZES - nlclasses - NBINS;
3287
3288 bin_info_init();
3289 return (small_run_size_init());
3290}
3291
3292void
3293arena_prefork(arena_t *arena)

--- 36 unchanged lines hidden ---
3265 nhclasses = NSIZES - nlclasses - NBINS;
3266
3267 bin_info_init();
3268 return (small_run_size_init());
3269}
3270
3271void
3272arena_prefork(arena_t *arena)

--- 36 unchanged lines hidden ---