arena.c (234543 -> 235238)
1#define JEMALLOC_ARENA_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
8arena_bin_info_t arena_bin_info[NBINS];
9
10JEMALLOC_ATTR(aligned(CACHELINE))
10JEMALLOC_ALIGNED(CACHELINE)
11const uint8_t small_size2bin[] = {
12#define S2B_8(i) i,
13#define S2B_16(i) S2B_8(i) S2B_8(i)
14#define S2B_32(i) S2B_16(i) S2B_16(i)
15#define S2B_64(i) S2B_32(i) S2B_32(i)
16#define S2B_128(i) S2B_64(i) S2B_64(i)
17#define S2B_256(i) S2B_128(i) S2B_128(i)
18#define S2B_512(i) S2B_256(i) S2B_256(i)

--- 17 unchanged lines hidden ---

36#undef S2B_8192
37#undef SIZE_CLASS
38};
39
40/******************************************************************************/
41/* Function prototypes for non-inline static functions. */
42
43static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
44 bool large, bool zero);
44 bool large, size_t binind, bool zero);
45static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
46static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
47static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
48 bool zero);
48 size_t binind, bool zero);
49static void arena_purge(arena_t *arena, bool all);
50static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
51static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
52 arena_run_t *run, size_t oldsize, size_t newsize);
53static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
54 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
55static arena_run_t *arena_bin_runs_first(arena_bin_t *bin);
56static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);

--- 90 unchanged lines hidden ---
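/*
 * Illustrative sketch, not part of arena.c: the prototype changes above thread
 * a size-class index (binind) through arena_run_split() and arena_run_alloc().
 * Judging from the asserts and call sites added later in this diff, large runs
 * pass the BININD_INVALID sentinel and small runs pass their bin index so it
 * can be recorded in the per-page map.  The helper names below are
 * hypothetical; only the arena_run_alloc() calls mirror the new code.
 */
static arena_run_t *
alloc_large_run(arena_t *arena, size_t size, bool zero)
{

	/* Large runs carry no bin; binind is the invalid sentinel. */
	return (arena_run_alloc(arena, size, true, BININD_INVALID, zero));
}

static arena_run_t *
alloc_small_run(arena_t *arena, size_t binind)
{
	arena_bin_info_t *bin_info = &arena_bin_info[binind];

	/* Small runs record their size class in the chunk's page map. */
	assert(binind != BININD_INVALID);
	return (arena_run_alloc(arena, bin_info->run_size, false, binind,
	    false));
}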

147 assert(regind < run->nextind);
148 return (ret);
149}
150
151static inline void
152arena_run_reg_dalloc(arena_run_t *run, void *ptr)
153{
154 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
155 size_t binind = arena_bin_index(chunk->arena, run->bin);
155 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
156 size_t mapbits = arena_mapbits_get(chunk, pageind);
157 size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
156 arena_bin_info_t *bin_info = &arena_bin_info[binind];
157 unsigned regind = arena_run_regind(run, bin_info, ptr);
158 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
159 (uintptr_t)bin_info->bitmap_offset);
160
161 assert(run->nfree < bin_info->nregs);
162 /* Freeing an interior pointer can cause assertion failure. */
163 assert(((uintptr_t)ptr - ((uintptr_t)run +

--- 15 unchanged lines hidden ---
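/*
 * Illustrative sketch, not part of arena.c: throughout this revision, inline
 * reads of chunk->map[pageind-map_bias].bits are replaced by arena_mapbits_*()
 * accessors (arena_mapbits_get() appears in the hunk above; the dirty and
 * unallocated-size variants appear in arena_run_split() below).  Their likely
 * shape, inferred from the raw expressions they replace in this diff; the real
 * definitions live in the arena header and may carry additional assertions.
 */
static inline arena_chunk_map_t *
arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
{

	return (&chunk->map[pageind-map_bias]);
}

static inline size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{

	return (arena_mapp_get(chunk, pageind)->bits);
}

static inline size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{

	/* CHUNK_MAP_DIRTY is the same flag the old code tested inline. */
	return (arena_mapbits_get(chunk, pageind) & CHUNK_MAP_DIRTY);
}

static inline size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{

	/* For unallocated mappings, the run size occupies the non-flag bits. */
	return (arena_mapbits_get(chunk, pageind) & ~PAGE_MASK);
}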

179 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
180
181 for (i = 0; i < PAGE / sizeof(size_t); i++)
182 assert(p[i] == 0);
183}
184
185static void
186arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
187 bool zero)
189 size_t binind, bool zero)
188{
189 arena_chunk_t *chunk;
190 size_t run_ind, total_pages, need_pages, rem_pages, i;
191 size_t flag_dirty;
192 arena_avail_tree_t *runs_avail;
193
196 assert((large && binind == BININD_INVALID) || (large == false && binind
197 != BININD_INVALID));
198
194 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
195 run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
196 flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY;
201 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
197 runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
198 &arena->runs_avail_clean;
199 total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >>
204 total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
200 LG_PAGE;
205 LG_PAGE;
201 assert((chunk->map[run_ind+total_pages-1-map_bias].bits &
202 CHUNK_MAP_DIRTY) == flag_dirty);
206 assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
207 flag_dirty);
203 need_pages = (size >> LG_PAGE);
204 assert(need_pages > 0);
205 assert(need_pages <= total_pages);
206 rem_pages = total_pages - need_pages;
207
208 arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
213 arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, run_ind));
209 if (config_stats) {
210 /*
211 * Update stats_cactive if nactive is crossing a chunk
212 * multiple.
213 */
214 size_t cactive_diff = CHUNK_CEILING((arena->nactive +
215 need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
216 LG_PAGE);
217 if (cactive_diff != 0)
218 stats_cactive_add(cactive_diff);
219 }
220 arena->nactive += need_pages;
221
222 /* Keep track of trailing unused pages for later use. */
223 if (rem_pages > 0) {
224 if (flag_dirty != 0) {
225 chunk->map[run_ind+need_pages-map_bias].bits =
226 (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
227 chunk->map[run_ind+total_pages-1-map_bias].bits =
228 (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
230 arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
231 (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
232 arena_mapbits_unallocated_set(chunk,
233 run_ind+total_pages-1, (rem_pages << LG_PAGE),
234 CHUNK_MAP_DIRTY);
229 } else {
235 } else {
230 chunk->map[run_ind+need_pages-map_bias].bits =
231 (rem_pages << LG_PAGE) |
232 (chunk->map[run_ind+need_pages-map_bias].bits &
233 CHUNK_MAP_UNZEROED);
234 chunk->map[run_ind+total_pages-1-map_bias].bits =
235 (rem_pages << LG_PAGE) |
236 (chunk->map[run_ind+total_pages-1-map_bias].bits &
237 CHUNK_MAP_UNZEROED);
236 arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
237 (rem_pages << LG_PAGE),
238 arena_mapbits_unzeroed_get(chunk,
239 run_ind+need_pages));
240 arena_mapbits_unallocated_set(chunk,
241 run_ind+total_pages-1, (rem_pages << LG_PAGE),
242 arena_mapbits_unzeroed_get(chunk,
243 run_ind+total_pages-1));
238 }
244 }
239 arena_avail_tree_insert(runs_avail,
240 &chunk->map[run_ind+need_pages-map_bias]);
245 arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
246 run_ind+need_pages));
241 }
242
243 /* Update dirty page accounting. */
244 if (flag_dirty != 0) {
245 chunk->ndirty -= need_pages;
246 arena->ndirty -= need_pages;
247 }
248

--- 4 unchanged lines hidden ---

253 if (large) {
254 if (zero) {
255 if (flag_dirty == 0) {
256 /*
257 * The run is clean, so some pages may be
258 * zeroed (i.e. never before touched).
259 */
260 for (i = 0; i < need_pages; i++) {
261 if ((chunk->map[run_ind+i-map_bias].bits
262 & CHUNK_MAP_UNZEROED) != 0) {
267 if (arena_mapbits_unzeroed_get(chunk,
268 run_ind+i) != 0) {
263 VALGRIND_MAKE_MEM_UNDEFINED(
264 (void *)((uintptr_t)
265 chunk + ((run_ind+i) <<
266 LG_PAGE)), PAGE);
267 memset((void *)((uintptr_t)
268 chunk + ((run_ind+i) <<
269 LG_PAGE)), 0, PAGE);
270 } else if (config_debug) {

--- 17 unchanged lines hidden ---

288 LG_PAGE)), 0, (need_pages << LG_PAGE));
289 }
290 }
291
292 /*
293 * Set the last element first, in case the run only contains one
294 * page (i.e. both statements set the same element).
295 */
296 chunk->map[run_ind+need_pages-1-map_bias].bits =
297 CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty;
298 chunk->map[run_ind-map_bias].bits = size | flag_dirty |
299 CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
302 arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
303 flag_dirty);
304 arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
300 } else {
301 assert(zero == false);
302 /*
303 * Propagate the dirty and unzeroed flags to the allocated
304 * small run, so that arena_dalloc_bin_run() has the ability to
305 * conditionally trim clean pages.
306 */
307 chunk->map[run_ind-map_bias].bits =
308 (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
309 CHUNK_MAP_ALLOCATED | flag_dirty;
312 arena_mapbits_small_set(chunk, run_ind, 0, binind,
313 arena_mapbits_unzeroed_get(chunk, run_ind) | flag_dirty);
310 /*
311 * The first page will always be dirtied during small run
312 * initialization, so a validation failure here would not
313 * actually cause an observable failure.
314 */
315 if (config_debug && flag_dirty == 0 &&
316 (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
317 == 0)
320 arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
318 arena_chunk_validate_zeroed(chunk, run_ind);
319 for (i = 1; i < need_pages - 1; i++) {
320 chunk->map[run_ind+i-map_bias].bits = (i << LG_PAGE)
321 | (chunk->map[run_ind+i-map_bias].bits &
322 CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
323 arena_mapbits_small_set(chunk, run_ind+i, i,
324 binind, arena_mapbits_unzeroed_get(chunk,
325 run_ind+i));
323 if (config_debug && flag_dirty == 0 &&
326 if (config_debug && flag_dirty == 0 &&
324 (chunk->map[run_ind+i-map_bias].bits &
325 CHUNK_MAP_UNZEROED) == 0)
327 arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
326 arena_chunk_validate_zeroed(chunk, run_ind+i);
327 }
328 chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
329 - 1) << LG_PAGE) |
330 (chunk->map[run_ind+need_pages-1-map_bias].bits &
331 CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
330 arena_mapbits_small_set(chunk, run_ind+need_pages-1,
331 need_pages-1, binind, arena_mapbits_unzeroed_get(chunk,
332 run_ind+need_pages-1) | flag_dirty);
332 if (config_debug && flag_dirty == 0 &&
333 if (config_debug && flag_dirty == 0 &&
333 (chunk->map[run_ind+need_pages-1-map_bias].bits &
334 CHUNK_MAP_UNZEROED) == 0) {
334 arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
335 0) {
335 arena_chunk_validate_zeroed(chunk,
336 run_ind+need_pages-1);
337 }
338 }
339}
340
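/*
 * Illustrative sketch, not part of arena.c: in the new arena_run_split()
 * above, small runs are initialized with arena_mapbits_small_set(), which
 * packs the per-page run index, the bin index, and the caller's dirty/unzeroed
 * flags into a single map word.  The shape below is inferred from the raw
 * stores it replaces; CHUNK_MAP_BININD_SHIFT is a placeholder for the
 * bin-index field, whose real layout is defined in the arena header.
 */
static inline void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
    size_t binind, size_t flags)
{

	/* runind reuses the position that large mappings use for their size. */
	arena_mapp_get(chunk, pageind)->bits = (runind << LG_PAGE) |
	    (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED;
}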
341static arena_chunk_t *
342arena_chunk_alloc(arena_t *arena)
343{
344 arena_chunk_t *chunk;
345 size_t i;
346
347 if (arena->spare != NULL) {
348 arena_avail_tree_t *runs_avail;
349
350 chunk = arena->spare;
351 arena->spare = NULL;
352
353 /* Insert the run into the appropriate runs_avail_* tree. */
354 if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
355 if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
355 runs_avail = &arena->runs_avail_clean;
356 else
357 runs_avail = &arena->runs_avail_dirty;
358 assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass);
359 assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK)
360 == arena_maxclass);
361 assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) ==
362 (chunk->map[chunk_npages-1-map_bias].bits &
363 CHUNK_MAP_DIRTY));
364 arena_avail_tree_insert(runs_avail, &chunk->map[0]);
359 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
360 arena_maxclass);
361 assert(arena_mapbits_unallocated_size_get(chunk,
362 chunk_npages-1) == arena_maxclass);
363 assert(arena_mapbits_dirty_get(chunk, map_bias) ==
364 arena_mapbits_dirty_get(chunk, chunk_npages-1));
365 arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
366 map_bias));
365 } else {
366 bool zero;
367 size_t unzeroed;
368
369 zero = false;
370 malloc_mutex_unlock(&arena->lock);
371 chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
372 false, &zero);

--- 14 unchanged lines hidden ---

387 chunk->ndirty = 0;
388
389 /*
390 * Initialize the map to contain one maximal free untouched run.
391 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
392 * chunk.
393 */
394 unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
395 chunk->map[0].bits = arena_maxclass | unzeroed;
397 arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
398 unzeroed);
396 /*
397 * There is no need to initialize the internal page map entries
398 * unless the chunk is not zeroed.
399 */
400 if (zero == false) {
401 for (i = map_bias+1; i < chunk_npages-1; i++)
402 chunk->map[i-map_bias].bits = unzeroed;
405 arena_mapbits_unzeroed_set(chunk, i, unzeroed);
403 } else if (config_debug) {
406 } else if (config_debug) {
404 for (i = map_bias+1; i < chunk_npages-1; i++)
405 assert(chunk->map[i-map_bias].bits == unzeroed);
407 for (i = map_bias+1; i < chunk_npages-1; i++) {
408 assert(arena_mapbits_unzeroed_get(chunk, i) ==
409 unzeroed);
410 }
406 }
411 }
407 chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
408 unzeroed;
412 arena_mapbits_unallocated_set(chunk, chunk_npages-1,
413 arena_maxclass, unzeroed);
409
410 /* Insert the run into the runs_avail_clean tree. */
411 arena_avail_tree_insert(&arena->runs_avail_clean,
412 &chunk->map[0]);
417 arena_mapp_get(chunk, map_bias));
413 }
414
415 return (chunk);
416}
417
418static void
419arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
420{
421 arena_avail_tree_t *runs_avail;
422
423 /*
424 * Remove run from the appropriate runs_avail_* tree, so that the arena
425 * does not use it.
426 */
427 if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
432 if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
428 runs_avail = &arena->runs_avail_clean;
429 else
430 runs_avail = &arena->runs_avail_dirty;
431 arena_avail_tree_remove(runs_avail, &chunk->map[0]);
436 arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, map_bias));
432
433 if (arena->spare != NULL) {
434 arena_chunk_t *spare = arena->spare;
435
436 arena->spare = chunk;
437 if (spare->dirtied) {
438 ql_remove(&chunk->arena->chunks_dirty, spare,
439 link_dirty);

--- 4 unchanged lines hidden ---

444 malloc_mutex_lock(&arena->lock);
445 if (config_stats)
446 arena->stats.mapped -= chunksize;
447 } else
448 arena->spare = chunk;
449}
450
451static arena_run_t *
452arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
457arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
458 bool zero)
453{
454 arena_chunk_t *chunk;
455 arena_run_t *run;
456 arena_chunk_map_t *mapelm, key;
457
458 assert(size <= arena_maxclass);
459 assert((size & PAGE_MASK) == 0);
466 assert((large && binind == BININD_INVALID) || (large == false && binind
467 != BININD_INVALID));
460
461 /* Search the arena's chunks for the lowest best fit. */
462 key.bits = size | CHUNK_MAP_KEY;
463 mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
464 if (mapelm != NULL) {
465 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
466 size_t pageind = (((uintptr_t)mapelm -
467 (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
468 + map_bias;
469
470 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
471 LG_PAGE));
472 arena_run_split(arena, run, size, large, zero);
480 arena_run_split(arena, run, size, large, binind, zero);
473 return (run);
474 }
475 mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
476 if (mapelm != NULL) {
477 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
478 size_t pageind = (((uintptr_t)mapelm -
479 (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
480 + map_bias;
481
482 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
483 LG_PAGE));
484 arena_run_split(arena, run, size, large, zero);
492 arena_run_split(arena, run, size, large, binind, zero);
485 return (run);
486 }
487
488 /*
489 * No usable runs. Create a new chunk from which to allocate the run.
490 */
491 chunk = arena_chunk_alloc(arena);
492 if (chunk != NULL) {
493 run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
494 arena_run_split(arena, run, size, large, zero);
502 arena_run_split(arena, run, size, large, binind, zero);
495 return (run);
496 }
497
498 /*
499 * arena_chunk_alloc() failed, but another thread may have made
500 * sufficient memory available while this one dropped arena->lock in
501 * arena_chunk_alloc(), so search one more time.
502 */
503 mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
504 if (mapelm != NULL) {
505 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
506 size_t pageind = (((uintptr_t)mapelm -
507 (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
508 + map_bias;
509
510 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
511 LG_PAGE));
512 arena_run_split(arena, run, size, large, zero);
520 arena_run_split(arena, run, size, large, binind, zero);
513 return (run);
514 }
515 mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
516 if (mapelm != NULL) {
517 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
518 size_t pageind = (((uintptr_t)mapelm -
519 (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
520 + map_bias;
521
522 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
523 LG_PAGE));
524 arena_run_split(arena, run, size, large, zero);
532 arena_run_split(arena, run, size, large, binind, zero);
525 return (run);
526 }
527
528 return (NULL);
529}
530
531static inline void
532arena_maybe_purge(arena_t *arena)

--- 41 unchanged lines hidden ---

574 * Note that once a chunk contains dirty pages, it cannot again contain
575 * a single run unless 1) it is a dirty run, or 2) this function purges
576 * dirty pages and causes the transition to a single clean run. Thus
577 * (chunk == arena->spare) is possible, but it is not possible for
578 * this function to be called on the spare unless it contains a dirty
579 * run.
580 */
581 if (chunk == arena->spare) {
582 assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0);
590 assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
583 arena_chunk_alloc(arena);
584 }
585
586 /* Temporarily allocate all free dirty runs within chunk. */
587 for (pageind = map_bias; pageind < chunk_npages;) {
588 mapelm = &chunk->map[pageind-map_bias];
589 if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) {
596 mapelm = arena_mapp_get(chunk, pageind);
597 if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
590 size_t npages;
591
592 npages = mapelm->bits >> LG_PAGE;
600 npages = arena_mapbits_unallocated_size_get(chunk,
601 pageind) >> LG_PAGE;
593 assert(pageind + npages <= chunk_npages);
602 assert(pageind + npages <= chunk_npages);
594 if (mapelm->bits & CHUNK_MAP_DIRTY) {
603 if (arena_mapbits_dirty_get(chunk, pageind)) {
595 size_t i;
596
597 arena_avail_tree_remove(
598 &arena->runs_avail_dirty, mapelm);
599
600 mapelm->bits = (npages << LG_PAGE) |
601 flag_unzeroed | CHUNK_MAP_LARGE |
602 CHUNK_MAP_ALLOCATED;
609 arena_mapbits_large_set(chunk, pageind,
610 (npages << LG_PAGE), flag_unzeroed);
603 /*
604 * Update internal elements in the page map, so
605 * that CHUNK_MAP_UNZEROED is properly set.
606 */
607 for (i = 1; i < npages - 1; i++) {
608 chunk->map[pageind+i-map_bias].bits =
609 flag_unzeroed;
616 arena_mapbits_unzeroed_set(chunk,
617 pageind+i, flag_unzeroed);
610 }
611 if (npages > 1) {
612 chunk->map[
613 pageind+npages-1-map_bias].bits =
614 flag_unzeroed | CHUNK_MAP_LARGE |
615 CHUNK_MAP_ALLOCATED;
620 arena_mapbits_large_set(chunk,
621 pageind+npages-1, 0, flag_unzeroed);
616 }
617
618 if (config_stats) {
619 /*
620 * Update stats_cactive if nactive is
621 * crossing a chunk multiple.
622 */
623 size_t cactive_diff =

--- 8 unchanged lines hidden ---

632 /* Append to list for later processing. */
633 ql_elm_new(mapelm, u.ql_link);
634 ql_tail_insert(&mapelms, mapelm, u.ql_link);
635 }
636
637 pageind += npages;
638 } else {
639 /* Skip allocated run. */
640 if (mapelm->bits & CHUNK_MAP_LARGE)
641 pageind += mapelm->bits >> LG_PAGE;
646 if (arena_mapbits_large_get(chunk, pageind))
647 pageind += arena_mapbits_large_size_get(chunk,
648 pageind) >> LG_PAGE;
642 else {
649 else {
650 size_t binind;
651 arena_bin_info_t *bin_info;
643 arena_run_t *run = (arena_run_t *)((uintptr_t)
644 chunk + (uintptr_t)(pageind << LG_PAGE));
645
646 assert((mapelm->bits >> LG_PAGE) == 0);
647 size_t binind = arena_bin_index(arena,
648 run->bin);
649 arena_bin_info_t *bin_info =
650 &arena_bin_info[binind];
655 assert(arena_mapbits_small_runind_get(chunk,
656 pageind) == 0);
657 binind = arena_bin_index(arena, run->bin);
658 bin_info = &arena_bin_info[binind];
651 pageind += bin_info->run_size >> LG_PAGE;
652 }
653 }
654 }
655 assert(pageind == chunk_npages);
656
657 if (config_debug)
658 ndirty = chunk->ndirty;

--- 5 unchanged lines hidden ---

664 chunk->dirtied = false;
665
666 malloc_mutex_unlock(&arena->lock);
667 if (config_stats)
668 nmadvise = 0;
669 ql_foreach(mapelm, &mapelms, u.ql_link) {
670 size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
671 sizeof(arena_chunk_map_t)) + map_bias;
672 size_t npages = mapelm->bits >> LG_PAGE;
680 size_t npages = arena_mapbits_large_size_get(chunk, pageind) >>
681 LG_PAGE;
673
674 assert(pageind + npages <= chunk_npages);
675 assert(ndirty >= npages);
676 if (config_debug)
677 ndirty -= npages;
678
679 pages_purge((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
680 (npages << LG_PAGE));

--- 120 unchanged lines hidden ---

801 arena_chunk_t *chunk;
802 size_t size, run_ind, run_pages, flag_dirty;
803 arena_avail_tree_t *runs_avail;
804
805 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
806 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
807 assert(run_ind >= map_bias);
808 assert(run_ind < chunk_npages);
809 if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) {
810 size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK;
818 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
819 size = arena_mapbits_large_size_get(chunk, run_ind);
811 assert(size == PAGE ||
820 assert(size == PAGE ||
812 (chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
813 ~PAGE_MASK) == 0);
814 assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
815 CHUNK_MAP_LARGE) != 0);
816 assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
817 CHUNK_MAP_ALLOCATED) != 0);
821 arena_mapbits_large_size_get(chunk,
822 run_ind+(size>>LG_PAGE)-1) == 0);
818 } else {
819 size_t binind = arena_bin_index(arena, run->bin);
820 arena_bin_info_t *bin_info = &arena_bin_info[binind];
821 size = bin_info->run_size;
822 }
823 run_pages = (size >> LG_PAGE);
824 if (config_stats) {
825 /*

--- 6 unchanged lines hidden ---

832 stats_cactive_sub(cactive_diff);
833 }
834 arena->nactive -= run_pages;
835
836 /*
837 * The run is dirty if the caller claims to have dirtied it, as well as
838 * if it was already dirty before being allocated.
839 */
840 if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0)
845 if (arena_mapbits_dirty_get(chunk, run_ind) != 0)
841 dirty = true;
842 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
843 runs_avail = dirty ? &arena->runs_avail_dirty :
844 &arena->runs_avail_clean;
845
846 /* Mark pages as unallocated in the chunk map. */
847 if (dirty) {
848 chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY;
849 chunk->map[run_ind+run_pages-1-map_bias].bits = size |
850 CHUNK_MAP_DIRTY;
853 arena_mapbits_unallocated_set(chunk, run_ind, size,
854 CHUNK_MAP_DIRTY);
855 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
856 CHUNK_MAP_DIRTY);
851
852 chunk->ndirty += run_pages;
853 arena->ndirty += run_pages;
854 } else {
855 chunk->map[run_ind-map_bias].bits = size |
856 (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED);
857 chunk->map[run_ind+run_pages-1-map_bias].bits = size |
858 (chunk->map[run_ind+run_pages-1-map_bias].bits &
859 CHUNK_MAP_UNZEROED);
861 arena_mapbits_unallocated_set(chunk, run_ind, size,
862 arena_mapbits_unzeroed_get(chunk, run_ind));
863 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
864 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
860 }
861
862 /* Try to coalesce forward. */
863 if (run_ind + run_pages < chunk_npages &&
864 (chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED)
865 == 0 && (chunk->map[run_ind+run_pages-map_bias].bits &
866 CHUNK_MAP_DIRTY) == flag_dirty) {
867 size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits &
868 ~PAGE_MASK;
869 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
870 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
871 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
872 run_ind+run_pages);
869 size_t nrun_pages = nrun_size >> LG_PAGE;
870
871 /*
872 * Remove successor from runs_avail; the coalesced run is
873 * inserted later.
874 */
875 assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
876 & ~PAGE_MASK) == nrun_size);
877 assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
878 & CHUNK_MAP_ALLOCATED) == 0);
879 assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
880 & CHUNK_MAP_DIRTY) == flag_dirty);
879 assert(arena_mapbits_unallocated_size_get(chunk,
880 run_ind+run_pages+nrun_pages-1) == nrun_size);
881 assert(arena_mapbits_dirty_get(chunk,
882 run_ind+run_pages+nrun_pages-1) == flag_dirty);
881 arena_avail_tree_remove(runs_avail,
883 arena_avail_tree_remove(runs_avail,
882 &chunk->map[run_ind+run_pages-map_bias]);
884 arena_mapp_get(chunk, run_ind+run_pages));
883
884 size += nrun_size;
885 run_pages += nrun_pages;
886
887 chunk->map[run_ind-map_bias].bits = size |
888 (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
889 chunk->map[run_ind+run_pages-1-map_bias].bits = size |
890 (chunk->map[run_ind+run_pages-1-map_bias].bits &
891 CHUNK_MAP_FLAGS_MASK);
889 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
890 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
891 size);
892 }
893
894 /* Try to coalesce backward. */
895 if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits &
896 CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits &
897 CHUNK_MAP_DIRTY) == flag_dirty) {
898 size_t prun_size = chunk->map[run_ind-1-map_bias].bits &
899 ~PAGE_MASK;
895 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
896 == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
897 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
898 run_ind-1);
900 size_t prun_pages = prun_size >> LG_PAGE;
901
902 run_ind -= prun_pages;
903
904 /*
905 * Remove predecessor from runs_avail; the coalesced run is
906 * inserted later.
907 */
908 assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK)
909 == prun_size);
910 assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED)
911 == 0);
912 assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY)
913 == flag_dirty);
914 arena_avail_tree_remove(runs_avail,
915 &chunk->map[run_ind-map_bias]);
907 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
908 prun_size);
909 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
910 arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk,
911 run_ind));
916
917 size += prun_size;
918 run_pages += prun_pages;
919
920 chunk->map[run_ind-map_bias].bits = size |
921 (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
922 chunk->map[run_ind+run_pages-1-map_bias].bits = size |
923 (chunk->map[run_ind+run_pages-1-map_bias].bits &
924 CHUNK_MAP_FLAGS_MASK);
916 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
917 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
918 size);
925 }
926
927 /* Insert into runs_avail, now that coalescing is complete. */
928 assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) ==
929 (chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK));
930 assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) ==
931 (chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY));
932 arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]);
922 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
923 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
924 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
925 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
926 arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk, run_ind));
933
934 if (dirty) {
935 /*
936 * Insert into chunks_dirty before potentially calling
937 * arena_chunk_dealloc(), so that chunks_dirty and
938 * arena->ndirty are consistent.
939 */
940 if (chunk->dirtied == false) {
941 ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
942 chunk->dirtied = true;
943 }
944 }
945
946 /*
947 * Deallocate chunk if it is now completely unused. The bit
948 * manipulation checks whether the first run is unallocated and extends
949 * to the end of the chunk.
950 */
951 if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
952 arena_maxclass)
940 /* Deallocate chunk if it is now completely unused. */
941 if (size == arena_maxclass) {
942 assert(run_ind == map_bias);
943 assert(run_pages == (arena_maxclass >> LG_PAGE));
944 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
945 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
946 arena_maxclass);
953 arena_chunk_dealloc(arena, chunk);
947 arena_chunk_dealloc(arena, chunk);
948 }
954
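/*
 * Note on the change above: the old one-expression test
 * (chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) == arena_maxclass
 * checked both that the first map entry is unallocated and that its size field
 * spans the whole usable chunk.  After the coalescing performed earlier in
 * this function, the freed run's size equals arena_maxclass exactly in that
 * situation, so the new code can test size == arena_maxclass and demote the
 * stronger conditions to assertions.
 */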
955 /*
956 * It is okay to do dirty page processing here even if the chunk was
957 * deallocated above, since in that case it is the spare. Waiting
958 * until after possible chunk deallocation to do dirty processing
959 * allows for an old spare to be fully deallocated, thus decreasing the
960 * chances of spuriously crossing the dirty page purging threshold.
961 */
962 if (dirty)
963 arena_maybe_purge(arena);
964}
965
966static void
967arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
968 size_t oldsize, size_t newsize)
969{
970 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
971 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
972 size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY;
967 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
973
974 assert(oldsize > newsize);
975
976 /*
977 * Update the chunk map so that arena_run_dalloc() can treat the
978 * leading run as separately allocated. Set the last element of each
979 * run first, in case of single-page runs.
980 */
981 assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
982 assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
983 chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
984 (chunk->map[pageind+head_npages-1-map_bias].bits &
985 CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
986 chunk->map[pageind-map_bias].bits = (oldsize - newsize)
987 | flag_dirty | (chunk->map[pageind-map_bias].bits &
988 CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
976 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
977 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
978 arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1));
979 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
980 arena_mapbits_unzeroed_get(chunk, pageind));
989
990 if (config_debug) {
991 UNUSED size_t tail_npages = newsize >> LG_PAGE;
992 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
993 .bits & ~PAGE_MASK) == 0);
994 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
995 .bits & CHUNK_MAP_DIRTY) == flag_dirty);
996 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
997 .bits & CHUNK_MAP_LARGE) != 0);
998 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
999 .bits & CHUNK_MAP_ALLOCATED) != 0);
984 assert(arena_mapbits_large_size_get(chunk,
985 pageind+head_npages+tail_npages-1) == 0);
986 assert(arena_mapbits_dirty_get(chunk,
987 pageind+head_npages+tail_npages-1) == flag_dirty);
1000 }
988 }
1001 chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
1002 (chunk->map[pageind+head_npages-map_bias].bits &
1003 CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
989 arena_mapbits_large_set(chunk, pageind+head_npages, newsize, flag_dirty
990 | arena_mapbits_unzeroed_get(chunk, pageind+head_npages));
1004
1005 arena_run_dalloc(arena, run, false);
1006}
1007
1008static void
1009arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1010 size_t oldsize, size_t newsize, bool dirty)
1011{
1012 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1013 size_t head_npages = newsize >> LG_PAGE;
1014 size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
1015 size_t flag_dirty = chunk->map[pageind-map_bias].bits &
1016 CHUNK_MAP_DIRTY;
1001 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
1017
1018 assert(oldsize > newsize);
1019
1020 /*
1021 * Update the chunk map so that arena_run_dalloc() can treat the
1022 * trailing run as separately allocated. Set the last element of each
1023 * run first, in case of single-page runs.
1024 */
1025 assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
1026 assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
1027 chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
1028 (chunk->map[pageind+head_npages-1-map_bias].bits &
1029 CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1030 chunk->map[pageind-map_bias].bits = newsize | flag_dirty |
1031 (chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) |
1032 CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1010 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
1011 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
1012 arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1));
1013 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
1014 arena_mapbits_unzeroed_get(chunk, pageind));
1033
1015
1034 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
1035 ~PAGE_MASK) == 0);
1036 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
1037 CHUNK_MAP_LARGE) != 0);
1038 assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
1039 CHUNK_MAP_ALLOCATED) != 0);
1040 chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits =
1041 flag_dirty |
1042 (chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
1043 CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1044 chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) |
1045 flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits &
1046 CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1016 if (config_debug) {
1017 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
1018 assert(arena_mapbits_large_size_get(chunk,
1019 pageind+head_npages+tail_npages-1) == 0);
1020 assert(arena_mapbits_dirty_get(chunk,
1021 pageind+head_npages+tail_npages-1) == flag_dirty);
1022 }
1023 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
1024 flag_dirty | arena_mapbits_unzeroed_get(chunk,
1025 pageind+head_npages));
1047
1048 arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
1049 dirty);
1050}
1051
1052static arena_run_t *
1053arena_bin_runs_first(arena_bin_t *bin)
1054{
1055 arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
1056 if (mapelm != NULL) {
1057 arena_chunk_t *chunk;
1058 size_t pageind;
1038 arena_run_t *run;
1059
1060 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
1061 pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
1062 sizeof(arena_chunk_map_t))) + map_bias;
1063 arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
1064 (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
1043 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1044 arena_mapbits_small_runind_get(chunk, pageind)) <<
1065 LG_PAGE));
1066 return (run);
1067 }
1068
1069 return (NULL);
1070}
1071
1072static void
1073arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
1074{
1075 arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
1076 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1077 arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
1057 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
1078
1079 assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
1080
1081 arena_run_tree_insert(&bin->runs, mapelm);
1082}
1083
1084static void
1085arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
1086{
1087 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1088 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1089 arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
1069 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
1090
1091 assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
1092
1093 arena_run_tree_remove(&bin->runs, mapelm);
1094}
1095
1096static arena_run_t *
1097arena_bin_nonfull_run_tryget(arena_bin_t *bin)

--- 22 unchanged lines hidden ---

1120
1121 binind = arena_bin_index(arena, bin);
1122 bin_info = &arena_bin_info[binind];
1123
1124 /* Allocate a new run. */
1125 malloc_mutex_unlock(&bin->lock);
1126 /******************************/
1127 malloc_mutex_lock(&arena->lock);
1128 run = arena_run_alloc(arena, bin_info->run_size, false, false);
1108 run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
1129 if (run != NULL) {
1130 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
1131 (uintptr_t)bin_info->bitmap_offset);
1132
1133 /* Initialize run internals. */
1114 VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
1115 bin_info->redzone_size);
1134 run->bin = bin;
1135 run->nextind = 0;
1136 run->nfree = bin_info->nregs;
1137 bitmap_init(bitmap, &bin_info->bitmap_info);
1138 }
1139 malloc_mutex_unlock(&arena->lock);
1140 /********************************/
1141 malloc_mutex_lock(&bin->lock);

--- 234 unchanged lines hidden ---

1376void *
1377arena_malloc_large(arena_t *arena, size_t size, bool zero)
1378{
1379 void *ret;
1380
1381 /* Large allocation. */
1382 size = PAGE_CEILING(size);
1383 malloc_mutex_lock(&arena->lock);
1384 ret = (void *)arena_run_alloc(arena, size, true, zero);
1366 ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
1385 if (ret == NULL) {
1386 malloc_mutex_unlock(&arena->lock);
1387 return (NULL);
1388 }
1389 if (config_stats) {
1390 arena->stats.nmalloc_large++;
1391 arena->stats.nrequests_large++;
1392 arena->stats.allocated_large += size;

--- 27 unchanged lines hidden ---

1420 arena_chunk_t *chunk;
1421
1422 assert((size & PAGE_MASK) == 0);
1423
1424 alignment = PAGE_CEILING(alignment);
1425 alloc_size = size + alignment - PAGE;
1426
1427 malloc_mutex_lock(&arena->lock);
1428 run = arena_run_alloc(arena, alloc_size, true, zero);
1410 run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
1429 if (run == NULL) {
1430 malloc_mutex_unlock(&arena->lock);
1431 return (NULL);
1432 }
1433 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1434
1435 leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
1436 (uintptr_t)run;

--- 40 unchanged lines hidden ---

1477 assert(isalloc(ptr, false) == PAGE);
1478 assert(isalloc(ptr, true) == PAGE);
1479 assert(size <= SMALL_MAXCLASS);
1480
1481 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1482 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1483 binind = SMALL_SIZE2BIN(size);
1484 assert(binind < NBINS);
1485 chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
1486 ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);
1467 arena_mapbits_large_binind_set(chunk, pageind, binind);
1487
1488 assert(isalloc(ptr, false) == PAGE);
1489 assert(isalloc(ptr, true) == size);
1490}
1491
1492static void
1493arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
1494 arena_bin_t *bin)

--- 21 unchanged lines hidden (view full) ---

1516arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1517 arena_bin_t *bin)
1518{
1519 size_t binind;
1520 arena_bin_info_t *bin_info;
1521 size_t npages, run_ind, past;
1522
1523 assert(run != bin->runcur);
1468
1469 assert(isalloc(ptr, false) == PAGE);
1470 assert(isalloc(ptr, true) == size);
1471}
1472
1473static void
1474arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
1475 arena_bin_t *bin)
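
The recurring change in these hunks is that open-coded mask-and-shift operations on chunk->map[...].bits (CHUNK_MAP_CLASS_MASK, CHUNK_MAP_CLASS_SHIFT, and friends) are replaced by arena_mapbits_*_get()/_set() accessors that own the encoding. The sketch below shows the general packed-word accessor pattern; the field layout and names are invented for illustration and do not reflect jemalloc's actual map encoding.

    #include <assert.h>
    #include <stddef.h>

    #define MY_MAP_BININD_SHIFT 4
    #define MY_MAP_BININD_MASK ((size_t)0xff << MY_MAP_BININD_SHIFT)

    static size_t
    my_mapbits_binind_get(size_t mapbits)
    {
        return ((mapbits & MY_MAP_BININD_MASK) >> MY_MAP_BININD_SHIFT);
    }

    static size_t
    my_mapbits_binind_set(size_t mapbits, size_t binind)
    {
        assert(binind <= 0xff);
        return ((mapbits & ~MY_MAP_BININD_MASK) |
            (binind << MY_MAP_BININD_SHIFT));
    }
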

--- 21 unchanged lines hidden (view full) ---

1497arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1498 arena_bin_t *bin)
1499{
1500 size_t binind;
1501 arena_bin_info_t *bin_info;
1502 size_t npages, run_ind, past;
1503
1504 assert(run != bin->runcur);
1524 assert(arena_run_tree_search(&bin->runs, &chunk->map[
1525 (((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)-map_bias]) == NULL);
1505 assert(arena_run_tree_search(&bin->runs,
1506 arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
1507 == NULL);
1526
1527 binind = arena_bin_index(chunk->arena, run->bin);
1528 bin_info = &arena_bin_info[binind];
1529
1530 malloc_mutex_unlock(&bin->lock);
1531 /******************************/
1532 npages = bin_info->run_size >> LG_PAGE;
1533 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
1534 past = (size_t)(PAGE_CEILING((uintptr_t)run +
1535 (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
1536 bin_info->reg_interval - bin_info->redzone_size) -
1537 (uintptr_t)chunk) >> LG_PAGE);
1538 malloc_mutex_lock(&arena->lock);
1539
1540 /*
1541 * If the run was originally clean, and some pages were never touched,
1542 * trim the clean pages before deallocating the dirty portion of the
1543 * run.
1544 */
1508
1509 binind = arena_bin_index(chunk->arena, run->bin);
1510 bin_info = &arena_bin_info[binind];
1511
1512 malloc_mutex_unlock(&bin->lock);
1513 /******************************/
1514 npages = bin_info->run_size >> LG_PAGE;
1515 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
1516 past = (size_t)(PAGE_CEILING((uintptr_t)run +
1517 (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
1518 bin_info->reg_interval - bin_info->redzone_size) -
1519 (uintptr_t)chunk) >> LG_PAGE);
1520 malloc_mutex_lock(&arena->lock);
1521
1522 /*
1523 * If the run was originally clean, and some pages were never touched,
1524 * trim the clean pages before deallocating the dirty portion of the
1525 * run.
1526 */
1545 if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past
1546 - run_ind < npages) {
1527 if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
1528 npages) {
1547 /*
1548 * Trim clean pages. Convert to large run beforehand. Set the
1549 * last map element first, in case this is a one-page run.
1550 */
1529 /*
1530 * Trim clean pages. Convert to large run beforehand. Set the
1531 * last map element first, in case this is a one-page run.
1532 */
1551 chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE |
1552 (chunk->map[run_ind+npages-1-map_bias].bits &
1553 CHUNK_MAP_FLAGS_MASK);
1554 chunk->map[run_ind-map_bias].bits = bin_info->run_size |
1555 CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits &
1556 CHUNK_MAP_FLAGS_MASK);
1533 arena_mapbits_large_set(chunk, run_ind+npages-1, 0,
1534 arena_mapbits_unzeroed_get(chunk, run_ind+npages-1));
1535 arena_mapbits_large_set(chunk, run_ind, bin_info->run_size,
1536 arena_mapbits_unzeroed_get(chunk, run_ind));
1557 arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
1558 ((past - run_ind) << LG_PAGE), false);
1559 /* npages = past - run_ind; */
1560 }
1561 arena_run_dalloc(arena, run, true);
1562 malloc_mutex_unlock(&arena->lock);
1563 /****************************/
1564 malloc_mutex_lock(&bin->lock);

--- 18 unchanged lines hidden (view full) ---

1583 bin->runcur = run;
1584 if (config_stats)
1585 bin->stats.reruns++;
1586 } else
1587 arena_bin_runs_insert(bin, run);
1588}
1589
1590void
1537 arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
1538 ((past - run_ind) << LG_PAGE), false);
1539 /* npages = past - run_ind; */
1540 }
1541 arena_run_dalloc(arena, run, true);
1542 malloc_mutex_unlock(&arena->lock);
1543 /****************************/
1544 malloc_mutex_lock(&bin->lock);
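
arena_dalloc_bin_run() computes past, the first page beyond the highest-addressed region the run ever handed out (run->nextind), and trims never-touched clean pages before freeing the dirty portion. A rough sketch of that page arithmetic; it ignores the redzone adjustment present in the real code, and the names and constants are illustrative:

    #include <stddef.h>

    #define MY_LG_PAGE 12
    #define MY_PAGE ((size_t)1 << MY_LG_PAGE)

    static size_t
    my_dirty_npages(size_t reg0_offset, size_t reg_interval, size_t nextind)
    {
        /* Bytes from the start of the run ever touched by allocations. */
        size_t touched = reg0_offset + nextind * reg_interval;

        /* Round up to whole pages: only these pages can be dirty. */
        return ((touched + MY_PAGE - 1) >> MY_LG_PAGE);
    }
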

--- 18 unchanged lines hidden (view full) ---

1563 bin->runcur = run;
1564 if (config_stats)
1565 bin->stats.reruns++;
1566 } else
1567 arena_bin_runs_insert(bin, run);
1568}
1569
1570void
1591arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1571arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1592 arena_chunk_map_t *mapelm)
1593{
1594 size_t pageind;
1595 arena_run_t *run;
1596 arena_bin_t *bin;
1572 arena_chunk_map_t *mapelm)
1573{
1574 size_t pageind;
1575 arena_run_t *run;
1576 arena_bin_t *bin;
1597 size_t size;
1577 arena_bin_info_t *bin_info;
1578 size_t size, binind;
1598
1599 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1600 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1579
1580 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1581 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1601 (mapelm->bits >> LG_PAGE)) << LG_PAGE));
1582 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1602 bin = run->bin;
1583 bin = run->bin;
1603 size_t binind = arena_bin_index(arena, bin);
1604 arena_bin_info_t *bin_info = &arena_bin_info[binind];
1584 binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
1585 bin_info = &arena_bin_info[binind];
1605 if (config_fill || config_stats)
1606 size = bin_info->reg_size;
1607
1608 if (config_fill && opt_junk)
1609 arena_dalloc_junk_small(ptr, bin_info);
1610
1611 arena_run_reg_dalloc(run, ptr);
1612 if (run->nfree == bin_info->nregs) {

--- 4 unchanged lines hidden (view full) ---

1617
1618 if (config_stats) {
1619 bin->stats.allocated -= size;
1620 bin->stats.ndalloc++;
1621 }
1622}
1623
1624void
1586 if (config_fill || config_stats)
1587 size = bin_info->reg_size;
1588
1589 if (config_fill && opt_junk)
1590 arena_dalloc_junk_small(ptr, bin_info);
1591
1592 arena_run_reg_dalloc(run, ptr);
1593 if (run->nfree == bin_info->nregs) {

--- 4 unchanged lines hidden (view full) ---

1598
1599 if (config_stats) {
1600 bin->stats.allocated -= size;
1601 bin->stats.ndalloc++;
1602 }
1603}
1604
1605void
1606arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1607 size_t pageind, arena_chunk_map_t *mapelm)
1608{
1609 arena_run_t *run;
1610 arena_bin_t *bin;
1611
1612 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1613 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1614 bin = run->bin;
1615 malloc_mutex_lock(&bin->lock);
1616 arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
1617 malloc_mutex_unlock(&bin->lock);
1618}
1619
1620void
1621arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1622 size_t pageind)
1623{
1624 arena_chunk_map_t *mapelm;
1625
1626 if (config_debug) {
1627 /* arena_ptr_small_binind_get() does extra sanity checking. */
1628 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
1629 pageind)) != BININD_INVALID);
1630 }
1631 mapelm = arena_mapp_get(chunk, pageind);
1632 arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
1633}
1634void
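
The deallocation path is split here into arena_dalloc_bin_locked(), which assumes the caller already holds the bin lock, plus thin wrappers (arena_dalloc_bin(), arena_dalloc_small()) that acquire it. A generic sketch of that _locked/wrapper pattern using pthreads; the types are illustrative, not jemalloc's:

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t lock;
        long nfree;
    } my_bin_t;

    static void
    my_bin_dalloc_locked(my_bin_t *bin)
    {
        /* Caller must hold bin->lock. */
        bin->nfree++;
    }

    static void
    my_bin_dalloc(my_bin_t *bin)
    {
        pthread_mutex_lock(&bin->lock);
        my_bin_dalloc_locked(bin);
        pthread_mutex_unlock(&bin->lock);
    }

    /* e.g. my_bin_t bin = { PTHREAD_MUTEX_INITIALIZER, 0 }; my_bin_dalloc(&bin); */

A batched caller (such as a cache flush) can take the lock once and call the _locked variant repeatedly, which is the point of the split.
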
1625arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
1626 arena_stats_t *astats, malloc_bin_stats_t *bstats,
1627 malloc_large_stats_t *lstats)
1628{
1629 unsigned i;
1630
1631 malloc_mutex_lock(&arena->lock);
1632 *nactive += arena->nactive;

--- 31 unchanged lines hidden (view full) ---

1664 bstats[i].nruns += bin->stats.nruns;
1665 bstats[i].reruns += bin->stats.reruns;
1666 bstats[i].curruns += bin->stats.curruns;
1667 malloc_mutex_unlock(&bin->lock);
1668 }
1669}
1670
1671void
1635arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
1636 arena_stats_t *astats, malloc_bin_stats_t *bstats,
1637 malloc_large_stats_t *lstats)
1638{
1639 unsigned i;
1640
1641 malloc_mutex_lock(&arena->lock);
1642 *nactive += arena->nactive;

--- 31 unchanged lines hidden (view full) ---

1674 bstats[i].nruns += bin->stats.nruns;
1675 bstats[i].reruns += bin->stats.reruns;
1676 bstats[i].curruns += bin->stats.curruns;
1677 malloc_mutex_unlock(&bin->lock);
1678 }
1679}
1680
1681void
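
arena_stats_merge() accumulates each bin's counters into caller-supplied totals while holding that bin's lock, snapshotting one bin at a time. A minimal sketch of the pattern with illustrative types:

    #include <pthread.h>
    #include <stdint.h>

    #define MY_NBINS 4

    typedef struct {
        pthread_mutex_t lock;
        uint64_t ndalloc;
    } my_statbin_t;

    static void
    my_stats_merge(my_statbin_t *bins, uint64_t *ndalloc_totals)
    {
        unsigned i;

        for (i = 0; i < MY_NBINS; i++) {
            pthread_mutex_lock(&bins[i].lock);
            ndalloc_totals[i] += bins[i].ndalloc;
            pthread_mutex_unlock(&bins[i].lock);
        }
    }
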
1672arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1682arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1673{
1674
1675 if (config_fill || config_stats) {
1676 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1683{
1684
1685 if (config_fill || config_stats) {
1686 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1677 size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;
1687 size_t size = arena_mapbits_large_size_get(chunk, pageind);
1678
1679 if (config_fill && config_stats && opt_junk)
1680 memset(ptr, 0x5a, size);
1681 if (config_stats) {
1682 arena->stats.ndalloc_large++;
1683 arena->stats.allocated_large -= size;
1684 arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
1685 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
1686 }
1687 }
1688
1689 arena_run_dalloc(arena, (arena_run_t *)ptr, true);
1690}
1691
1688
1689 if (config_fill && config_stats && opt_junk)
1690 memset(ptr, 0x5a, size);
1691 if (config_stats) {
1692 arena->stats.ndalloc_large++;
1693 arena->stats.allocated_large -= size;
1694 arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
1695 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
1696 }
1697 }
1698
1699 arena_run_dalloc(arena, (arena_run_t *)ptr, true);
1700}
1701
1702void
1703arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1704{
1705
1706 malloc_mutex_lock(&arena->lock);
1707 arena_dalloc_large_locked(arena, chunk, ptr);
1708 malloc_mutex_unlock(&arena->lock);
1709}
1710
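
arena_dalloc_large_locked() (and the malloc path earlier) indexes arena->stats.lstats by (size >> LG_PAGE) - 1: large sizes are whole pages and at least one page, so a one-page run maps to slot 0, a two-page run to slot 1, and so on. A small sketch of that indexing with illustrative constants:

    #include <assert.h>
    #include <stddef.h>

    #define MY_LG_PAGE 12
    #define MY_PAGE ((size_t)1 << MY_LG_PAGE)

    static size_t
    my_lstats_index(size_t size)
    {
        assert(size >= MY_PAGE && (size & (MY_PAGE - 1)) == 0);
        return ((size >> MY_LG_PAGE) - 1);
    }
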
1692static void
1693arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1694 size_t oldsize, size_t size)
1695{
1696
1697 assert(size < oldsize);
1698
1699 /*

--- 22 unchanged lines hidden (view full) ---

1722static bool
1723arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1724 size_t oldsize, size_t size, size_t extra, bool zero)
1725{
1726 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1727 size_t npages = oldsize >> LG_PAGE;
1728 size_t followsize;
1729
1711static void
1712arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1713 size_t oldsize, size_t size)
1714{
1715
1716 assert(size < oldsize);
1717
1718 /*

--- 22 unchanged lines hidden (view full) ---

1741static bool
1742arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1743 size_t oldsize, size_t size, size_t extra, bool zero)
1744{
1745 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1746 size_t npages = oldsize >> LG_PAGE;
1747 size_t followsize;
1748
1730 assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK));
1749 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
1731
1732 /* Try to extend the run. */
1733 assert(size + extra > oldsize);
1734 malloc_mutex_lock(&arena->lock);
1735 if (pageind + npages < chunk_npages &&
1750
1751 /* Try to extend the run. */
1752 assert(size + extra > oldsize);
1753 malloc_mutex_lock(&arena->lock);
1754 if (pageind + npages < chunk_npages &&
1736 (chunk->map[pageind+npages-map_bias].bits
1737 & CHUNK_MAP_ALLOCATED) == 0 && (followsize =
1738 chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size -
1739 oldsize) {
1755 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
1756 (followsize = arena_mapbits_unallocated_size_get(chunk,
1757 pageind+npages)) >= size - oldsize) {
1740 /*
1741 * The next run is available and sufficiently large. Split the
1742 * following run, then merge the first part with the existing
1743 * allocation.
1744 */
1745 size_t flag_dirty;
1746 size_t splitsize = (oldsize + followsize <= size + extra)
1747 ? followsize : size + extra - oldsize;
1748 arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
1758 /*
1759 * The next run is available and sufficiently large. Split the
1760 * following run, then merge the first part with the existing
1761 * allocation.
1762 */
1763 size_t flag_dirty;
1764 size_t splitsize = (oldsize + followsize <= size + extra)
1765 ? followsize : size + extra - oldsize;
1766 arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
1749 ((pageind+npages) << LG_PAGE)), splitsize, true, zero);
1767 ((pageind+npages) << LG_PAGE)), splitsize, true,
1768 BININD_INVALID, zero);
1750
1751 size = oldsize + splitsize;
1752 npages = size >> LG_PAGE;
1753
1754 /*
1755 * Mark the extended run as dirty if either portion of the run
1756 * was dirty before allocation. This is rather pedantic,
1757 * because there's not actually any sequence of events that
1758 * could cause the resulting run to be passed to
1759 * arena_run_dalloc() with the dirty argument set to false
1760 * (which is when dirty flag consistency would really matter).
1761 */
1769
1770 size = oldsize + splitsize;
1771 npages = size >> LG_PAGE;
1772
1773 /*
1774 * Mark the extended run as dirty if either portion of the run
1775 * was dirty before allocation. This is rather pedantic,
1776 * because there's not actually any sequence of events that
1777 * could cause the resulting run to be passed to
1778 * arena_run_dalloc() with the dirty argument set to false
1779 * (which is when dirty flag consistency would really matter).
1780 */
1762 flag_dirty = (chunk->map[pageind-map_bias].bits &
1763 CHUNK_MAP_DIRTY) |
1764 (chunk->map[pageind+npages-1-map_bias].bits &
1765 CHUNK_MAP_DIRTY);
1766 chunk->map[pageind-map_bias].bits = size | flag_dirty
1767 | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1768 chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
1769 CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1781 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
1782 arena_mapbits_dirty_get(chunk, pageind+npages-1);
1783 arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
1784 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
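
arena_ralloc_large_grow() extends a large allocation in place when the immediately following run is free and big enough: it splits off either the whole following run or just enough to satisfy size + extra, then re-marks the merged run, propagating the dirty flag from both ends. The splitsize computation in isolation, with a worked example in the comments:

    #include <stddef.h>

    static size_t
    my_grow_splitsize(size_t oldsize, size_t followsize, size_t size,
        size_t extra)
    {
        /*
         * Example: oldsize = 8 pages, followsize = 4 pages, request
         * size + extra = 10 pages.  8 + 4 > 10, so only 10 - 8 = 2 pages
         * of the following run are split off; had the request been 16
         * pages, the whole 4-page run would be taken instead.
         */
        return ((oldsize + followsize <= size + extra) ?
            followsize : size + extra - oldsize);
    }
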
1770
1771 if (config_stats) {
1772 arena->stats.ndalloc_large++;
1773 arena->stats.allocated_large -= oldsize;
1785
1786 if (config_stats) {
1787 arena->stats.ndalloc_large++;
1788 arena->stats.allocated_large -= oldsize;
1774 arena->stats.lstats[(oldsize >> LG_PAGE)
1775 - 1].ndalloc++;
1776 arena->stats.lstats[(oldsize >> LG_PAGE)
1777 - 1].curruns--;
1789 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1790 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
1778
1779 arena->stats.nmalloc_large++;
1780 arena->stats.nrequests_large++;
1781 arena->stats.allocated_large += size;
1782 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1791
1792 arena->stats.nmalloc_large++;
1793 arena->stats.nrequests_large++;
1794 arena->stats.allocated_large += size;
1795 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1783 arena->stats.lstats[(size >> LG_PAGE)
1784 - 1].nrequests++;
1796 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1785 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1786 }
1787 malloc_mutex_unlock(&arena->lock);
1788 return (false);
1789 }
1790 malloc_mutex_unlock(&arena->lock);
1791
1792 return (true);

--- 126 unchanged lines hidden (view full) ---

1919
1920 /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
1921
1922 /*
1923 * Copy at most size bytes (not size+extra), since the caller has no
1924 * expectation that the extra bytes will be reliably preserved.
1925 */
1926 copysize = (size < oldsize) ? size : oldsize;
1797 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1798 }
1799 malloc_mutex_unlock(&arena->lock);
1800 return (false);
1801 }
1802 malloc_mutex_unlock(&arena->lock);
1803
1804 return (true);

--- 126 unchanged lines hidden (view full) ---

1931
1932 /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
1933
1934 /*
1935 * Copy at most size bytes (not size+extra), since the caller has no
1936 * expectation that the extra bytes will be reliably preserved.
1937 */
1938 copysize = (size < oldsize) ? size : oldsize;
1939 VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
1927 memcpy(ret, ptr, copysize);
1928 iqalloc(ptr);
1929 return (ret);
1930}
1931
1932bool
1933arena_new(arena_t *arena, unsigned ind)
1934{

--- 276 unchanged lines hidden ---
1940 memcpy(ret, ptr, copysize);
1941 iqalloc(ptr);
1942 return (ret);
1943}
1944
1945bool
1946arena_new(arena_t *arena, unsigned ind)
1947{

--- 276 unchanged lines hidden ---
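
When a large reallocation cannot be done in place, the fallback shown above allocates a new region, copies at most min(size, oldsize) bytes (the caller has no expectation that the extra bytes are preserved), and frees the old pointer; the added VALGRIND_MAKE_MEM_UNDEFINED call tells Valgrind the destination is about to be written. A sketch of that copy-and-free fallback using only standard C calls; it does not reproduce the jemalloc internals:

    #include <stdlib.h>
    #include <string.h>

    static void *
    my_ralloc_move(void *ptr, size_t oldsize, size_t size)
    {
        size_t copysize = (size < oldsize) ? size : oldsize;
        void *ret = malloc(size);

        if (ret == NULL)
            return (NULL);
        memcpy(ret, ptr, copysize);
        free(ptr);
        return (ret);
    }
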