/* tcache.c (jemalloc), revision 242844. */
#define	JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, tcache, tcache_t *, NULL)
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)

bool	opt_tcache = true;
ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t	*tcache_bin_info;
static unsigned		stack_nelms; /* Total stack elms per tcache. */

size_t			nhbins;
size_t			tcache_maxclass;

/******************************************************************************/

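/*
 * Report the usable size of a tcache-managed object.  Cached objects are
 * always arena-backed (never huge), so arena_salloc() suffices.
 */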
size_t
tcache_salloc(const void *ptr)
{

	return (arena_salloc(ptr, false));
}

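/*
 * Incremental GC, run on one bin per call: flush objects that sat idle
 * below the bin's low water mark since the last pass, and adapt the bin's
 * fill count (ncached_max >> lg_fill_div) to the observed demand.  Bins
 * are visited in round-robin order via next_gc_bin.
 */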
void
tcache_event_hard(tcache_t *tcache)
{
	size_t binind = tcache->next_gc_bin;
	tcache_bin_t *tbin = &tcache->tbins[binind];
	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
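		/*
		 * Illustrative numbers: with ncached == 20 and low_water == 8,
		 * the retained count passed below is 20 - 8 + (8 >> 2) == 14,
		 * i.e. ceil(3/4 * 8) == 6 of the 8 idle objects are flushed.
		 */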
		if (binind < NBINS) {
			tcache_bin_flush_small(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		} else {
			tcache_bin_flush_large(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		}
		/*
		 * Reduce fill count by 2X.  Limit lg_fill_div such that the
		 * fill count is always at least 1.
		 */
		if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
			tbin->lg_fill_div++;
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X.  Make sure lg_fill_div stays
		 * greater than 0.
		 */
		if (tbin->lg_fill_div > 1)
			tbin->lg_fill_div--;
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins)
		tcache->next_gc_bin = 0;
	tcache->ev_cnt = 0;
}

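/*
 * Slow path for small allocation: the bin's stack was empty, so refill it
 * from the arena via arena_tcache_fill_small() and retry the fast path.
 */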
void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
	void *ret;

	arena_tcache_fill_small(tcache->arena, tbin, binind,
	    config_prof ? tcache->prof_accumbytes : 0);
	if (config_prof)
		tcache->prof_accumbytes = 0;
	ret = tcache_alloc_easy(tbin);

	return (ret);
}

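/*
 * Flush the small bin down to rem cached objects.  Cached objects may
 * belong to arenas other than tcache->arena, so each pass of the outer
 * loop locks the bin of the arena that owns avail[0], returns every object
 * owned by that arena, and stashes the rest at the front of avail[] for a
 * later pass.
 */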
void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < NBINS);
	assert(rem <= tbin->ncached);

	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena bin associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;
		arena_bin_t *bin = &arena->bins[binind];

		if (config_prof && arena == tcache->arena) {
			malloc_mutex_lock(&arena->lock);
			arena_prof_accum(arena, tcache->prof_accumbytes);
			malloc_mutex_unlock(&arena->lock);
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(&bin->lock);
		if (config_stats && arena == tcache->arena) {
			assert(merged_stats == false);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena) {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
				arena_chunk_map_t *mapelm =
				    arena_mapp_get(chunk, pageind);
				if (config_fill && opt_junk) {
					arena_alloc_junk_small(ptr,
					    &arena_bin_info[binind], true);
				}
				arena_dalloc_bin_locked(arena, chunk, ptr,
				    mapelm);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&bin->lock);
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_bin_t *bin = &tcache->arena->bins[binind];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&bin->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

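/*
 * Large-object analogue of tcache_bin_flush_small().  Large deallocation
 * is serialized by the per-arena lock rather than a per-bin lock, and the
 * merged stats cover both the per-size-class and arena-wide large
 * counters.
 */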
void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < nhbins);
	assert(rem <= tbin->ncached);

	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;

		malloc_mutex_lock(&arena->lock);
		if ((config_prof || config_stats) && arena == tcache->arena) {
			if (config_prof) {
				arena_prof_accum(arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena->stats.nrequests_large +=
				    tbin->tstats.nrequests;
				arena->stats.lstats[binind - NBINS].nrequests +=
				    tbin->tstats.nrequests;
				tbin->tstats.nrequests = 0;
			}
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena)
				arena_dalloc_large_locked(arena, chunk, ptr);
			else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&arena->lock);
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_t *arena = tcache->arena;
		malloc_mutex_lock(&arena->lock);
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		arena->stats.lstats[binind - NBINS].nrequests +=
		    tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&arena->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

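/*
 * Bind tcache to arena.  With stats enabled, the tcache is also linked
 * into the arena's tcache list so that its counters can be merged into the
 * arena's stats.
 */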
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&arena->lock);
	}
	tcache->arena = arena;
}

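/*
 * Inverse of tcache_arena_associate(): unlink from the arena's tcache list
 * and merge any remaining cached stats.
 */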
void
tcache_arena_dissociate(tcache_t *tcache)
{

	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(&tcache->arena->lock);
		ql_remove(&tcache->arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&tcache->arena->lock);
		tcache_stats_merge(tcache, tcache->arena);
	}
}

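/*
 * Create a tcache for the calling thread.  The tcache header, the nhbins
 * tcache_bin_t headers, and all of the bins' pointer stacks are carved out
 * of a single allocation:
 *
 *   tcache_t | tbins[0 .. nhbins) | stack[0] | stack[1] | ...
 *
 * sized up to a cacheline multiple to avoid false sharing.
 */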
tcache_t *
tcache_create(arena_t *arena)
{
	tcache_t *tcache;
	size_t size, stack_offset;
	unsigned i;

	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/*
	 * Round up to the nearest multiple of the cacheline size, in order to
	 * avoid the possibility of false cacheline sharing.
	 *
	 * That this works relies on the same logic as in ipalloc(), but we
	 * cannot directly call ipalloc() here due to tcache bootstrapping
	 * issues.
	 */
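	/*
	 * Illustrative numbers: with CACHELINE == 64, a computed size of 100
	 * bytes rounds to (100 + 63) & -64 == 128.
	 */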
	size = (size + CACHELINE_MASK) & (-CACHELINE);

	if (size <= SMALL_MAXCLASS)
		tcache = (tcache_t *)arena_malloc_small(arena, size, true);
	else if (size <= tcache_maxclass)
		tcache = (tcache_t *)arena_malloc_large(arena, size, true);
	else
		tcache = (tcache_t *)icallocx(size, false, arena);

	if (tcache == NULL)
		return (NULL);

	tcache_arena_associate(tcache, arena);

	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	for (i = 0; i < nhbins; i++) {
		tcache->tbins[i].lg_fill_div = 1;
		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
		    (uintptr_t)stack_offset);
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
	}

	tcache_tsd_set(&tcache);

	return (tcache);
}

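/*
 * Tear down tcache: flush every bin, merge residual stats and profiling
 * bytes into the owning arena, then free the tcache itself through the
 * same size-class logic that tcache_create() used to allocate it.
 */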
void
tcache_destroy(tcache_t *tcache)
{
	unsigned i;
	size_t tcache_size;

	tcache_arena_dissociate(tcache);

	for (i = 0; i < NBINS; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_small(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			arena_bin_t *bin = &arena->bins[i];
			malloc_mutex_lock(&bin->lock);
			bin->stats.nrequests += tbin->tstats.nrequests;
			malloc_mutex_unlock(&bin->lock);
		}
	}

	for (; i < nhbins; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_large(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			malloc_mutex_lock(&arena->lock);
			arena->stats.nrequests_large += tbin->tstats.nrequests;
			arena->stats.lstats[i - NBINS].nrequests +=
			    tbin->tstats.nrequests;
			malloc_mutex_unlock(&arena->lock);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0) {
		malloc_mutex_lock(&tcache->arena->lock);
		arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
		malloc_mutex_unlock(&tcache->arena->lock);
	}

	tcache_size = arena_salloc(tcache, false);
	if (tcache_size <= SMALL_MAXCLASS) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;
		size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
		    LG_PAGE;
		arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

		arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
	} else if (tcache_size <= tcache_maxclass) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;

		arena_dalloc_large(arena, chunk, tcache);
	} else
		idallocx(tcache, false);
}

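/*
 * TSD destructor.  The sentinel values (TCACHE_STATE_DISABLED,
 * TCACHE_STATE_REINCARNATED, TCACHE_STATE_PURGATORY) distinguish a
 * disabled tcache from the thread-shutdown races described below; only a
 * live tcache pointer is actually destroyed.
 */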
void
tcache_thread_cleanup(void *arg)
{
	tcache_t *tcache = *(tcache_t **)arg;

	if (tcache == TCACHE_STATE_DISABLED) {
		/* Do nothing. */
	} else if (tcache == TCACHE_STATE_REINCARNATED) {
		/*
		 * Another destructor called an allocator function after this
		 * destructor was called.  Reset tcache to
		 * TCACHE_STATE_PURGATORY in order to receive another callback.
		 */
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	} else if (tcache == TCACHE_STATE_PURGATORY) {
		/*
		 * The previous time this destructor was called, we set the key
		 * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
		 * cause re-creation of the tcache.  This time, do nothing, so
		 * that the destructor will not be called again.
		 */
	} else if (tcache != NULL) {
		assert(tcache != TCACHE_STATE_PURGATORY);
		tcache_destroy(tcache);
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	}
}

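/*
 * Merge this tcache's accumulated request counts into arena's stats and
 * reset them; per-bin counts are merged under the corresponding bin locks.
 */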
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
	unsigned i;

	/* Merge and reset tcache stats. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		tcache_bin_t *tbin = &tcache->tbins[i];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(&bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
		tcache_bin_t *tbin = &tcache->tbins[i];
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		lstats->nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
	}
}

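/*
 * First-phase bootstrap: compute tcache_maxclass and nhbins, then size
 * each bin's cache limit (ncached_max), which tcache_create() later uses
 * to lay out the pointer stacks.
 */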
bool
tcache_boot0(void)
{
	unsigned i;

	/*
	 * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
	 * known.
	 */
	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
		tcache_maxclass = SMALL_MAXCLASS;
	else if ((1U << opt_lg_tcache_max) > arena_maxclass)
		tcache_maxclass = arena_maxclass;
	else
		tcache_maxclass = (1U << opt_lg_tcache_max);

	nhbins = NBINS + (tcache_maxclass >> LG_PAGE);

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
	    sizeof(tcache_bin_info_t));
	if (tcache_bin_info == NULL)
		return (true);
	stack_nelms = 0;
	for (i = 0; i < NBINS; i++) {
		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (arena_bin_info[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return (false);
}

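/* Second-phase bootstrap: initialize the tcache TSD keys. */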
bool
tcache_boot1(void)
{

	if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
		return (true);

	return (false);
}