/* ctl.c, revision 261071. */
#define	JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * ctl_mtx protects the following:
 * - ctl_stats.*
 * - opt_prof_active
 */
static malloc_mutex_t	ctl_mtx;
static bool		ctl_initialized;
static uint64_t		ctl_epoch;
static ctl_stats_t	ctl_stats;

/******************************************************************************/
/* Helpers for named and indexed nodes. */

static inline const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node)
{

	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}

static inline const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, int index)
{
	const ctl_named_node_t *children = ctl_named_node(node->children);

	return (children ? &children[index] : NULL);
}

static inline const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node)
{

	return ((node->named == false) ? (const ctl_indexed_node_t *)node :
	    NULL);
}

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

#define	CTL_PROTO(n)							\
static int	n##_ctl(const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen);

#define	INDEX_PROTO(n)							\
static const ctl_named_node_t	*n##_index(const size_t *mib,		\
    size_t miblen, size_t i);

static bool	ctl_arena_init(ctl_arena_stats_t *astats);
static void	ctl_arena_clear(ctl_arena_stats_t *astats);
static void	ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
    arena_t *arena);
static void	ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
    ctl_arena_stats_t *astats);
static void	ctl_arena_refresh(arena_t *arena, unsigned i);
static bool	ctl_grow(void);
static void	ctl_refresh(void);
static bool	ctl_init(void);
static int	ctl_lookup(const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp);

CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_debug)
CTL_PROTO(config_dss)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_mremap)
CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tls)
CTL_PROTO(config_utrace)
CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_quarantine)
CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_valgrind)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(arena_i_purge)
static void	arena_purge(unsigned arena_ind);
CTL_PROTO(arena_i_dss)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size)
INDEX_PROTO(arenas_lrun_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_purge)
CTL_PROTO(arenas_extend)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_interval)
CTL_PROTO(stats_chunks_current)
CTL_PROTO(stats_chunks_total)
CTL_PROTO(stats_chunks_high)
CTL_PROTO(stats_huge_allocated)
CTL_PROTO(stats_huge_nmalloc)
CTL_PROTO(stats_huge_ndalloc)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_allocated)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nruns)
CTL_PROTO(stats_arenas_i_bins_j_nreruns)
CTL_PROTO(stats_arenas_i_bins_j_curruns)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_mapped)

/******************************************************************************/
/* mallctl tree. */

/* Maximum tree depth. */
#define	CTL_MAX_DEPTH	6

#define	NAME(n)	{true},	n
#define	CHILD(t, c)							\
	sizeof(c##_node) / sizeof(ctl_##t##_node_t),			\
	(ctl_node_t *)c##_node,						\
	NULL
#define	CTL(c)	0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define	INDEX(i)	{false},	i##_index
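
/*
 * For reference (a rough sketch, given the ctl_named_node_t layout declared
 * in ctl.h): an entry such as {NAME("version"), CTL(version)} expands to
 * approximately
 *
 *	{{true}, "version", 0, NULL, version_ctl}
 *
 * i.e. a named leaf with no children and version_ctl() as its handler,
 * whereas CHILD(named, thread) instead fills in the child count and child
 * array for an interior node, leaving the ctl function NULL.
 */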

static const ctl_named_node_t	tcache_node[] = {
	{NAME("enabled"),	CTL(thread_tcache_enabled)},
	{NAME("flush"),		CTL(thread_tcache_flush)}
};

static const ctl_named_node_t	thread_node[] = {
	{NAME("arena"),		CTL(thread_arena)},
	{NAME("allocated"),	CTL(thread_allocated)},
	{NAME("allocatedp"),	CTL(thread_allocatedp)},
	{NAME("deallocated"),	CTL(thread_deallocated)},
	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
	{NAME("tcache"),	CHILD(named, tcache)}
};

static const ctl_named_node_t	config_node[] = {
	{NAME("debug"),			CTL(config_debug)},
	{NAME("dss"),			CTL(config_dss)},
	{NAME("fill"),			CTL(config_fill)},
	{NAME("lazy_lock"),		CTL(config_lazy_lock)},
	{NAME("mremap"),		CTL(config_mremap)},
	{NAME("munmap"),		CTL(config_munmap)},
	{NAME("prof"),			CTL(config_prof)},
	{NAME("prof_libgcc"),		CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"),	CTL(config_prof_libunwind)},
	{NAME("stats"),			CTL(config_stats)},
	{NAME("tcache"),		CTL(config_tcache)},
	{NAME("tls"),			CTL(config_tls)},
	{NAME("utrace"),		CTL(config_utrace)},
	{NAME("valgrind"),		CTL(config_valgrind)},
	{NAME("xmalloc"),		CTL(config_xmalloc)}
};

static const ctl_named_node_t opt_node[] = {
	{NAME("abort"),			CTL(opt_abort)},
	{NAME("dss"),			CTL(opt_dss)},
	{NAME("lg_chunk"),		CTL(opt_lg_chunk)},
	{NAME("narenas"),		CTL(opt_narenas)},
	{NAME("lg_dirty_mult"),		CTL(opt_lg_dirty_mult)},
	{NAME("stats_print"),		CTL(opt_stats_print)},
	{NAME("junk"),			CTL(opt_junk)},
	{NAME("zero"),			CTL(opt_zero)},
	{NAME("quarantine"),		CTL(opt_quarantine)},
	{NAME("redzone"),		CTL(opt_redzone)},
	{NAME("utrace"),		CTL(opt_utrace)},
	{NAME("valgrind"),		CTL(opt_valgrind)},
	{NAME("xmalloc"),		CTL(opt_xmalloc)},
	{NAME("tcache"),		CTL(opt_tcache)},
	{NAME("lg_tcache_max"),		CTL(opt_lg_tcache_max)},
	{NAME("prof"),			CTL(opt_prof)},
	{NAME("prof_prefix"),		CTL(opt_prof_prefix)},
	{NAME("prof_active"),		CTL(opt_prof_active)},
	{NAME("lg_prof_sample"),	CTL(opt_lg_prof_sample)},
	{NAME("lg_prof_interval"),	CTL(opt_lg_prof_interval)},
	{NAME("prof_gdump"),		CTL(opt_prof_gdump)},
	{NAME("prof_final"),		CTL(opt_prof_final)},
	{NAME("prof_leak"),		CTL(opt_prof_leak)},
	{NAME("prof_accum"),		CTL(opt_prof_accum)}
};

static const ctl_named_node_t arena_i_node[] = {
	{NAME("purge"),			CTL(arena_i_purge)},
	{NAME("dss"),			CTL(arena_i_dss)}
};
static const ctl_named_node_t super_arena_i_node[] = {
	{NAME(""),			CHILD(named, arena_i)}
};

static const ctl_indexed_node_t arena_node[] = {
	{INDEX(arena_i)}
};

static const ctl_named_node_t arenas_bin_i_node[] = {
	{NAME("size"),			CTL(arenas_bin_i_size)},
	{NAME("nregs"),			CTL(arenas_bin_i_nregs)},
	{NAME("run_size"),		CTL(arenas_bin_i_run_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
	{NAME(""),			CHILD(named, arenas_bin_i)}
};

static const ctl_indexed_node_t arenas_bin_node[] = {
	{INDEX(arenas_bin_i)}
};

static const ctl_named_node_t arenas_lrun_i_node[] = {
	{NAME("size"),			CTL(arenas_lrun_i_size)}
};
static const ctl_named_node_t super_arenas_lrun_i_node[] = {
	{NAME(""),			CHILD(named, arenas_lrun_i)}
};

static const ctl_indexed_node_t arenas_lrun_node[] = {
	{INDEX(arenas_lrun_i)}
};

static const ctl_named_node_t arenas_node[] = {
	{NAME("narenas"),		CTL(arenas_narenas)},
	{NAME("initialized"),		CTL(arenas_initialized)},
	{NAME("quantum"),		CTL(arenas_quantum)},
	{NAME("page"),			CTL(arenas_page)},
	{NAME("tcache_max"),		CTL(arenas_tcache_max)},
	{NAME("nbins"),			CTL(arenas_nbins)},
	{NAME("nhbins"),		CTL(arenas_nhbins)},
	{NAME("bin"),			CHILD(indexed, arenas_bin)},
	{NAME("nlruns"),		CTL(arenas_nlruns)},
	{NAME("lrun"),			CHILD(indexed, arenas_lrun)},
	{NAME("purge"),			CTL(arenas_purge)},
	{NAME("extend"),		CTL(arenas_extend)}
};

static const ctl_named_node_t	prof_node[] = {
	{NAME("active"),	CTL(prof_active)},
	{NAME("dump"),		CTL(prof_dump)},
	{NAME("interval"),	CTL(prof_interval)}
};

static const ctl_named_node_t stats_chunks_node[] = {
	{NAME("current"),		CTL(stats_chunks_current)},
	{NAME("total"),			CTL(stats_chunks_total)},
	{NAME("high"),			CTL(stats_chunks_high)}
};

static const ctl_named_node_t stats_huge_node[] = {
	{NAME("allocated"),		CTL(stats_huge_allocated)},
	{NAME("nmalloc"),		CTL(stats_huge_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_huge_ndalloc)}
};

static const ctl_named_node_t stats_arenas_i_small_node[] = {
	{NAME("allocated"),		CTL(stats_arenas_i_small_allocated)},
	{NAME("nmalloc"),		CTL(stats_arenas_i_small_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_arenas_i_small_ndalloc)},
	{NAME("nrequests"),		CTL(stats_arenas_i_small_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_large_node[] = {
	{NAME("allocated"),		CTL(stats_arenas_i_large_allocated)},
	{NAME("nmalloc"),		CTL(stats_arenas_i_large_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_arenas_i_large_ndalloc)},
	{NAME("nrequests"),		CTL(stats_arenas_i_large_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
	{NAME("allocated"),		CTL(stats_arenas_i_bins_j_allocated)},
	{NAME("nmalloc"),		CTL(stats_arenas_i_bins_j_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_arenas_i_bins_j_ndalloc)},
	{NAME("nrequests"),		CTL(stats_arenas_i_bins_j_nrequests)},
	{NAME("nfills"),		CTL(stats_arenas_i_bins_j_nfills)},
	{NAME("nflushes"),		CTL(stats_arenas_i_bins_j_nflushes)},
	{NAME("nruns"),			CTL(stats_arenas_i_bins_j_nruns)},
	{NAME("nreruns"),		CTL(stats_arenas_i_bins_j_nreruns)},
	{NAME("curruns"),		CTL(stats_arenas_i_bins_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
	{NAME(""),			CHILD(named, stats_arenas_i_bins_j)}
};

static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
	{INDEX(stats_arenas_i_bins_j)}
};

static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
	{NAME("nmalloc"),		CTL(stats_arenas_i_lruns_j_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_arenas_i_lruns_j_ndalloc)},
	{NAME("nrequests"),		CTL(stats_arenas_i_lruns_j_nrequests)},
	{NAME("curruns"),		CTL(stats_arenas_i_lruns_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
	{NAME(""),			CHILD(named, stats_arenas_i_lruns_j)}
};

static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
	{INDEX(stats_arenas_i_lruns_j)}
};

static const ctl_named_node_t stats_arenas_i_node[] = {
	{NAME("nthreads"),		CTL(stats_arenas_i_nthreads)},
	{NAME("dss"),			CTL(stats_arenas_i_dss)},
	{NAME("pactive"),		CTL(stats_arenas_i_pactive)},
	{NAME("pdirty"),		CTL(stats_arenas_i_pdirty)},
	{NAME("mapped"),		CTL(stats_arenas_i_mapped)},
	{NAME("npurge"),		CTL(stats_arenas_i_npurge)},
	{NAME("nmadvise"),		CTL(stats_arenas_i_nmadvise)},
	{NAME("purged"),		CTL(stats_arenas_i_purged)},
	{NAME("small"),			CHILD(named, stats_arenas_i_small)},
	{NAME("large"),			CHILD(named, stats_arenas_i_large)},
	{NAME("bins"),			CHILD(indexed, stats_arenas_i_bins)},
	{NAME("lruns"),			CHILD(indexed, stats_arenas_i_lruns)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
	{NAME(""),			CHILD(named, stats_arenas_i)}
};

static const ctl_indexed_node_t stats_arenas_node[] = {
	{INDEX(stats_arenas_i)}
};

static const ctl_named_node_t stats_node[] = {
	{NAME("cactive"),		CTL(stats_cactive)},
	{NAME("allocated"),		CTL(stats_allocated)},
	{NAME("active"),		CTL(stats_active)},
	{NAME("mapped"),		CTL(stats_mapped)},
	{NAME("chunks"),		CHILD(named, stats_chunks)},
	{NAME("huge"),			CHILD(named, stats_huge)},
	{NAME("arenas"),		CHILD(indexed, stats_arenas)}
};

static const ctl_named_node_t	root_node[] = {
	{NAME("version"),	CTL(version)},
	{NAME("epoch"),		CTL(epoch)},
	{NAME("thread"),	CHILD(named, thread)},
	{NAME("config"),	CHILD(named, config)},
	{NAME("opt"),		CHILD(named, opt)},
	{NAME("arena"),		CHILD(indexed, arena)},
	{NAME("arenas"),	CHILD(named, arenas)},
	{NAME("prof"),		CHILD(named, prof)},
	{NAME("stats"),		CHILD(named, stats)}
};
static const ctl_named_node_t super_root_node[] = {
	{NAME(""),		CHILD(named, root)}
};

#undef NAME
#undef CHILD
#undef CTL
#undef INDEX
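
/*
 * Example (illustrative): looking up "stats.arenas.0.mapped" walks
 * super_root_node, root_node, stats_node, stats_arenas_node (indexed), and
 * stats_arenas_i_node, yielding the MIB {8, 6, 0, 4} given the node
 * orderings above: "stats" is root child 8, "arenas" is stats child 6, the
 * arena index is 0, and "mapped" is child 4 of stats_arenas_i_node.
 */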

/******************************************************************************/

static bool
ctl_arena_init(ctl_arena_stats_t *astats)
{

	if (astats->lstats == NULL) {
		astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
		    sizeof(malloc_large_stats_t));
		if (astats->lstats == NULL)
			return (true);
	}

	return (false);
}

static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{

	astats->dss = dss_prec_names[dss_prec_limit];
	astats->pactive = 0;
	astats->pdirty = 0;
	if (config_stats) {
		memset(&astats->astats, 0, sizeof(arena_stats_t));
		astats->allocated_small = 0;
		astats->nmalloc_small = 0;
		astats->ndalloc_small = 0;
		astats->nrequests_small = 0;
		memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
		memset(astats->lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
	}
}

static void
ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
	unsigned i;

	arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
	    &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);

	for (i = 0; i < NBINS; i++) {
		cstats->allocated_small += cstats->bstats[i].allocated;
		cstats->nmalloc_small += cstats->bstats[i].nmalloc;
		cstats->ndalloc_small += cstats->bstats[i].ndalloc;
		cstats->nrequests_small += cstats->bstats[i].nrequests;
	}
}

static void
ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{
	unsigned i;

	sstats->pactive += astats->pactive;
	sstats->pdirty += astats->pdirty;

	sstats->astats.mapped += astats->astats.mapped;
	sstats->astats.npurge += astats->astats.npurge;
	sstats->astats.nmadvise += astats->astats.nmadvise;
	sstats->astats.purged += astats->astats.purged;

	sstats->allocated_small += astats->allocated_small;
	sstats->nmalloc_small += astats->nmalloc_small;
	sstats->ndalloc_small += astats->ndalloc_small;
	sstats->nrequests_small += astats->nrequests_small;

	sstats->astats.allocated_large += astats->astats.allocated_large;
	sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
	sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
	sstats->astats.nrequests_large += astats->astats.nrequests_large;

	for (i = 0; i < nlclasses; i++) {
		sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
		sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
		sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
		sstats->lstats[i].curruns += astats->lstats[i].curruns;
	}

	for (i = 0; i < NBINS; i++) {
		sstats->bstats[i].allocated += astats->bstats[i].allocated;
		sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
		sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
		sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
		if (config_tcache) {
			sstats->bstats[i].nfills += astats->bstats[i].nfills;
			sstats->bstats[i].nflushes +=
			    astats->bstats[i].nflushes;
		}
		sstats->bstats[i].nruns += astats->bstats[i].nruns;
		sstats->bstats[i].reruns += astats->bstats[i].reruns;
		sstats->bstats[i].curruns += astats->bstats[i].curruns;
	}
}

static void
ctl_arena_refresh(arena_t *arena, unsigned i)
{
	ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
	ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];

	ctl_arena_clear(astats);

	sstats->nthreads += astats->nthreads;
	if (config_stats) {
		ctl_arena_stats_amerge(astats, arena);
		/* Merge into sum stats as well. */
		ctl_arena_stats_smerge(sstats, astats);
	} else {
		astats->pactive += arena->nactive;
		astats->pdirty += arena->ndirty;
		/* Merge into sum stats as well. */
		sstats->pactive += arena->nactive;
		sstats->pdirty += arena->ndirty;
	}
}

static bool
ctl_grow(void)
{
	ctl_arena_stats_t *astats;
	arena_t **tarenas;

	/* Allocate extended arena stats and arenas arrays. */
	astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
	    sizeof(ctl_arena_stats_t));
	if (astats == NULL)
		return (true);
	tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
	    sizeof(arena_t *));
	if (tarenas == NULL) {
		idalloc(astats);
		return (true);
	}

	/* Initialize the new astats element. */
	memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
	    sizeof(ctl_arena_stats_t));
	memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
	if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
		idalloc(tarenas);
		idalloc(astats);
		return (true);
	}
	/* Swap merged stats to their new location. */
	{
		ctl_arena_stats_t tstats;
		memcpy(&tstats, &astats[ctl_stats.narenas],
		    sizeof(ctl_arena_stats_t));
		memcpy(&astats[ctl_stats.narenas],
		    &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
		memcpy(&astats[ctl_stats.narenas + 1], &tstats,
		    sizeof(ctl_arena_stats_t));
	}
	/* Initialize the new arenas element. */
	tarenas[ctl_stats.narenas] = NULL;
	{
		arena_t **arenas_old = arenas;
		/*
		 * Swap extended arenas array into place.  Although ctl_mtx
		 * protects this function from other threads extending the
		 * array, it does not protect from other threads mutating it
		 * (i.e. initializing arenas and setting array elements to
		 * point to them).  Therefore, array copying must happen under
		 * the protection of arenas_lock.
		 */
		malloc_mutex_lock(&arenas_lock);
		arenas = tarenas;
		memcpy(arenas, arenas_old, ctl_stats.narenas *
		    sizeof(arena_t *));
		narenas_total++;
		arenas_extend(narenas_total - 1);
		malloc_mutex_unlock(&arenas_lock);
		/*
		 * Deallocate arenas_old only if it came from imalloc() (not
		 * base_alloc()).
		 */
		if (ctl_stats.narenas != narenas_auto)
			idalloc(arenas_old);
	}
	ctl_stats.arenas = astats;
	ctl_stats.narenas++;

	return (false);
}

static void
ctl_refresh(void)
{
	unsigned i;
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

	if (config_stats) {
		malloc_mutex_lock(&chunks_mtx);
		ctl_stats.chunks.current = stats_chunks.curchunks;
		ctl_stats.chunks.total = stats_chunks.nchunks;
		ctl_stats.chunks.high = stats_chunks.highchunks;
		malloc_mutex_unlock(&chunks_mtx);

		malloc_mutex_lock(&huge_mtx);
		ctl_stats.huge.allocated = huge_allocated;
		ctl_stats.huge.nmalloc = huge_nmalloc;
		ctl_stats.huge.ndalloc = huge_ndalloc;
		malloc_mutex_unlock(&huge_mtx);
	}

	/*
	 * Clear sum stats, since ctl_arena_refresh() merges the per-arena
	 * stats into them.
	 */
	ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
	ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);

	malloc_mutex_lock(&arenas_lock);
	memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
	for (i = 0; i < ctl_stats.narenas; i++) {
		if (arenas[i] != NULL)
			ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
		else
			ctl_stats.arenas[i].nthreads = 0;
	}
	malloc_mutex_unlock(&arenas_lock);
	for (i = 0; i < ctl_stats.narenas; i++) {
		bool initialized = (tarenas[i] != NULL);

		ctl_stats.arenas[i].initialized = initialized;
		if (initialized)
			ctl_arena_refresh(tarenas[i], i);
	}

	if (config_stats) {
		ctl_stats.allocated =
		    ctl_stats.arenas[ctl_stats.narenas].allocated_small
		    + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
		    + ctl_stats.huge.allocated;
		ctl_stats.active =
		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
		    + ctl_stats.huge.allocated;
		ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
	}

	ctl_epoch++;
}

static bool
ctl_init(void)
{
	bool ret;

	malloc_mutex_lock(&ctl_mtx);
	if (ctl_initialized == false) {
		/*
		 * Allocate space for one extra arena stats element, which
		 * contains summed stats across all arenas.
		 */
		assert(narenas_auto == narenas_total_get());
		ctl_stats.narenas = narenas_auto;
		ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
		    (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
		if (ctl_stats.arenas == NULL) {
			ret = true;
			goto label_return;
		}
		memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
		    sizeof(ctl_arena_stats_t));

		/*
		 * Initialize all stats structures, regardless of whether they
		 * ever get used.  Lazy initialization would allow errors to
		 * cause inconsistent state to be viewable by the application.
		 */
		if (config_stats) {
			unsigned i;
			for (i = 0; i <= ctl_stats.narenas; i++) {
				if (ctl_arena_init(&ctl_stats.arenas[i])) {
					ret = true;
					goto label_return;
				}
			}
		}
		ctl_stats.arenas[ctl_stats.narenas].initialized = true;

		ctl_epoch = 0;
		ctl_refresh();
		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

static int
ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
    size_t *depthp)
{
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
	const ctl_named_node_t *node;

	elm = name;
	/* Equivalent to strchrnul(). */
	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	if (elen == 0) {
		ret = ENOENT;
		goto label_return;
	}
	node = super_root_node;
	for (i = 0; i < *depthp; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			const ctl_named_node_t *pnode = node;

			/* Children are named. */
			for (j = 0; j < node->nchildren; j++) {
				const ctl_named_node_t *child =
				    ctl_named_children(node, j);
				if (strlen(child->name) == elen &&
				    strncmp(elm, child->name, elen) == 0) {
					node = child;
					if (nodesp != NULL)
						nodesp[i] =
						    (const ctl_node_t *)node;
					mibp[i] = j;
					break;
				}
			}
			if (node == pnode) {
				ret = ENOENT;
				goto label_return;
			}
		} else {
			uintmax_t index;
			const ctl_indexed_node_t *inode;

			/* Children are indexed. */
			index = malloc_strtoumax(elm, NULL, 10);
			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
				ret = ENOENT;
				goto label_return;
			}

			inode = ctl_indexed_node(node->children);
			node = inode->index(mibp, *depthp, (size_t)index);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}

			if (nodesp != NULL)
				nodesp[i] = (const ctl_node_t *)node;
			mibp[i] = (size_t)index;
		}

		if (node->ctl != NULL) {
			/* Terminal node. */
			if (*dot != '\0') {
				/*
				 * The name contains more elements than are
				 * in this path through the tree.
				 */
				ret = ENOENT;
				goto label_return;
			}
			/* Complete lookup successful. */
			*depthp = i + 1;
			break;
		}

		/* Update elm. */
		if (*dot == '\0') {
			/* No more elements. */
			ret = ENOENT;
			goto label_return;
		}
		elm = &dot[1];
		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
		    strchr(elm, '\0');
		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	}

	ret = 0;
label_return:
	return (ret);
}
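
/*
 * Example (illustrative): ctl_lookup("arenas.bin.0.size", ...) matches
 * "arenas" (root child 6), then "bin" (arenas child 7), then takes the
 * indexed branch via arenas_bin_i_index() with index 0, and finally matches
 * "size" (child 0), leaving mib = {6, 7, 0, 0} and *depthp = 4.
 */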

int
ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int ret;
	size_t depth;
	ctl_node_t const *nodes[CTL_MAX_DEPTH];
	size_t mib[CTL_MAX_DEPTH];
	const ctl_named_node_t *node;

	if (ctl_initialized == false && ctl_init()) {
		ret = EAGAIN;
		goto label_return;
	}

	depth = CTL_MAX_DEPTH;
	ret = ctl_lookup(name, nodes, mib, &depth);
	if (ret != 0)
		goto label_return;

	node = ctl_named_node(nodes[depth-1]);
	if (node != NULL && node->ctl)
		ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
	else {
		/* The name refers to a partial path through the ctl tree. */
		ret = ENOENT;
	}

label_return:
	return (ret);
}
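
/*
 * Example (illustrative): applications reach this function through the
 * public mallctl() entry point, e.g.:
 *
 *	size_t allocated, sz = sizeof(allocated);
 *	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
 *		printf("allocated: %zu\n", allocated);
 */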

int
ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
	int ret;

	if (ctl_initialized == false && ctl_init()) {
		ret = EAGAIN;
		goto label_return;
	}

	ret = ctl_lookup(name, NULL, mibp, miblenp);
label_return:
	return (ret);
}

int
ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	const ctl_named_node_t *node;
	size_t i;

	if (ctl_initialized == false && ctl_init()) {
		ret = EAGAIN;
		goto label_return;
	}

	/* Iterate down the tree. */
	node = super_root_node;
	for (i = 0; i < miblen; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			/* Children are named. */
			if (node->nchildren <= mib[i]) {
				ret = ENOENT;
				goto label_return;
			}
			node = ctl_named_children(node, mib[i]);
		} else {
			const ctl_indexed_node_t *inode;

			/* Indexed element. */
			inode = ctl_indexed_node(node->children);
			node = inode->index(mib, miblen, mib[i]);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}
		}
	}

	/* Call the ctl function. */
	if (node && node->ctl)
		ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
	else {
		/* Partial MIB. */
		ret = ENOENT;
	}

label_return:
	return (ret);
}
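
/*
 * Example (illustrative): repeated queries can skip string parsing by
 * translating a name once with mallctlnametomib() and then patching the
 * resulting MIB, following the pattern in the jemalloc manual:
 *
 *	size_t mib[4], miblen = sizeof(mib) / sizeof(mib[0]);
 *	size_t len, sz = sizeof(len);
 *	unsigned i;
 *	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *	for (i = 0; i < nbins; i++) {
 *		mib[2] = i;	* Select bin i without re-parsing. *
 *		mallctlbymib(mib, miblen, &len, &sz, NULL, 0);
 *	}
 */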

bool
ctl_boot(void)
{

	if (malloc_mutex_init(&ctl_mtx))
		return (true);

	ctl_initialized = false;

	return (false);
}

void
ctl_prefork(void)
{

	malloc_mutex_prefork(&ctl_mtx);
}

void
ctl_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&ctl_mtx);
}

void
ctl_postfork_child(void)
{

	malloc_mutex_postfork_child(&ctl_mtx);
}

/******************************************************************************/
/* *_ctl() functions. */

#define	READONLY()	do {						\
	if (newp != NULL || newlen != 0) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define	WRITEONLY()	do {						\
	if (oldp != NULL || oldlenp != NULL) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define	READ(v, t)	do {						\
	if (oldp != NULL && oldlenp != NULL) {				\
		if (*oldlenp != sizeof(t)) {				\
			size_t	copylen = (sizeof(t) <= *oldlenp)	\
			    ? sizeof(t) : *oldlenp;			\
			memcpy(oldp, (void *)&(v), copylen);		\
			ret = EINVAL;					\
			goto label_return;				\
		} else							\
			*(t *)oldp = (v);				\
	}								\
} while (0)

#define	WRITE(v, t)	do {						\
	if (newp != NULL) {						\
		if (newlen != sizeof(t)) {				\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		(v) = *(t *)newp;					\
	}								\
} while (0)

/*
 * There's a lot of code duplication in the following macros due to limitations
 * in how nested cpp macros are expanded.
 */
#define	CTL_RO_CLGEN(c, l, n, v, t)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if ((c) == false)						\
		return (ENOENT);					\
	if (l)								\
		malloc_mutex_lock(&ctl_mtx);				\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	if (l)								\
		malloc_mutex_unlock(&ctl_mtx);				\
	return (ret);							\
}

#define	CTL_RO_CGEN(c, n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if ((c) == false)						\
		return (ENOENT);					\
	malloc_mutex_lock(&ctl_mtx);					\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(&ctl_mtx);					\
	return (ret);							\
}

#define	CTL_RO_GEN(n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	malloc_mutex_lock(&ctl_mtx);					\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(&ctl_mtx);					\
	return (ret);							\
}

/*
 * ctl_mtx is not acquired, under the assumption that no pertinent data will
 * mutate during the call.
 */
#define	CTL_RO_NL_CGEN(c, n, v, t)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if ((c) == false)						\
		return (ENOENT);					\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}

#define	CTL_RO_NL_GEN(n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}

#define	CTL_RO_BOOL_CONFIG_GEN(n)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	bool oldval;							\
									\
	READONLY();							\
	oldval = n;							\
	READ(oldval, bool);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}

/******************************************************************************/

CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)

static int
epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	UNUSED uint64_t newval;

	malloc_mutex_lock(&ctl_mtx);
	WRITE(newval, uint64_t);
	if (newp != NULL)
		ctl_refresh();
	READ(ctl_epoch, uint64_t);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
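
/*
 * Example (illustrative): statistics are snapshotted by ctl_refresh(), so a
 * reader bumps the epoch before sampling, e.g.:
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);
 */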

/******************************************************************************/

CTL_RO_BOOL_CONFIG_GEN(config_debug)
CTL_RO_BOOL_CONFIG_GEN(config_dss)
CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
CTL_RO_BOOL_CONFIG_GEN(config_mremap)
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
CTL_RO_BOOL_CONFIG_GEN(config_stats)
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
CTL_RO_BOOL_CONFIG_GEN(config_tls)
CTL_RO_BOOL_CONFIG_GEN(config_utrace)
CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)

/******************************************************************************/

CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)

/******************************************************************************/

static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	unsigned newind, oldind;

	malloc_mutex_lock(&ctl_mtx);
	newind = oldind = choose_arena(NULL)->ind;
	WRITE(newind, unsigned);
	READ(oldind, unsigned);
	if (newind != oldind) {
		arena_t *arena;

		if (newind >= ctl_stats.narenas) {
			/* New arena index is out of range. */
			ret = EFAULT;
			goto label_return;
		}

		/* Initialize arena if necessary. */
		malloc_mutex_lock(&arenas_lock);
		if ((arena = arenas[newind]) == NULL && (arena =
		    arenas_extend(newind)) == NULL) {
			malloc_mutex_unlock(&arenas_lock);
			ret = EAGAIN;
			goto label_return;
		}
		assert(arena == arenas[newind]);
		arenas[oldind]->nthreads--;
		arenas[newind]->nthreads++;
		malloc_mutex_unlock(&arenas_lock);

		/* Set new arena association. */
		if (config_tcache) {
			tcache_t *tcache;
			if ((uintptr_t)(tcache = *tcache_tsd_get()) >
			    (uintptr_t)TCACHE_STATE_MAX) {
				tcache_arena_dissociate(tcache);
				tcache_arena_associate(tcache, arena);
			}
		}
		arenas_tsd_set(&arena);
	}

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
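
/*
 * Example (illustrative): a thread rebinds itself to arena 3 while reading
 * back its previous binding:
 *
 *	unsigned arena = 3, old;
 *	size_t sz = sizeof(unsigned);
 *	mallctl("thread.arena", &old, &sz, &arena, sz);
 */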

CTL_RO_NL_CGEN(config_stats, thread_allocated,
    thread_allocated_tsd_get()->allocated, uint64_t)
CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
    &thread_allocated_tsd_get()->allocated, uint64_t *)
CTL_RO_NL_CGEN(config_stats, thread_deallocated,
    thread_allocated_tsd_get()->deallocated, uint64_t)
CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
    &thread_allocated_tsd_get()->deallocated, uint64_t *)
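
/*
 * Example (illustrative): reading "thread.allocatedp" once yields a pointer
 * into this thread's TSD that can be dereferenced directly thereafter,
 * avoiding a mallctl() call per sample:
 *
 *	uint64_t *allocatedp;
 *	size_t sz = sizeof(allocatedp);
 *	mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0);
 *	... later, simply read *allocatedp ...
 */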

static int
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (config_tcache == false)
		return (ENOENT);

	oldval = tcache_enabled_get();
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		tcache_enabled_set(*(bool *)newp);
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return (ret);
}

static int
thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;

	if (config_tcache == false)
		return (ENOENT);

	READONLY();
	WRITEONLY();

	tcache_flush();

	ret = 0;
label_return:
	return (ret);
}
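
/*
 * Example (illustrative): disabling this thread's cache, or flushing it
 * explicitly:
 *
 *	bool enabled = false;
 *	mallctl("thread.tcache.enabled", NULL, NULL, &enabled,
 *	    sizeof(enabled));
 *	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
 */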

/******************************************************************************/

/* ctl_mtx must be held during execution of this function. */
static void
arena_purge(unsigned arena_ind)
{
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

	malloc_mutex_lock(&arenas_lock);
	memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
	malloc_mutex_unlock(&arenas_lock);

	if (arena_ind == ctl_stats.narenas) {
		unsigned i;
		for (i = 0; i < ctl_stats.narenas; i++) {
			if (tarenas[i] != NULL)
				arena_purge_all(tarenas[i]);
		}
	} else {
		assert(arena_ind < ctl_stats.narenas);
		if (tarenas[arena_ind] != NULL)
			arena_purge_all(tarenas[arena_ind]);
	}
}
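
/*
 * Example (illustrative): an application purges arena 0's dirty pages with
 *
 *	mallctl("arena.0.purge", NULL, NULL, NULL, 0);
 *
 * Per the arena_purge() logic above, using the index equal to
 * "arenas.narenas" purges all arenas.
 */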

static int
arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;

	READONLY();
	WRITEONLY();
	malloc_mutex_lock(&ctl_mtx);
	arena_purge(mib[1]);
	malloc_mutex_unlock(&ctl_mtx);

	ret = 0;
label_return:
	return (ret);
}

static int
arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret, i;
	bool match, err;
	const char *dss = NULL;
	unsigned arena_ind = mib[1];
	dss_prec_t dss_prec_old = dss_prec_limit;
	dss_prec_t dss_prec = dss_prec_limit;

	malloc_mutex_lock(&ctl_mtx);
	WRITE(dss, const char *);
	/*
	 * Validate the precedence name only for writes; for read-only calls
	 * dss remains NULL and must not be inspected.
	 */
	if (dss != NULL) {
		match = false;
		for (i = 0; i < dss_prec_limit; i++) {
			if (strcmp(dss_prec_names[i], dss) == 0) {
				dss_prec = i;
				match = true;
				break;
			}
		}
		if (match == false) {
			ret = EINVAL;
			goto label_return;
		}
	}

	if (arena_ind < ctl_stats.narenas) {
		arena_t *arena = arenas[arena_ind];
		if (arena != NULL) {
			dss_prec_old = arena_dss_prec_get(arena);
			if (dss_prec != dss_prec_limit)
				arena_dss_prec_set(arena, dss_prec);
			err = false;
		} else
			err = true;
	} else {
		dss_prec_old = chunk_dss_prec_get();
		err = (dss_prec != dss_prec_limit) ?
		    chunk_dss_prec_set(dss_prec) : false;
	}
	dss = dss_prec_names[dss_prec_old];
	READ(dss, const char *);
	if (err) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
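
/*
 * Example (illustrative): setting arena 0's dss precedence while reading
 * back the previous setting:
 *
 *	const char *dss = "primary", *old;
 *	size_t sz = sizeof(old);
 *	mallctl("arena.0.dss", &old, &sz, &dss, sizeof(dss));
 */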

static const ctl_named_node_t *
arena_i_index(const size_t *mib, size_t miblen, size_t i)
{
	const ctl_named_node_t * ret;

	malloc_mutex_lock(&ctl_mtx);
	if (i > ctl_stats.narenas) {
		ret = NULL;
		goto label_return;
	}

	ret = super_arena_i_node;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

/******************************************************************************/

static int
arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	unsigned narenas;

	malloc_mutex_lock(&ctl_mtx);
	READONLY();
	if (*oldlenp != sizeof(unsigned)) {
		ret = EINVAL;
		goto label_return;
	}
	narenas = ctl_stats.narenas;
	READ(narenas, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	unsigned nread, i;

	malloc_mutex_lock(&ctl_mtx);
	READONLY();
	if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
		ret = EINVAL;
		nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
		    ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
	} else {
		ret = 0;
		nread = ctl_stats.narenas;
	}

	for (i = 0; i < nread; i++)
		((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;

label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
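
/*
 * Example (illustrative): reading "arenas.initialized" requires sizing the
 * buffer from "arenas.narenas" first:
 *
 *	unsigned narenas;
 *	size_t sz = sizeof(narenas);
 *	mallctl("arenas.narenas", &narenas, &sz, NULL, 0);
 *	bool *init = malloc(narenas * sizeof(bool));
 *	sz = narenas * sizeof(bool);
 *	mallctl("arenas.initialized", init, &sz, NULL, 0);
 */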

CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
static const ctl_named_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{

	if (i >= NBINS)
		return (NULL);
	return (super_arenas_bin_i_node);
}

CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{

	if (i >= nlclasses)
		return (NULL);
	return (super_arenas_lrun_i_node);
}

static int
arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	unsigned arena_ind;

	malloc_mutex_lock(&ctl_mtx);
	WRITEONLY();
	arena_ind = UINT_MAX;
	WRITE(arena_ind, unsigned);
	if (newp != NULL && arena_ind >= ctl_stats.narenas)
		ret = EFAULT;
	else {
		if (arena_ind == UINT_MAX)
			arena_ind = ctl_stats.narenas;
		arena_purge(arena_ind);
		ret = 0;
	}

label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

static int
arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	unsigned narenas;

	malloc_mutex_lock(&ctl_mtx);
	READONLY();
	if (ctl_grow()) {
		ret = EAGAIN;
		goto label_return;
	}
	narenas = ctl_stats.narenas - 1;
	READ(narenas, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
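
/*
 * Example (illustrative): creating a new arena and retrieving its index:
 *
 *	unsigned arena_ind;
 *	size_t sz = sizeof(arena_ind);
 *	mallctl("arenas.extend", &arena_ind, &sz, NULL, 0);
 */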

/******************************************************************************/

static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (config_prof == false)
		return (ENOENT);

	malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
	oldval = opt_prof_active;
	if (newp != NULL) {
		/*
		 * The memory barriers will tend to make opt_prof_active
		 * propagate faster on systems with weak memory ordering.
		 */
		mb_write();
		WRITE(opt_prof_active, bool);
		mb_write();
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
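
/*
 * Example (illustrative): toggling sampling at runtime (requires opt.prof
 * to have been enabled at startup):
 *
 *	bool active = true;
 *	mallctl("prof.active", NULL, NULL, &active, sizeof(active));
 */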

static int
prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	const char *filename = NULL;

	if (config_prof == false)
		return (ENOENT);

	WRITEONLY();
	WRITE(filename, const char *);

	if (prof_mdump(filename)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return (ret);
}
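
/*
 * Example (illustrative): dumping a heap profile to an explicit filename,
 * or to an automatically generated name under opt.prof_prefix when newp is
 * NULL:
 *
 *	const char *fname = "/tmp/prof.heap";
 *	mallctl("prof.dump", NULL, NULL, &fname, sizeof(fname));
 */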

CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)

/******************************************************************************/

CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)

CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
    size_t)
CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)

CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
    ctl_stats.arenas[mib[2]].astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
    ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
    ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
    ctl_stats.arenas[mib[2]].astats.purged, uint64_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    ctl_stats.arenas[mib[2]].allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)

static const ctl_named_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{

	if (j >= NBINS)
		return (NULL);
	return (super_stats_arenas_i_bins_j_node);
}

CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
1659
1660static const ctl_named_node_t *
1661stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
1662{
1663
1664	if (j > nlclasses)
1665		return (NULL);
1666	return (super_stats_arenas_i_lruns_j_node);
1667}

static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
	const ctl_named_node_t * ret;

	malloc_mutex_lock(&ctl_mtx);
	if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
		ret = NULL;
		goto label_return;
	}

	ret = super_stats_arenas_i_node;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}