1#define	JEMALLOC_CTL_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7/*
8 * ctl_mtx protects the following:
9 * - ctl_stats->*
10 */
11static malloc_mutex_t	ctl_mtx;
12static bool		ctl_initialized;
13static ctl_stats_t	*ctl_stats;
14static ctl_arenas_t	*ctl_arenas;
15
16/******************************************************************************/
17/* Helpers for named and indexed nodes. */
18
19JEMALLOC_INLINE_C const ctl_named_node_t *
20ctl_named_node(const ctl_node_t *node)
21{
22	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
23}
24
25JEMALLOC_INLINE_C const ctl_named_node_t *
26ctl_named_children(const ctl_named_node_t *node, size_t index)
27{
28	const ctl_named_node_t *children = ctl_named_node(node->children);
29
30	return (children ? &children[index] : NULL);
31}
32
33JEMALLOC_INLINE_C const ctl_indexed_node_t *
34ctl_indexed_node(const ctl_node_t *node)
35{
36	return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
37}
38
39/******************************************************************************/
40/* Function prototypes for non-inline static functions. */
41
/*
 * Declare the n##_ctl() handler invoked when mallctl lookup terminates at the
 * node named n.
 */
#define	CTL_PROTO(n)							\
static int	n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,	\
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);

/*
 * Declare the n##_index() resolver that maps a numeric MIB component to the
 * corresponding child node of the indexed node n.
 */
#define	INDEX_PROTO(n)							\
static const ctl_named_node_t	*n##_index(tsdn_t *tsdn,		\
    const size_t *mib, size_t miblen, size_t i);
49
50CTL_PROTO(version)
51CTL_PROTO(epoch)
52CTL_PROTO(thread_tcache_enabled)
53CTL_PROTO(thread_tcache_flush)
54CTL_PROTO(thread_prof_name)
55CTL_PROTO(thread_prof_active)
56CTL_PROTO(thread_arena)
57CTL_PROTO(thread_allocated)
58CTL_PROTO(thread_allocatedp)
59CTL_PROTO(thread_deallocated)
60CTL_PROTO(thread_deallocatedp)
61CTL_PROTO(config_cache_oblivious)
62CTL_PROTO(config_debug)
63CTL_PROTO(config_fill)
64CTL_PROTO(config_lazy_lock)
65CTL_PROTO(config_malloc_conf)
66CTL_PROTO(config_munmap)
67CTL_PROTO(config_prof)
68CTL_PROTO(config_prof_libgcc)
69CTL_PROTO(config_prof_libunwind)
70CTL_PROTO(config_stats)
71CTL_PROTO(config_tcache)
72CTL_PROTO(config_tls)
73CTL_PROTO(config_utrace)
74CTL_PROTO(config_xmalloc)
75CTL_PROTO(opt_abort)
76CTL_PROTO(opt_dss)
77CTL_PROTO(opt_narenas)
78CTL_PROTO(opt_decay_time)
79CTL_PROTO(opt_stats_print)
80CTL_PROTO(opt_junk)
81CTL_PROTO(opt_zero)
82CTL_PROTO(opt_utrace)
83CTL_PROTO(opt_xmalloc)
84CTL_PROTO(opt_tcache)
85CTL_PROTO(opt_lg_tcache_max)
86CTL_PROTO(opt_prof)
87CTL_PROTO(opt_prof_prefix)
88CTL_PROTO(opt_prof_active)
89CTL_PROTO(opt_prof_thread_active_init)
90CTL_PROTO(opt_lg_prof_sample)
91CTL_PROTO(opt_lg_prof_interval)
92CTL_PROTO(opt_prof_gdump)
93CTL_PROTO(opt_prof_final)
94CTL_PROTO(opt_prof_leak)
95CTL_PROTO(opt_prof_accum)
96CTL_PROTO(tcache_create)
97CTL_PROTO(tcache_flush)
98CTL_PROTO(tcache_destroy)
99CTL_PROTO(arena_i_initialized)
100CTL_PROTO(arena_i_purge)
101CTL_PROTO(arena_i_decay)
102CTL_PROTO(arena_i_reset)
103CTL_PROTO(arena_i_destroy)
104CTL_PROTO(arena_i_dss)
105CTL_PROTO(arena_i_decay_time)
106CTL_PROTO(arena_i_extent_hooks)
107INDEX_PROTO(arena_i)
108CTL_PROTO(arenas_bin_i_size)
109CTL_PROTO(arenas_bin_i_nregs)
110CTL_PROTO(arenas_bin_i_slab_size)
111INDEX_PROTO(arenas_bin_i)
112CTL_PROTO(arenas_lextent_i_size)
113INDEX_PROTO(arenas_lextent_i)
114CTL_PROTO(arenas_narenas)
115CTL_PROTO(arenas_decay_time)
116CTL_PROTO(arenas_quantum)
117CTL_PROTO(arenas_page)
118CTL_PROTO(arenas_tcache_max)
119CTL_PROTO(arenas_nbins)
120CTL_PROTO(arenas_nhbins)
121CTL_PROTO(arenas_nlextents)
122CTL_PROTO(arenas_create)
123CTL_PROTO(prof_thread_active_init)
124CTL_PROTO(prof_active)
125CTL_PROTO(prof_dump)
126CTL_PROTO(prof_gdump)
127CTL_PROTO(prof_reset)
128CTL_PROTO(prof_interval)
129CTL_PROTO(lg_prof_sample)
130CTL_PROTO(stats_arenas_i_small_allocated)
131CTL_PROTO(stats_arenas_i_small_nmalloc)
132CTL_PROTO(stats_arenas_i_small_ndalloc)
133CTL_PROTO(stats_arenas_i_small_nrequests)
134CTL_PROTO(stats_arenas_i_large_allocated)
135CTL_PROTO(stats_arenas_i_large_nmalloc)
136CTL_PROTO(stats_arenas_i_large_ndalloc)
137CTL_PROTO(stats_arenas_i_large_nrequests)
138CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
139CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
140CTL_PROTO(stats_arenas_i_bins_j_nrequests)
141CTL_PROTO(stats_arenas_i_bins_j_curregs)
142CTL_PROTO(stats_arenas_i_bins_j_nfills)
143CTL_PROTO(stats_arenas_i_bins_j_nflushes)
144CTL_PROTO(stats_arenas_i_bins_j_nslabs)
145CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
146CTL_PROTO(stats_arenas_i_bins_j_curslabs)
147INDEX_PROTO(stats_arenas_i_bins_j)
148CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
149CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
150CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
151CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
152INDEX_PROTO(stats_arenas_i_lextents_j)
153CTL_PROTO(stats_arenas_i_nthreads)
154CTL_PROTO(stats_arenas_i_dss)
155CTL_PROTO(stats_arenas_i_decay_time)
156CTL_PROTO(stats_arenas_i_pactive)
157CTL_PROTO(stats_arenas_i_pdirty)
158CTL_PROTO(stats_arenas_i_mapped)
159CTL_PROTO(stats_arenas_i_retained)
160CTL_PROTO(stats_arenas_i_npurge)
161CTL_PROTO(stats_arenas_i_nmadvise)
162CTL_PROTO(stats_arenas_i_purged)
163CTL_PROTO(stats_arenas_i_base)
164CTL_PROTO(stats_arenas_i_internal)
165CTL_PROTO(stats_arenas_i_tcache_bytes)
166CTL_PROTO(stats_arenas_i_resident)
167INDEX_PROTO(stats_arenas_i)
168CTL_PROTO(stats_allocated)
169CTL_PROTO(stats_active)
170CTL_PROTO(stats_metadata)
171CTL_PROTO(stats_resident)
172CTL_PROTO(stats_mapped)
173CTL_PROTO(stats_retained)
174
175/******************************************************************************/
176/* mallctl tree. */
177
/* Maximum tree depth. */
#define	CTL_MAX_DEPTH	6

/* Initializer for a named node; {true} sets ctl_node_t's named flag. */
#define	NAME(n)	{true},	n
/*
 * Link a node to its child table c##_node: child count, child array, and a
 * NULL ctl handler (non-terminal).  t selects "named" or "indexed" so the
 * count is computed with the correct element type.
 */
#define	CHILD(t, c)							\
	sizeof(c##_node) / sizeof(ctl_##t##_node_t),			\
	(ctl_node_t *)c##_node,						\
	NULL
/* Terminal node: no children; requests are handled by c##_ctl(). */
#define	CTL(c)	0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define	INDEX(i)	{false},	i##_index
193
194static const ctl_named_node_t	thread_tcache_node[] = {
195	{NAME("enabled"),	CTL(thread_tcache_enabled)},
196	{NAME("flush"),		CTL(thread_tcache_flush)}
197};
198
199static const ctl_named_node_t	thread_prof_node[] = {
200	{NAME("name"),		CTL(thread_prof_name)},
201	{NAME("active"),	CTL(thread_prof_active)}
202};
203
204static const ctl_named_node_t	thread_node[] = {
205	{NAME("arena"),		CTL(thread_arena)},
206	{NAME("allocated"),	CTL(thread_allocated)},
207	{NAME("allocatedp"),	CTL(thread_allocatedp)},
208	{NAME("deallocated"),	CTL(thread_deallocated)},
209	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
210	{NAME("tcache"),	CHILD(named, thread_tcache)},
211	{NAME("prof"),		CHILD(named, thread_prof)}
212};
213
214static const ctl_named_node_t	config_node[] = {
215	{NAME("cache_oblivious"), CTL(config_cache_oblivious)},
216	{NAME("debug"),		CTL(config_debug)},
217	{NAME("fill"),		CTL(config_fill)},
218	{NAME("lazy_lock"),	CTL(config_lazy_lock)},
219	{NAME("malloc_conf"),	CTL(config_malloc_conf)},
220	{NAME("munmap"),	CTL(config_munmap)},
221	{NAME("prof"),		CTL(config_prof)},
222	{NAME("prof_libgcc"),	CTL(config_prof_libgcc)},
223	{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
224	{NAME("stats"),		CTL(config_stats)},
225	{NAME("tcache"),	CTL(config_tcache)},
226	{NAME("tls"),		CTL(config_tls)},
227	{NAME("utrace"),	CTL(config_utrace)},
228	{NAME("xmalloc"),	CTL(config_xmalloc)}
229};
230
231static const ctl_named_node_t opt_node[] = {
232	{NAME("abort"),		CTL(opt_abort)},
233	{NAME("dss"),		CTL(opt_dss)},
234	{NAME("narenas"),	CTL(opt_narenas)},
235	{NAME("decay_time"),	CTL(opt_decay_time)},
236	{NAME("stats_print"),	CTL(opt_stats_print)},
237	{NAME("junk"),		CTL(opt_junk)},
238	{NAME("zero"),		CTL(opt_zero)},
239	{NAME("utrace"),	CTL(opt_utrace)},
240	{NAME("xmalloc"),	CTL(opt_xmalloc)},
241	{NAME("tcache"),	CTL(opt_tcache)},
242	{NAME("lg_tcache_max"),	CTL(opt_lg_tcache_max)},
243	{NAME("prof"),		CTL(opt_prof)},
244	{NAME("prof_prefix"),	CTL(opt_prof_prefix)},
245	{NAME("prof_active"),	CTL(opt_prof_active)},
246	{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
247	{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
248	{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
249	{NAME("prof_gdump"),	CTL(opt_prof_gdump)},
250	{NAME("prof_final"),	CTL(opt_prof_final)},
251	{NAME("prof_leak"),	CTL(opt_prof_leak)},
252	{NAME("prof_accum"),	CTL(opt_prof_accum)}
253};
254
255static const ctl_named_node_t	tcache_node[] = {
256	{NAME("create"),	CTL(tcache_create)},
257	{NAME("flush"),		CTL(tcache_flush)},
258	{NAME("destroy"),	CTL(tcache_destroy)}
259};
260
261static const ctl_named_node_t arena_i_node[] = {
262	{NAME("initialized"),	CTL(arena_i_initialized)},
263	{NAME("purge"),		CTL(arena_i_purge)},
264	{NAME("decay"),		CTL(arena_i_decay)},
265	{NAME("reset"),		CTL(arena_i_reset)},
266	{NAME("destroy"),	CTL(arena_i_destroy)},
267	{NAME("dss"),		CTL(arena_i_dss)},
268	{NAME("decay_time"),	CTL(arena_i_decay_time)},
269	{NAME("extent_hooks"),	CTL(arena_i_extent_hooks)}
270};
271static const ctl_named_node_t super_arena_i_node[] = {
272	{NAME(""),		CHILD(named, arena_i)}
273};
274
275static const ctl_indexed_node_t arena_node[] = {
276	{INDEX(arena_i)}
277};
278
279static const ctl_named_node_t arenas_bin_i_node[] = {
280	{NAME("size"),		CTL(arenas_bin_i_size)},
281	{NAME("nregs"),		CTL(arenas_bin_i_nregs)},
282	{NAME("slab_size"),	CTL(arenas_bin_i_slab_size)}
283};
284static const ctl_named_node_t super_arenas_bin_i_node[] = {
285	{NAME(""),		CHILD(named, arenas_bin_i)}
286};
287
288static const ctl_indexed_node_t arenas_bin_node[] = {
289	{INDEX(arenas_bin_i)}
290};
291
292static const ctl_named_node_t arenas_lextent_i_node[] = {
293	{NAME("size"),		CTL(arenas_lextent_i_size)}
294};
295static const ctl_named_node_t super_arenas_lextent_i_node[] = {
296	{NAME(""),		CHILD(named, arenas_lextent_i)}
297};
298
299static const ctl_indexed_node_t arenas_lextent_node[] = {
300	{INDEX(arenas_lextent_i)}
301};
302
303static const ctl_named_node_t arenas_node[] = {
304	{NAME("narenas"),	CTL(arenas_narenas)},
305	{NAME("decay_time"),	CTL(arenas_decay_time)},
306	{NAME("quantum"),	CTL(arenas_quantum)},
307	{NAME("page"),		CTL(arenas_page)},
308	{NAME("tcache_max"),	CTL(arenas_tcache_max)},
309	{NAME("nbins"),		CTL(arenas_nbins)},
310	{NAME("nhbins"),	CTL(arenas_nhbins)},
311	{NAME("bin"),		CHILD(indexed, arenas_bin)},
312	{NAME("nlextents"),	CTL(arenas_nlextents)},
313	{NAME("lextent"),	CHILD(indexed, arenas_lextent)},
314	{NAME("create"),	CTL(arenas_create)}
315};
316
317static const ctl_named_node_t	prof_node[] = {
318	{NAME("thread_active_init"), CTL(prof_thread_active_init)},
319	{NAME("active"),	CTL(prof_active)},
320	{NAME("dump"),		CTL(prof_dump)},
321	{NAME("gdump"),		CTL(prof_gdump)},
322	{NAME("reset"),		CTL(prof_reset)},
323	{NAME("interval"),	CTL(prof_interval)},
324	{NAME("lg_sample"),	CTL(lg_prof_sample)}
325};
326
327static const ctl_named_node_t stats_arenas_i_small_node[] = {
328	{NAME("allocated"),	CTL(stats_arenas_i_small_allocated)},
329	{NAME("nmalloc"),	CTL(stats_arenas_i_small_nmalloc)},
330	{NAME("ndalloc"),	CTL(stats_arenas_i_small_ndalloc)},
331	{NAME("nrequests"),	CTL(stats_arenas_i_small_nrequests)}
332};
333
334static const ctl_named_node_t stats_arenas_i_large_node[] = {
335	{NAME("allocated"),	CTL(stats_arenas_i_large_allocated)},
336	{NAME("nmalloc"),	CTL(stats_arenas_i_large_nmalloc)},
337	{NAME("ndalloc"),	CTL(stats_arenas_i_large_ndalloc)},
338	{NAME("nrequests"),	CTL(stats_arenas_i_large_nrequests)}
339};
340
341static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
342	{NAME("nmalloc"),	CTL(stats_arenas_i_bins_j_nmalloc)},
343	{NAME("ndalloc"),	CTL(stats_arenas_i_bins_j_ndalloc)},
344	{NAME("nrequests"),	CTL(stats_arenas_i_bins_j_nrequests)},
345	{NAME("curregs"),	CTL(stats_arenas_i_bins_j_curregs)},
346	{NAME("nfills"),	CTL(stats_arenas_i_bins_j_nfills)},
347	{NAME("nflushes"),	CTL(stats_arenas_i_bins_j_nflushes)},
348	{NAME("nslabs"),	CTL(stats_arenas_i_bins_j_nslabs)},
349	{NAME("nreslabs"),	CTL(stats_arenas_i_bins_j_nreslabs)},
350	{NAME("curslabs"),	CTL(stats_arenas_i_bins_j_curslabs)}
351};
352static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
353	{NAME(""),		CHILD(named, stats_arenas_i_bins_j)}
354};
355
356static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
357	{INDEX(stats_arenas_i_bins_j)}
358};
359
360static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
361	{NAME("nmalloc"),	CTL(stats_arenas_i_lextents_j_nmalloc)},
362	{NAME("ndalloc"),	CTL(stats_arenas_i_lextents_j_ndalloc)},
363	{NAME("nrequests"),	CTL(stats_arenas_i_lextents_j_nrequests)},
364	{NAME("curlextents"),	CTL(stats_arenas_i_lextents_j_curlextents)}
365};
366static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
367	{NAME(""),		CHILD(named, stats_arenas_i_lextents_j)}
368};
369
370static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
371	{INDEX(stats_arenas_i_lextents_j)}
372};
373
374static const ctl_named_node_t stats_arenas_i_node[] = {
375	{NAME("nthreads"),	CTL(stats_arenas_i_nthreads)},
376	{NAME("dss"),		CTL(stats_arenas_i_dss)},
377	{NAME("decay_time"),	CTL(stats_arenas_i_decay_time)},
378	{NAME("pactive"),	CTL(stats_arenas_i_pactive)},
379	{NAME("pdirty"),	CTL(stats_arenas_i_pdirty)},
380	{NAME("mapped"),	CTL(stats_arenas_i_mapped)},
381	{NAME("retained"),	CTL(stats_arenas_i_retained)},
382	{NAME("npurge"),	CTL(stats_arenas_i_npurge)},
383	{NAME("nmadvise"),	CTL(stats_arenas_i_nmadvise)},
384	{NAME("purged"),	CTL(stats_arenas_i_purged)},
385	{NAME("base"),		CTL(stats_arenas_i_base)},
386	{NAME("internal"),	CTL(stats_arenas_i_internal)},
387	{NAME("tcache_bytes"),	CTL(stats_arenas_i_tcache_bytes)},
388	{NAME("resident"),	CTL(stats_arenas_i_resident)},
389	{NAME("small"),		CHILD(named, stats_arenas_i_small)},
390	{NAME("large"),		CHILD(named, stats_arenas_i_large)},
391	{NAME("bins"),		CHILD(indexed, stats_arenas_i_bins)},
392	{NAME("lextents"),	CHILD(indexed, stats_arenas_i_lextents)}
393};
394static const ctl_named_node_t super_stats_arenas_i_node[] = {
395	{NAME(""),		CHILD(named, stats_arenas_i)}
396};
397
398static const ctl_indexed_node_t stats_arenas_node[] = {
399	{INDEX(stats_arenas_i)}
400};
401
402static const ctl_named_node_t stats_node[] = {
403	{NAME("allocated"),	CTL(stats_allocated)},
404	{NAME("active"),	CTL(stats_active)},
405	{NAME("metadata"),	CTL(stats_metadata)},
406	{NAME("resident"),	CTL(stats_resident)},
407	{NAME("mapped"),	CTL(stats_mapped)},
408	{NAME("retained"),	CTL(stats_retained)},
409	{NAME("arenas"),	CHILD(indexed, stats_arenas)}
410};
411
412static const ctl_named_node_t	root_node[] = {
413	{NAME("version"),	CTL(version)},
414	{NAME("epoch"),		CTL(epoch)},
415	{NAME("thread"),	CHILD(named, thread)},
416	{NAME("config"),	CHILD(named, config)},
417	{NAME("opt"),		CHILD(named, opt)},
418	{NAME("tcache"),	CHILD(named, tcache)},
419	{NAME("arena"),		CHILD(indexed, arena)},
420	{NAME("arenas"),	CHILD(named, arenas)},
421	{NAME("prof"),		CHILD(named, prof)},
422	{NAME("stats"),		CHILD(named, stats)}
423};
424static const ctl_named_node_t super_root_node[] = {
425	{NAME(""),		CHILD(named, root)}
426};
427
428#undef NAME
429#undef CHILD
430#undef CTL
431#undef INDEX
432
433/******************************************************************************/
434
/*
 * Map a logical arena index (possibly one of the MALLCTL_ARENAS_* pseudo
 * indices) to an offset into ctl_arenas->arenas.  Offsets 0 and 1 are
 * reserved for the merged ("all") and destroyed summaries; real arena i
 * lives at offset i + 2.
 *
 * compat:   treat index narenas as an alias for MALLCTL_ARENAS_ALL
 *           (deprecated behavior).
 * validate: return UINT_MAX for out-of-range indices instead of asserting.
 */
static unsigned
arenas_i2a_impl(size_t i, bool compat, bool validate)
{
	unsigned a;

	switch (i) {
	case MALLCTL_ARENAS_ALL:
		a = 0;
		break;
	case MALLCTL_ARENAS_DESTROYED:
		a = 1;
		break;
	default:
		if (compat && i == ctl_arenas->narenas) {
			/*
			 * Provide deprecated backward compatibility for
			 * accessing the merged stats at index narenas rather
			 * than via MALLCTL_ARENAS_ALL.  This is scheduled for
			 * removal in 6.0.0.
			 */
			a = 0;
		} else if (validate && i >= ctl_arenas->narenas)
			a = UINT_MAX;
		else {
			/*
			 * This function should never be called for an index
			 * more than one past the range of indices that have
			 * initialized ctl data.
			 */
			assert(i < ctl_arenas->narenas || (!validate && i ==
			    ctl_arenas->narenas));
			a = (unsigned)i + 2;
		}
		break;
	}

	return (a);
}
473
/*
 * Index-to-offset mapping with compat aliasing and no range validation; for
 * indices already known to be valid.
 */
static unsigned
arenas_i2a(size_t i)
{
	return (arenas_i2a_impl(i, true, false));
}
479
/*
 * Return the ctl_arena_t for logical index i, lazily allocating it (when
 * init is true) from the base allocator.  When stats are enabled, the
 * ctl_arena_t and its ctl_arena_stats_t are carved out of one container
 * allocation so a single base_alloc() covers both.
 */
static ctl_arena_t *
arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init)
{
	ctl_arena_t *ret;

	/* Lazy initialization is incompatible with compat index aliasing. */
	assert(!compat || !init);

	ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
	if (init && ret == NULL) {
		if (config_stats) {
			struct container_s {
				ctl_arena_t		ctl_arena;
				ctl_arena_stats_t	astats;
			};
			struct container_s *cont =
			    (struct container_s *)base_alloc(tsdn, b0get(),
			    sizeof(struct container_s), QUANTUM);
			if (cont == NULL) {
				return NULL;
			}
			ret = &cont->ctl_arena;
			ret->astats = &cont->astats;
		} else {
			ret = (ctl_arena_t *)base_alloc(tsdn, b0get(),
			    sizeof(ctl_arena_t), QUANTUM);
			if (ret == NULL) {
				return NULL;
			}
		}
		ret->arena_ind = (unsigned)i;
		ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
	}

	assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
	return (ret);
}
516
/*
 * Look up an already-initialized ctl_arena_t by index; never allocates and
 * asserts that the slot exists.
 */
static ctl_arena_t *
arenas_i(size_t i)
{
	ctl_arena_t *ret = arenas_i_impl(TSDN_NULL, i, true, false);
	assert(ret != NULL);
	return (ret);
}
524
/*
 * Reset all cached arena fields (and, if stats are enabled, all accumulated
 * stats) to their neutral values prior to a fresh merge.
 */
static void
ctl_arena_clear(ctl_arena_t *ctl_arena)
{
	ctl_arena->nthreads = 0;
	/* dss_prec_limit acts as the "unset" dss precedence. */
	ctl_arena->dss = dss_prec_names[dss_prec_limit];
	ctl_arena->decay_time = -1;
	ctl_arena->pactive = 0;
	ctl_arena->pdirty = 0;
	if (config_stats) {
		memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
		ctl_arena->astats->allocated_small = 0;
		ctl_arena->astats->nmalloc_small = 0;
		ctl_arena->astats->ndalloc_small = 0;
		ctl_arena->astats->nrequests_small = 0;
		memset(ctl_arena->astats->bstats, 0, NBINS *
		    sizeof(malloc_bin_stats_t));
		memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) *
		    sizeof(malloc_large_stats_t));
	}
}
545
/*
 * Pull current stats out of arena into ctl_arena.  With stats enabled, also
 * derive the small-size-class aggregates by summing over the per-bin stats;
 * otherwise only the basic (non-stats) fields are merged.
 */
static void
ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena)
{
	unsigned i;

	if (config_stats) {
		arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->decay_time,
		    &ctl_arena->pactive, &ctl_arena->pdirty,
		    &ctl_arena->astats->astats, ctl_arena->astats->bstats,
		    ctl_arena->astats->lstats);

		for (i = 0; i < NBINS; i++) {
			ctl_arena->astats->allocated_small +=
			    ctl_arena->astats->bstats[i].curregs *
			    index2size(i);
			ctl_arena->astats->nmalloc_small +=
			    ctl_arena->astats->bstats[i].nmalloc;
			ctl_arena->astats->ndalloc_small +=
			    ctl_arena->astats->bstats[i].ndalloc;
			ctl_arena->astats->nrequests_small +=
			    ctl_arena->astats->bstats[i].nrequests;
		}
	} else {
		arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->decay_time,
		    &ctl_arena->pactive, &ctl_arena->pdirty);
	}
}
575
/*
 * Accumulate ctl_arena's stats into the summary arena ctl_sdarena (either
 * the merged-all or the destroyed summary).  When destroyed is true the
 * arena has been torn down, so every "current" gauge (nthreads, pactive,
 * curregs, curslabs, curlextents, etc.) must already be zero and is asserted
 * rather than merged; only the monotonic counters carry over.
 */
static void
ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
    bool destroyed)
{
	unsigned i;

	if (!destroyed) {
		ctl_sdarena->nthreads += ctl_arena->nthreads;
		ctl_sdarena->pactive += ctl_arena->pactive;
		ctl_sdarena->pdirty += ctl_arena->pdirty;
	} else {
		assert(ctl_arena->nthreads == 0);
		assert(ctl_arena->pactive == 0);
		assert(ctl_arena->pdirty == 0);
	}

	if (config_stats) {
		ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
		ctl_arena_stats_t *astats = ctl_arena->astats;

		if (!destroyed) {
			sdstats->astats.mapped += astats->astats.mapped;
			sdstats->astats.retained += astats->astats.retained;
		}
		sdstats->astats.npurge += astats->astats.npurge;
		sdstats->astats.nmadvise += astats->astats.nmadvise;
		sdstats->astats.purged += astats->astats.purged;

		if (!destroyed) {
			sdstats->astats.base += astats->astats.base;
			sdstats->astats.internal += astats->astats.internal;
			sdstats->astats.resident += astats->astats.resident;
		} else
			assert(astats->astats.internal == 0);

		if (!destroyed)
			sdstats->allocated_small += astats->allocated_small;
		else
			assert(astats->allocated_small == 0);
		sdstats->nmalloc_small += astats->nmalloc_small;
		sdstats->ndalloc_small += astats->ndalloc_small;
		sdstats->nrequests_small += astats->nrequests_small;

		if (!destroyed) {
			sdstats->astats.allocated_large +=
			    astats->astats.allocated_large;
		} else
			assert(astats->astats.allocated_large == 0);
		sdstats->astats.nmalloc_large += astats->astats.nmalloc_large;
		sdstats->astats.ndalloc_large += astats->astats.ndalloc_large;
		sdstats->astats.nrequests_large +=
		    astats->astats.nrequests_large;

		if (config_tcache) {
			sdstats->astats.tcache_bytes +=
			    astats->astats.tcache_bytes;
		}

		/* Per-bin (small size class) stats. */
		for (i = 0; i < NBINS; i++) {
			sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
			sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
			sdstats->bstats[i].nrequests +=
			    astats->bstats[i].nrequests;
			if (!destroyed) {
				sdstats->bstats[i].curregs +=
				    astats->bstats[i].curregs;
			} else
				assert(astats->bstats[i].curregs == 0);
			if (config_tcache) {
				sdstats->bstats[i].nfills +=
				    astats->bstats[i].nfills;
				sdstats->bstats[i].nflushes +=
				    astats->bstats[i].nflushes;
			}
			sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
			sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
			if (!destroyed) {
				sdstats->bstats[i].curslabs +=
				    astats->bstats[i].curslabs;
			} else
				assert(astats->bstats[i].curslabs == 0);
		}

		/* Per-large-extent size class stats. */
		for (i = 0; i < NSIZES - NBINS; i++) {
			sdstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
			sdstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
			sdstats->lstats[i].nrequests +=
			    astats->lstats[i].nrequests;
			if (!destroyed) {
				sdstats->lstats[i].curlextents +=
				    astats->lstats[i].curlextents;
			} else
				assert(astats->lstats[i].curlextents == 0);
		}
	}
}
672
/*
 * Refresh the cached ctl stats for arena index i from arena, then fold them
 * into the summary arena ctl_sdarena.
 */
static void
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
    unsigned i, bool destroyed)
{
	ctl_arena_t *ctl_arena = arenas_i(i);

	ctl_arena_clear(ctl_arena);
	ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
	/* Merge into sum stats as well. */
	ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
}
684
/*
 * Create a new arena, preferring to recycle the index of a previously
 * destroyed arena before extending narenas.  Returns the new arena index, or
 * UINT_MAX on allocation or initialization failure.
 */
static unsigned
ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks)
{
	unsigned arena_ind;
	ctl_arena_t *ctl_arena;

	if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
	    NULL) {
		/* Reuse the index of the most recently destroyed arena. */
		ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
		arena_ind = ctl_arena->arena_ind;
	} else
		arena_ind = ctl_arenas->narenas;

	/* Trigger stats allocation. */
	if (arenas_i_impl(tsdn, arena_ind, false, true) == NULL)
		return (UINT_MAX);

	/* Initialize new arena. */
	if (arena_init(tsdn, arena_ind, extent_hooks) == NULL)
		return (UINT_MAX);

	if (arena_ind == ctl_arenas->narenas)
		ctl_arenas->narenas++;

	return (arena_ind);
}
711
/*
 * Recompute all cached per-arena and global stats, and advance the epoch.
 * Arena pointers are snapshotted first so the refresh loop operates on a
 * consistent set.
 */
static void
ctl_refresh(tsdn_t *tsdn)
{
	unsigned i;
	ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);

	/*
	 * Clear sum stats, since they will be merged into by
	 * ctl_arena_refresh().
	 */
	ctl_arena_clear(ctl_sarena);

	for (i = 0; i < ctl_arenas->narenas; i++) {
		tarenas[i] = arena_get(tsdn, i, false);
	}

	for (i = 0; i < ctl_arenas->narenas; i++) {
		ctl_arena_t *ctl_arena = arenas_i(i);
		bool initialized = (tarenas[i] != NULL);

		ctl_arena->initialized = initialized;
		if (initialized) {
			ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
			    false);
		}
	}

	if (config_stats) {
		/* Derive the global stats from the merged summary. */
		ctl_stats->allocated = ctl_sarena->astats->allocated_small +
		    ctl_sarena->astats->astats.allocated_large;
		ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
		ctl_stats->metadata = ctl_sarena->astats->astats.base +
		    ctl_sarena->astats->astats.internal;
		ctl_stats->resident = ctl_sarena->astats->astats.resident;
		ctl_stats->mapped = ctl_sarena->astats->astats.mapped;
		ctl_stats->retained = ctl_sarena->astats->astats.retained;
	}
	ctl_arenas->epoch++;
}
752
/*
 * One-time (idempotent, ctl_mtx-serialized) initialization of the ctl
 * machinery: allocate ctl_arenas/ctl_stats, the summary and destroyed
 * pseudo-arenas, and a ctl_arena_t per existing arena, then take an initial
 * stats snapshot.  Returns true on allocation failure.
 */
static bool
ctl_init(tsdn_t *tsdn)
{
	bool ret;

	malloc_mutex_lock(tsdn, &ctl_mtx);
	if (!ctl_initialized) {
		ctl_arena_t *ctl_sarena, *ctl_darena;
		unsigned i;

		/*
		 * Allocate demand-zeroed space for pointers to the full
		 * range of supported arena indices.
		 */
		if (ctl_arenas == NULL) {
			ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
			    b0get(), sizeof(ctl_arenas_t), QUANTUM);
			if (ctl_arenas == NULL) {
				ret = true;
				goto label_return;
			}
		}

		if (config_stats && ctl_stats == NULL) {
			ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
			    sizeof(ctl_stats_t), QUANTUM);
			if (ctl_stats == NULL) {
				ret = true;
				goto label_return;
			}
		}

		/*
		 * Allocate space for the current full range of arenas
		 * here rather than doing it lazily elsewhere, in order
		 * to limit when OOM-caused errors can occur.
		 */
		if ((ctl_sarena = arenas_i_impl(tsdn, MALLCTL_ARENAS_ALL, false,
		    true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_sarena->initialized = true;

		if ((ctl_darena = arenas_i_impl(tsdn, MALLCTL_ARENAS_DESTROYED,
		    false, true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_arena_clear(ctl_darena);
		/*
		 * Don't toggle ctl_darena to initialized until an arena is
		 * actually destroyed, so that arena.<i>.initialized can be used
		 * to query whether the stats are relevant.
		 */

		ctl_arenas->narenas = narenas_total_get();
		for (i = 0; i < ctl_arenas->narenas; i++) {
			if (arenas_i_impl(tsdn, i, false, true) == NULL) {
				ret = true;
				goto label_return;
			}
		}

		ql_new(&ctl_arenas->destroyed);
		ctl_refresh(tsdn);

		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return (ret);
}
828
/*
 * Translate a dotted mallctl name into a MIB, descending the node tree one
 * element at a time.  On entry *depthp is the capacity of mibp (and nodesp,
 * if non-NULL); on success it is set to the depth actually used.  nodesp, if
 * non-NULL, receives the node visited at each level.  Returns 0 on success,
 * ENOENT if the name does not resolve to a terminal node.
 */
static int
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp)
{
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
	const ctl_named_node_t *node;

	elm = name;
	/* Equivalent to strchrnul(). */
	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	if (elen == 0) {
		ret = ENOENT;
		goto label_return;
	}
	node = super_root_node;
	for (i = 0; i < *depthp; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			const ctl_named_node_t *pnode = node;

			/* Children are named. */
			for (j = 0; j < node->nchildren; j++) {
				const ctl_named_node_t *child =
				    ctl_named_children(node, j);
				if (strlen(child->name) == elen &&
				    strncmp(elm, child->name, elen) == 0) {
					node = child;
					if (nodesp != NULL)
						nodesp[i] =
						    (const ctl_node_t *)node;
					mibp[i] = j;
					break;
				}
			}
			/* node unchanged means no child name matched. */
			if (node == pnode) {
				ret = ENOENT;
				goto label_return;
			}
		} else {
			uintmax_t index;
			const ctl_indexed_node_t *inode;

			/* Children are indexed. */
			index = malloc_strtoumax(elm, NULL, 10);
			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
				ret = ENOENT;
				goto label_return;
			}

			inode = ctl_indexed_node(node->children);
			node = inode->index(tsdn, mibp, *depthp, (size_t)index);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}

			if (nodesp != NULL)
				nodesp[i] = (const ctl_node_t *)node;
			mibp[i] = (size_t)index;
		}

		if (node->ctl != NULL) {
			/* Terminal node. */
			if (*dot != '\0') {
				/*
				 * The name contains more elements than are
				 * in this path through the tree.
				 */
				ret = ENOENT;
				goto label_return;
			}
			/* Complete lookup successful. */
			*depthp = i + 1;
			break;
		}

		/* Update elm. */
		if (*dot == '\0') {
			/* No more elements. */
			ret = ENOENT;
			goto label_return;
		}
		elm = &dot[1];
		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
		    strchr(elm, '\0');
		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	}

	ret = 0;
label_return:
	return (ret);
}
925
926int
927ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
928    void *newp, size_t newlen)
929{
930	int ret;
931	size_t depth;
932	ctl_node_t const *nodes[CTL_MAX_DEPTH];
933	size_t mib[CTL_MAX_DEPTH];
934	const ctl_named_node_t *node;
935
936	if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
937		ret = EAGAIN;
938		goto label_return;
939	}
940
941	depth = CTL_MAX_DEPTH;
942	ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
943	if (ret != 0)
944		goto label_return;
945
946	node = ctl_named_node(nodes[depth-1]);
947	if (node != NULL && node->ctl)
948		ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
949	else {
950		/* The name refers to a partial path through the ctl tree. */
951		ret = ENOENT;
952	}
953
954label_return:
955	return(ret);
956}
957
958int
959ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
960{
961	int ret;
962
963	if (!ctl_initialized && ctl_init(tsdn)) {
964		ret = EAGAIN;
965		goto label_return;
966	}
967
968	ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
969label_return:
970	return(ret);
971}
972
/*
 * Perform a mallctl operation given a pre-translated MIB: walk the node tree
 * using the MIB components, then invoke the terminal node's handler.
 * Returns EAGAIN if initialization fails, ENOENT on an invalid or partial
 * MIB, or the handler's result.
 */
int
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	const ctl_named_node_t *node;
	size_t i;

	if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
		ret = EAGAIN;
		goto label_return;
	}

	/* Iterate down the tree. */
	node = super_root_node;
	for (i = 0; i < miblen; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			/* Children are named. */
			if (node->nchildren <= mib[i]) {
				ret = ENOENT;
				goto label_return;
			}
			node = ctl_named_children(node, mib[i]);
		} else {
			const ctl_indexed_node_t *inode;

			/* Indexed element. */
			inode = ctl_indexed_node(node->children);
			node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}
		}
	}

	/* Call the ctl function. */
	if (node && node->ctl)
		ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	else {
		/* Partial MIB. */
		ret = ENOENT;
	}

label_return:
	return(ret);
}
1022
1023bool
1024ctl_boot(void)
1025{
1026	if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL))
1027		return (true);
1028
1029	ctl_initialized = false;
1030
1031	return (false);
1032}
1033
/* Acquire ctl_mtx before fork so the child does not inherit it locked. */
void
ctl_prefork(tsdn_t *tsdn)
{
	malloc_mutex_prefork(tsdn, &ctl_mtx);
}

/* Release ctl_mtx in the parent after fork. */
void
ctl_postfork_parent(tsdn_t *tsdn)
{
	malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}

/* Release/reinitialize ctl_mtx in the child after fork. */
void
ctl_postfork_child(tsdn_t *tsdn)
{
	malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
1051
1052/******************************************************************************/
1053/* *_ctl() functions. */
1054
/* Fail with EPERM if the caller attempted a write (newp/newlen set). */
#define	READONLY()	do {						\
	if (newp != NULL || newlen != 0) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

/* Fail with EPERM if the caller attempted a read (oldp/oldlenp set). */
#define	WRITEONLY()	do {						\
	if (oldp != NULL || oldlenp != NULL) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

/* Fail with EPERM if the caller requested both a read and a write. */
#define	READ_XOR_WRITE()	do {					\
	if ((oldp != NULL && oldlenp != NULL) && (newp != NULL ||	\
	    newlen != 0)) {						\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

/*
 * Copy v out to *oldp as type t.  On a size mismatch, copy as many bytes
 * as fit and fail with EINVAL.
 */
#define	READ(v, t)	do {						\
	if (oldp != NULL && oldlenp != NULL) {				\
		if (*oldlenp != sizeof(t)) {				\
			size_t	copylen = (sizeof(t) <= *oldlenp)	\
			    ? sizeof(t) : *oldlenp;			\
			memcpy(oldp, (void *)&(v), copylen);		\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		*(t *)oldp = (v);					\
	}								\
} while (0)

/* Copy *newp into v as type t; newlen must equal sizeof(t) exactly. */
#define	WRITE(v, t)	do {						\
	if (newp != NULL) {						\
		if (newlen != sizeof(t)) {				\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		(v) = *(t *)newp;					\
	}								\
} while (0)

/* Narrow mib[i] to unsigned, failing with EFAULT if it does not fit. */
#define	MIB_UNSIGNED(v, i) do {						\
	if (mib[i] > UINT_MAX) {					\
		ret = EFAULT;						\
		goto label_return;					\
	}								\
	v = (unsigned)mib[i];						\
} while (0)
1107
1108/*
1109 * There's a lot of code duplication in the following macros due to limitations
1110 * in how nested cpp macros are expanded.
1111 */
/*
 * Read-only node generator: compiled in only when condition c holds, and
 * holds ctl_mtx around the read of v only when l is true.
 */
#define	CTL_RO_CLGEN(c, l, n, v, t)					\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen)				\
{									\
	int ret;							\
	t oldval;							\
									\
	if (!(c))							\
		return (ENOENT);					\
	if (l)								\
		malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);		\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	if (l)								\
		malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);		\
	return (ret);							\
}

/*
 * Read-only node generator: returns ENOENT unless condition c holds;
 * always holds ctl_mtx around the read of v.
 */
#define	CTL_RO_CGEN(c, n, v, t)						\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen)				\
{									\
	int ret;							\
	t oldval;							\
									\
	if (!(c))							\
		return (ENOENT);					\
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
	return (ret);							\
}

/* Read-only node generator: unconditional; ctl_mtx held around the read. */
#define	CTL_RO_GEN(n, v, t)						\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen)				\
{									\
	int ret;							\
	t oldval;							\
									\
	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
	return (ret);							\
}
1174
1175/*
1176 * ctl_mtx is not acquired, under the assumption that no pertinent data will
1177 * mutate during the call.
1178 */
/* Lock-free read-only node generator, gated on condition c. */
#define	CTL_RO_NL_CGEN(c, n, v, t)					\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen)				\
{									\
	int ret;							\
	t oldval;							\
									\
	if (!(c))							\
		return (ENOENT);					\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}

/* Lock-free read-only node generator, unconditional. */
#define	CTL_RO_NL_GEN(n, v, t)						\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen)				\
{									\
	int ret;							\
	t oldval;							\
									\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}

/*
 * Lock-free read-only node generator for per-thread data: m is a tsd
 * accessor invoked as m(tsd).
 */
#define	CTL_TSD_RO_NL_CGEN(c, n, m, t)					\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen)				\
{									\
	int ret;							\
	t oldval;							\
									\
	if (!(c))							\
		return (ENOENT);					\
	READONLY();							\
	oldval = (m(tsd));						\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}

/* Read-only node generator for compile-time config_* constants. */
#define	CTL_RO_CONFIG_GEN(n, t)						\
static int								\
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen)				\
{									\
	int ret;							\
	t oldval;							\
									\
	READONLY();							\
	oldval = n;							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}
1250
1251/******************************************************************************/
1252
/* version: the compile-time jemalloc version string (read-only). */
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
1254
/*
 * epoch: writing any uint64_t (the value itself is ignored) refreshes the
 * cached stats snapshot; reading returns the current epoch counter.
 */
static int
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	UNUSED uint64_t newval;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	/* WRITE only validates newlen; newval is discarded. */
	WRITE(newval, uint64_t);
	if (newp != NULL)
		ctl_refresh(tsd_tsdn(tsd));
	READ(ctl_arenas->epoch, uint64_t);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return (ret);
}
1273
1274/******************************************************************************/
1275
/* config.*: read-only compile-time configuration flags. */
CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
CTL_RO_CONFIG_GEN(config_debug, bool)
CTL_RO_CONFIG_GEN(config_fill, bool)
CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
CTL_RO_CONFIG_GEN(config_munmap, bool)
CTL_RO_CONFIG_GEN(config_prof, bool)
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
CTL_RO_CONFIG_GEN(config_stats, bool)
CTL_RO_CONFIG_GEN(config_tcache, bool)
CTL_RO_CONFIG_GEN(config_tls, bool)
CTL_RO_CONFIG_GEN(config_utrace, bool)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)
1290
1291/******************************************************************************/
1292
/* opt.*: read-only views of the opt_* option variables (no locking). */
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
    opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
1315
1316/******************************************************************************/
1317
/*
 * thread.arena: read the calling thread's arena index, or write a new index
 * to migrate the thread (and its tcache, if any) to another arena.
 */
static int
thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	arena_t *oldarena;
	unsigned newind, oldind;

	oldarena = arena_choose(tsd, NULL);
	if (oldarena == NULL)
		return (EAGAIN);

	/* Default the written index to the current one (no-op write). */
	newind = oldind = arena_ind_get(oldarena);
	WRITE(newind, unsigned);
	READ(oldind, unsigned);
	if (newind != oldind) {
		arena_t *newarena;

		if (newind >= narenas_total_get()) {
			/* New arena index is out of range. */
			ret = EFAULT;
			goto label_return;
		}

		/* Initialize arena if necessary. */
		newarena = arena_get(tsd_tsdn(tsd), newind, true);
		if (newarena == NULL) {
			ret = EAGAIN;
			goto label_return;
		}
		/* Set new arena/tcache associations. */
		arena_migrate(tsd, oldind, newind);
		if (config_tcache) {
			tcache_t *tcache = tsd_tcache_get(tsd);
			if (tcache != NULL) {
				tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
				    oldarena, newarena);
			}
		}
	}

	ret = 0;
label_return:
	return (ret);
}
1363
/* thread.*: per-thread allocation counters, read directly from tsd. */
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
    uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
    uint64_t *)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
    uint64_t)
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
    tsd_thread_deallocatedp_get, uint64_t *)
1372
1373static int
1374thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
1375    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
1376{
1377	int ret;
1378	bool oldval;
1379
1380	if (!config_tcache)
1381		return (ENOENT);
1382
1383	oldval = tcache_enabled_get();
1384	if (newp != NULL) {
1385		if (newlen != sizeof(bool)) {
1386			ret = EINVAL;
1387			goto label_return;
1388		}
1389		tcache_enabled_set(*(bool *)newp);
1390	}
1391	READ(oldval, bool);
1392
1393	ret = 0;
1394label_return:
1395	return (ret);
1396}
1397
/*
 * thread.tcache.flush: flush the calling thread's tcache.  Pure trigger:
 * both reads and writes are rejected (READONLY + WRITEONLY).
 */
static int
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;

	if (!config_tcache)
		return (ENOENT);

	READONLY();
	WRITEONLY();

	tcache_flush();

	ret = 0;
label_return:
	return (ret);
}
1416
/*
 * thread.prof.name: read or set the calling thread's profiling name.  Read
 * and write are mutually exclusive within a single call.
 */
static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;

	if (!config_prof)
		return (ENOENT);

	READ_XOR_WRITE();

	if (newp != NULL) {
		if (newlen != sizeof(const char *)) {
			ret = EINVAL;
			goto label_return;
		}

		/* prof_thread_name_set() returns an errno-style code. */
		if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
		    0)
			goto label_return;
	} else {
		const char *oldname = prof_thread_name_get(tsd);
		READ(oldname, const char *);
	}

	ret = 0;
label_return:
	return (ret);
}
1446
/*
 * thread.prof.active: read or toggle profiling sampling for the calling
 * thread.  Reads report the value prior to any write in the same call.
 */
static int
thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (!config_prof)
		return (ENOENT);

	oldval = prof_thread_active_get(tsd);
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		if (prof_thread_active_set(tsd, *(bool *)newp)) {
			ret = EAGAIN;
			goto label_return;
		}
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return (ret);
}
1474
1475/******************************************************************************/
1476
/*
 * tcache.create: create an explicit tcache and return its index.  ctl_mtx
 * serializes creation.  Read-only from the caller's perspective (the index
 * is the "old" value).
 */
static int
tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	unsigned tcache_ind;

	if (!config_tcache)
		return (ENOENT);

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	READONLY();
	if (tcaches_create(tsd, &tcache_ind)) {
		ret = EFAULT;
		goto label_return;
	}
	READ(tcache_ind, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return (ret);
}
1500
1501static int
1502tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1503    size_t *oldlenp, void *newp, size_t newlen)
1504{
1505	int ret;
1506	unsigned tcache_ind;
1507
1508	if (!config_tcache)
1509		return (ENOENT);
1510
1511	WRITEONLY();
1512	tcache_ind = UINT_MAX;
1513	WRITE(tcache_ind, unsigned);
1514	if (tcache_ind == UINT_MAX) {
1515		ret = EFAULT;
1516		goto label_return;
1517	}
1518	tcaches_flush(tsd, tcache_ind);
1519
1520	ret = 0;
1521label_return:
1522	return (ret);
1523}
1524
1525static int
1526tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1527    size_t *oldlenp, void *newp, size_t newlen)
1528{
1529	int ret;
1530	unsigned tcache_ind;
1531
1532	if (!config_tcache)
1533		return (ENOENT);
1534
1535	WRITEONLY();
1536	tcache_ind = UINT_MAX;
1537	WRITE(tcache_ind, unsigned);
1538	if (tcache_ind == UINT_MAX) {
1539		ret = EFAULT;
1540		goto label_return;
1541	}
1542	tcaches_destroy(tsd, tcache_ind);
1543
1544	ret = 0;
1545label_return:
1546	return (ret);
1547}
1548
1549/******************************************************************************/
1550
/*
 * arena.i.initialized: read whether arena mib[1] has been initialized.
 * ctl_mtx is held only around the ctl_arenas access.
 */
static int
arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);
	unsigned arena_ind;
	bool initialized;

	READONLY();
	MIB_UNSIGNED(arena_ind, 1);

	malloc_mutex_lock(tsdn, &ctl_mtx);
	initialized = arenas_i(arena_ind)->initialized;
	malloc_mutex_unlock(tsdn, &ctl_mtx);

	READ(initialized, bool);

	ret = 0;
label_return:
	return (ret);
}
1573
/*
 * Purge pages for one arena, or for every arena when arena_ind is
 * MALLCTL_ARENAS_ALL (or the deprecated narenas index).  all selects a full
 * purge rather than a decay-driven one.  ctl_mtx is held only long enough
 * to snapshot the arena pointers.
 */
static void
arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
{
	malloc_mutex_lock(tsdn, &ctl_mtx);
	{
		unsigned narenas = ctl_arenas->narenas;

		/*
		 * Access via index narenas is deprecated, and scheduled for
		 * removal in 6.0.0.
		 */
		if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
			unsigned i;
			VARIABLE_ARRAY(arena_t *, tarenas, narenas);

			for (i = 0; i < narenas; i++)
				tarenas[i] = arena_get(tsdn, i, false);

			/*
			 * No further need to hold ctl_mtx, since narenas and
			 * tarenas contain everything needed below.
			 */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			for (i = 0; i < narenas; i++) {
				if (tarenas[i] != NULL)
					arena_purge(tsdn, tarenas[i], all);
			}
		} else {
			arena_t *tarena;

			assert(arena_ind < narenas);

			tarena = arena_get(tsdn, arena_ind, false);

			/* No further need to hold ctl_mtx. */
			malloc_mutex_unlock(tsdn, &ctl_mtx);

			if (tarena != NULL)
				arena_purge(tsdn, tarena, all);
		}
	}
}
1617
1618static int
1619arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1620    size_t *oldlenp, void *newp, size_t newlen)
1621{
1622	int ret;
1623	unsigned arena_ind;
1624
1625	READONLY();
1626	WRITEONLY();
1627	MIB_UNSIGNED(arena_ind, 1);
1628	arena_i_purge(tsd_tsdn(tsd), arena_ind, true);
1629
1630	ret = 0;
1631label_return:
1632	return (ret);
1633}
1634
1635static int
1636arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1637    size_t *oldlenp, void *newp, size_t newlen)
1638{
1639	int ret;
1640	unsigned arena_ind;
1641
1642	READONLY();
1643	WRITEONLY();
1644	MIB_UNSIGNED(arena_ind, 1);
1645	arena_i_purge(tsd_tsdn(tsd), arena_ind, false);
1646
1647	ret = 0;
1648label_return:
1649	return (ret);
1650}
1651
/*
 * Shared validation for arena.i.{reset,destroy}: rejects reads/writes,
 * decodes the arena index from mib[1], refuses automatic arenas, and looks
 * up the arena.  On success *arena_ind and *arena are set.
 */
static int
arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
    arena_t **arena)
{
	int ret;

	READONLY();
	WRITEONLY();
	MIB_UNSIGNED(*arena_ind, 1);

	/* Only manually created arenas may be reset/destroyed. */
	if (*arena_ind < narenas_auto) {
		ret = EFAULT;
		goto label_return;
	}

	*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
	if (*arena == NULL) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return (ret);
}
1678
1679static int
1680arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1681    size_t *oldlenp, void *newp, size_t newlen)
1682{
1683	int ret;
1684	unsigned arena_ind;
1685	arena_t *arena;
1686
1687	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
1688	    newp, newlen, &arena_ind, &arena);
1689	if (ret != 0)
1690		return (ret);
1691
1692	arena_reset(tsd, arena);
1693
1694	return (ret);
1695}
1696
/*
 * arena.i.destroy: tear down a manually created arena that has no attached
 * threads.  Stats are merged into the DESTROYED pseudo-arena first, and the
 * index is queued for recycling by arenas.create.
 */
static int
arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	unsigned arena_ind;
	arena_t *arena;
	ctl_arena_t *ctl_darena, *ctl_arena;

	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
	    newp, newlen, &arena_ind, &arena);
	if (ret != 0)
		goto label_return;

	/* Refuse to destroy an arena that threads are still using. */
	if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
	    true) != 0) {
		ret = EFAULT;
		goto label_return;
	}

	/* Merge stats after resetting and purging arena. */
	arena_reset(tsd, arena);
	arena_purge(tsd_tsdn(tsd), arena, true);
	ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
	ctl_darena->initialized = true;
	ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
	/* Destroy arena. */
	arena_destroy(tsd, arena);
	ctl_arena = arenas_i(arena_ind);
	ctl_arena->initialized = false;
	/* Record arena index for later recycling via arenas.create. */
	ql_elm_new(ctl_arena, destroyed_link);
	ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);

	assert(ret == 0);
label_return:
	return (ret);
}
1735
/*
 * arena.i.dss: read or set the dss (sbrk) precedence, either for one arena
 * or, via MALLCTL_ARENAS_ALL (or the deprecated narenas index), for the
 * global default.  Written strings must match one of dss_prec_names[].
 */
static int
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	const char *dss = NULL;
	unsigned arena_ind;
	dss_prec_t dss_prec_old = dss_prec_limit;
	dss_prec_t dss_prec = dss_prec_limit;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	WRITE(dss, const char *);
	MIB_UNSIGNED(arena_ind, 1);
	if (dss != NULL) {
		int i;
		bool match = false;

		/* Map the written name to its dss_prec_t value. */
		for (i = 0; i < dss_prec_limit; i++) {
			if (strcmp(dss_prec_names[i], dss) == 0) {
				dss_prec = i;
				match = true;
				break;
			}
		}

		if (!match) {
			ret = EINVAL;
			goto label_return;
		}
	}

	/*
	 * Access via index narenas is deprecated, and scheduled for removal in
	 * 6.0.0.
	 */
	if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
	    ctl_arenas->narenas) {
		if (dss_prec != dss_prec_limit &&
		    extent_dss_prec_set(dss_prec)) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = extent_dss_prec_get();
	} else {
		arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
		if (arena == NULL || (dss_prec != dss_prec_limit &&
		    arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) {
			ret = EFAULT;
			goto label_return;
		}
		dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena);
	}

	/* Report the previous setting's name. */
	dss = dss_prec_names[dss_prec_old];
	READ(dss, const char *);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return (ret);
}
1797
1798static int
1799arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1800    size_t *oldlenp, void *newp, size_t newlen)
1801{
1802	int ret;
1803	unsigned arena_ind;
1804	arena_t *arena;
1805
1806	MIB_UNSIGNED(arena_ind, 1);
1807	arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
1808	if (arena == NULL) {
1809		ret = EFAULT;
1810		goto label_return;
1811	}
1812
1813	if (oldp != NULL && oldlenp != NULL) {
1814		size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena);
1815		READ(oldval, ssize_t);
1816	}
1817	if (newp != NULL) {
1818		if (newlen != sizeof(ssize_t)) {
1819			ret = EINVAL;
1820			goto label_return;
1821		}
1822		if (arena_decay_time_set(tsd_tsdn(tsd), arena,
1823		    *(ssize_t *)newp)) {
1824			ret = EFAULT;
1825			goto label_return;
1826		}
1827	}
1828
1829	ret = 0;
1830label_return:
1831	return (ret);
1832}
1833
/*
 * arena.i.extent_hooks: read the arena's extent hooks, or install new ones
 * (in which case the previous hooks are reported back).
 */
static int
arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	unsigned arena_ind;
	arena_t *arena;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	MIB_UNSIGNED(arena_ind, 1);
	if (arena_ind < narenas_total_get() && (arena =
	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
		if (newp != NULL) {
			/* Swap in the new hooks; report the old ones. */
			extent_hooks_t *old_extent_hooks;
			extent_hooks_t *new_extent_hooks
			    JEMALLOC_CC_SILENCE_INIT(NULL);
			WRITE(new_extent_hooks, extent_hooks_t *);
			old_extent_hooks = extent_hooks_set(arena,
			    new_extent_hooks);
			READ(old_extent_hooks, extent_hooks_t *);
		} else {
			extent_hooks_t *old_extent_hooks =
			    extent_hooks_get(arena);
			READ(old_extent_hooks, extent_hooks_t *);
		}
	} else {
		ret = EFAULT;
		goto label_return;
	}
	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return (ret);
}
1868
/*
 * Dynamic node lookup for arena.<i>.*: accepts the ALL/DESTROYED
 * pseudo-indices, and rejects indices beyond narenas.
 */
static const ctl_named_node_t *
arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
	const ctl_named_node_t *ret;

	malloc_mutex_lock(tsdn, &ctl_mtx);
	switch (i) {
	case MALLCTL_ARENAS_ALL:
	case MALLCTL_ARENAS_DESTROYED:
		break;
	default:
		/* i == narenas is the deprecated "all arenas" index. */
		if (i > ctl_arenas->narenas) {
			ret = NULL;
			goto label_return;
		}
		break;
	}

	ret = super_arena_i_node;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return (ret);
}
1892
1893/******************************************************************************/
1894
/* arenas.narenas: number of initialized arenas (read-only). */
static int
arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	unsigned narenas;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	READONLY();
	/*
	 * NOTE(review): dereferences oldlenp without a NULL check (READONLY
	 * only rejects writes) — confirm callers always supply oldlenp here.
	 */
	if (*oldlenp != sizeof(unsigned)) {
		ret = EINVAL;
		goto label_return;
	}
	narenas = ctl_arenas->narenas;
	READ(narenas, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return (ret);
}
1916
1917static int
1918arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1919    size_t *oldlenp, void *newp, size_t newlen)
1920{
1921	int ret;
1922
1923	if (oldp != NULL && oldlenp != NULL) {
1924		size_t oldval = arena_decay_time_default_get();
1925		READ(oldval, ssize_t);
1926	}
1927	if (newp != NULL) {
1928		if (newlen != sizeof(ssize_t)) {
1929			ret = EINVAL;
1930			goto label_return;
1931		}
1932		if (arena_decay_time_default_set(*(ssize_t *)newp)) {
1933			ret = EFAULT;
1934			goto label_return;
1935		}
1936	}
1937
1938	ret = 0;
1939label_return:
1940	return (ret);
1941}
1942
/* arenas.*: size-class and cache geometry readers. */
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
/* Per-bin geometry; mib[2] selects the bin. */
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t)
/* NOTE(review): accepts i == NBINS, one past the last bin — confirm intended. */
static const ctl_named_node_t *
arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
	if (i > NBINS)
		return (NULL);
	return (super_arenas_bin_i_node);
}

CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
CTL_RO_NL_GEN(arenas_lextent_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
/* NOTE(review): accepts i == NSIZES - NBINS, one past the end — confirm. */
static const ctl_named_node_t *
arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
	if (i > NSIZES - NBINS)
		return (NULL);
	return (super_arenas_lextent_i_node);
}
1968
/*
 * arenas.create: create a new arena (optionally with caller-supplied extent
 * hooks written via newp) and return its index.  ctl_mtx serializes
 * creation.
 */
static int
arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	extent_hooks_t *extent_hooks;
	unsigned arena_ind;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);

	/* Default hooks unless the caller writes a replacement. */
	extent_hooks = (extent_hooks_t *)&extent_hooks_default;
	WRITE(extent_hooks, extent_hooks_t *);
	if ((arena_ind = ctl_arena_init(tsd_tsdn(tsd), extent_hooks)) ==
	    UINT_MAX) {
		ret = EAGAIN;
		goto label_return;
	}
	READ(arena_ind, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return (ret);
}
1993
1994/******************************************************************************/
1995
1996static int
1997prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
1998    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
1999{
2000	int ret;
2001	bool oldval;
2002
2003	if (!config_prof)
2004		return (ENOENT);
2005
2006	if (newp != NULL) {
2007		if (newlen != sizeof(bool)) {
2008			ret = EINVAL;
2009			goto label_return;
2010		}
2011		oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
2012		    *(bool *)newp);
2013	} else
2014		oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
2015	READ(oldval, bool);
2016
2017	ret = 0;
2018label_return:
2019	return (ret);
2020}
2021
2022static int
2023prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2024    size_t *oldlenp, void *newp, size_t newlen)
2025{
2026	int ret;
2027	bool oldval;
2028
2029	if (!config_prof)
2030		return (ENOENT);
2031
2032	if (newp != NULL) {
2033		if (newlen != sizeof(bool)) {
2034			ret = EINVAL;
2035			goto label_return;
2036		}
2037		oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
2038	} else
2039		oldval = prof_active_get(tsd_tsdn(tsd));
2040	READ(oldval, bool);
2041
2042	ret = 0;
2043label_return:
2044	return (ret);
2045}
2046
2047static int
2048prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2049    size_t *oldlenp, void *newp, size_t newlen)
2050{
2051	int ret;
2052	const char *filename = NULL;
2053
2054	if (!config_prof)
2055		return (ENOENT);
2056
2057	WRITEONLY();
2058	WRITE(filename, const char *);
2059
2060	if (prof_mdump(tsd, filename)) {
2061		ret = EFAULT;
2062		goto label_return;
2063	}
2064
2065	ret = 0;
2066label_return:
2067	return (ret);
2068}
2069
/*
 * prof.gdump: read or toggle dumping on every high-water growth event.  The
 * setter reports the previous value.
 */
static int
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (!config_prof)
		return (ENOENT);

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
	} else
		oldval = prof_gdump_get(tsd_tsdn(tsd));
	READ(oldval, bool);

	ret = 0;
label_return:
	return (ret);
}
2094
/*
 * prof.reset: write-only trigger that resets profile counters, optionally
 * changing the sample rate (lg_sample) at the same time.
 */
static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	size_t lg_sample = lg_prof_sample;

	if (!config_prof)
		return (ENOENT);

	WRITEONLY();
	WRITE(lg_sample, size_t);
	/* Clamp to the largest representable shift of a uint64_t. */
	if (lg_sample >= (sizeof(uint64_t) << 3))
		lg_sample = (sizeof(uint64_t) << 3) - 1;

	prof_reset(tsd, lg_sample);

	ret = 0;
label_return:
	return (ret);
}
2116
/* prof.*: read-only profiling state (no locking). */
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
2119
2120/******************************************************************************/
2121
2122CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
2123CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
2124CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
2125CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
2126CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
2127CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
2128
2129CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
2130CTL_RO_GEN(stats_arenas_i_decay_time, arenas_i(mib[2])->decay_time,
2131    ssize_t)
2132CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
2133CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
2134CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
2135CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
2136    arenas_i(mib[2])->astats->astats.mapped, size_t)
2137CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
2138    arenas_i(mib[2])->astats->astats.retained, size_t)
2139CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
2140    arenas_i(mib[2])->astats->astats.npurge, uint64_t)
2141CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
2142    arenas_i(mib[2])->astats->astats.nmadvise, uint64_t)
2143CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
2144    arenas_i(mib[2])->astats->astats.purged, uint64_t)
2145CTL_RO_CGEN(config_stats, stats_arenas_i_base,
2146    arenas_i(mib[2])->astats->astats.base, size_t)
2147CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
2148    arenas_i(mib[2])->astats->astats.internal, size_t)
2149CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_tcache_bytes,
2150    arenas_i(mib[2])->astats->astats.tcache_bytes, size_t)
2151CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
2152    arenas_i(mib[2])->astats->astats.resident, size_t)
2153
/*
 * Size-class aggregates.  Small-class totals live as *_small fields on
 * the astats record; large-class totals live inside astats.astats.
 */
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    arenas_i(mib[2])->astats->nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    arenas_i(mib[2])->astats->astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    arenas_i(mib[2])->astats->astats.ndalloc_large, uint64_t)
/*
 * large.nrequests deliberately reports nmalloc_large (not a separate
 * nrequests counter); the "Intentional" marker below records that this
 * aliasing is by design, not a copy/paste mistake.
 */
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t) /* Intentional. */
2170
/*
 * Per-bin nodes under "stats.arenas.<i>.bins.<j>"; mib[4] is the bin
 * index, validated by stats_arenas_i_bins_j_index below before these
 * handlers dereference bstats[mib[4]].  The nfills/nflushes counters
 * additionally require tcache support.
 */
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
    arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
    arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
    arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
/* Note the field is named "reslabs" while the mallctl name is "nreslabs". */
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
2189
2190static const ctl_named_node_t *
2191stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
2192    size_t j)
2193{
2194	if (j > NBINS)
2195		return (NULL);
2196	return (super_stats_arenas_i_bins_j_node);
2197}
2198
/*
 * Per-large-extent-class nodes under "stats.arenas.<i>.lextents.<j>";
 * mib[4] is the class index into lstats, validated by
 * stats_arenas_i_lextents_j_index below.
 */
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
    arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
    arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
    arenas_i(mib[2])->astats->lstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
    arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
2207
2208static const ctl_named_node_t *
2209stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
2210    size_t j)
2211{
2212	if (j > NSIZES - NBINS)
2213		return (NULL);
2214	return (super_stats_arenas_i_lextents_j_node);
2215}
2216
2217static const ctl_named_node_t *
2218stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
2219{
2220	const ctl_named_node_t *ret;
2221	size_t a;
2222
2223	malloc_mutex_lock(tsdn, &ctl_mtx);
2224	a = arenas_i2a_impl(i, true, true);
2225	if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
2226		ret = NULL;
2227		goto label_return;
2228	}
2229
2230	ret = super_stats_arenas_i_node;
2231label_return:
2232	malloc_mutex_unlock(tsdn, &ctl_mtx);
2233	return (ret);
2234}
2235