#define	JEMALLOC_STATS_C_
#include "jemalloc/internal/jemalloc_internal.h"

#define	CTL_GET(n, v, t) do {						\
	size_t sz = sizeof(t);						\
	xmallctl(n, v, &sz, NULL, 0);					\
} while (0)

#define	CTL_M2_GET(n, i, v, t) do {					\
	size_t mib[6];							\
	size_t miblen = sizeof(mib) / sizeof(size_t);			\
	size_t sz = sizeof(t);						\
	xmallctlnametomib(n, mib, &miblen);				\
	mib[2] = (i);							\
	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
} while (0)

#define	CTL_M2_M4_GET(n, i, j, v, t) do {				\
	size_t mib[6];							\
	size_t miblen = sizeof(mib) / sizeof(size_t);			\
	size_t sz = sizeof(t);						\
	xmallctlnametomib(n, mib, &miblen);				\
	mib[2] = (i);							\
	mib[4] = (j);							\
	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
} while (0)
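/*
 * Illustrative (non-compiled) usage of the CTL_* helpers above, assuming the
 * usual mallctl namespace; index values i and j are hypothetical:
 *
 *	size_t page;
 *	uint64_t nmalloc;
 *	CTL_GET("arenas.page", &page, size_t);
 *	// Read stats.arenas.<i>.bins.<j>.nmalloc: the macro translates the
 *	// literal name to a mib, then patches the arena index into mib[2]
 *	// and the bin index into mib[4] before the by-mib lookup.
 *	CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
 *	    uint64_t);
 */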

/******************************************************************************/
/* Data. */

bool	opt_stats_print = false;

size_t	stats_cactive = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_arena_bins_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i);
static void	stats_arena_lruns_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i);
static void	stats_arena_hchunks_print(
    void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
static void	stats_arena_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i, bool bins, bool large, bool huge);

/******************************************************************************/

static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i)
{
	size_t page;
	bool config_tcache, in_gap;
	unsigned nbins, j;

	CTL_GET("arenas.page", &page, size_t);

	CTL_GET("config.tcache", &config_tcache, bool);
	if (config_tcache) {
		malloc_cprintf(write_cb, cbopaque,
		    "bins:           size ind    allocated      nmalloc"
		    "      ndalloc    nrequests      curregs      curruns regs"
		    " pgs  util       nfills     nflushes      newruns"
		    "       reruns\n");
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "bins:           size ind    allocated      nmalloc"
		    "      ndalloc    nrequests      curregs      curruns regs"
		    " pgs  util      newruns       reruns\n");
	}
	CTL_GET("arenas.nbins", &nbins, unsigned);
	for (j = 0, in_gap = false; j < nbins; j++) {
		uint64_t nruns;

		CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
		    uint64_t);
		if (nruns == 0)
			in_gap = true;
		else {
			size_t reg_size, run_size, curregs, availregs, milli;
			size_t curruns;
			uint32_t nregs;
			uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
			uint64_t reruns;
			char util[6]; /* "x.yyy". */

			if (in_gap) {
				malloc_cprintf(write_cb, cbopaque,
				    "                     ---\n");
				in_gap = false;
			}
			CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
			CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
			CTL_M2_GET("arenas.bin.0.run_size", j, &run_size,
			    size_t);
			CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j,
			    &nmalloc, uint64_t);
			CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j,
			    &ndalloc, uint64_t);
			CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j,
			    &curregs, size_t);
			CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
			    &nrequests, uint64_t);
			if (config_tcache) {
				CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i,
				    j, &nfills, uint64_t);
				CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes",
				    i, j, &nflushes, uint64_t);
			}
			CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j,
			    &reruns, uint64_t);
			CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j,
			    &curruns, size_t);

			availregs = nregs * curruns;
			milli = (availregs != 0) ? (1000 * curregs) / availregs
			    : 1000;
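			/*
			 * Worked example (hypothetical numbers): with 3 runs
			 * of 128 regions each, availregs = 384; if 192
			 * regions are live, milli = (1000 * 192) / 384 = 500,
			 * which is formatted below as "0.500".
			 */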
			assert(milli <= 1000);
			if (milli < 10) {
				malloc_snprintf(util, sizeof(util),
				    "0.00%zu", milli);
			} else if (milli < 100) {
				malloc_snprintf(util, sizeof(util), "0.0%zu",
				    milli);
			} else if (milli < 1000) {
				malloc_snprintf(util, sizeof(util), "0.%zu",
				    milli);
			} else
				malloc_snprintf(util, sizeof(util), "1");

			if (config_tcache) {
				malloc_cprintf(write_cb, cbopaque,
				    "%20zu %3u %12zu %12"FMTu64
				    " %12"FMTu64" %12"FMTu64" %12zu"
				    " %12zu %4u %3zu %-5s %12"FMTu64
				    " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n",
				    reg_size, j, curregs * reg_size, nmalloc,
				    ndalloc, nrequests, curregs, curruns, nregs,
				    run_size / page, util, nfills, nflushes,
				    nruns, reruns);
			} else {
				malloc_cprintf(write_cb, cbopaque,
				    "%20zu %3u %12zu %12"FMTu64
				    " %12"FMTu64" %12"FMTu64" %12zu"
				    " %12zu %4u %3zu %-5s %12"FMTu64
				    " %12"FMTu64"\n",
				    reg_size, j, curregs * reg_size, nmalloc,
				    ndalloc, nrequests, curregs, curruns, nregs,
				    run_size / page, util, nruns, reruns);
			}
		}
	}
	if (in_gap) {
		malloc_cprintf(write_cb, cbopaque,
		    "                     ---\n");
	}
}

static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i)
{
	unsigned nbins, nlruns, j;
	bool in_gap;

	malloc_cprintf(write_cb, cbopaque,
	    "large:          size ind    allocated      nmalloc      ndalloc"
	    "    nrequests      curruns\n");
	CTL_GET("arenas.nbins", &nbins, unsigned);
	CTL_GET("arenas.nlruns", &nlruns, unsigned);
	for (j = 0, in_gap = false; j < nlruns; j++) {
		uint64_t nmalloc, ndalloc, nrequests;
		size_t run_size, curruns;

		CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc,
		    uint64_t);
		CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc,
		    uint64_t);
		CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
		    &nrequests, uint64_t);
		if (nrequests == 0)
			in_gap = true;
		else {
			CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
			CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j,
			    &curruns, size_t);
			if (in_gap) {
				malloc_cprintf(write_cb, cbopaque,
				    "                     ---\n");
				in_gap = false;
			}
			malloc_cprintf(write_cb, cbopaque,
			    "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
			    " %12"FMTu64" %12zu\n",
			    run_size, nbins + j, curruns * run_size, nmalloc,
			    ndalloc, nrequests, curruns);
		}
	}
	if (in_gap) {
		malloc_cprintf(write_cb, cbopaque,
		    "                     ---\n");
	}
}

static void
stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i)
{
	unsigned nbins, nlruns, nhchunks, j;
	bool in_gap;

	malloc_cprintf(write_cb, cbopaque,
	    "huge:           size ind    allocated      nmalloc      ndalloc"
	    "    nrequests   curhchunks\n");
	CTL_GET("arenas.nbins", &nbins, unsigned);
	CTL_GET("arenas.nlruns", &nlruns, unsigned);
	CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
	for (j = 0, in_gap = false; j < nhchunks; j++) {
		uint64_t nmalloc, ndalloc, nrequests;
		size_t hchunk_size, curhchunks;

		CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j,
		    &nmalloc, uint64_t);
		CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j,
		    &ndalloc, uint64_t);
		CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j,
		    &nrequests, uint64_t);
		if (nrequests == 0)
			in_gap = true;
		else {
			CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size,
			    size_t);
			CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i,
			    j, &curhchunks, size_t);
			if (in_gap) {
				malloc_cprintf(write_cb, cbopaque,
				    "                     ---\n");
				in_gap = false;
			}
			malloc_cprintf(write_cb, cbopaque,
			    "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
			    " %12"FMTu64" %12zu\n",
			    hchunk_size, nbins + nlruns + j,
			    curhchunks * hchunk_size, nmalloc, ndalloc,
			    nrequests, curhchunks);
		}
	}
	if (in_gap) {
		malloc_cprintf(write_cb, cbopaque,
		    "                     ---\n");
	}
}

static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i, bool bins, bool large, bool huge)
{
	unsigned nthreads;
	const char *dss;
	ssize_t lg_dirty_mult, decay_time;
	size_t page, pactive, pdirty, mapped, retained;
	size_t metadata_mapped, metadata_allocated;
	uint64_t npurge, nmadvise, purged;
	size_t small_allocated;
	uint64_t small_nmalloc, small_ndalloc, small_nrequests;
	size_t large_allocated;
	uint64_t large_nmalloc, large_ndalloc, large_nrequests;
	size_t huge_allocated;
	uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;

	CTL_GET("arenas.page", &page, size_t);

	CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
	malloc_cprintf(write_cb, cbopaque,
	    "assigned threads: %u\n", nthreads);
	CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
	malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
	    dss);
	CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
	if (opt_purge == purge_mode_ratio) {
		if (lg_dirty_mult >= 0) {
			malloc_cprintf(write_cb, cbopaque,
			    "min active:dirty page ratio: %u:1\n",
			    (1U << lg_dirty_mult));
		} else {
			malloc_cprintf(write_cb, cbopaque,
			    "min active:dirty page ratio: N/A\n");
		}
	}
	CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t);
	if (opt_purge == purge_mode_decay) {
		if (decay_time >= 0) {
			malloc_cprintf(write_cb, cbopaque, "decay time: %zd\n",
			    decay_time);
		} else
			malloc_cprintf(write_cb, cbopaque, "decay time: N/A\n");
	}
	CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
	CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
	CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
	CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
	CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
	malloc_cprintf(write_cb, cbopaque,
	    "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64", "
	    "purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged);

	malloc_cprintf(write_cb, cbopaque,
	    "                            allocated      nmalloc      ndalloc"
	    "    nrequests\n");
	CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
	    size_t);
	CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
	    uint64_t);
	malloc_cprintf(write_cb, cbopaque,
	    "small:                   %12zu %12"FMTu64" %12"FMTu64
	    " %12"FMTu64"\n",
	    small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
	CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
	    size_t);
	CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
	    uint64_t);
	malloc_cprintf(write_cb, cbopaque,
	    "large:                   %12zu %12"FMTu64" %12"FMTu64
	    " %12"FMTu64"\n",
	    large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
	CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
	CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
	CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests,
	    uint64_t);
	malloc_cprintf(write_cb, cbopaque,
	    "huge:                    %12zu %12"FMTu64" %12"FMTu64
	    " %12"FMTu64"\n",
	    huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
	malloc_cprintf(write_cb, cbopaque,
	    "total:                   %12zu %12"FMTu64" %12"FMTu64
	    " %12"FMTu64"\n",
	    small_allocated + large_allocated + huge_allocated,
	    small_nmalloc + large_nmalloc + huge_nmalloc,
	    small_ndalloc + large_ndalloc + huge_ndalloc,
	    small_nrequests + large_nrequests + huge_nrequests);
	malloc_cprintf(write_cb, cbopaque,
	    "active:                  %12zu\n", pactive * page);
	CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
	malloc_cprintf(write_cb, cbopaque,
	    "mapped:                  %12zu\n", mapped);
	CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t);
	malloc_cprintf(write_cb, cbopaque,
	    "retained:                %12zu\n", retained);
	CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
	    size_t);
	CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
	    size_t);
	malloc_cprintf(write_cb, cbopaque,
	    "metadata: mapped: %zu, allocated: %zu\n",
	    metadata_mapped, metadata_allocated);

	if (bins)
		stats_arena_bins_print(write_cb, cbopaque, i);
	if (large)
		stats_arena_lruns_print(write_cb, cbopaque, i);
	if (huge)
		stats_arena_hchunks_print(write_cb, cbopaque, i);
}

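/*
 * Print statistics via write_cb/cbopaque.  Characters in opts suppress
 * sections of the report: 'g' general information, 'm' merged arena stats,
 * 'a' per-arena stats, 'b' bin stats, 'l' large ("lrun") stats, and 'h' huge
 * chunk stats.  Unrecognized characters are ignored.
 */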
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{
	int err;
	uint64_t epoch;
	size_t u64sz;
	bool general = true;
	bool merged = true;
	bool unmerged = true;
	bool bins = true;
	bool large = true;
	bool huge = true;

	/*
	 * Refresh stats, in case mallctl() was called by the application.
	 *
	 * Check for OOM here, since refreshing the ctl cache can trigger
	 * allocation.  In practice, none of the subsequent mallctl()-related
	 * calls in this function will cause OOM if this one succeeds.
	 */
	epoch = 1;
	u64sz = sizeof(uint64_t);
	err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
	if (err != 0) {
		if (err == EAGAIN) {
			malloc_write("<jemalloc>: Memory allocation failure in "
			    "mallctl(\"epoch\", ...)\n");
			return;
		}
		malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
		    "...)\n");
		abort();
	}
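	/*
	 * Applications can force the same refresh before reading individual
	 * statistics, e.g. (sketch, error handling omitted):
	 *
	 *	uint64_t epoch = 1;
	 *	size_t sz = sizeof(epoch);
	 *	mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));
	 */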

	if (opts != NULL) {
		unsigned i;

		for (i = 0; opts[i] != '\0'; i++) {
			switch (opts[i]) {
			case 'g':
				general = false;
				break;
			case 'm':
				merged = false;
				break;
			case 'a':
				unmerged = false;
				break;
			case 'b':
				bins = false;
				break;
			case 'l':
				large = false;
				break;
			case 'h':
				huge = false;
				break;
			default:;
			}
		}
	}

	malloc_cprintf(write_cb, cbopaque,
	    "___ Begin jemalloc statistics ___\n");
	if (general) {
		const char *cpv;
		bool bv;
		unsigned uv;
		ssize_t ssv;
		size_t sv, bsz, usz, ssz, sssz, cpsz;

		bsz = sizeof(bool);
		usz = sizeof(unsigned);
		ssz = sizeof(size_t);
		sssz = sizeof(ssize_t);
		cpsz = sizeof(const char *);

		CTL_GET("version", &cpv, const char *);
		malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
		CTL_GET("config.debug", &bv, bool);
		malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
		    bv ? "enabled" : "disabled");
		malloc_cprintf(write_cb, cbopaque,
		    "config.malloc_conf: \"%s\"\n", config_malloc_conf);

#define	OPT_WRITE_BOOL(n)						\
		if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) {	\
			malloc_cprintf(write_cb, cbopaque,		\
			    "  opt."#n": %s\n", bv ? "true" : "false");	\
		}
#define	OPT_WRITE_BOOL_MUTABLE(n, m) {					\
		bool bv2;						\
		if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 &&	\
		    je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) {		\
			malloc_cprintf(write_cb, cbopaque,		\
			    "  opt."#n": %s ("#m": %s)\n", bv ? "true"	\
			    : "false", bv2 ? "true" : "false");		\
		}							\
}
#define	OPT_WRITE_UNSIGNED(n)						\
		if (je_mallctl("opt."#n, &uv, &usz, NULL, 0) == 0) {	\
			malloc_cprintf(write_cb, cbopaque,		\
			    "  opt."#n": %u\n", uv);			\
		}
#define	OPT_WRITE_SIZE_T(n)						\
		if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) {	\
			malloc_cprintf(write_cb, cbopaque,		\
			    "  opt."#n": %zu\n", sv);			\
		}
#define	OPT_WRITE_SSIZE_T(n)						\
		if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) {	\
			malloc_cprintf(write_cb, cbopaque,		\
			    "  opt."#n": %zd\n", ssv);			\
		}
#define	OPT_WRITE_SSIZE_T_MUTABLE(n, m) {				\
		ssize_t ssv2;						\
		if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 &&	\
		    je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) {	\
			malloc_cprintf(write_cb, cbopaque,		\
			    "  opt."#n": %zd ("#m": %zd)\n",		\
			    ssv, ssv2);					\
		}							\
}
#define	OPT_WRITE_CHAR_P(n)						\
		if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) {	\
			malloc_cprintf(write_cb, cbopaque,		\
			    "  opt."#n": \"%s\"\n", cpv);		\
		}

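		/*
		 * Each OPT_WRITE_* macro emits one "  opt.<name>: <value>"
		 * line, but only if the corresponding mallctl exists in this
		 * build.  For example, OPT_WRITE_BOOL(abort) expands to
		 * (roughly):
		 *
		 *	if (je_mallctl("opt.abort", &bv, &bsz, NULL, 0) == 0) {
		 *		malloc_cprintf(write_cb, cbopaque,
		 *		    "  opt.abort: %s\n",
		 *		    bv ? "true" : "false");
		 *	}
		 */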
		malloc_cprintf(write_cb, cbopaque,
		    "Run-time option settings:\n");
		OPT_WRITE_BOOL(abort)
		OPT_WRITE_SIZE_T(lg_chunk)
		OPT_WRITE_CHAR_P(dss)
		OPT_WRITE_UNSIGNED(narenas)
		OPT_WRITE_CHAR_P(purge)
		if (opt_purge == purge_mode_ratio) {
			OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult,
			    arenas.lg_dirty_mult)
		}
		if (opt_purge == purge_mode_decay)
			OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time)
		OPT_WRITE_BOOL(stats_print)
		OPT_WRITE_CHAR_P(junk)
		OPT_WRITE_SIZE_T(quarantine)
		OPT_WRITE_BOOL(redzone)
		OPT_WRITE_BOOL(zero)
		OPT_WRITE_BOOL(utrace)
		OPT_WRITE_BOOL(valgrind)
		OPT_WRITE_BOOL(xmalloc)
		OPT_WRITE_BOOL(tcache)
		OPT_WRITE_SSIZE_T(lg_tcache_max)
		OPT_WRITE_BOOL(prof)
		OPT_WRITE_CHAR_P(prof_prefix)
		OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active)
		OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init,
		    prof.thread_active_init)
		OPT_WRITE_SSIZE_T(lg_prof_sample)
		OPT_WRITE_BOOL(prof_accum)
		OPT_WRITE_SSIZE_T(lg_prof_interval)
		OPT_WRITE_BOOL(prof_gdump)
		OPT_WRITE_BOOL(prof_final)
		OPT_WRITE_BOOL(prof_leak)

#undef OPT_WRITE_BOOL
#undef OPT_WRITE_BOOL_MUTABLE
#undef OPT_WRITE_UNSIGNED
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_SSIZE_T_MUTABLE
#undef OPT_WRITE_CHAR_P

		malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);

		CTL_GET("arenas.narenas", &uv, unsigned);
		malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);

		malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
		    sizeof(void *));

		CTL_GET("arenas.quantum", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n",
		    sv);

		CTL_GET("arenas.page", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);

		CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
		if (opt_purge == purge_mode_ratio) {
			if (ssv >= 0) {
				malloc_cprintf(write_cb, cbopaque,
				    "Min active:dirty page ratio per arena: "
				    "%u:1\n", (1U << ssv));
			} else {
				malloc_cprintf(write_cb, cbopaque,
				    "Min active:dirty page ratio per arena: "
				    "N/A\n");
			}
		}
		CTL_GET("arenas.decay_time", &ssv, ssize_t);
		if (opt_purge == purge_mode_decay) {
			malloc_cprintf(write_cb, cbopaque,
			    "Unused dirty page decay time: %zd%s\n",
			    ssv, (ssv < 0) ? " (no decay)" : "");
		}
		if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) {
			malloc_cprintf(write_cb, cbopaque,
			    "Maximum thread-cached size class: %zu\n", sv);
		}
		if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) {
			CTL_GET("prof.lg_sample", &sv, size_t);
			malloc_cprintf(write_cb, cbopaque,
			    "Average profile sample interval: %"FMTu64
			    " (2^%zu)\n", (((uint64_t)1U) << sv), sv);

			CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
			if (ssv >= 0) {
				malloc_cprintf(write_cb, cbopaque,
				    "Average profile dump interval: %"FMTu64
				    " (2^%zd)\n",
				    (((uint64_t)1U) << ssv), ssv);
			} else {
				malloc_cprintf(write_cb, cbopaque,
				    "Average profile dump interval: N/A\n");
			}
		}
		CTL_GET("opt.lg_chunk", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque,
		    "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv);
	}

	if (config_stats) {
		size_t *cactive;
		size_t allocated, active, metadata, resident, mapped, retained;

		CTL_GET("stats.cactive", &cactive, size_t *);
		CTL_GET("stats.allocated", &allocated, size_t);
		CTL_GET("stats.active", &active, size_t);
		CTL_GET("stats.metadata", &metadata, size_t);
		CTL_GET("stats.resident", &resident, size_t);
		CTL_GET("stats.mapped", &mapped, size_t);
		CTL_GET("stats.retained", &retained, size_t);
		malloc_cprintf(write_cb, cbopaque,
		    "Allocated: %zu, active: %zu, metadata: %zu,"
		    " resident: %zu, mapped: %zu, retained: %zu\n",
		    allocated, active, metadata, resident, mapped, retained);
		malloc_cprintf(write_cb, cbopaque,
		    "Current active ceiling: %zu\n",
		    atomic_read_z(cactive));

		if (merged) {
			unsigned narenas;

			CTL_GET("arenas.narenas", &narenas, unsigned);
			{
				VARIABLE_ARRAY(bool, initialized, narenas);
				size_t isz;
				unsigned i, ninitialized;

				isz = sizeof(bool) * narenas;
				xmallctl("arenas.initialized", initialized,
				    &isz, NULL, 0);
				for (i = ninitialized = 0; i < narenas; i++) {
					if (initialized[i])
						ninitialized++;
				}

				if (ninitialized > 1 || !unmerged) {
					/* Print merged arena stats. */
					malloc_cprintf(write_cb, cbopaque,
					    "\nMerged arenas stats:\n");
					stats_arena_print(write_cb, cbopaque,
					    narenas, bins, large, huge);
				}
			}
		}

		if (unmerged) {
			unsigned narenas;

			/* Print stats for each arena. */

			CTL_GET("arenas.narenas", &narenas, unsigned);
			{
				VARIABLE_ARRAY(bool, initialized, narenas);
				size_t isz;
				unsigned i;

				isz = sizeof(bool) * narenas;
				xmallctl("arenas.initialized", initialized,
				    &isz, NULL, 0);

				for (i = 0; i < narenas; i++) {
					if (initialized[i]) {
						malloc_cprintf(write_cb,
						    cbopaque,
						    "\narenas[%u]:\n", i);
						stats_arena_print(write_cb,
						    cbopaque, i, bins, large,
						    huge);
					}
				}
			}
		}
	}
	malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
}
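
/*
 * Example (sketch): applications normally reach this code through the public
 * wrapper rather than by calling stats_print() directly; the public symbol
 * may carry a je_ prefix depending on how jemalloc was configured.
 *
 *	#include <jemalloc/jemalloc.h>
 *
 *	// Print everything via the default write callback (stderr).
 *	malloc_stats_print(NULL, NULL, NULL);
 *
 *	// Skip per-arena and huge-object detail.
 *	malloc_stats_print(NULL, NULL, "ah");
 *
 * The opts string is the same one parsed above.
 */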