kern_malloc.c revision 114042
/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: head/sys/kern/kern_malloc.c 114042 2003-04-25 21:49:24Z phk $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
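
/*
 * Worked example (illustrative, using the default REALLOC_FRACTION of
 * 1): for a block whose underlying allocation is 1024 bytes, a
 * realloc() to 512 bytes or less (newsize <= 1024 / 2) allocates a
 * new, smaller block, while a realloc() to 513-1024 bytes reuses the
 * existing block.
 */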

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static char *kmembase;
static char *kmemlimit;

#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	65536
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];
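
/*
 * Sketch of the size-to-zone lookup (values assume the defaults
 * above): a 100-byte request is rounded up to the next KMEM_ZBASE
 * (16-byte) boundary, and kmemsize[] then maps the rounded size to a
 * zone index:
 *
 *	size = (100 & ~KMEM_ZMASK) + KMEM_ZBASE;	(= 112)
 *	indx = kmemsize[112 >> KMEM_ZSHIFT];		(= kmemsize[7])
 *
 * kmeminit() fills kmemsize[7] with 3, so the request is served from
 * kmemzones[3], the 128-byte zone.
 */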

/* These won't be powers of two for long */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
	{8192, "8192", NULL},
	{16384, "16384", NULL},
	{32768, "32768", NULL},
	{65536, "65536", NULL},
	{0, NULL},
};

u_int vm_kmem_size;

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);

/* time_uptime of last malloc(9) failure */
static time_t t_malloc_fail;

#ifdef MALLOC_MAKE_FAILURES
/*
 * malloc_failure_rate causes every (n)th malloc with M_NOWAIT to
 * fail.  If set to 0, no failures are imposed.
 */
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif
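
/*
 * Example (requires a kernel built with "options MALLOC_MAKE_FAILURES"):
 * to make every 10th M_NOWAIT allocation fail, so that driver error
 * paths get exercised:
 *
 *	sysctl debug.malloc.failure_rate=10
 *
 * debug.malloc.failure_count then reports how many failures have been
 * imposed so far.
 */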

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}
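
/*
 * malloc_last_fail() reports the number of seconds since the most
 * recent malloc(9) failure.  A hypothetical caller might use it to
 * back off while allocations are known to be failing (a sketch, not
 * an API guarantee):
 *
 *	if (malloc_last_fail() < 5)
 *		return (ENOBUFS);	(give the system time to recover)
 */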

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block; it returns NULL
 *	if the allocation fails.
 */
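/*
 * Typical usage (an illustrative sketch; "foo_softc" and the error
 * handling are hypothetical, not part of this file):
 *
 *	struct foo_softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO);
 *	if (sc == NULL)
 *		return (ENOMEM);
 */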
void *
malloc(size, type, flags)
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#ifdef DIAGNOSTIC
	unsigned long osize = size;
#endif
	register struct malloc_type *ksp = type;

#ifdef INVARIANTS
	/*
	 * Make sure that exactly one of M_WAITOK and M_NOWAIT is
	 * specified, and check for the API botches that are common.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#if 0
	if (size == 0)
		Debugger("zero size malloc");
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		   ("malloc(M_WAITOK) in interrupt context"));
	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		mtx_lock(&ksp->ks_mtx);
		if (va == NULL)
			goto out;

		ksp->ks_size |= 1 << indx;
		size = zone->uz_size;
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		mtx_lock(&ksp->ks_mtx);
		if (va == NULL)
			goto out;
	}
	ksp->ks_memuse += size;
	ksp->ks_inuse++;
out:
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;

	mtx_unlock(&ksp->ks_mtx);
	if (!(flags & M_NOWAIT))
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	if (va == NULL) {
		t_malloc_fail = time_uptime;
	}
#ifdef DIAGNOSTIC
	/* Avoid dereferencing NULL on a failed M_NOWAIT allocation. */
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
	return ((void *) va);
}

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
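/*
 * Illustrative counterpart to the malloc(9) sketch above; the "sc"
 * pointer is hypothetical.  Note that free(NULL, type) is a no-op,
 * so callers need not test for NULL first:
 *
 *	free(sc, M_DEVBUF);
 *	sc = NULL;
 */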
void
free(addr, type)
	void *addr;
	struct malloc_type *type;
{
	register struct malloc_type *ksp = type;
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	size = 0;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtp = addr;
#endif
		size = slab->us_zone->uz_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes for
		 * 64 bit machines.
		 */
		mtp = (struct malloc_type **)
		    ((unsigned long)mtp & ~UMA_ALIGN_PTR);
		mtp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtp = type;
#endif
		uma_zfree_arg(slab->us_zone, addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	mtx_lock(&ksp->ks_mtx);
	ksp->ks_memuse -= size;
	ksp->ks_inuse--;
	mtx_unlock(&ksp->ks_mtx);
}

/*
 *	realloc: change the size of a memory block
 */
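/*
 * Illustrative use ("buf" and "newlen" are hypothetical).  On failure
 * realloc() returns NULL and leaves the original block intact, so the
 * result must not be assigned over the only copy of the old pointer:
 *
 *	void *tmp;
 *
 *	if ((tmp = realloc(buf, newlen, M_TEMP, M_NOWAIT)) == NULL)
 *		return (ENOMEM);	(buf is still valid here)
 *	buf = tmp;
 */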
void *
realloc(addr, size, type, flags)
	void *addr;
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, type, flags));

	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (slab->us_zone)
		alloc = slab->us_zone->uz_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, type, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, type);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but frees the original block on failure.
 */
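/*
 * Illustrative use (hypothetical "buf" and "newlen"): because
 * reallocf() frees the original block itself on failure, the result
 * may be assigned straight back to the only copy of the pointer:
 *
 *	if ((buf = reallocf(buf, newlen, M_TEMP, M_NOWAIT)) == NULL)
 *		return (ENOMEM);	(nothing left to free)
 */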
void *
reallocf(addr, size, type, flags)
	void *addr;
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	void *mem;

	if ((mem = realloc(addr, size, type, flags)) == NULL)
		free(addr, type);
	return (mem);
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED */
static void
kmeminit(dummy)
	void *dummy;
{
	u_int8_t indx;
	u_long npg;
	u_long mem_size;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE;
	mem_size = cnt.v_page_count * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_SCALE)
	if ((mem_size / VM_KMEM_SIZE_SCALE) > vm_kmem_size)
		vm_kmem_size = mem_size / VM_KMEM_SIZE_SCALE;
#endif

#if defined(VM_KMEM_SIZE_MAX)
	if (vm_kmem_size >= VM_KMEM_SIZE_MAX)
		vm_kmem_size = VM_KMEM_SIZE_MAX;
#endif
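
	/*
	 * Worked example (illustrative numbers only): with 512MB of
	 * physical memory and a VM_KMEM_SIZE_SCALE of 4, the scaled
	 * size is 512MB / 4 = 128MB; if VM_KMEM_SIZE_MAX were 80MB,
	 * vm_kmem_size would then be clamped down to 80MB here.
	 */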

	/* Allow final override from the kernel environment */
	TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane. Be careful to not overflow the 32bit
	 * ints while doing the check.
	 */
	if ((vm_kmem_size / 2) > (cnt.v_page_count * PAGE_SIZE))
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * In mbuf_init(), we set up submaps for mbufs and clusters, in which
	 * case we rounddown() (nmbufs * MSIZE) and (nmbclusters * MCLBYTES),
	 * respectively. Mathematically, this means that what we do here may
	 * amount to slightly more address space than we need for the submaps,
	 * but it never hurts to have an extra page in kmem_map.
	 */
	npg = (nmbufs*MSIZE + nmbclusters*MCLBYTES + vm_kmem_size) / PAGE_SIZE;

	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
	kmem_map->system_map = 1;

	uma_startup2();

	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}

void
malloc_init(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;

	mtx_lock(&malloc_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	/* Already initialized; drop the lock before returning. */
	if (type->ks_next != NULL) {
		mtx_unlock(&malloc_mtx);
		return;
	}

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
	mtx_init(&type->ks_mtx, type->ks_shortdesc, "Malloc Stats", MTX_DEF);
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;

	mtx_lock(&malloc_mtx);
	mtx_lock(&type->ks_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	mtx_destroy(&type->ks_mtx);
	mtx_unlock(&malloc_mtx);
}

static int
sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type *type;
	int linesize = 128;
	int curline;
	int bufsize;
	int first;
	int error;
	char *buf;
	char *p;
	int cnt;
	int len;
	int i;

	cnt = 0;

	mtx_lock(&malloc_mtx);
	for (type = kmemstatistics; type != NULL; type = type->ks_next)
		cnt++;

	mtx_unlock(&malloc_mtx);
	bufsize = linesize * (cnt + 1);
	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	mtx_lock(&malloc_mtx);

	len = snprintf(p, linesize,
	    "\n        Type  InUse MemUse HighUse Requests  Size(s)\n");
	p += len;

	for (type = kmemstatistics; cnt != 0 && type != NULL;
	    type = type->ks_next, cnt--) {
		if (type->ks_calls == 0)
			continue;

		curline = linesize - 2;	/* Leave room for the \n */
		len = snprintf(p, curline, "%13s%6lu%6luK%7luK%9llu",
			type->ks_shortdesc,
			type->ks_inuse,
			(type->ks_memuse + 1023) / 1024,
			(type->ks_maxused + 1023) / 1024,
			(long long unsigned)type->ks_calls);
		curline -= len;
		p += len;

		first = 1;
		for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1;
		    i++) {
			if (type->ks_size & (1 << i)) {
				if (first)
					len = snprintf(p, curline, "  ");
				else
					len = snprintf(p, curline, ",");
				curline -= len;
				p += len;

				len = snprintf(p, curline,
				    "%s", kmemzones[i].kz_name);
				curline -= len;
				p += len;

				first = 0;
			}
		}

		len = snprintf(p, 2, "\n");
		p += len;
	}

	mtx_unlock(&malloc_mtx);
	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");
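
/*
 * The statistics can be read from userland with, e.g.:
 *
 *	sysctl kern.malloc
 *
 * which returns one formatted line per malloc type, as built above.
 */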

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	char *p;
	int len;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128;	/* For the stats line */
	bufsize += 128;	/* For the banner line */
	waste = 0;
	mem = 0;

	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	len = snprintf(p, bufsize,
	    "\n  Size                    Requests  Real Size\n");
	bufsize -= len;
	p += len;

	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = krequests[i];

		len = snprintf(p, bufsize, "%6d%28llu%11d\n",
		    size, (unsigned long long)count, rsize);
		bufsize -= len;
		p += len;

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}

	len = snprintf(p, bufsize,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	p += len;

	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */