Deleted Added
full compact
kern_malloc.c (95743) kern_malloc.c (95766)
1/*
2 * Copyright (c) 1987, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94
1/*
2 * Copyright (c) 1987, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94
34 * $FreeBSD: head/sys/kern/kern_malloc.c 95743 2002-04-29 17:53:23Z rwatson $
34 * $FreeBSD: head/sys/kern/kern_malloc.c 95766 2002-04-30 04:26:34Z jeff $
35 */
36
37#include "opt_vm.h"
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/kernel.h>
42#include <sys/lock.h>
43#include <sys/malloc.h>
44#include <sys/mbuf.h>
45#include <sys/mutex.h>
46#include <sys/vmmeter.h>
47#include <sys/proc.h>
48#include <sys/sysctl.h>
49
50#include <vm/vm.h>
51#include <vm/vm_param.h>
52#include <vm/vm_kern.h>
53#include <vm/vm_extern.h>
54#include <vm/pmap.h>
55#include <vm/vm_map.h>
56#include <vm/uma.h>
57#include <vm/uma_int.h>
58
59#if defined(INVARIANTS) && defined(__i386__)
60#include <machine/cpu.h>
61#endif
62
63/*
64 * When realloc() is called, if the new size is sufficiently smaller than
65 * the old size, realloc() will allocate a new, smaller block to avoid
66 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
67 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
68 */
69#ifndef REALLOC_FRACTION
70#define REALLOC_FRACTION 1 /* new block if <= half the size */
71#endif
72
73MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
74MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
75MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
76
77MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
78MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
79
80static void kmeminit(void *);
81SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)
82
83static MALLOC_DEFINE(M_FREE, "free", "should be on free list");
84
85static struct malloc_type *kmemstatistics;
86static char *kmembase;
87static char *kmemlimit;
88
89#define KMEM_ZSHIFT 4
90#define KMEM_ZBASE 16
91#define KMEM_ZMASK (KMEM_ZBASE - 1)
92
93#define KMEM_ZMAX 65536
94#define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT)
95static u_int8_t kmemsize[KMEM_ZSIZE + 1];
96
97/* These won't be powers of two for long */
98struct {
99 int kz_size;
100 char *kz_name;
101 uma_zone_t kz_zone;
102} kmemzones[] = {
103 {16, "16", NULL},
104 {32, "32", NULL},
105 {64, "64", NULL},
106 {128, "128", NULL},
107 {256, "256", NULL},
108 {512, "512", NULL},
109 {1024, "1024", NULL},
110 {2048, "2048", NULL},
111 {4096, "4096", NULL},
112 {8192, "8192", NULL},
113 {16384, "16384", NULL},
114 {32768, "32768", NULL},
115 {65536, "65536", NULL},
116 {0, NULL},
117};
118
119u_int vm_kmem_size;
120static struct mtx malloc_mtx;
121
122#ifdef MALLOC_PROFILE
123uint64_t krequests[KMEM_ZSIZE + 1];
124
125static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
126#endif
127
128static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);
129
130/*
131 * malloc:
132 *
133 * Allocate a block of memory.
134 *
135 * If M_NOWAIT is set, this routine will not block and return NULL if
136 * the allocation fails.
137 */
138void *
139malloc(size, type, flags)
140 unsigned long size;
141 struct malloc_type *type;
142 int flags;
143{
144 int indx;
145 caddr_t va;
146 uma_zone_t zone;
147 register struct malloc_type *ksp = type;
148
149#if 0
150 if (size == 0)
151 Debugger("zero size malloc");
152#endif
153#if defined(INVARIANTS)
154 if (flags == M_WAITOK)
155 KASSERT(curthread->td_intr_nesting_level == 0,
156 ("malloc(M_WAITOK) in interrupt context"));
157#endif
158 if (size <= KMEM_ZMAX) {
159 if (size & KMEM_ZMASK)
160 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
161 indx = kmemsize[size >> KMEM_ZSHIFT];
162 zone = kmemzones[indx].kz_zone;
163#ifdef MALLOC_PROFILE
164 krequests[size >> KMEM_ZSHIFT]++;
165#endif
166 va = uma_zalloc(zone, flags);
167 if (va == NULL)
168 goto out;
169
170 ksp->ks_size |= 1 << indx;
171 size = zone->uz_size;
172 } else {
173 size = roundup(size, PAGE_SIZE);
174 zone = NULL;
175 va = uma_large_malloc(size, flags);
176 if (va == NULL)
177 goto out;
178 }
179 ksp->ks_memuse += size;
180 ksp->ks_inuse++;
181out:
182 ksp->ks_calls++;
183 if (ksp->ks_memuse > ksp->ks_maxused)
184 ksp->ks_maxused = ksp->ks_memuse;
185
35 */
36
37#include "opt_vm.h"
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/kernel.h>
42#include <sys/lock.h>
43#include <sys/malloc.h>
44#include <sys/mbuf.h>
45#include <sys/mutex.h>
46#include <sys/vmmeter.h>
47#include <sys/proc.h>
48#include <sys/sysctl.h>
49
50#include <vm/vm.h>
51#include <vm/vm_param.h>
52#include <vm/vm_kern.h>
53#include <vm/vm_extern.h>
54#include <vm/pmap.h>
55#include <vm/vm_map.h>
56#include <vm/uma.h>
57#include <vm/uma_int.h>
58
59#if defined(INVARIANTS) && defined(__i386__)
60#include <machine/cpu.h>
61#endif
62
63/*
64 * When realloc() is called, if the new size is sufficiently smaller than
65 * the old size, realloc() will allocate a new, smaller block to avoid
66 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
67 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
68 */
69#ifndef REALLOC_FRACTION
70#define REALLOC_FRACTION 1 /* new block if <= half the size */
71#endif
72
73MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
74MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
75MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
76
77MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
78MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
79
80static void kmeminit(void *);
81SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)
82
83static MALLOC_DEFINE(M_FREE, "free", "should be on free list");
84
85static struct malloc_type *kmemstatistics;
86static char *kmembase;
87static char *kmemlimit;
88
89#define KMEM_ZSHIFT 4
90#define KMEM_ZBASE 16
91#define KMEM_ZMASK (KMEM_ZBASE - 1)
92
93#define KMEM_ZMAX 65536
94#define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT)
95static u_int8_t kmemsize[KMEM_ZSIZE + 1];
96
97/* These won't be powers of two for long */
98struct {
99 int kz_size;
100 char *kz_name;
101 uma_zone_t kz_zone;
102} kmemzones[] = {
103 {16, "16", NULL},
104 {32, "32", NULL},
105 {64, "64", NULL},
106 {128, "128", NULL},
107 {256, "256", NULL},
108 {512, "512", NULL},
109 {1024, "1024", NULL},
110 {2048, "2048", NULL},
111 {4096, "4096", NULL},
112 {8192, "8192", NULL},
113 {16384, "16384", NULL},
114 {32768, "32768", NULL},
115 {65536, "65536", NULL},
116 {0, NULL},
117};
118
119u_int vm_kmem_size;
120static struct mtx malloc_mtx;
121
122#ifdef MALLOC_PROFILE
123uint64_t krequests[KMEM_ZSIZE + 1];
124
125static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
126#endif
127
128static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);
129
130/*
131 * malloc:
132 *
133 * Allocate a block of memory.
134 *
135 * If M_NOWAIT is set, this routine will not block and return NULL if
136 * the allocation fails.
137 */
/*
 * malloc: kernel memory allocator entry point (K&R-style definition).
 * Requests <= KMEM_ZMAX are rounded up to a KMEM_ZBASE multiple and served
 * from a power-of-two UMA zone; larger requests go to uma_large_malloc()
 * page-rounded.  On success the per-type stats (ks_memuse/ks_inuse) are
 * bumped; on failure only ks_calls is counted (via the "out" label).
 * NOTE(review): ks_* counters are updated without holding malloc_mtx —
 * presumably tolerated racy stats; confirm intended.
 */
138void *
139 malloc(size, type, flags)
140 unsigned long size;
141 struct malloc_type *type;
142 int flags;
143{
144 int indx;
145 caddr_t va;
146 uma_zone_t zone;
147 register struct malloc_type *ksp = type;
148
149#if 0
150 if (size == 0)
151 Debugger("zero size malloc");
152#endif
/* Sleeping allocation from interrupt context is a bug: assert against it. */
153#if defined(INVARIANTS)
154 if (flags == M_WAITOK)
155 KASSERT(curthread->td_intr_nesting_level == 0,
156 ("malloc(M_WAITOK) in interrupt context"));
157#endif
158 if (size <= KMEM_ZMAX) {
/* Round up to the next KMEM_ZBASE (16-byte) boundary, then index the zone. */
159 if (size & KMEM_ZMASK)
160 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
161 indx = kmemsize[size >> KMEM_ZSHIFT];
162 zone = kmemzones[indx].kz_zone;
163#ifdef MALLOC_PROFILE
164 krequests[size >> KMEM_ZSHIFT]++;
165#endif
166 va = uma_zalloc(zone, flags);
167 if (va == NULL)
168 goto out;
169
/* Record which zone sizes this type has used (bitmask), and charge the
 * full zone size, not the requested size. */
170 ksp->ks_size |= 1 << indx;
171 size = zone->uz_size;
172 } else {
173 size = roundup(size, PAGE_SIZE);
174 zone = NULL;
175 va = uma_large_malloc(size, flags);
176 if (va == NULL)
177 goto out;
178 }
179 ksp->ks_memuse += size;
180 ksp->ks_inuse++;
181out:
182 ksp->ks_calls++;
183 if (ksp->ks_memuse > ksp->ks_maxused)
184 ksp->ks_maxused = ksp->ks_memuse;
185
186 /* XXX: Do idle pre-zeroing. */
187 if (va != NULL && (flags & M_ZERO))
188 bzero(va, size);
189 return ((void *) va);
190}
191
192/*
193 * free:
194 *
195 * Free a block of memory allocated by malloc.
196 *
197 * This routine may not block.
198 */
/*
 * free: release memory obtained from malloc(9) and credit the stats of
 * the given malloc type.  The owning slab is found by masking the pointer
 * down to its UMA slab boundary and probing the global mallochash; a miss
 * means the pointer was never allocated here, which panics.
 */
196void
197 free(addr, type)
198 void *addr;
199 struct malloc_type *type;
200{
201 uma_slab_t slab;
202 void *mem;
203 u_long size;
204 register struct malloc_type *ksp = type;
205
206 /* free(NULL, ...) does nothing */
207 if (addr == NULL)
208 return;
209
/* Reject pointers that are not at least 4-byte aligned — no valid
 * allocation from the zones above can be misaligned like this. */
210 if ((u_long)addr & 3) { /* XXX: Jeff: find better value for 3 */
211 printf("free(9)'ing unaligned pointer %p\n", addr);
212 Debugger("Don't do that...");
213 return;
214 }
215
216 size = 0;
217
/* Mask down to the slab base address and look it up in the malloc hash. */
218 mem = (void *)((u_long)addr & (~UMA_SLAB_MASK));
219 slab = hash_sfind(mallochash, mem);
220
221 if (slab == NULL)
222 panic("free: address %p(%p) has not been allocated.\n",
223 addr, mem);
224
/* Zone-backed allocations free through the zone; page-multiple ones
 * (UMA_SLAB_MALLOC) go back via uma_large_free(). */
225 if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
226 size = slab->us_zone->uz_size;
227 uma_zfree_arg(slab->us_zone, addr, slab);
228 } else {
229 size = slab->us_size;
230 uma_large_free(slab);
231 }
/* NOTE(review): stats updated without malloc_mtx, same as malloc(). */
232 ksp->ks_memuse -= size;
233 ksp->ks_inuse--;
234}
238
239/*
240 * realloc: change the size of a memory block
241 */
/*
 * realloc: change the size of a memory block.  The block is reused in
 * place when the new size still fits and is not "sufficiently smaller"
 * (see REALLOC_FRACTION above); otherwise a new block is allocated, the
 * old contents copied (up to the smaller of the two sizes) and the old
 * block freed.  Returns NULL (leaving the original intact) on failure.
 */
239void *
240 realloc(addr, size, type, flags)
241 void *addr;
242 unsigned long size;
243 struct malloc_type *type;
244 int flags;
245{
246 uma_slab_t slab;
247 unsigned long alloc;
248 void *newaddr;
249
250 /* realloc(NULL, ...) is equivalent to malloc(...) */
251 if (addr == NULL)
252 return (malloc(size, type, flags));
253
/* Locate the slab backing this pointer, as in free(). */
254 slab = hash_sfind(mallochash,
255 (void *)((u_long)addr & ~(UMA_SLAB_MASK)));
256
257 /* Sanity check */
258 KASSERT(slab != NULL,
259 ("realloc: address %p out of range", (void *)addr));
260
261 /* Get the size of the original block */
262 if (slab->us_zone)
263 alloc = slab->us_zone->uz_size;
264 else
265 alloc = slab->us_size;
266
267 /* Reuse the original block if appropriate */
268 if (size <= alloc
269 && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
270 return (addr);
271
272 /* Allocate a new, bigger (or smaller) block */
273 if ((newaddr = malloc(size, type, flags)) == NULL)
274 return (NULL);
275
276 /* Copy over original contents */
277 bcopy(addr, newaddr, min(size, alloc));
278 free(addr, type);
279 return (newaddr);
280}
284
285/*
286 * reallocf: same as realloc() but free memory on failure.
287 */
/*
 * reallocf: same as realloc() but frees the original block when the
 * resize fails, so callers can write p = reallocf(p, ...) without
 * leaking on failure.
 */
285void *
286 reallocf(addr, size, type, flags)
287 void *addr;
288 unsigned long size;
289 struct malloc_type *type;
290 int flags;
291{
292 void *mem;
293
294 if ((mem = realloc(addr, size, type, flags)) == NULL)
295 free(addr, type);
296 return (mem);
297}
301
302/*
303 * Initialize the kernel memory allocator
304 */
305/* ARGSUSED*/
/*
 * kmeminit: SYSINIT hook (SI_SUB_KMEM) that sizes and creates the kmem
 * submap, hands UMA its malloc hash, and creates the power-of-two malloc
 * zones while filling the kmemsize[] size->zone-index lookup table.
 */
303static void
304kmeminit(dummy)
305 void *dummy;
306{
307 u_int8_t indx;
308 u_long npg;
309 u_long mem_size;
310 void *hashmem;
311 u_long hashsize;
312 int highbit;
313 int bits;
314 int i;
315
316 mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
317
318 /*
319 * Try to auto-tune the kernel memory size, so that it is
320 * more applicable for a wider range of machine sizes.
321 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
322 * a VM_KMEM_SIZE of 12MB is a fair compromise. The
323 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
324 * available, and on an X86 with a total KVA space of 256MB,
325 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
326 *
327 * Note that the kmem_map is also used by the zone allocator,
328 * so make sure that there is enough space.
329 */
330 vm_kmem_size = VM_KMEM_SIZE;
331 mem_size = cnt.v_page_count * PAGE_SIZE;
332
333#if defined(VM_KMEM_SIZE_SCALE)
334 if ((mem_size / VM_KMEM_SIZE_SCALE) > vm_kmem_size)
335 vm_kmem_size = mem_size / VM_KMEM_SIZE_SCALE;
336#endif
337
338#if defined(VM_KMEM_SIZE_MAX)
339 if (vm_kmem_size >= VM_KMEM_SIZE_MAX)
340 vm_kmem_size = VM_KMEM_SIZE_MAX;
341#endif
342
343 /* Allow final override from the kernel environment */
/* NOTE(review): vm_kmem_size is u_int but TUNABLE_INT_FETCH takes an
 * int pointer — relies on matching representation; confirm. */
344 TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size);
345
346 /*
347 * Limit kmem virtual size to twice the physical memory.
348 * This allows for kmem map sparseness, but limits the size
349 * to something sane. Be careful to not overflow the 32bit
350 * ints while doing the check.
351 */
352 if ((vm_kmem_size / 2) > (cnt.v_page_count * PAGE_SIZE))
353 vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;
354
355 /*
356 * In mbuf_init(), we set up submaps for mbufs and clusters, in which
357 * case we rounddown() (nmbufs * MSIZE) and (nmbclusters * MCLBYTES),
358 * respectively. Mathematically, this means that what we do here may
359 * amount to slightly more address space than we need for the submaps,
360 * but it never hurts to have an extra page in kmem_map.
361 */
362 npg = (nmbufs * MSIZE + nmbclusters * MCLBYTES + nmbcnt *
363 sizeof(u_int) + vm_kmem_size) / PAGE_SIZE;
364
365 kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
366 (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
367 kmem_map->system_map = 1;
368
369 hashsize = npg * sizeof(void *);
370
371 highbit = 0;
372 bits = 0;
373 /* The hash size must be a power of two */
/* This scan finds the highest set bit; if more than one bit is set the
 * size is rounded DOWN to that power of two (never up).
 * NOTE(review): "1 << i" shifts an int while i runs to
 * 8*sizeof(u_long)-1 — undefined for i >= 31 where u_long is 64-bit;
 * should presumably be 1UL << i.  Confirm against later revisions. */
374 for (i = 0; i < 8 * sizeof(hashsize); i++)
375 if (hashsize & (1 << i)) {
376 highbit = i;
377 bits++;
378 }
379 if (bits > 1)
380 hashsize = 1 << (highbit);
381
382 hashmem = (void *)kmem_alloc(kernel_map, (vm_size_t)hashsize);
383 uma_startup2(hashmem, hashsize / sizeof(void *));
384
/* Create one UMA zone per kmemzones[] entry and point every 16-byte
 * size step at the smallest zone that can hold it. */
385 for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
386 int size = kmemzones[indx].kz_size;
387 char *name = kmemzones[indx].kz_name;
388
389 kmemzones[indx].kz_zone = uma_zcreate(name, size, NULL, NULL,
390 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
391
392 for (;i <= size; i+= KMEM_ZBASE)
393 kmemsize[i >> KMEM_ZSHIFT] = indx;
394
395 }
396}
400
/*
 * malloc_init: register a malloc type (called via MALLOC_DEFINE/SYSINIT)
 * by pushing it onto the kmemstatistics list under malloc_mtx.
 * NOTE(review): the early return when ks_next != NULL (type already
 * registered) exits WITHOUT mtx_unlock(&malloc_mtx) — this looks like a
 * lock leak; confirm against a later revision of this file.
 */
398void
399malloc_init(data)
400 void *data;
401{
402 struct malloc_type *type = (struct malloc_type *)data;
403
404 mtx_lock(&malloc_mtx);
405 if (type->ks_magic != M_MAGIC)
406 panic("malloc type lacks magic");
407
408 if (cnt.v_page_count == 0)
409 panic("malloc_init not allowed before vm init");
410
411 if (type->ks_next != NULL)
412 return;
413
414 type->ks_next = kmemstatistics;
415 kmemstatistics = type;
416 mtx_unlock(&malloc_mtx);
417}
421
/*
 * malloc_uninit: unregister a malloc type, unlinking it from the singly
 * linked kmemstatistics list (head case or interior case) under
 * malloc_mtx, then clearing its ks_next pointer.
 */
419void
420malloc_uninit(data)
421 void *data;
422{
423 struct malloc_type *type = (struct malloc_type *)data;
424 struct malloc_type *t;
425
426 mtx_lock(&malloc_mtx);
427 if (type->ks_magic != M_MAGIC)
428 panic("malloc type lacks magic");
429
430 if (cnt.v_page_count == 0)
431 panic("malloc_uninit not allowed before vm init");
432
/* Unlink: special-case the list head, otherwise walk to the predecessor. */
433 if (type == kmemstatistics)
434 kmemstatistics = type->ks_next;
435 else {
436 for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
437 if (t->ks_next == type) {
438 t->ks_next = type->ks_next;
439 break;
440 }
441 }
442 }
443 type->ks_next = NULL;
444 mtx_unlock(&malloc_mtx);
445}
449
/*
 * sysctl_kern_malloc: handler for kern.malloc — formats one text line per
 * registered malloc type (InUse/MemUse/HighUse/Requests plus the list of
 * zone sizes used, from the ks_size bitmask) into a temporary buffer and
 * returns it via SYSCTL_OUT.
 * NOTE(review): malloc(M_WAITOK) is called while holding malloc_mtx,
 * which may sleep with the mutex held; confirm this is permitted here.
 */
447static int
448sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
449{
450 struct malloc_type *type;
451 int linesize = 128;
452 int curline;
453 int bufsize;
454 int first;
455 int error;
456 char *buf;
457 char *p;
458 int cnt;
459 int len;
460 int i;
461
462 cnt = 0;
463
/* Count the registered types so the output buffer can be sized. */
464 mtx_lock(&malloc_mtx);
465 for (type = kmemstatistics; type != NULL; type = type->ks_next)
466 cnt++;
467
468 bufsize = linesize * (cnt + 1);
469 p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
470
471 len = snprintf(p, linesize,
472 "\n Type InUse MemUse HighUse Requests Size(s)\n");
473 p += len;
474
/* The cnt guard bounds the walk to the count taken above, in case the
 * list grew while the buffer was being allocated. */
475 for (type = kmemstatistics; cnt != 0 && type != NULL;
476 type = type->ks_next, cnt--) {
477 if (type->ks_calls == 0)
478 continue;
479
480 curline = linesize - 2; /* Leave room for the \n */
481 len = snprintf(p, curline, "%13s%6ld%6ldK%7ldK%9llu",
482 type->ks_shortdesc,
483 type->ks_inuse,
484 (type->ks_memuse + 1023) / 1024,
485 (type->ks_maxused + 1023) / 1024,
486 (long long unsigned)type->ks_calls);
487 curline -= len;
488 p += len;
489
/* Append a comma-separated list of the zone sizes this type has used. */
490 first = 1;
491 for (i = 0; i < 8 * sizeof(type->ks_size); i++)
492 if (type->ks_size & (1 << i)) {
493 if (first)
494 len = snprintf(p, curline, " ");
495 else
496 len = snprintf(p, curline, ",");
497 curline -= len;
498 p += len;
499
500 len = snprintf(p, curline,
501 "%s", kmemzones[i].kz_name);
502 curline -= len;
503 p += len;
504
505 first = 0;
506 }
507
508 len = snprintf(p, 2, "\n");
509 p += len;
510 }
511
512 mtx_unlock(&malloc_mtx);
513 error = SYSCTL_OUT(req, buf, p - buf);
514
515 free(buf, M_TEMP);
516 return (error);
517}
521
522SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
523 NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");
524
525#ifdef MALLOC_PROFILE
526
/*
 * sysctl_kern_mprof (MALLOC_PROFILE only): handler for kern.mprof —
 * reports, per 16-byte request-size bucket, the request count and the
 * real zone size served, plus totals of memory used and memory "wasted"
 * (real size minus requested size, summed over requests).
 */
524static int
525sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
526{
527 int linesize = 64;
528 uint64_t count;
529 uint64_t waste;
530 uint64_t mem;
531 int bufsize;
532 int error;
533 char *buf;
534 int rsize;
535 int size;
536 char *p;
537 int len;
538 int i;
539
540 bufsize = linesize * (KMEM_ZSIZE + 1);
541 bufsize += 128; /* For the stats line */
542 bufsize += 128; /* For the banner line */
543 waste = 0;
544 mem = 0;
545
546 p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
547 len = snprintf(p, bufsize,
548 "\n Size Requests Real Size\n");
549 bufsize -= len;
550 p += len;
551
/* One bucket per KMEM_ZBASE step; kmemsize[] maps a bucket to the zone
 * that actually serves it, whose kz_size is the "real" allocation size. */
552 for (i = 0; i < KMEM_ZSIZE; i++) {
553 size = i << KMEM_ZSHIFT;
554 rsize = kmemzones[kmemsize[i]].kz_size;
555 count = (long long unsigned)krequests[i];
556
557 len = snprintf(p, bufsize, "%6d%28llu%11d\n",
558 size, (unsigned long long)count, rsize);
559 bufsize -= len;
560 p += len;
561
562 if ((rsize * count) > (size * count))
563 waste += (rsize * count) - (size * count);
564 mem += (rsize * count);
565 }
566
567 len = snprintf(p, bufsize,
568 "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
569 (unsigned long long)mem, (unsigned long long)waste);
570 p += len;
571
572 error = SYSCTL_OUT(req, buf, p - buf);
573
574 free(buf, M_TEMP);
575 return (error);
576}
580
581SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
582 NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
583#endif /* MALLOC_PROFILE */
186 return ((void *) va);
187}
188
189/*
190 * free:
191 *
192 * Free a block of memory allocated by malloc.
193 *
194 * This routine may not block.
195 */
/*
 * free (old-revision diff column): release memory obtained from malloc(9)
 * and credit the per-type stats.  The owning slab is found by masking the
 * pointer to its UMA slab boundary and probing mallochash; a miss panics.
 */
196void
197 free(addr, type)
198 void *addr;
199 struct malloc_type *type;
200{
201 uma_slab_t slab;
202 void *mem;
203 u_long size;
204 register struct malloc_type *ksp = type;
205
206 /* free(NULL, ...) does nothing */
207 if (addr == NULL)
208 return;
209
/* Reject pointers that are not at least 4-byte aligned. */
210 if ((u_long)addr & 3) { /* XXX: Jeff: find better value for 3 */
211 printf("free(9)'ing unaligned pointer %p\n", addr);
212 Debugger("Don't do that...");
213 return;
214 }
215
216 size = 0;
217
218 mem = (void *)((u_long)addr & (~UMA_SLAB_MASK));
219 slab = hash_sfind(mallochash, mem);
220
221 if (slab == NULL)
222 panic("free: address %p(%p) has not been allocated.\n",
223 addr, mem);
224
/* Zone-backed frees go through the zone; large (UMA_SLAB_MALLOC)
 * allocations go back via uma_large_free(). */
225 if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
226 size = slab->us_zone->uz_size;
227 uma_zfree_arg(slab->us_zone, addr, slab);
228 } else {
229 size = slab->us_size;
230 uma_large_free(slab);
231 }
232 ksp->ks_memuse -= size;
233 ksp->ks_inuse--;
234}
235
236/*
237 * realloc: change the size of a memory block
238 */
/*
 * realloc (old-revision diff column): change the size of a memory block.
 * Reuses the block in place when the new size fits and is not
 * "sufficiently smaller" (REALLOC_FRACTION); otherwise allocates a new
 * block, copies min(size, alloc) bytes and frees the old one.
 */
239void *
240 realloc(addr, size, type, flags)
241 void *addr;
242 unsigned long size;
243 struct malloc_type *type;
244 int flags;
245{
246 uma_slab_t slab;
247 unsigned long alloc;
248 void *newaddr;
249
250 /* realloc(NULL, ...) is equivalent to malloc(...) */
251 if (addr == NULL)
252 return (malloc(size, type, flags));
253
254 slab = hash_sfind(mallochash,
255 (void *)((u_long)addr & ~(UMA_SLAB_MASK)));
256
257 /* Sanity check */
258 KASSERT(slab != NULL,
259 ("realloc: address %p out of range", (void *)addr));
260
261 /* Get the size of the original block */
262 if (slab->us_zone)
263 alloc = slab->us_zone->uz_size;
264 else
265 alloc = slab->us_size;
266
267 /* Reuse the original block if appropriate */
268 if (size <= alloc
269 && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
270 return (addr);
271
272 /* Allocate a new, bigger (or smaller) block */
273 if ((newaddr = malloc(size, type, flags)) == NULL)
274 return (NULL);
275
276 /* Copy over original contents */
277 bcopy(addr, newaddr, min(size, alloc));
278 free(addr, type);
279 return (newaddr);
280}
281
282/*
283 * reallocf: same as realloc() but free memory on failure.
284 */
/*
 * reallocf (old-revision diff column): realloc() variant that frees the
 * original block on failure, so p = reallocf(p, ...) never leaks.
 */
285void *
286 reallocf(addr, size, type, flags)
287 void *addr;
288 unsigned long size;
289 struct malloc_type *type;
290 int flags;
291{
292 void *mem;
293
294 if ((mem = realloc(addr, size, type, flags)) == NULL)
295 free(addr, type);
296 return (mem);
297}
298
299/*
300 * Initialize the kernel memory allocator
301 */
302/* ARGSUSED*/
/*
 * kmeminit (old-revision diff column): SYSINIT hook that sizes/creates
 * the kmem submap, hands UMA its malloc hash, and builds the
 * power-of-two malloc zones plus the kmemsize[] lookup table.
 */
306static void
307kmeminit(dummy)
308 void *dummy;
309{
310 u_int8_t indx;
311 u_long npg;
312 u_long mem_size;
313 void *hashmem;
314 u_long hashsize;
315 int highbit;
316 int bits;
317 int i;
318
319 mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
320
321 /*
322 * Try to auto-tune the kernel memory size, so that it is
323 * more applicable for a wider range of machine sizes.
324 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
325 * a VM_KMEM_SIZE of 12MB is a fair compromise. The
326 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
327 * available, and on an X86 with a total KVA space of 256MB,
328 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
329 *
330 * Note that the kmem_map is also used by the zone allocator,
331 * so make sure that there is enough space.
332 */
333 vm_kmem_size = VM_KMEM_SIZE;
334 mem_size = cnt.v_page_count * PAGE_SIZE;
335
336#if defined(VM_KMEM_SIZE_SCALE)
337 if ((mem_size / VM_KMEM_SIZE_SCALE) > vm_kmem_size)
338 vm_kmem_size = mem_size / VM_KMEM_SIZE_SCALE;
339#endif
340
341#if defined(VM_KMEM_SIZE_MAX)
342 if (vm_kmem_size >= VM_KMEM_SIZE_MAX)
343 vm_kmem_size = VM_KMEM_SIZE_MAX;
344#endif
345
346 /* Allow final override from the kernel environment */
347 TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size);
348
349 /*
350 * Limit kmem virtual size to twice the physical memory.
351 * This allows for kmem map sparseness, but limits the size
352 * to something sane. Be careful to not overflow the 32bit
353 * ints while doing the check.
354 */
355 if ((vm_kmem_size / 2) > (cnt.v_page_count * PAGE_SIZE))
356 vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;
357
358 /*
359 * In mbuf_init(), we set up submaps for mbufs and clusters, in which
360 * case we rounddown() (nmbufs * MSIZE) and (nmbclusters * MCLBYTES),
361 * respectively. Mathematically, this means that what we do here may
362 * amount to slightly more address space than we need for the submaps,
363 * but it never hurts to have an extra page in kmem_map.
364 */
365 npg = (nmbufs * MSIZE + nmbclusters * MCLBYTES + nmbcnt *
366 sizeof(u_int) + vm_kmem_size) / PAGE_SIZE;
367
368 kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
369 (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
370 kmem_map->system_map = 1;
371
372 hashsize = npg * sizeof(void *);
373
374 highbit = 0;
375 bits = 0;
376 /* The hash size must be a power of two */
/* Finds the highest set bit and rounds hashsize DOWN to that power of
 * two.  NOTE(review): "1 << i" shifts an int while i can reach
 * 8*sizeof(u_long)-1 — undefined for i >= 31 on 64-bit u_long. */
377 for (i = 0; i < 8 * sizeof(hashsize); i++)
378 if (hashsize & (1 << i)) {
379 highbit = i;
380 bits++;
381 }
382 if (bits > 1)
383 hashsize = 1 << (highbit);
384
385 hashmem = (void *)kmem_alloc(kernel_map, (vm_size_t)hashsize);
386 uma_startup2(hashmem, hashsize / sizeof(void *));
387
/* Create one UMA zone per kmemzones[] entry; map each 16-byte size step
 * to the smallest zone that can hold it. */
388 for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
389 int size = kmemzones[indx].kz_size;
390 char *name = kmemzones[indx].kz_name;
391
392 kmemzones[indx].kz_zone = uma_zcreate(name, size, NULL, NULL,
393 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
394
395 for (;i <= size; i+= KMEM_ZBASE)
396 kmemsize[i >> KMEM_ZSHIFT] = indx;
397
398 }
399}
397
/*
 * malloc_init (old-revision diff column): register a malloc type on the
 * kmemstatistics list under malloc_mtx.
 * NOTE(review): the early return when ks_next != NULL exits WITHOUT
 * releasing malloc_mtx — apparent lock leak; confirm against later
 * revisions.
 */
401void
402malloc_init(data)
403 void *data;
404{
405 struct malloc_type *type = (struct malloc_type *)data;
406
407 mtx_lock(&malloc_mtx);
408 if (type->ks_magic != M_MAGIC)
409 panic("malloc type lacks magic");
410
411 if (cnt.v_page_count == 0)
412 panic("malloc_init not allowed before vm init");
413
414 if (type->ks_next != NULL)
415 return;
416
417 type->ks_next = kmemstatistics;
418 kmemstatistics = type;
419 mtx_unlock(&malloc_mtx);
420}
418
/*
 * malloc_uninit (old-revision diff column): unlink a malloc type from
 * the singly linked kmemstatistics list (head or interior case) under
 * malloc_mtx and clear its ks_next pointer.
 */
422void
423malloc_uninit(data)
424 void *data;
425{
426 struct malloc_type *type = (struct malloc_type *)data;
427 struct malloc_type *t;
428
429 mtx_lock(&malloc_mtx);
430 if (type->ks_magic != M_MAGIC)
431 panic("malloc type lacks magic");
432
433 if (cnt.v_page_count == 0)
434 panic("malloc_uninit not allowed before vm init");
435
/* Unlink: head special-case, otherwise walk to the predecessor. */
436 if (type == kmemstatistics)
437 kmemstatistics = type->ks_next;
438 else {
439 for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
440 if (t->ks_next == type) {
441 t->ks_next = type->ks_next;
442 break;
443 }
444 }
445 }
446 type->ks_next = NULL;
447 mtx_unlock(&malloc_mtx);
448}
446
/*
 * sysctl_kern_malloc (old-revision diff column): handler for kern.malloc.
 * Emits one text line per registered malloc type plus the zone sizes it
 * has used (ks_size bitmask), returned via SYSCTL_OUT.
 * NOTE(review): malloc(M_WAITOK) is called while holding malloc_mtx —
 * may sleep with the mutex held; confirm permitted.
 */
450static int
451sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
452{
453 struct malloc_type *type;
454 int linesize = 128;
455 int curline;
456 int bufsize;
457 int first;
458 int error;
459 char *buf;
460 char *p;
461 int cnt;
462 int len;
463 int i;
464
465 cnt = 0;
466
467 mtx_lock(&malloc_mtx);
468 for (type = kmemstatistics; type != NULL; type = type->ks_next)
469 cnt++;
470
471 bufsize = linesize * (cnt + 1);
472 p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
473
474 len = snprintf(p, linesize,
475 "\n Type InUse MemUse HighUse Requests Size(s)\n");
476 p += len;
477
/* cnt bounds the walk to the count taken above. */
478 for (type = kmemstatistics; cnt != 0 && type != NULL;
479 type = type->ks_next, cnt--) {
480 if (type->ks_calls == 0)
481 continue;
482
483 curline = linesize - 2; /* Leave room for the \n */
484 len = snprintf(p, curline, "%13s%6ld%6ldK%7ldK%9llu",
485 type->ks_shortdesc,
486 type->ks_inuse,
487 (type->ks_memuse + 1023) / 1024,
488 (type->ks_maxused + 1023) / 1024,
489 (long long unsigned)type->ks_calls);
490 curline -= len;
491 p += len;
492
/* Comma-separated list of zone sizes this type has drawn from. */
493 first = 1;
494 for (i = 0; i < 8 * sizeof(type->ks_size); i++)
495 if (type->ks_size & (1 << i)) {
496 if (first)
497 len = snprintf(p, curline, " ");
498 else
499 len = snprintf(p, curline, ",");
500 curline -= len;
501 p += len;
502
503 len = snprintf(p, curline,
504 "%s", kmemzones[i].kz_name);
505 curline -= len;
506 p += len;
507
508 first = 0;
509 }
510
511 len = snprintf(p, 2, "\n");
512 p += len;
513 }
514
515 mtx_unlock(&malloc_mtx);
516 error = SYSCTL_OUT(req, buf, p - buf);
517
518 free(buf, M_TEMP);
519 return (error);
520}
518
519SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
520 NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");
521
522#ifdef MALLOC_PROFILE
523
/*
 * sysctl_kern_mprof (old-revision diff column, MALLOC_PROFILE only):
 * handler for kern.mprof — per 16-byte request-size bucket, reports the
 * request count and real zone size, plus totals of memory used and
 * wasted (real minus requested, summed over requests).
 */
527static int
528sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
529{
530 int linesize = 64;
531 uint64_t count;
532 uint64_t waste;
533 uint64_t mem;
534 int bufsize;
535 int error;
536 char *buf;
537 int rsize;
538 int size;
539 char *p;
540 int len;
541 int i;
542
543 bufsize = linesize * (KMEM_ZSIZE + 1);
544 bufsize += 128; /* For the stats line */
545 bufsize += 128; /* For the banner line */
546 waste = 0;
547 mem = 0;
548
549 p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
550 len = snprintf(p, bufsize,
551 "\n Size Requests Real Size\n");
552 bufsize -= len;
553 p += len;
554
/* kmemsize[] maps each bucket to its serving zone; kz_size is the real
 * allocation size for that bucket. */
555 for (i = 0; i < KMEM_ZSIZE; i++) {
556 size = i << KMEM_ZSHIFT;
557 rsize = kmemzones[kmemsize[i]].kz_size;
558 count = (long long unsigned)krequests[i];
559
560 len = snprintf(p, bufsize, "%6d%28llu%11d\n",
561 size, (unsigned long long)count, rsize);
562 bufsize -= len;
563 p += len;
564
565 if ((rsize * count) > (size * count))
566 waste += (rsize * count) - (size * count);
567 mem += (rsize * count);
568 }
569
570 len = snprintf(p, bufsize,
571 "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
572 (unsigned long long)mem, (unsigned long long)waste);
573 p += len;
574
575 error = SYSCTL_OUT(req, buf, p - buf);
576
577 free(buf, M_TEMP);
578 return (error);
579}
577
578SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
579 NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
580#endif /* MALLOC_PROFILE */