kern_malloc.c revision 160598
/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  The back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_malloc.c 160598 2006-07-23 19:51:39Z rwatson $");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
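/*
 * Editorial sketch, for illustration only (not part of this file): a kernel
 * module would typically declare its own malloc type with MALLOC_DEFINE()
 * and pass it to malloc(9)/free(9) so that allocations are accounted to that
 * type.  The names below ("foo_softc", M_FOOBUF) are hypothetical.
 */
#if 0
MALLOC_DEFINE(M_FOOBUF, "foobuf", "foo driver buffers");

struct foo_softc *
foo_attach(void)
{
	struct foo_softc *sc;

	/* Accounted against the "foobuf" type in vmstat -m output. */
	sc = malloc(sizeof(*sc), M_FOOBUF, M_WAITOK | M_ZERO);
	return (sc);
}

void
foo_detach(struct foo_softc *sc)
{

	free(sc, M_FOOBUF);
}
#endif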
static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static char *kmembase;
static char *kmemlimit;
static int kmemcount;

#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	PAGE_SIZE
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
#if PAGE_SIZE > 4096
	{8192, "8192", NULL},
#if PAGE_SIZE > 8192
	{16384, "16384", NULL},
#if PAGE_SIZE > 16384
	{32768, "32768", NULL},
#if PAGE_SIZE > 32768
	{65536, "65536", NULL},
#if PAGE_SIZE > 65536
#error	"Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};
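/*
 * Editorial worked example (not in the original source): with
 * KMEM_ZBASE = 16 and KMEM_ZSHIFT = 4, a malloc(9) request is first rounded
 * up to a multiple of 16 and then mapped through kmemsize[] to a bucket
 * index.  For a 100-byte request:
 *
 *	size = (100 & ~KMEM_ZMASK) + KMEM_ZBASE = 96 + 16 = 112
 *	indx = kmemsize[112 >> KMEM_ZSHIFT] = kmemsize[7]
 *
 * kmemsize[7] is initialized by kmeminit() to the index of the smallest
 * bucket holding at least 112 bytes, i.e., the "128" zone, so a 100-byte
 * request actually consumes 128 bytes.
 */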
/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;

u_int vm_kmem_size;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size, CTLFLAG_RD, &vm_kmem_size, 0,
    "Size of kernel memory");

u_int vm_kmem_size_max;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RD, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RD, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;
	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;
	critical_exit();
}
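/*
 * Editorial sketch (not part of this file) of the lock-free per-CPU counter
 * pattern used above: critical_enter()/critical_exit() pin the thread to its
 * current CPU, so each CPU's slot in the stats array is written only by the
 * CPU that owns it and no mutex or atomic operation is needed.  Readers that
 * sum the slots may see slightly stale values; the sysctl handlers below
 * accept that.  The names (example_stats, example_count) are hypothetical.
 */
#if 0
static struct malloc_type_stats example_stats[MAXCPU];

static void
example_count(unsigned long size)
{

	critical_enter();		/* No preemption or migration ... */
	example_stats[curcpu].mts_memalloced += size;
	example_stats[curcpu].mts_numallocs++;
	critical_exit();		/* ... until we leave the section. */
}
#endif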
/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
	uma_keg_t keg;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#if 0
	if (size == 0)
		kdb_enter("zero size malloc");
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp))
		return memguard_alloc(size, flags);
#endif

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(size);
#endif

	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
		keg = zone->uz_keg;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = keg->uk_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		keg = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}
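/*
 * Editorial caller-side sketch (not part of this file): M_WAITOK requests
 * may sleep and are guaranteed non-NULL by the KASSERT above, so they must
 * not be used in contexts that may not sleep; M_NOWAIT requests can fail and
 * the caller must check for NULL.  The function name is hypothetical.
 */
#if 0
static int
example_nowait(void)
{
	char *buf;

	/* In contexts that may not sleep, M_NOWAIT is mandatory. */
	buf = malloc(512, M_TEMP, M_NOWAIT | M_ZERO);
	if (buf == NULL)
		return (ENOMEM);	/* M_NOWAIT allocations can fail. */
	/* ... use buf ... */
	free(buf, M_TEMP);
	return (0);
}
#endif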
/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp)) {
		memguard_free(addr);
		return;
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	addr = redzone_addr_ntor(addr);
#endif

	size = 0;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtpp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes on
		 * 64-bit machines.
		 */
		mtpp = (struct malloc_type **)
		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
		mtpp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtpp = mtp;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp)) {
		slab = NULL;
		alloc = size;
	} else {
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

#ifdef DEBUG_MEMGUARD
	}
#endif

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
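/*
 * Editorial caller-side sketch (not part of this file): with plain
 * realloc(), a failed grow leaks the old block unless the caller keeps a
 * second pointer; reallocf() frees the old block on failure, so a single
 * pointer can be overwritten safely.  The names here are hypothetical.
 */
#if 0
static int
example_grow(char **bufp, unsigned long newsize)
{

	/* On failure *bufp has already been freed; just report ENOMEM. */
	if ((*bufp = reallocf(*bufp, newsize, M_TEMP, M_NOWAIT)) == NULL)
		return (ENOMEM);
	return (0);
}
#endif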
/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(void *dummy)
{
	u_int8_t indx;
	u_long mem_size;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale > 0 &&
	    (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;

	/* Allow final override from the kernel environment */
#ifndef BURN_BRIDGES
	if (TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size) != 0)
		printf("kern.vm.kmem.size is now called vm.kmem_size!\n");
#endif
	TUNABLE_INT_FETCH("vm.kmem_size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful to not overflow the 32bit
	 * ints while doing the check.
	 */
	if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;
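	/*
	 * Editorial worked example (not in the original source): assume
	 * PAGE_SIZE = 4096, a machine with 1GB of RAM (mem_size = 262144
	 * pages), and a scale factor of 4 as suggested above.  Then
	 * mem_size / vm_kmem_size_scale = 65536 pages, i.e. 256MB, which
	 * typically exceeds the VM_KMEM_SIZE floor and so becomes
	 * vm_kmem_size, subject to the vm.kmem_size_max cap and the
	 * 2x-physical-memory clamp computed above.
	 */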
	/*
	 * Tune settings based on the kernel map's size at this time.
	 */
	init_param3(vm_kmem_size / PAGE_SIZE);

	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
	    (vm_offset_t *)&kmemlimit, vm_kmem_size);
	kmem_map->system_map = 1;

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	vm_memguard_divisor = 10;
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	/* Pick a conservative value if the provided value is unusable. */
	if ((vm_memguard_divisor <= 0) ||
	    ((vm_kmem_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	memguard_init(kmem_map, vm_kmem_size / vm_memguard_divisor);
#endif

	uma_startup2();

	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		for (;i <= size; i+= KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;

	}
}

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));
	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp)
				temp->ks_next = mtp->ks_next;
		}
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}
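/*
 * Editorial note: malloc_init() and malloc_uninit() above are not usually
 * called directly.  MALLOC_DEFINE() (see sys/malloc.h) expands to the static
 * struct malloc_type plus, roughly, the following registration hooks, so
 * each type is linked onto kmemstatistics at module load and torn down, with
 * leak detection, at unload:
 *
 *	SYSINIT(..., SI_SUB_KMEM, SI_ORDER_SECOND, malloc_init, type);
 *	SYSUNINIT(..., SI_SUB_KMEM, SI_ORDER_ANY, malloc_uninit, type);
 */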
static int
sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stats mts_local, *mtsp;
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	struct sbuf sbuf;
	long temp_allocs, temp_bytes;
	int linesize = 128;
	int bufsize;
	int first;
	int error;
	char *buf;
	int cnt;
	int i;

	cnt = 0;

	/* Guess at how much room is needed. */
	mtx_lock(&malloc_mtx);
	cnt = kmemcount;
	mtx_unlock(&malloc_mtx);

	bufsize = linesize * (cnt + 1);
	buf = malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	sbuf_new(&sbuf, buf, bufsize, SBUF_FIXEDLEN);

	mtx_lock(&malloc_mtx);
	sbuf_printf(&sbuf,
	    "\n        Type  InUse MemUse HighUse Requests  Size(s)\n");
	for (mtp = kmemstatistics; cnt != 0 && mtp != NULL;
	    mtp = mtp->ks_next, cnt--) {
		mtip = mtp->ks_handle;
		bzero(&mts_local, sizeof(mts_local));
		for (i = 0; i < MAXCPU; i++) {
			mtsp = &mtip->mti_stats[i];
			mts_local.mts_memalloced += mtsp->mts_memalloced;
			mts_local.mts_memfreed += mtsp->mts_memfreed;
			mts_local.mts_numallocs += mtsp->mts_numallocs;
			mts_local.mts_numfrees += mtsp->mts_numfrees;
			mts_local.mts_size |= mtsp->mts_size;
		}
		if (mts_local.mts_numallocs == 0)
			continue;

		/*
		 * Due to races in per-CPU statistics gathering, it's possible
		 * to get a slightly negative number here.  If we do,
		 * approximate with 0.
		 */
		if (mts_local.mts_numallocs > mts_local.mts_numfrees)
			temp_allocs = mts_local.mts_numallocs -
			    mts_local.mts_numfrees;
		else
			temp_allocs = 0;

		/*
		 * Ditto for bytes allocated.
		 */
		if (mts_local.mts_memalloced > mts_local.mts_memfreed)
			temp_bytes = mts_local.mts_memalloced -
			    mts_local.mts_memfreed;
		else
			temp_bytes = 0;

		/*
		 * High-watermark is no longer easily available, so we just
		 * print '-' for that column.
		 */
		sbuf_printf(&sbuf, "%13s%6lu%6luK       -%9llu",
		    mtp->ks_shortdesc,
		    temp_allocs,
		    (temp_bytes + 1023) / 1024,
		    (unsigned long long)mts_local.mts_numallocs);

		first = 1;
		for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1;
		    i++) {
			if (mts_local.mts_size & (1 << i)) {
				if (first)
					sbuf_printf(&sbuf, "  ");
				else
					sbuf_printf(&sbuf, ",");
				sbuf_printf(&sbuf, "%s",
				    kmemzones[i].kz_name);
				first = 0;
			}
		}
		sbuf_printf(&sbuf, "\n");
	}
	sbuf_finish(&sbuf);
	mtx_unlock(&malloc_mtx);

	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));

	sbuf_delete(&sbuf);
	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");
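/*
 * Editorial userland consumer sketch (not part of this file): the
 * kern.malloc string produced above can be fetched with sysctlbyname(3);
 * the binary kern.malloc_stats stream below is what libmemstat(3) parses.
 * Error handling is abbreviated.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	size_t len;
	char *buf;

	/* First call sizes the buffer, second call fills it. */
	if (sysctlbyname("kern.malloc", NULL, &len, NULL, 0) == -1)
		return (1);
	if ((buf = malloc(len)) == NULL ||
	    sysctlbyname("kern.malloc", buf, &len, NULL, 0) == -1)
		return (1);
	printf("%s", buf);	/* Same table vmstat -m prints. */
	free(buf);
	return (0);
}
#endif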
static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int buflen, count, error, i;
	struct sbuf sbuf;
	char *buffer;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);
	buflen = sizeof(mtsh) + count * (sizeof(mth) +
	    sizeof(struct malloc_type_stats) * MAXCPU) + 1;
	buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
	mtx_lock(&malloc_mtx);
	if (count < kmemcount) {
		free(buffer, M_TEMP);
		goto restart;
	}

	sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	if (sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh)) < 0) {
		mtx_unlock(&malloc_mtx);
		error = ENOMEM;
		goto out;
	}

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		if (sbuf_bcat(&sbuf, &mth, sizeof(mth)) < 0) {
			mtx_unlock(&malloc_mtx);
			error = ENOMEM;
			goto out;
		}

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			if (sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i])) < 0) {
				mtx_unlock(&malloc_mtx);
				error = ENOMEM;
				goto out;
			}
		}
	}
	mtx_unlock(&malloc_mtx);
	sbuf_finish(&sbuf);
	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
out:
	sbuf_delete(&sbuf);
	free(buffer, M_TEMP);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

#ifdef DDB
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int64_t allocs, frees;
	int i;

	db_printf("%18s %12s %12s %12s\n", "Type", "Allocs", "Frees",
	    "Used");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
		}
		db_printf("%18s %12ju %12ju %12ju\n", mtp->ks_shortdesc,
		    allocs, frees, allocs - frees);
	}
}
#endif

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128;		/* For the stats line */
	bufsize += 128;		/* For the banner line */
	waste = 0;
	mem = 0;

	buf = malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	sbuf_new(&sbuf, buf, bufsize, SBUF_FIXEDLEN);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	sbuf_finish(&sbuf);

	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));

	sbuf_delete(&sbuf);
	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */