62#include <sys/time.h> 63#include <sys/vmem.h> 64 65#include <vm/vm.h> 66#include <vm/pmap.h> 67#include <vm/vm_pageout.h> 68#include <vm/vm_param.h> 69#include <vm/vm_kern.h> 70#include <vm/vm_extern.h> 71#include <vm/vm_map.h> 72#include <vm/vm_page.h> 73#include <vm/uma.h> 74#include <vm/uma_int.h> 75#include <vm/uma_dbg.h> 76 77#ifdef DEBUG_MEMGUARD 78#include <vm/memguard.h> 79#endif 80#ifdef DEBUG_REDZONE 81#include <vm/redzone.h> 82#endif 83 84#if defined(INVARIANTS) && defined(__i386__) 85#include <machine/cpu.h> 86#endif 87 88#include <ddb/ddb.h> 89 90#ifdef KDTRACE_HOOKS 91#include <sys/dtrace_bsd.h> 92 93dtrace_malloc_probe_func_t dtrace_malloc_probe; 94#endif 95 96/* 97 * When realloc() is called, if the new size is sufficiently smaller than 98 * the old size, realloc() will allocate a new, smaller block to avoid 99 * wasting memory. 'Sufficiently smaller' is defined as: newsize <= 100 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'. 101 */ 102#ifndef REALLOC_FRACTION 103#define REALLOC_FRACTION 1 /* new block if <= half the size */ 104#endif 105 106/* 107 * Centrally define some common malloc types. 108 */ 109MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches"); 110MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory"); 111MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers"); 112 113MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options"); 114MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery"); 115 116static struct malloc_type *kmemstatistics; 117static int kmemcount; 118 119#define KMEM_ZSHIFT 4 120#define KMEM_ZBASE 16 121#define KMEM_ZMASK (KMEM_ZBASE - 1) 122 123#define KMEM_ZMAX 65536 124#define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT) 125static uint8_t kmemsize[KMEM_ZSIZE + 1]; 126 127#ifndef MALLOC_DEBUG_MAXZONES 128#define MALLOC_DEBUG_MAXZONES 1 129#endif 130static int numzones = MALLOC_DEBUG_MAXZONES; 131 132/* 133 * Small malloc(9) memory allocations are allocated from a set of UMA buckets 134 * of various sizes. 135 * 136 * XXX: The comment here used to read "These won't be powers of two for 137 * long." It's possible that a significant amount of wasted memory could be 138 * recovered by tuning the sizes of these buckets. 139 */ 140struct { 141 int kz_size; 142 char *kz_name; 143 uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES]; 144} kmemzones[] = { 145 {16, "16", }, 146 {32, "32", }, 147 {64, "64", }, 148 {128, "128", }, 149 {256, "256", }, 150 {512, "512", }, 151 {1024, "1024", }, 152 {2048, "2048", }, 153 {4096, "4096", }, 154 {8192, "8192", }, 155 {16384, "16384", }, 156 {32768, "32768", }, 157 {65536, "65536", }, 158 {0, NULL}, 159}; 160 161/* 162 * Zone to allocate malloc type descriptions from. For ABI reasons, memory 163 * types are described by a data structure passed by the declaring code, but 164 * the malloc(9) implementation has its own data structure describing the 165 * type and statistics. This permits the malloc(9)-internal data structures 166 * to be modified without breaking binary-compiled kernel modules that 167 * declare malloc types. 168 */ 169static uma_zone_t mt_zone; 170 171u_long vm_kmem_size; 172SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0, 173 "Size of kernel memory"); 174 175static u_long kmem_zmax = KMEM_ZMAX; 176SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0, 177 "Maximum allocation size that malloc(9) would use UMA as backend"); 178 179static u_long vm_kmem_size_min; 180SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0, 181 "Minimum size of kernel memory"); 182 183static u_long vm_kmem_size_max; 184SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0, 185 "Maximum size of kernel memory"); 186 187static u_int vm_kmem_size_scale; 188SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0, 189 "Scale factor for kernel memory size"); 190 191static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS); 192SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size, 193 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0, 194 sysctl_kmem_map_size, "LU", "Current kmem allocation size"); 195 196static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS); 197SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free, 198 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0, 199 sysctl_kmem_map_free, "LU", "Free space in kmem"); 200 201/* 202 * The malloc_mtx protects the kmemstatistics linked list. 203 */ 204struct mtx malloc_mtx; 205 206#ifdef MALLOC_PROFILE 207uint64_t krequests[KMEM_ZSIZE + 1]; 208 209static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS); 210#endif 211 212static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS); 213 214/* 215 * time_uptime of the last malloc(9) failure (induced or real). 216 */ 217static time_t t_malloc_fail; 218 219#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1) 220static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0, 221 "Kernel malloc debugging options"); 222#endif 223 224/* 225 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when 226 * the caller specifies M_NOWAIT. If set to 0, no failures are caused. 227 */ 228#ifdef MALLOC_MAKE_FAILURES 229static int malloc_failure_rate; 230static int malloc_nowait_count; 231static int malloc_failure_count; 232SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN, 233 &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail"); 234SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD, 235 &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures"); 236#endif 237 238static int 239sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS) 240{ 241 u_long size; 242 243 size = vmem_size(kmem_arena, VMEM_ALLOC); 244 return (sysctl_handle_long(oidp, &size, 0, req)); 245} 246 247static int 248sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS) 249{ 250 u_long size; 251 252 size = vmem_size(kmem_arena, VMEM_FREE); 253 return (sysctl_handle_long(oidp, &size, 0, req)); 254} 255 256/* 257 * malloc(9) uma zone separation -- sub-page buffer overruns in one 258 * malloc type will affect only a subset of other malloc types. 259 */ 260#if MALLOC_DEBUG_MAXZONES > 1 261static void 262tunable_set_numzones(void) 263{ 264 265 TUNABLE_INT_FETCH("debug.malloc.numzones", 266 &numzones); 267 268 /* Sanity check the number of malloc uma zones. */ 269 if (numzones <= 0) 270 numzones = 1; 271 if (numzones > MALLOC_DEBUG_MAXZONES) 272 numzones = MALLOC_DEBUG_MAXZONES; 273} 274SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL); 275SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, 276 &numzones, 0, "Number of malloc uma subzones"); 277 278/* 279 * Any number that changes regularly is an okay choice for the 280 * offset. Build numbers are pretty good of you have them. 281 */ 282static u_int zone_offset = __FreeBSD_version; 283TUNABLE_INT("debug.malloc.zone_offset", &zone_offset); 284SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN, 285 &zone_offset, 0, "Separate malloc types by examining the " 286 "Nth character in the malloc type short description."); 287 288static u_int 289mtp_get_subzone(const char *desc) 290{ 291 size_t len; 292 u_int val; 293 294 if (desc == NULL || (len = strlen(desc)) == 0) 295 return (0); 296 val = desc[zone_offset % len]; 297 return (val % numzones); 298} 299#elif MALLOC_DEBUG_MAXZONES == 0 300#error "MALLOC_DEBUG_MAXZONES must be positive." 301#else 302static inline u_int 303mtp_get_subzone(const char *desc) 304{ 305 306 return (0); 307} 308#endif /* MALLOC_DEBUG_MAXZONES > 1 */ 309 310int 311malloc_last_fail(void) 312{ 313 314 return (time_uptime - t_malloc_fail); 315} 316 317/* 318 * An allocation has succeeded -- update malloc type statistics for the 319 * amount of bucket size. Occurs within a critical section so that the 320 * thread isn't preempted and doesn't migrate while updating per-PCU 321 * statistics. 322 */ 323static void 324malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size, 325 int zindx) 326{ 327 struct malloc_type_internal *mtip; 328 struct malloc_type_stats *mtsp; 329 330 critical_enter(); 331 mtip = mtp->ks_handle; 332 mtsp = &mtip->mti_stats[curcpu]; 333 if (size > 0) { 334 mtsp->mts_memalloced += size; 335 mtsp->mts_numallocs++; 336 } 337 if (zindx != -1) 338 mtsp->mts_size |= 1 << zindx; 339 340#ifdef KDTRACE_HOOKS 341 if (dtrace_malloc_probe != NULL) { 342 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC]; 343 if (probe_id != 0) 344 (dtrace_malloc_probe)(probe_id, 345 (uintptr_t) mtp, (uintptr_t) mtip, 346 (uintptr_t) mtsp, size, zindx); 347 } 348#endif 349 350 critical_exit(); 351} 352 353void 354malloc_type_allocated(struct malloc_type *mtp, unsigned long size) 355{ 356 357 if (size > 0) 358 malloc_type_zone_allocated(mtp, size, -1); 359} 360 361/* 362 * A free operation has occurred -- update malloc type statistics for the 363 * amount of the bucket size. Occurs within a critical section so that the 364 * thread isn't preempted and doesn't migrate while updating per-CPU 365 * statistics. 366 */ 367void 368malloc_type_freed(struct malloc_type *mtp, unsigned long size) 369{ 370 struct malloc_type_internal *mtip; 371 struct malloc_type_stats *mtsp; 372 373 critical_enter(); 374 mtip = mtp->ks_handle; 375 mtsp = &mtip->mti_stats[curcpu]; 376 mtsp->mts_memfreed += size; 377 mtsp->mts_numfrees++; 378 379#ifdef KDTRACE_HOOKS 380 if (dtrace_malloc_probe != NULL) { 381 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE]; 382 if (probe_id != 0) 383 (dtrace_malloc_probe)(probe_id, 384 (uintptr_t) mtp, (uintptr_t) mtip, 385 (uintptr_t) mtsp, size, 0); 386 } 387#endif 388 389 critical_exit(); 390} 391 392/* 393 * contigmalloc: 394 * 395 * Allocate a block of physically contiguous memory. 396 * 397 * If M_NOWAIT is set, this routine will not block and return NULL if 398 * the allocation fails. 399 */ 400void * 401contigmalloc(unsigned long size, struct malloc_type *type, int flags, 402 vm_paddr_t low, vm_paddr_t high, unsigned long alignment, 403 vm_paddr_t boundary) 404{ 405 void *ret; 406 407 ret = (void *)kmem_alloc_contig(kernel_arena, size, flags, low, high, 408 alignment, boundary, VM_MEMATTR_DEFAULT); 409 if (ret != NULL) 410 malloc_type_allocated(type, round_page(size)); 411 return (ret); 412} 413 414/* 415 * contigfree: 416 * 417 * Free a block of memory allocated by contigmalloc. 418 * 419 * This routine may not block. 420 */ 421void 422contigfree(void *addr, unsigned long size, struct malloc_type *type) 423{ 424 425 kmem_free(kernel_arena, (vm_offset_t)addr, size); 426 malloc_type_freed(type, round_page(size)); 427} 428 429/* 430 * malloc: 431 * 432 * Allocate a block of memory. 433 * 434 * If M_NOWAIT is set, this routine will not block and return NULL if 435 * the allocation fails. 436 */ 437void * 438malloc(unsigned long size, struct malloc_type *mtp, int flags) 439{ 440 int indx; 441 struct malloc_type_internal *mtip; 442 caddr_t va; 443 uma_zone_t zone; 444#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE) 445 unsigned long osize = size; 446#endif 447 448#ifdef INVARIANTS 449 KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic")); 450 /* 451 * Check that exactly one of M_WAITOK or M_NOWAIT is specified. 452 */ 453 indx = flags & (M_WAITOK | M_NOWAIT); 454 if (indx != M_NOWAIT && indx != M_WAITOK) { 455 static struct timeval lasterr; 456 static int curerr, once; 457 if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) { 458 printf("Bad malloc flags: %x\n", indx); 459 kdb_backtrace(); 460 flags |= M_WAITOK; 461 once++; 462 } 463 } 464#endif 465#ifdef MALLOC_MAKE_FAILURES 466 if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) { 467 atomic_add_int(&malloc_nowait_count, 1); 468 if ((malloc_nowait_count % malloc_failure_rate) == 0) { 469 atomic_add_int(&malloc_failure_count, 1); 470 t_malloc_fail = time_uptime; 471 return (NULL); 472 } 473 } 474#endif 475 if (flags & M_WAITOK) 476 KASSERT(curthread->td_intr_nesting_level == 0, 477 ("malloc(M_WAITOK) in interrupt context")); 478 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 479 ("malloc: called with spinlock or critical section held")); 480 481#ifdef DEBUG_MEMGUARD 482 if (memguard_cmp_mtp(mtp, size)) { 483 va = memguard_alloc(size, flags); 484 if (va != NULL) 485 return (va); 486 /* This is unfortunate but should not be fatal. */ 487 } 488#endif 489 490#ifdef DEBUG_REDZONE 491 size = redzone_size_ntor(size); 492#endif 493 494 if (size <= kmem_zmax) { 495 mtip = mtp->ks_handle; 496 if (size & KMEM_ZMASK) 497 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE; 498 indx = kmemsize[size >> KMEM_ZSHIFT]; 499 KASSERT(mtip->mti_zone < numzones, 500 ("mti_zone %u out of range %d", 501 mtip->mti_zone, numzones)); 502 zone = kmemzones[indx].kz_zone[mtip->mti_zone]; 503#ifdef MALLOC_PROFILE 504 krequests[size >> KMEM_ZSHIFT]++; 505#endif 506 va = uma_zalloc(zone, flags); 507 if (va != NULL) 508 size = zone->uz_size; 509 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx); 510 } else { 511 size = roundup(size, PAGE_SIZE); 512 zone = NULL; 513 va = uma_large_malloc(size, flags); 514 malloc_type_allocated(mtp, va == NULL ? 0 : size); 515 } 516 if (flags & M_WAITOK) 517 KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL")); 518 else if (va == NULL) 519 t_malloc_fail = time_uptime; 520#ifdef DIAGNOSTIC 521 if (va != NULL && !(flags & M_ZERO)) { 522 memset(va, 0x70, osize); 523 } 524#endif 525#ifdef DEBUG_REDZONE 526 if (va != NULL) 527 va = redzone_setup(va, osize); 528#endif 529 return ((void *) va); 530} 531 532/* 533 * free: 534 * 535 * Free a block of memory allocated by malloc. 536 * 537 * This routine may not block. 538 */ 539void 540free(void *addr, struct malloc_type *mtp) 541{ 542 uma_slab_t slab; 543 u_long size; 544 545 KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic")); 546 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 547 ("free: called with spinlock or critical section held")); 548 549 /* free(NULL, ...) does nothing */ 550 if (addr == NULL) 551 return; 552 553#ifdef DEBUG_MEMGUARD 554 if (is_memguard_addr(addr)) { 555 memguard_free(addr); 556 return; 557 } 558#endif 559 560#ifdef DEBUG_REDZONE 561 redzone_check(addr); 562 addr = redzone_addr_ntor(addr); 563#endif 564 565 slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK)); 566 567 if (slab == NULL) 568 panic("free: address %p(%p) has not been allocated.\n", 569 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK))); 570 571 if (!(slab->us_flags & UMA_SLAB_MALLOC)) { 572#ifdef INVARIANTS 573 struct malloc_type **mtpp = addr; 574#endif 575 size = slab->us_keg->uk_size; 576#ifdef INVARIANTS 577 /* 578 * Cache a pointer to the malloc_type that most recently freed 579 * this memory here. This way we know who is most likely to 580 * have stepped on it later. 581 * 582 * This code assumes that size is a multiple of 8 bytes for 583 * 64 bit machines 584 */ 585 mtpp = (struct malloc_type **) 586 ((unsigned long)mtpp & ~UMA_ALIGN_PTR); 587 mtpp += (size - sizeof(struct malloc_type *)) / 588 sizeof(struct malloc_type *); 589 *mtpp = mtp; 590#endif 591 uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab); 592 } else { 593 size = slab->us_size; 594 uma_large_free(slab); 595 } 596 malloc_type_freed(mtp, size); 597} 598 599/* 600 * realloc: change the size of a memory block 601 */ 602void * 603realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags) 604{ 605 uma_slab_t slab; 606 unsigned long alloc; 607 void *newaddr; 608 609 KASSERT(mtp->ks_magic == M_MAGIC, 610 ("realloc: bad malloc type magic")); 611 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 612 ("realloc: called with spinlock or critical section held")); 613 614 /* realloc(NULL, ...) is equivalent to malloc(...) */ 615 if (addr == NULL) 616 return (malloc(size, mtp, flags)); 617 618 /* 619 * XXX: Should report free of old memory and alloc of new memory to 620 * per-CPU stats. 621 */ 622 623#ifdef DEBUG_MEMGUARD 624 if (is_memguard_addr(addr)) 625 return (memguard_realloc(addr, size, mtp, flags)); 626#endif 627 628#ifdef DEBUG_REDZONE 629 slab = NULL; 630 alloc = redzone_get_size(addr); 631#else 632 slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK)); 633 634 /* Sanity check */ 635 KASSERT(slab != NULL, 636 ("realloc: address %p out of range", (void *)addr)); 637 638 /* Get the size of the original block */ 639 if (!(slab->us_flags & UMA_SLAB_MALLOC)) 640 alloc = slab->us_keg->uk_size; 641 else 642 alloc = slab->us_size; 643 644 /* Reuse the original block if appropriate */ 645 if (size <= alloc 646 && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) 647 return (addr); 648#endif /* !DEBUG_REDZONE */ 649 650 /* Allocate a new, bigger (or smaller) block */ 651 if ((newaddr = malloc(size, mtp, flags)) == NULL) 652 return (NULL); 653 654 /* Copy over original contents */ 655 bcopy(addr, newaddr, min(size, alloc)); 656 free(addr, mtp); 657 return (newaddr); 658} 659 660/* 661 * reallocf: same as realloc() but free memory on failure. 662 */ 663void * 664reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags) 665{ 666 void *mem; 667 668 if ((mem = realloc(addr, size, mtp, flags)) == NULL) 669 free(addr, mtp); 670 return (mem); 671} 672 673/* 674 * Wake the uma reclamation pagedaemon thread when we exhaust KVA. It 675 * will call the lowmem handler and uma_reclaim() callbacks in a 676 * context that is safe. 677 */ 678static void 679kmem_reclaim(vmem_t *vm, int flags) 680{ 681 682 uma_reclaim_wakeup(); 683 pagedaemon_wakeup(); 684} 685 686#ifndef __sparc64__ 687CTASSERT(VM_KMEM_SIZE_SCALE >= 1); 688#endif 689 690/* 691 * Initialize the kernel memory (kmem) arena. 692 */ 693void 694kmeminit(void) 695{ 696 u_long mem_size; 697 u_long tmp; 698 699#ifdef VM_KMEM_SIZE 700 if (vm_kmem_size == 0) 701 vm_kmem_size = VM_KMEM_SIZE; 702#endif 703#ifdef VM_KMEM_SIZE_MIN 704 if (vm_kmem_size_min == 0) 705 vm_kmem_size_min = VM_KMEM_SIZE_MIN; 706#endif 707#ifdef VM_KMEM_SIZE_MAX 708 if (vm_kmem_size_max == 0) 709 vm_kmem_size_max = VM_KMEM_SIZE_MAX; 710#endif 711 /* 712 * Calculate the amount of kernel virtual address (KVA) space that is 713 * preallocated to the kmem arena. In order to support a wide range 714 * of machines, it is a function of the physical memory size, 715 * specifically, 716 * 717 * min(max(physical memory size / VM_KMEM_SIZE_SCALE, 718 * VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX) 719 * 720 * Every architecture must define an integral value for 721 * VM_KMEM_SIZE_SCALE. However, the definitions of VM_KMEM_SIZE_MIN 722 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and 723 * ceiling on this preallocation, are optional. Typically, 724 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on 725 * a given architecture. 726 */ 727 mem_size = vm_cnt.v_page_count; 728 if (mem_size <= 32768) /* delphij XXX 128MB */ 729 kmem_zmax = PAGE_SIZE; 730 731 if (vm_kmem_size_scale < 1) 732 vm_kmem_size_scale = VM_KMEM_SIZE_SCALE; 733 734 /* 735 * Check if we should use defaults for the "vm_kmem_size" 736 * variable: 737 */ 738 if (vm_kmem_size == 0) { 739 vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE; 740 741 if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min) 742 vm_kmem_size = vm_kmem_size_min; 743 if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max) 744 vm_kmem_size = vm_kmem_size_max; 745 } 746 747 /* 748 * The amount of KVA space that is preallocated to the 749 * kmem arena can be set statically at compile-time or manually 750 * through the kernel environment. However, it is still limited to 751 * twice the physical memory size, which has been sufficient to handle 752 * the most severe cases of external fragmentation in the kmem arena. 753 */ 754 if (vm_kmem_size / 2 / PAGE_SIZE > mem_size) 755 vm_kmem_size = 2 * mem_size * PAGE_SIZE; 756 757 vm_kmem_size = round_page(vm_kmem_size); 758#ifdef DEBUG_MEMGUARD 759 tmp = memguard_fudge(vm_kmem_size, kernel_map); 760#else 761 tmp = vm_kmem_size; 762#endif 763 vmem_init(kmem_arena, "kmem arena", kva_alloc(tmp), tmp, PAGE_SIZE, 764 0, 0); 765 vmem_set_reclaim(kmem_arena, kmem_reclaim); 766 767#ifdef DEBUG_MEMGUARD 768 /* 769 * Initialize MemGuard if support compiled in. MemGuard is a 770 * replacement allocator used for detecting tamper-after-free 771 * scenarios as they occur. It is only used for debugging. 772 */ 773 memguard_init(kmem_arena); 774#endif 775} 776 777/* 778 * Initialize the kernel memory allocator 779 */ 780/* ARGSUSED*/ 781static void 782mallocinit(void *dummy) 783{ 784 int i; 785 uint8_t indx; 786 787 mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF); 788 789 kmeminit(); 790 791 uma_startup2(); 792 793 if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX) 794 kmem_zmax = KMEM_ZMAX; 795 796 mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal), 797#ifdef INVARIANTS 798 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, 799#else 800 NULL, NULL, NULL, NULL, 801#endif 802 UMA_ALIGN_PTR, UMA_ZONE_MALLOC); 803 for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) { 804 int size = kmemzones[indx].kz_size; 805 char *name = kmemzones[indx].kz_name; 806 int subzone; 807 808 for (subzone = 0; subzone < numzones; subzone++) { 809 kmemzones[indx].kz_zone[subzone] = 810 uma_zcreate(name, size, 811#ifdef INVARIANTS 812 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, 813#else 814 NULL, NULL, NULL, NULL, 815#endif 816 UMA_ALIGN_PTR, UMA_ZONE_MALLOC); 817 } 818 for (;i <= size; i+= KMEM_ZBASE) 819 kmemsize[i >> KMEM_ZSHIFT] = indx; 820 821 } 822} 823SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL); 824 825void 826malloc_init(void *data) 827{ 828 struct malloc_type_internal *mtip; 829 struct malloc_type *mtp; 830 831 KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init")); 832 833 mtp = data; 834 if (mtp->ks_magic != M_MAGIC) 835 panic("malloc_init: bad malloc type magic"); 836 837 mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO); 838 mtp->ks_handle = mtip; 839 mtip->mti_zone = mtp_get_subzone(mtp->ks_shortdesc); 840 841 mtx_lock(&malloc_mtx); 842 mtp->ks_next = kmemstatistics; 843 kmemstatistics = mtp; 844 kmemcount++; 845 mtx_unlock(&malloc_mtx); 846} 847 848void 849malloc_uninit(void *data) 850{ 851 struct malloc_type_internal *mtip; 852 struct malloc_type_stats *mtsp; 853 struct malloc_type *mtp, *temp; 854 uma_slab_t slab; 855 long temp_allocs, temp_bytes; 856 int i; 857 858 mtp = data; 859 KASSERT(mtp->ks_magic == M_MAGIC, 860 ("malloc_uninit: bad malloc type magic")); 861 KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL")); 862 863 mtx_lock(&malloc_mtx); 864 mtip = mtp->ks_handle; 865 mtp->ks_handle = NULL; 866 if (mtp != kmemstatistics) { 867 for (temp = kmemstatistics; temp != NULL; 868 temp = temp->ks_next) { 869 if (temp->ks_next == mtp) { 870 temp->ks_next = mtp->ks_next; 871 break; 872 } 873 } 874 KASSERT(temp, 875 ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc)); 876 } else 877 kmemstatistics = mtp->ks_next; 878 kmemcount--; 879 mtx_unlock(&malloc_mtx); 880 881 /* 882 * Look for memory leaks. 883 */ 884 temp_allocs = temp_bytes = 0; 885 for (i = 0; i < MAXCPU; i++) { 886 mtsp = &mtip->mti_stats[i]; 887 temp_allocs += mtsp->mts_numallocs; 888 temp_allocs -= mtsp->mts_numfrees; 889 temp_bytes += mtsp->mts_memalloced; 890 temp_bytes -= mtsp->mts_memfreed; 891 } 892 if (temp_allocs > 0 || temp_bytes > 0) { 893 printf("Warning: memory type %s leaked memory on destroy " 894 "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc, 895 temp_allocs, temp_bytes); 896 } 897 898 slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK)); 899 uma_zfree_arg(mt_zone, mtip, slab); 900} 901 902struct malloc_type * 903malloc_desc2type(const char *desc) 904{ 905 struct malloc_type *mtp; 906 907 mtx_assert(&malloc_mtx, MA_OWNED); 908 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 909 if (strcmp(mtp->ks_shortdesc, desc) == 0) 910 return (mtp); 911 } 912 return (NULL); 913} 914 915static int 916sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS) 917{ 918 struct malloc_type_stream_header mtsh; 919 struct malloc_type_internal *mtip; 920 struct malloc_type_header mth; 921 struct malloc_type *mtp; 922 int error, i; 923 struct sbuf sbuf; 924 925 error = sysctl_wire_old_buffer(req, 0); 926 if (error != 0) 927 return (error); 928 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 929 sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); 930 mtx_lock(&malloc_mtx); 931 932 /* 933 * Insert stream header. 934 */ 935 bzero(&mtsh, sizeof(mtsh)); 936 mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION; 937 mtsh.mtsh_maxcpus = MAXCPU; 938 mtsh.mtsh_count = kmemcount; 939 (void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh)); 940 941 /* 942 * Insert alternating sequence of type headers and type statistics. 943 */ 944 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 945 mtip = (struct malloc_type_internal *)mtp->ks_handle; 946 947 /* 948 * Insert type header. 949 */ 950 bzero(&mth, sizeof(mth)); 951 strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME); 952 (void)sbuf_bcat(&sbuf, &mth, sizeof(mth)); 953 954 /* 955 * Insert type statistics for each CPU. 956 */ 957 for (i = 0; i < MAXCPU; i++) { 958 (void)sbuf_bcat(&sbuf, &mtip->mti_stats[i], 959 sizeof(mtip->mti_stats[i])); 960 } 961 } 962 mtx_unlock(&malloc_mtx); 963 error = sbuf_finish(&sbuf); 964 sbuf_delete(&sbuf); 965 return (error); 966} 967 968SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT, 969 0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats", 970 "Return malloc types"); 971 972SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0, 973 "Count of kernel malloc types"); 974 975void 976malloc_type_list(malloc_type_list_func_t *func, void *arg) 977{ 978 struct malloc_type *mtp, **bufmtp; 979 int count, i; 980 size_t buflen; 981 982 mtx_lock(&malloc_mtx); 983restart: 984 mtx_assert(&malloc_mtx, MA_OWNED); 985 count = kmemcount; 986 mtx_unlock(&malloc_mtx); 987 988 buflen = sizeof(struct malloc_type *) * count; 989 bufmtp = malloc(buflen, M_TEMP, M_WAITOK); 990 991 mtx_lock(&malloc_mtx); 992 993 if (count < kmemcount) { 994 free(bufmtp, M_TEMP); 995 goto restart; 996 } 997 998 for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++) 999 bufmtp[i] = mtp; 1000 1001 mtx_unlock(&malloc_mtx); 1002 1003 for (i = 0; i < count; i++) 1004 (func)(bufmtp[i], arg); 1005 1006 free(bufmtp, M_TEMP); 1007} 1008 1009#ifdef DDB 1010DB_SHOW_COMMAND(malloc, db_show_malloc) 1011{ 1012 struct malloc_type_internal *mtip; 1013 struct malloc_type *mtp; 1014 uint64_t allocs, frees; 1015 uint64_t alloced, freed; 1016 int i; 1017 1018 db_printf("%18s %12s %12s %12s\n", "Type", "InUse", "MemUse", 1019 "Requests"); 1020 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 1021 mtip = (struct malloc_type_internal *)mtp->ks_handle; 1022 allocs = 0; 1023 frees = 0; 1024 alloced = 0; 1025 freed = 0; 1026 for (i = 0; i < MAXCPU; i++) { 1027 allocs += mtip->mti_stats[i].mts_numallocs; 1028 frees += mtip->mti_stats[i].mts_numfrees; 1029 alloced += mtip->mti_stats[i].mts_memalloced; 1030 freed += mtip->mti_stats[i].mts_memfreed; 1031 } 1032 db_printf("%18s %12ju %12juK %12ju\n", 1033 mtp->ks_shortdesc, allocs - frees, 1034 (alloced - freed + 1023) / 1024, allocs); 1035 if (db_pager_quit) 1036 break; 1037 } 1038} 1039 1040#if MALLOC_DEBUG_MAXZONES > 1 1041DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches) 1042{ 1043 struct malloc_type_internal *mtip; 1044 struct malloc_type *mtp; 1045 u_int subzone; 1046 1047 if (!have_addr) { 1048 db_printf("Usage: show multizone_matches <malloc type/addr>\n"); 1049 return; 1050 } 1051 mtp = (void *)addr; 1052 if (mtp->ks_magic != M_MAGIC) { 1053 db_printf("Magic %lx does not match expected %x\n", 1054 mtp->ks_magic, M_MAGIC); 1055 return; 1056 } 1057 1058 mtip = mtp->ks_handle; 1059 subzone = mtip->mti_zone; 1060 1061 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 1062 mtip = mtp->ks_handle; 1063 if (mtip->mti_zone != subzone) 1064 continue; 1065 db_printf("%s\n", mtp->ks_shortdesc); 1066 if (db_pager_quit) 1067 break; 1068 } 1069} 1070#endif /* MALLOC_DEBUG_MAXZONES > 1 */ 1071#endif /* DDB */ 1072 1073#ifdef MALLOC_PROFILE 1074 1075static int 1076sysctl_kern_mprof(SYSCTL_HANDLER_ARGS) 1077{ 1078 struct sbuf sbuf; 1079 uint64_t count; 1080 uint64_t waste; 1081 uint64_t mem; 1082 int error; 1083 int rsize; 1084 int size; 1085 int i; 1086 1087 waste = 0; 1088 mem = 0; 1089 1090 error = sysctl_wire_old_buffer(req, 0); 1091 if (error != 0) 1092 return (error); 1093 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 1094 sbuf_printf(&sbuf, 1095 "\n Size Requests Real Size\n"); 1096 for (i = 0; i < KMEM_ZSIZE; i++) { 1097 size = i << KMEM_ZSHIFT; 1098 rsize = kmemzones[kmemsize[i]].kz_size; 1099 count = (long long unsigned)krequests[i]; 1100 1101 sbuf_printf(&sbuf, "%6d%28llu%11d\n", size, 1102 (unsigned long long)count, rsize); 1103 1104 if ((rsize * count) > (size * count)) 1105 waste += (rsize * count) - (size * count); 1106 mem += (rsize * count); 1107 } 1108 sbuf_printf(&sbuf, 1109 "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n", 1110 (unsigned long long)mem, (unsigned long long)waste); 1111 error = sbuf_finish(&sbuf); 1112 sbuf_delete(&sbuf); 1113 return (error); 1114} 1115 1116SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD, 1117 NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling"); 1118#endif /* MALLOC_PROFILE */
| 63#include <sys/time.h> 64#include <sys/vmem.h> 65 66#include <vm/vm.h> 67#include <vm/pmap.h> 68#include <vm/vm_pageout.h> 69#include <vm/vm_param.h> 70#include <vm/vm_kern.h> 71#include <vm/vm_extern.h> 72#include <vm/vm_map.h> 73#include <vm/vm_page.h> 74#include <vm/uma.h> 75#include <vm/uma_int.h> 76#include <vm/uma_dbg.h> 77 78#ifdef DEBUG_MEMGUARD 79#include <vm/memguard.h> 80#endif 81#ifdef DEBUG_REDZONE 82#include <vm/redzone.h> 83#endif 84 85#if defined(INVARIANTS) && defined(__i386__) 86#include <machine/cpu.h> 87#endif 88 89#include <ddb/ddb.h> 90 91#ifdef KDTRACE_HOOKS 92#include <sys/dtrace_bsd.h> 93 94dtrace_malloc_probe_func_t dtrace_malloc_probe; 95#endif 96 97/* 98 * When realloc() is called, if the new size is sufficiently smaller than 99 * the old size, realloc() will allocate a new, smaller block to avoid 100 * wasting memory. 'Sufficiently smaller' is defined as: newsize <= 101 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'. 102 */ 103#ifndef REALLOC_FRACTION 104#define REALLOC_FRACTION 1 /* new block if <= half the size */ 105#endif 106 107/* 108 * Centrally define some common malloc types. 109 */ 110MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches"); 111MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory"); 112MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers"); 113 114MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options"); 115MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery"); 116 117static struct malloc_type *kmemstatistics; 118static int kmemcount; 119 120#define KMEM_ZSHIFT 4 121#define KMEM_ZBASE 16 122#define KMEM_ZMASK (KMEM_ZBASE - 1) 123 124#define KMEM_ZMAX 65536 125#define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT) 126static uint8_t kmemsize[KMEM_ZSIZE + 1]; 127 128#ifndef MALLOC_DEBUG_MAXZONES 129#define MALLOC_DEBUG_MAXZONES 1 130#endif 131static int numzones = MALLOC_DEBUG_MAXZONES; 132 133/* 134 * Small malloc(9) memory allocations are allocated from a set of UMA buckets 135 * of various sizes. 136 * 137 * XXX: The comment here used to read "These won't be powers of two for 138 * long." It's possible that a significant amount of wasted memory could be 139 * recovered by tuning the sizes of these buckets. 140 */ 141struct { 142 int kz_size; 143 char *kz_name; 144 uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES]; 145} kmemzones[] = { 146 {16, "16", }, 147 {32, "32", }, 148 {64, "64", }, 149 {128, "128", }, 150 {256, "256", }, 151 {512, "512", }, 152 {1024, "1024", }, 153 {2048, "2048", }, 154 {4096, "4096", }, 155 {8192, "8192", }, 156 {16384, "16384", }, 157 {32768, "32768", }, 158 {65536, "65536", }, 159 {0, NULL}, 160}; 161 162/* 163 * Zone to allocate malloc type descriptions from. For ABI reasons, memory 164 * types are described by a data structure passed by the declaring code, but 165 * the malloc(9) implementation has its own data structure describing the 166 * type and statistics. This permits the malloc(9)-internal data structures 167 * to be modified without breaking binary-compiled kernel modules that 168 * declare malloc types. 169 */ 170static uma_zone_t mt_zone; 171 172u_long vm_kmem_size; 173SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0, 174 "Size of kernel memory"); 175 176static u_long kmem_zmax = KMEM_ZMAX; 177SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0, 178 "Maximum allocation size that malloc(9) would use UMA as backend"); 179 180static u_long vm_kmem_size_min; 181SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0, 182 "Minimum size of kernel memory"); 183 184static u_long vm_kmem_size_max; 185SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0, 186 "Maximum size of kernel memory"); 187 188static u_int vm_kmem_size_scale; 189SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0, 190 "Scale factor for kernel memory size"); 191 192static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS); 193SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size, 194 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0, 195 sysctl_kmem_map_size, "LU", "Current kmem allocation size"); 196 197static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS); 198SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free, 199 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0, 200 sysctl_kmem_map_free, "LU", "Free space in kmem"); 201 202/* 203 * The malloc_mtx protects the kmemstatistics linked list. 204 */ 205struct mtx malloc_mtx; 206 207#ifdef MALLOC_PROFILE 208uint64_t krequests[KMEM_ZSIZE + 1]; 209 210static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS); 211#endif 212 213static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS); 214 215/* 216 * time_uptime of the last malloc(9) failure (induced or real). 217 */ 218static time_t t_malloc_fail; 219 220#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1) 221static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0, 222 "Kernel malloc debugging options"); 223#endif 224 225/* 226 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when 227 * the caller specifies M_NOWAIT. If set to 0, no failures are caused. 228 */ 229#ifdef MALLOC_MAKE_FAILURES 230static int malloc_failure_rate; 231static int malloc_nowait_count; 232static int malloc_failure_count; 233SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN, 234 &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail"); 235SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD, 236 &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures"); 237#endif 238 239static int 240sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS) 241{ 242 u_long size; 243 244 size = vmem_size(kmem_arena, VMEM_ALLOC); 245 return (sysctl_handle_long(oidp, &size, 0, req)); 246} 247 248static int 249sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS) 250{ 251 u_long size; 252 253 size = vmem_size(kmem_arena, VMEM_FREE); 254 return (sysctl_handle_long(oidp, &size, 0, req)); 255} 256 257/* 258 * malloc(9) uma zone separation -- sub-page buffer overruns in one 259 * malloc type will affect only a subset of other malloc types. 260 */ 261#if MALLOC_DEBUG_MAXZONES > 1 262static void 263tunable_set_numzones(void) 264{ 265 266 TUNABLE_INT_FETCH("debug.malloc.numzones", 267 &numzones); 268 269 /* Sanity check the number of malloc uma zones. */ 270 if (numzones <= 0) 271 numzones = 1; 272 if (numzones > MALLOC_DEBUG_MAXZONES) 273 numzones = MALLOC_DEBUG_MAXZONES; 274} 275SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL); 276SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, 277 &numzones, 0, "Number of malloc uma subzones"); 278 279/* 280 * Any number that changes regularly is an okay choice for the 281 * offset. Build numbers are pretty good of you have them. 282 */ 283static u_int zone_offset = __FreeBSD_version; 284TUNABLE_INT("debug.malloc.zone_offset", &zone_offset); 285SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN, 286 &zone_offset, 0, "Separate malloc types by examining the " 287 "Nth character in the malloc type short description."); 288 289static u_int 290mtp_get_subzone(const char *desc) 291{ 292 size_t len; 293 u_int val; 294 295 if (desc == NULL || (len = strlen(desc)) == 0) 296 return (0); 297 val = desc[zone_offset % len]; 298 return (val % numzones); 299} 300#elif MALLOC_DEBUG_MAXZONES == 0 301#error "MALLOC_DEBUG_MAXZONES must be positive." 302#else 303static inline u_int 304mtp_get_subzone(const char *desc) 305{ 306 307 return (0); 308} 309#endif /* MALLOC_DEBUG_MAXZONES > 1 */ 310 311int 312malloc_last_fail(void) 313{ 314 315 return (time_uptime - t_malloc_fail); 316} 317 318/* 319 * An allocation has succeeded -- update malloc type statistics for the 320 * amount of bucket size. Occurs within a critical section so that the 321 * thread isn't preempted and doesn't migrate while updating per-PCU 322 * statistics. 323 */ 324static void 325malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size, 326 int zindx) 327{ 328 struct malloc_type_internal *mtip; 329 struct malloc_type_stats *mtsp; 330 331 critical_enter(); 332 mtip = mtp->ks_handle; 333 mtsp = &mtip->mti_stats[curcpu]; 334 if (size > 0) { 335 mtsp->mts_memalloced += size; 336 mtsp->mts_numallocs++; 337 } 338 if (zindx != -1) 339 mtsp->mts_size |= 1 << zindx; 340 341#ifdef KDTRACE_HOOKS 342 if (dtrace_malloc_probe != NULL) { 343 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC]; 344 if (probe_id != 0) 345 (dtrace_malloc_probe)(probe_id, 346 (uintptr_t) mtp, (uintptr_t) mtip, 347 (uintptr_t) mtsp, size, zindx); 348 } 349#endif 350 351 critical_exit(); 352} 353 354void 355malloc_type_allocated(struct malloc_type *mtp, unsigned long size) 356{ 357 358 if (size > 0) 359 malloc_type_zone_allocated(mtp, size, -1); 360} 361 362/* 363 * A free operation has occurred -- update malloc type statistics for the 364 * amount of the bucket size. Occurs within a critical section so that the 365 * thread isn't preempted and doesn't migrate while updating per-CPU 366 * statistics. 367 */ 368void 369malloc_type_freed(struct malloc_type *mtp, unsigned long size) 370{ 371 struct malloc_type_internal *mtip; 372 struct malloc_type_stats *mtsp; 373 374 critical_enter(); 375 mtip = mtp->ks_handle; 376 mtsp = &mtip->mti_stats[curcpu]; 377 mtsp->mts_memfreed += size; 378 mtsp->mts_numfrees++; 379 380#ifdef KDTRACE_HOOKS 381 if (dtrace_malloc_probe != NULL) { 382 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE]; 383 if (probe_id != 0) 384 (dtrace_malloc_probe)(probe_id, 385 (uintptr_t) mtp, (uintptr_t) mtip, 386 (uintptr_t) mtsp, size, 0); 387 } 388#endif 389 390 critical_exit(); 391} 392 393/* 394 * contigmalloc: 395 * 396 * Allocate a block of physically contiguous memory. 397 * 398 * If M_NOWAIT is set, this routine will not block and return NULL if 399 * the allocation fails. 400 */ 401void * 402contigmalloc(unsigned long size, struct malloc_type *type, int flags, 403 vm_paddr_t low, vm_paddr_t high, unsigned long alignment, 404 vm_paddr_t boundary) 405{ 406 void *ret; 407 408 ret = (void *)kmem_alloc_contig(kernel_arena, size, flags, low, high, 409 alignment, boundary, VM_MEMATTR_DEFAULT); 410 if (ret != NULL) 411 malloc_type_allocated(type, round_page(size)); 412 return (ret); 413} 414 415/* 416 * contigfree: 417 * 418 * Free a block of memory allocated by contigmalloc. 419 * 420 * This routine may not block. 421 */ 422void 423contigfree(void *addr, unsigned long size, struct malloc_type *type) 424{ 425 426 kmem_free(kernel_arena, (vm_offset_t)addr, size); 427 malloc_type_freed(type, round_page(size)); 428} 429 430/* 431 * malloc: 432 * 433 * Allocate a block of memory. 434 * 435 * If M_NOWAIT is set, this routine will not block and return NULL if 436 * the allocation fails. 437 */ 438void * 439malloc(unsigned long size, struct malloc_type *mtp, int flags) 440{ 441 int indx; 442 struct malloc_type_internal *mtip; 443 caddr_t va; 444 uma_zone_t zone; 445#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE) 446 unsigned long osize = size; 447#endif 448 449#ifdef INVARIANTS 450 KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic")); 451 /* 452 * Check that exactly one of M_WAITOK or M_NOWAIT is specified. 453 */ 454 indx = flags & (M_WAITOK | M_NOWAIT); 455 if (indx != M_NOWAIT && indx != M_WAITOK) { 456 static struct timeval lasterr; 457 static int curerr, once; 458 if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) { 459 printf("Bad malloc flags: %x\n", indx); 460 kdb_backtrace(); 461 flags |= M_WAITOK; 462 once++; 463 } 464 } 465#endif 466#ifdef MALLOC_MAKE_FAILURES 467 if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) { 468 atomic_add_int(&malloc_nowait_count, 1); 469 if ((malloc_nowait_count % malloc_failure_rate) == 0) { 470 atomic_add_int(&malloc_failure_count, 1); 471 t_malloc_fail = time_uptime; 472 return (NULL); 473 } 474 } 475#endif 476 if (flags & M_WAITOK) 477 KASSERT(curthread->td_intr_nesting_level == 0, 478 ("malloc(M_WAITOK) in interrupt context")); 479 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 480 ("malloc: called with spinlock or critical section held")); 481 482#ifdef DEBUG_MEMGUARD 483 if (memguard_cmp_mtp(mtp, size)) { 484 va = memguard_alloc(size, flags); 485 if (va != NULL) 486 return (va); 487 /* This is unfortunate but should not be fatal. */ 488 } 489#endif 490 491#ifdef DEBUG_REDZONE 492 size = redzone_size_ntor(size); 493#endif 494 495 if (size <= kmem_zmax) { 496 mtip = mtp->ks_handle; 497 if (size & KMEM_ZMASK) 498 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE; 499 indx = kmemsize[size >> KMEM_ZSHIFT]; 500 KASSERT(mtip->mti_zone < numzones, 501 ("mti_zone %u out of range %d", 502 mtip->mti_zone, numzones)); 503 zone = kmemzones[indx].kz_zone[mtip->mti_zone]; 504#ifdef MALLOC_PROFILE 505 krequests[size >> KMEM_ZSHIFT]++; 506#endif 507 va = uma_zalloc(zone, flags); 508 if (va != NULL) 509 size = zone->uz_size; 510 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx); 511 } else { 512 size = roundup(size, PAGE_SIZE); 513 zone = NULL; 514 va = uma_large_malloc(size, flags); 515 malloc_type_allocated(mtp, va == NULL ? 0 : size); 516 } 517 if (flags & M_WAITOK) 518 KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL")); 519 else if (va == NULL) 520 t_malloc_fail = time_uptime; 521#ifdef DIAGNOSTIC 522 if (va != NULL && !(flags & M_ZERO)) { 523 memset(va, 0x70, osize); 524 } 525#endif 526#ifdef DEBUG_REDZONE 527 if (va != NULL) 528 va = redzone_setup(va, osize); 529#endif 530 return ((void *) va); 531} 532 533/* 534 * free: 535 * 536 * Free a block of memory allocated by malloc. 537 * 538 * This routine may not block. 539 */ 540void 541free(void *addr, struct malloc_type *mtp) 542{ 543 uma_slab_t slab; 544 u_long size; 545 546 KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic")); 547 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 548 ("free: called with spinlock or critical section held")); 549 550 /* free(NULL, ...) does nothing */ 551 if (addr == NULL) 552 return; 553 554#ifdef DEBUG_MEMGUARD 555 if (is_memguard_addr(addr)) { 556 memguard_free(addr); 557 return; 558 } 559#endif 560 561#ifdef DEBUG_REDZONE 562 redzone_check(addr); 563 addr = redzone_addr_ntor(addr); 564#endif 565 566 slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK)); 567 568 if (slab == NULL) 569 panic("free: address %p(%p) has not been allocated.\n", 570 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK))); 571 572 if (!(slab->us_flags & UMA_SLAB_MALLOC)) { 573#ifdef INVARIANTS 574 struct malloc_type **mtpp = addr; 575#endif 576 size = slab->us_keg->uk_size; 577#ifdef INVARIANTS 578 /* 579 * Cache a pointer to the malloc_type that most recently freed 580 * this memory here. This way we know who is most likely to 581 * have stepped on it later. 582 * 583 * This code assumes that size is a multiple of 8 bytes for 584 * 64 bit machines 585 */ 586 mtpp = (struct malloc_type **) 587 ((unsigned long)mtpp & ~UMA_ALIGN_PTR); 588 mtpp += (size - sizeof(struct malloc_type *)) / 589 sizeof(struct malloc_type *); 590 *mtpp = mtp; 591#endif 592 uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab); 593 } else { 594 size = slab->us_size; 595 uma_large_free(slab); 596 } 597 malloc_type_freed(mtp, size); 598} 599 600/* 601 * realloc: change the size of a memory block 602 */ 603void * 604realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags) 605{ 606 uma_slab_t slab; 607 unsigned long alloc; 608 void *newaddr; 609 610 KASSERT(mtp->ks_magic == M_MAGIC, 611 ("realloc: bad malloc type magic")); 612 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 613 ("realloc: called with spinlock or critical section held")); 614 615 /* realloc(NULL, ...) is equivalent to malloc(...) */ 616 if (addr == NULL) 617 return (malloc(size, mtp, flags)); 618 619 /* 620 * XXX: Should report free of old memory and alloc of new memory to 621 * per-CPU stats. 622 */ 623 624#ifdef DEBUG_MEMGUARD 625 if (is_memguard_addr(addr)) 626 return (memguard_realloc(addr, size, mtp, flags)); 627#endif 628 629#ifdef DEBUG_REDZONE 630 slab = NULL; 631 alloc = redzone_get_size(addr); 632#else 633 slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK)); 634 635 /* Sanity check */ 636 KASSERT(slab != NULL, 637 ("realloc: address %p out of range", (void *)addr)); 638 639 /* Get the size of the original block */ 640 if (!(slab->us_flags & UMA_SLAB_MALLOC)) 641 alloc = slab->us_keg->uk_size; 642 else 643 alloc = slab->us_size; 644 645 /* Reuse the original block if appropriate */ 646 if (size <= alloc 647 && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) 648 return (addr); 649#endif /* !DEBUG_REDZONE */ 650 651 /* Allocate a new, bigger (or smaller) block */ 652 if ((newaddr = malloc(size, mtp, flags)) == NULL) 653 return (NULL); 654 655 /* Copy over original contents */ 656 bcopy(addr, newaddr, min(size, alloc)); 657 free(addr, mtp); 658 return (newaddr); 659} 660 661/* 662 * reallocf: same as realloc() but free memory on failure. 663 */ 664void * 665reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags) 666{ 667 void *mem; 668 669 if ((mem = realloc(addr, size, mtp, flags)) == NULL) 670 free(addr, mtp); 671 return (mem); 672} 673 674/* 675 * Wake the uma reclamation pagedaemon thread when we exhaust KVA. It 676 * will call the lowmem handler and uma_reclaim() callbacks in a 677 * context that is safe. 678 */ 679static void 680kmem_reclaim(vmem_t *vm, int flags) 681{ 682 683 uma_reclaim_wakeup(); 684 pagedaemon_wakeup(); 685} 686 687#ifndef __sparc64__ 688CTASSERT(VM_KMEM_SIZE_SCALE >= 1); 689#endif 690 691/* 692 * Initialize the kernel memory (kmem) arena. 693 */ 694void 695kmeminit(void) 696{ 697 u_long mem_size; 698 u_long tmp; 699 700#ifdef VM_KMEM_SIZE 701 if (vm_kmem_size == 0) 702 vm_kmem_size = VM_KMEM_SIZE; 703#endif 704#ifdef VM_KMEM_SIZE_MIN 705 if (vm_kmem_size_min == 0) 706 vm_kmem_size_min = VM_KMEM_SIZE_MIN; 707#endif 708#ifdef VM_KMEM_SIZE_MAX 709 if (vm_kmem_size_max == 0) 710 vm_kmem_size_max = VM_KMEM_SIZE_MAX; 711#endif 712 /* 713 * Calculate the amount of kernel virtual address (KVA) space that is 714 * preallocated to the kmem arena. In order to support a wide range 715 * of machines, it is a function of the physical memory size, 716 * specifically, 717 * 718 * min(max(physical memory size / VM_KMEM_SIZE_SCALE, 719 * VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX) 720 * 721 * Every architecture must define an integral value for 722 * VM_KMEM_SIZE_SCALE. However, the definitions of VM_KMEM_SIZE_MIN 723 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and 724 * ceiling on this preallocation, are optional. Typically, 725 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on 726 * a given architecture. 727 */ 728 mem_size = vm_cnt.v_page_count; 729 if (mem_size <= 32768) /* delphij XXX 128MB */ 730 kmem_zmax = PAGE_SIZE; 731 732 if (vm_kmem_size_scale < 1) 733 vm_kmem_size_scale = VM_KMEM_SIZE_SCALE; 734 735 /* 736 * Check if we should use defaults for the "vm_kmem_size" 737 * variable: 738 */ 739 if (vm_kmem_size == 0) { 740 vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE; 741 742 if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min) 743 vm_kmem_size = vm_kmem_size_min; 744 if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max) 745 vm_kmem_size = vm_kmem_size_max; 746 } 747 748 /* 749 * The amount of KVA space that is preallocated to the 750 * kmem arena can be set statically at compile-time or manually 751 * through the kernel environment. However, it is still limited to 752 * twice the physical memory size, which has been sufficient to handle 753 * the most severe cases of external fragmentation in the kmem arena. 754 */ 755 if (vm_kmem_size / 2 / PAGE_SIZE > mem_size) 756 vm_kmem_size = 2 * mem_size * PAGE_SIZE; 757 758 vm_kmem_size = round_page(vm_kmem_size); 759#ifdef DEBUG_MEMGUARD 760 tmp = memguard_fudge(vm_kmem_size, kernel_map); 761#else 762 tmp = vm_kmem_size; 763#endif 764 vmem_init(kmem_arena, "kmem arena", kva_alloc(tmp), tmp, PAGE_SIZE, 765 0, 0); 766 vmem_set_reclaim(kmem_arena, kmem_reclaim); 767 768#ifdef DEBUG_MEMGUARD 769 /* 770 * Initialize MemGuard if support compiled in. MemGuard is a 771 * replacement allocator used for detecting tamper-after-free 772 * scenarios as they occur. It is only used for debugging. 773 */ 774 memguard_init(kmem_arena); 775#endif 776} 777 778/* 779 * Initialize the kernel memory allocator 780 */ 781/* ARGSUSED*/ 782static void 783mallocinit(void *dummy) 784{ 785 int i; 786 uint8_t indx; 787 788 mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF); 789 790 kmeminit(); 791 792 uma_startup2(); 793 794 if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX) 795 kmem_zmax = KMEM_ZMAX; 796 797 mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal), 798#ifdef INVARIANTS 799 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, 800#else 801 NULL, NULL, NULL, NULL, 802#endif 803 UMA_ALIGN_PTR, UMA_ZONE_MALLOC); 804 for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) { 805 int size = kmemzones[indx].kz_size; 806 char *name = kmemzones[indx].kz_name; 807 int subzone; 808 809 for (subzone = 0; subzone < numzones; subzone++) { 810 kmemzones[indx].kz_zone[subzone] = 811 uma_zcreate(name, size, 812#ifdef INVARIANTS 813 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, 814#else 815 NULL, NULL, NULL, NULL, 816#endif 817 UMA_ALIGN_PTR, UMA_ZONE_MALLOC); 818 } 819 for (;i <= size; i+= KMEM_ZBASE) 820 kmemsize[i >> KMEM_ZSHIFT] = indx; 821 822 } 823} 824SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL); 825 826void 827malloc_init(void *data) 828{ 829 struct malloc_type_internal *mtip; 830 struct malloc_type *mtp; 831 832 KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init")); 833 834 mtp = data; 835 if (mtp->ks_magic != M_MAGIC) 836 panic("malloc_init: bad malloc type magic"); 837 838 mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO); 839 mtp->ks_handle = mtip; 840 mtip->mti_zone = mtp_get_subzone(mtp->ks_shortdesc); 841 842 mtx_lock(&malloc_mtx); 843 mtp->ks_next = kmemstatistics; 844 kmemstatistics = mtp; 845 kmemcount++; 846 mtx_unlock(&malloc_mtx); 847} 848 849void 850malloc_uninit(void *data) 851{ 852 struct malloc_type_internal *mtip; 853 struct malloc_type_stats *mtsp; 854 struct malloc_type *mtp, *temp; 855 uma_slab_t slab; 856 long temp_allocs, temp_bytes; 857 int i; 858 859 mtp = data; 860 KASSERT(mtp->ks_magic == M_MAGIC, 861 ("malloc_uninit: bad malloc type magic")); 862 KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL")); 863 864 mtx_lock(&malloc_mtx); 865 mtip = mtp->ks_handle; 866 mtp->ks_handle = NULL; 867 if (mtp != kmemstatistics) { 868 for (temp = kmemstatistics; temp != NULL; 869 temp = temp->ks_next) { 870 if (temp->ks_next == mtp) { 871 temp->ks_next = mtp->ks_next; 872 break; 873 } 874 } 875 KASSERT(temp, 876 ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc)); 877 } else 878 kmemstatistics = mtp->ks_next; 879 kmemcount--; 880 mtx_unlock(&malloc_mtx); 881 882 /* 883 * Look for memory leaks. 884 */ 885 temp_allocs = temp_bytes = 0; 886 for (i = 0; i < MAXCPU; i++) { 887 mtsp = &mtip->mti_stats[i]; 888 temp_allocs += mtsp->mts_numallocs; 889 temp_allocs -= mtsp->mts_numfrees; 890 temp_bytes += mtsp->mts_memalloced; 891 temp_bytes -= mtsp->mts_memfreed; 892 } 893 if (temp_allocs > 0 || temp_bytes > 0) { 894 printf("Warning: memory type %s leaked memory on destroy " 895 "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc, 896 temp_allocs, temp_bytes); 897 } 898 899 slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK)); 900 uma_zfree_arg(mt_zone, mtip, slab); 901} 902 903struct malloc_type * 904malloc_desc2type(const char *desc) 905{ 906 struct malloc_type *mtp; 907 908 mtx_assert(&malloc_mtx, MA_OWNED); 909 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 910 if (strcmp(mtp->ks_shortdesc, desc) == 0) 911 return (mtp); 912 } 913 return (NULL); 914} 915 916static int 917sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS) 918{ 919 struct malloc_type_stream_header mtsh; 920 struct malloc_type_internal *mtip; 921 struct malloc_type_header mth; 922 struct malloc_type *mtp; 923 int error, i; 924 struct sbuf sbuf; 925 926 error = sysctl_wire_old_buffer(req, 0); 927 if (error != 0) 928 return (error); 929 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 930 sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); 931 mtx_lock(&malloc_mtx); 932 933 /* 934 * Insert stream header. 935 */ 936 bzero(&mtsh, sizeof(mtsh)); 937 mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION; 938 mtsh.mtsh_maxcpus = MAXCPU; 939 mtsh.mtsh_count = kmemcount; 940 (void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh)); 941 942 /* 943 * Insert alternating sequence of type headers and type statistics. 944 */ 945 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 946 mtip = (struct malloc_type_internal *)mtp->ks_handle; 947 948 /* 949 * Insert type header. 950 */ 951 bzero(&mth, sizeof(mth)); 952 strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME); 953 (void)sbuf_bcat(&sbuf, &mth, sizeof(mth)); 954 955 /* 956 * Insert type statistics for each CPU. 957 */ 958 for (i = 0; i < MAXCPU; i++) { 959 (void)sbuf_bcat(&sbuf, &mtip->mti_stats[i], 960 sizeof(mtip->mti_stats[i])); 961 } 962 } 963 mtx_unlock(&malloc_mtx); 964 error = sbuf_finish(&sbuf); 965 sbuf_delete(&sbuf); 966 return (error); 967} 968 969SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT, 970 0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats", 971 "Return malloc types"); 972 973SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0, 974 "Count of kernel malloc types"); 975 976void 977malloc_type_list(malloc_type_list_func_t *func, void *arg) 978{ 979 struct malloc_type *mtp, **bufmtp; 980 int count, i; 981 size_t buflen; 982 983 mtx_lock(&malloc_mtx); 984restart: 985 mtx_assert(&malloc_mtx, MA_OWNED); 986 count = kmemcount; 987 mtx_unlock(&malloc_mtx); 988 989 buflen = sizeof(struct malloc_type *) * count; 990 bufmtp = malloc(buflen, M_TEMP, M_WAITOK); 991 992 mtx_lock(&malloc_mtx); 993 994 if (count < kmemcount) { 995 free(bufmtp, M_TEMP); 996 goto restart; 997 } 998 999 for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++) 1000 bufmtp[i] = mtp; 1001 1002 mtx_unlock(&malloc_mtx); 1003 1004 for (i = 0; i < count; i++) 1005 (func)(bufmtp[i], arg); 1006 1007 free(bufmtp, M_TEMP); 1008} 1009 1010#ifdef DDB 1011DB_SHOW_COMMAND(malloc, db_show_malloc) 1012{ 1013 struct malloc_type_internal *mtip; 1014 struct malloc_type *mtp; 1015 uint64_t allocs, frees; 1016 uint64_t alloced, freed; 1017 int i; 1018 1019 db_printf("%18s %12s %12s %12s\n", "Type", "InUse", "MemUse", 1020 "Requests"); 1021 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 1022 mtip = (struct malloc_type_internal *)mtp->ks_handle; 1023 allocs = 0; 1024 frees = 0; 1025 alloced = 0; 1026 freed = 0; 1027 for (i = 0; i < MAXCPU; i++) { 1028 allocs += mtip->mti_stats[i].mts_numallocs; 1029 frees += mtip->mti_stats[i].mts_numfrees; 1030 alloced += mtip->mti_stats[i].mts_memalloced; 1031 freed += mtip->mti_stats[i].mts_memfreed; 1032 } 1033 db_printf("%18s %12ju %12juK %12ju\n", 1034 mtp->ks_shortdesc, allocs - frees, 1035 (alloced - freed + 1023) / 1024, allocs); 1036 if (db_pager_quit) 1037 break; 1038 } 1039} 1040 1041#if MALLOC_DEBUG_MAXZONES > 1 1042DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches) 1043{ 1044 struct malloc_type_internal *mtip; 1045 struct malloc_type *mtp; 1046 u_int subzone; 1047 1048 if (!have_addr) { 1049 db_printf("Usage: show multizone_matches <malloc type/addr>\n"); 1050 return; 1051 } 1052 mtp = (void *)addr; 1053 if (mtp->ks_magic != M_MAGIC) { 1054 db_printf("Magic %lx does not match expected %x\n", 1055 mtp->ks_magic, M_MAGIC); 1056 return; 1057 } 1058 1059 mtip = mtp->ks_handle; 1060 subzone = mtip->mti_zone; 1061 1062 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 1063 mtip = mtp->ks_handle; 1064 if (mtip->mti_zone != subzone) 1065 continue; 1066 db_printf("%s\n", mtp->ks_shortdesc); 1067 if (db_pager_quit) 1068 break; 1069 } 1070} 1071#endif /* MALLOC_DEBUG_MAXZONES > 1 */ 1072#endif /* DDB */ 1073 1074#ifdef MALLOC_PROFILE 1075 1076static int 1077sysctl_kern_mprof(SYSCTL_HANDLER_ARGS) 1078{ 1079 struct sbuf sbuf; 1080 uint64_t count; 1081 uint64_t waste; 1082 uint64_t mem; 1083 int error; 1084 int rsize; 1085 int size; 1086 int i; 1087 1088 waste = 0; 1089 mem = 0; 1090 1091 error = sysctl_wire_old_buffer(req, 0); 1092 if (error != 0) 1093 return (error); 1094 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 1095 sbuf_printf(&sbuf, 1096 "\n Size Requests Real Size\n"); 1097 for (i = 0; i < KMEM_ZSIZE; i++) { 1098 size = i << KMEM_ZSHIFT; 1099 rsize = kmemzones[kmemsize[i]].kz_size; 1100 count = (long long unsigned)krequests[i]; 1101 1102 sbuf_printf(&sbuf, "%6d%28llu%11d\n", size, 1103 (unsigned long long)count, rsize); 1104 1105 if ((rsize * count) > (size * count)) 1106 waste += (rsize * count) - (size * count); 1107 mem += (rsize * count); 1108 } 1109 sbuf_printf(&sbuf, 1110 "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n", 1111 (unsigned long long)mem, (unsigned long long)waste); 1112 error = sbuf_finish(&sbuf); 1113 sbuf_delete(&sbuf); 1114 return (error); 1115} 1116 1117SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD, 1118 NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling"); 1119#endif /* MALLOC_PROFILE */
|