/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/9/sys/kern/kern_malloc.c 254083 2013-08-08 05:35:58Z kib $");

#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_malloc_probe_func_t	dtrace_malloc_probe;
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
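
/*
 * Worked example, assuming the default REALLOC_FRACTION of 1 and an
 * existing block whose real (bucket) size is 1024 bytes; the test itself
 * lives in realloc() below:
 *
 *	realloc(p, 600, ...)	600 <= 1024 and 600 > 1024 >> 1,
 *				so the existing block is reused in place
 *	realloc(p, 512, ...)	512 is not > 512, so a new, smaller
 *				block is allocated and the data copied
 */
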
/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
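
/*
 * Usage sketch (hypothetical, compiled out): a subsystem declares its own
 * malloc type once -- usually paired with a MALLOC_DECLARE() in a header --
 * and then tags each allocation and free with it, so that the per-type
 * statistics maintained below are attributed correctly.
 */
#if 0
MALLOC_DEFINE(M_EXAMPLE, "example", "example subsystem buffers");

static void *
example_alloc(size_t len)
{

	/* May sleep until memory is available; returned memory is zeroed. */
	return (malloc(len, M_EXAMPLE, M_WAITOK | M_ZERO));
}

static void
example_free(void *buf)
{

	free(buf, M_EXAMPLE);
}
#endif
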
static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL);

static struct malloc_type *kmemstatistics;
static vm_offset_t kmembase;
static vm_offset_t kmemlimit;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	PAGE_SIZE
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "16", },
	{32, "32", },
	{64, "64", },
	{128, "128", },
	{256, "256", },
	{512, "512", },
	{1024, "1024", },
	{2048, "2048", },
	{4096, "4096", },
#if PAGE_SIZE > 4096
	{8192, "8192", },
#if PAGE_SIZE > 8192
	{16384, "16384", },
#if PAGE_SIZE > 16384
	{32768, "32768", },
#if PAGE_SIZE > 32768
	{65536, "65536", },
#if PAGE_SIZE > 65536
#error	"Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};
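
/*
 * Worked example of the bucket lookup performed in malloc() below,
 * assuming 4 KB pages: a 100-byte request is first rounded up to the
 * next KMEM_ZBASE (16-byte) boundary,
 *
 *	(100 & ~KMEM_ZMASK) + KMEM_ZBASE = 96 + 16 = 112,
 *
 * and kmemsize[112 >> KMEM_ZSHIFT] then names the "128" zone, so the
 * caller is handed a 128-byte item.
 */
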
/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem_map allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Largest contiguous free range in kmem_map");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif
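
/*
 * Example (hypothetical session): on a kernel built with
 * "options MALLOC_MAKE_FAILURES", failing every 100th M_NOWAIT
 * allocation exercises callers' error handling:
 *
 *	# sysctl debug.malloc.failure_rate=100
 *	# sysctl debug.malloc.failure_count
 *
 * failure_rate may also be preset from loader.conf(5) via the
 * debug.malloc.failure_rate tunable declared above.
 */
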
static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = kmem_map->size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	vm_map_lock_read(kmem_map);
	size = kmem_map->root != NULL ? kmem_map->root->max_free :
	    kmem_map->max_offset - kmem_map->min_offset;
	vm_map_unlock_read(kmem_map);
	return (sysctl_handle_long(oidp, &size, 0, req));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static u_int
mtp_get_subzone(const char *desc)
{
	size_t len;
	u_int val;

	if (desc == NULL || (len = strlen(desc)) == 0)
		return (0);
	val = desc[zone_offset % len];
	return (val % numzones);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static inline u_int
mtp_get_subzone(const char *desc)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
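
/*
 * Example (hypothetical configuration): a kernel built with
 *
 *	options	MALLOC_DEBUG_MAXZONES=8
 *
 * spreads malloc types across eight copies of each bucket zone.  A type
 * with the short description "devbuf" is assigned subzone
 * "devbuf"[zone_offset % 6] % numzones, so varying the
 * debug.malloc.zone_offset or debug.malloc.numzones loader tunables
 * between boots reshuffles which types share a subzone; if corruption
 * keeps following a suspect type across reshuffles, the suspicion is
 * probably right (see the "show multizone_matches" DDB command near the
 * end of this file).
 */
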
int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * size of the bucket used.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * size of the bucket used.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	int indx;
	struct malloc_type_internal *mtip;
	caddr_t va;
	uma_zone_t zone;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp, size)) {
		va = memguard_alloc(size, flags);
		if (va != NULL)
			return (va);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(size);
#endif

	if (size <= KMEM_ZMAX) {
		mtip = mtp->ks_handle;
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		KASSERT(mtip->mti_zone < numzones,
		    ("mti_zone %u out of range %d",
		    mtip->mti_zone, numzones));
		zone = kmemzones[indx].kz_zone[mtip->mti_zone];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}
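
/*
 * Caller sketch (hypothetical, compiled out): an M_NOWAIT result must be
 * checked for NULL, while M_WAITOK may sleep but never returns NULL (and
 * is asserted against in interrupt context above), so its result needs
 * no NULL check.
 */
#if 0
static int
example_nowait(void **bufp, size_t len)
{

	*bufp = malloc(len, M_TEMP, M_NOWAIT);
	if (*bufp == NULL)
		return (ENOMEM);	/* caller must cope with failure */
	return (0);
}
#endif
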
/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return;
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	addr = redzone_addr_ntor(addr);
#endif

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtpp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes for
		 * 64-bit machines.
		 */
		mtpp = (struct malloc_type **)
		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
		mtpp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtpp = mtp;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("realloc: bad malloc type magic"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
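
/*
 * Caller sketch (hypothetical, compiled out): overwriting the only copy
 * of a pointer with realloc()'s return value leaks the old block on
 * failure.  Either keep the old pointer until the resize is known to
 * have succeeded, or use reallocf(), which frees it on failure.
 */
#if 0
static int
example_grow(void **bufp, size_t newlen)
{
	void *newbuf;

	newbuf = realloc(*bufp, newlen, M_TEMP, M_NOWAIT);
	if (newbuf == NULL)
		return (ENOMEM);	/* *bufp is still valid here */
	*bufp = newbuf;
	return (0);

	/*
	 * Equivalent "give up on failure" form:
	 *
	 *	if ((*bufp = reallocf(*bufp, newlen, M_TEMP,
	 *	    M_NOWAIT)) == NULL)
	 *		return (ENOMEM);
	 */
}
#endif
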
/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(void *dummy)
{
	uint8_t indx;
	u_long mem_size, tmp;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale > 0 &&
	    (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_MIN)
	vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_min", &vm_kmem_size_min);
	if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min) {
		vm_kmem_size = vm_kmem_size_min;
	}

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;

	/* Allow final override from the kernel environment */
	TUNABLE_ULONG_FETCH("vm.kmem_size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful to not overflow the 32-bit
	 * ints while doing the check or the adjustment.
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;

#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	kmem_map = kmem_suballoc(kernel_map, &kmembase, &kmemlimit,
	    tmp, TRUE);
	kmem_map->system_map = 1;

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kmem_map);
#endif

	uma_startup2();

	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;
		int subzone;

		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#ifdef INVARIANTS
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
		}
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;

	}
}
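
/*
 * Worked example of the auto-tuning above (illustrative figures): on a
 * machine with 1 GB of RAM and 4 KB pages, mem_size is 262144 pages.
 * With a hypothetical VM_KMEM_SIZE_SCALE of 3, the scaled value is
 *
 *	(262144 / 3) * 4096 bytes, roughly 341 MB,
 *
 * which replaces the VM_KMEM_SIZE-based default if larger.  The result
 * is then clamped between vm.kmem_size_min and vm.kmem_size_max, may be
 * overridden outright by vm.kmem_size from loader.conf(5), and is
 * finally capped at twice physical memory.
 */
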
void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_magic != M_MAGIC)
		panic("malloc_init: bad malloc type magic");

	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;
	mtip->mti_zone = mtp_get_subzone(mtp->ks_shortdesc);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("malloc_uninit: bad malloc type magic"));
	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));

	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	mtx_lock(&malloc_mtx);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i]));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");
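
/*
 * Layout sketch of the kern.malloc_stats stream emitted above -- the
 * binary format that userland consumers such as libmemstat(3) (and,
 * through it, vmstat -m) parse:
 *
 *	struct malloc_type_stream_header   version, maxcpus, type count
 *	then, for each of the 'count' types:
 *		struct malloc_type_header          type name
 *		MAXCPU x struct malloc_type_stats  per-CPU counters
 */
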
void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}

#ifdef DDB
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	uint64_t allocs, frees;
	uint64_t alloced, freed;
	int i;

	db_printf("%18s %12s %12s %12s\n", "Type", "InUse", "MemUse",
	    "Requests");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		alloced = 0;
		freed = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
			alloced += mtip->mti_stats[i].mts_memalloced;
			freed += mtip->mti_stats[i].mts_memfreed;
		}
		db_printf("%18s %12ju %12juK %12ju\n",
		    mtp->ks_shortdesc, allocs - frees,
		    (alloced - freed + 1023) / 1024, allocs);
		if (db_pager_quit)
			break;
	}
}
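
/*
 * Example DDB session (figures are hypothetical):
 *
 *	db> show malloc
 *	              Type        InUse       MemUse     Requests
 *	            devbuf         1238        1034K         1301
 *	              temp           45          12K        32768
 *
 * InUse is outstanding allocations (allocs - frees), MemUse is
 * outstanding bytes rounded up to 1K units, and Requests is the total
 * number of allocations ever made with the type.
 */
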
#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_magic != M_MAGIC) {
		db_printf("Magic %lx does not match expected %x\n",
		    mtp->ks_magic, M_MAGIC);
		return;
	}

	mtip = mtp->ks_handle;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = mtp->ks_handle;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
		if (db_pager_quit)
			break;
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int error;
	int rsize;
	int size;
	int i;

	waste = 0;
	mem = 0;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */