/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose algorithms
 * are well known.
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/domainset.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/smr.h>
#include <sys/taskqueue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_dumpset.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

#include <machine/md_var.h>

#ifdef INVARIANTS
#define	UMA_ALWAYS_CTORDTOR	1
#else
#define	UMA_ALWAYS_CTORDTOR	0
#endif

/*
 * This is the zone and keg from which all zones are spawned.
 */
static uma_zone_t kegs;
static uma_zone_t zones;

/*
 * On INVARIANTS builds, the slab contains a second bitset of the same size,
 * "dbg_bits", which is laid out immediately after us_free.
 */
#ifdef INVARIANTS
#define	SLAB_BITSETS	2
#else
#define	SLAB_BITSETS	1
#endif

/*
 * These are the two zones from which all offpage uma_slab_ts are allocated.
 *
 * One zone is for slab headers that can represent a larger number of items,
 * making the slabs themselves more efficient, and the other zone is for
 * headers that are smaller and represent fewer items, making the headers more
 * efficient.
 */
#define	SLABZONE_SIZE(setsize)						\
    (sizeof(struct uma_hash_slab) + BITSET_SIZE(setsize) * SLAB_BITSETS)
#define	SLABZONE0_SETSIZE	(PAGE_SIZE / 16)
#define	SLABZONE1_SETSIZE	SLAB_MAX_SETSIZE
#define	SLABZONE0_SIZE	SLABZONE_SIZE(SLABZONE0_SETSIZE)
#define	SLABZONE1_SIZE	SLABZONE_SIZE(SLABZONE1_SETSIZE)
static uma_zone_t slabzones[2];

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
static MALLOC_DEFINE(M_UMA, "UMA", "UMA Misc");

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);

/* Linked list of all cache-only zones in the system */
static LIST_HEAD(,uma_zone) uma_cachezones =
    LIST_HEAD_INITIALIZER(uma_cachezones);

/*
 * Mutex for global lists: uma_kegs, uma_cachezones, and the per-keg list of
 * zones.
 */
static struct rwlock_padalign __exclusive_cache_line uma_rwlock;

static struct sx uma_reclaim_lock;

/*
 * First available virtual address for boot time allocations.
 */
static vm_offset_t bootstart;
static vm_offset_t bootmem;

/*
 * kmem soft limit, initialized by uma_set_limit().  Ensure that early
 * allocations don't trigger a wakeup of the reclaim thread.
 */
unsigned long uma_kmem_limit = LONG_MAX;
SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
    "UMA kernel memory soft limit");
unsigned long uma_kmem_total;
SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
    "UMA kernel memory usage");

/* Is the VM done starting up? */
static enum {
	BOOT_COLD,
	BOOT_KVA,
	BOOT_PCPU,
	BOOT_RUNNING,
	BOOT_SHUTDOWN,
} booted = BOOT_COLD;

/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
static struct callout uma_callout;
#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	const char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	uma_import import;
	uma_release release;
	void *arg;
	uma_keg_t keg;
	int align;
	uint32_t flags;
};

struct uma_kctor_args {
	uma_zone_t zone;
	size_t size;
	uma_init uminit;
	uma_fini fini;
	int align;
	uint32_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	const char	*ubz_name;
	int		ubz_entries;	/* Number of items it can hold. */
	int		ubz_maxsize;	/* Maximum allocation size per-item. */
};

/*
 * Compute the actual number of bucket entries to pack them in power
 * of two sizes for more efficient space utilization.
 */
#define	BUCKET_SIZE(n)						\
    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))

#define	BUCKET_MAX	BUCKET_SIZE(256)

struct uma_bucket_zone bucket_zones[] = {
	/* Literal bucket sizes. */
	{ NULL, "2 Bucket", 2, 4096 },
	{ NULL, "4 Bucket", 4, 3072 },
	{ NULL, "8 Bucket", 8, 2048 },
	{ NULL, "16 Bucket", 16, 1024 },
	/* Rounded down power of 2 sizes for efficiency. */
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
	{ NULL, NULL, 0}
};
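
/*
 * Illustrative only: BUCKET_SIZE(n) sizes a bucket so that the header and
 * the item pointer array together occupy n pointer-sized slots.  Assuming
 * 8-byte pointers and, say, a 16-byte struct uma_bucket header,
 * BUCKET_SIZE(32) = ((8 * 32) - 16) / 8 = 30 usable entries, which is why
 * the "32 Bucket" zone above holds slightly fewer than 32 items while
 * packing into exactly 32 * sizeof(void *) bytes.
 */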

/*
 * Flags and enumerations to be passed to internal functions.
 */
enum zfreeskip {
	SKIP_NONE =	0,
	SKIP_CNT =	0x00000001,
	SKIP_DTOR =	0x00010000,
	SKIP_FINI =	0x00020000,
};

/* Prototypes.. */

void	uma_startup1(vm_offset_t);
void	uma_startup2(void);

static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *contig_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void page_free(void *, vm_size_t, uint8_t);
static void pcpu_page_free(void *, vm_size_t, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_reclaim(uma_zone_t zone, bool, int);
static bool bucket_cache_reclaim_domain(uma_zone_t, bool, bool, int);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static void keg_drain(uma_keg_t keg, int domain);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static inline void item_dtor(uma_zone_t zone, void *item, int size,
    void *udata, enum zfreeskip skip);
static int zero_init(void *, int, int);
static void zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
    int itemdomain, bool ws);
static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
static void zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *), void *);
static void zone_timeout(uma_zone_t zone, void *);
static int hash_alloc(struct uma_hash *, u_int);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_shutdown(void);
static void *zone_alloc_item(uma_zone_t, void *, int, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static int zone_alloc_limit(uma_zone_t zone, int count, int flags);
static void zone_free_limit(uma_zone_t zone, int count);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void bucket_zone_drain(int domain);
static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, uint32_t flags);
static int zone_import(void *, void **, int, int, int);
static void zone_release(void *, void **, int);
static bool cache_alloc(uma_zone_t, uma_cache_t, void *, int);
static bool cache_free(uma_zone_t, uma_cache_t, void *, void *, int);

static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS);

static uint64_t uma_zone_get_allocs(uma_zone_t zone);

static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Memory allocation debugging");

#ifdef INVARIANTS
static uint64_t uma_keg_get_allocs(uma_keg_t zone);
static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);

static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);

static u_int dbg_divisor = 1;
SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
    "Debug & thrash every this item in memory allocator");

static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
    &uma_dbg_cnt, "memory items debugged");
SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
    &uma_skip_cnt, "memory items skipped, not debugged");
#endif

SYSCTL_NODE(_vm, OID_AUTO, uma, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Universal Memory Allocator");

SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_INT,
    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");

SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_STRUCT,
    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");

static int zone_warnings = 1;
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
    "Warn when UMA zones become full");

static int multipage_slabs = 1;
TUNABLE_INT("vm.debug.uma_multipage_slabs", &multipage_slabs);
SYSCTL_INT(_vm_debug, OID_AUTO, uma_multipage_slabs,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &multipage_slabs, 0,
    "UMA may choose larger slab sizes for better efficiency");

/*
 * Select the slab zone for an offpage slab with the given maximum item count.
 */
static inline uma_zone_t
slabzone(int ipers)
{

	return (slabzones[ipers > SLABZONE0_SETSIZE]);
}

/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */
static void
bucket_enable(void)
{

	KASSERT(booted >= BOOT_KVA, ("Bucket enable before init"));
	bucketdisable = vm_page_count_min();
}

/*
 * Initialize bucket_zones, the array of zones of buckets of various sizes.
 *
 * For each zone, calculate the memory required for each bucket, consisting
 * of the header and an array of pointers.
 */
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int size;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET |
		    UMA_ZONE_FIRSTTOUCH);
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.
 */
static struct uma_bucket_zone *
bucket_zone_lookup(int entries)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_entries >= entries)
			return (ubz);
	ubz--;
	return (ubz);
}

static int
bucket_select(int size)
{
	struct uma_bucket_zone *ubz;

	ubz = &bucket_zones[0];
	if (size > ubz->ubz_maxsize)
		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);

	for (; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_maxsize < size)
			break;
	ubz--;
	return (ubz->ubz_entries);
}

static uma_bucket_t
bucket_alloc(uma_zone_t zone, void *udata, int flags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

	/*
	 * Don't allocate buckets early in boot.
	 */
	if (__predict_false(booted < BOOT_KVA))
		return (NULL);

	/*
	 * To limit bucket recursion we store the original zone flags
	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
	 * NOVM flag to persist even through deep recursions.  We also
	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
	 * a bucket for a bucket zone so we do not allow infinite bucket
	 * recursion.  This cookie will even persist to frees of unused
	 * buckets via the allocation path or bucket allocations in the
	 * free path.
	 */
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	else {
		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
			return (NULL);
		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
	}
	if (((uintptr_t)udata & UMA_ZONE_VM) != 0)
		flags |= M_NOVM;
	ubz = bucket_zone_lookup(atomic_load_16(&zone->uz_bucket_size));
	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
		ubz++;
	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = min(ubz->ubz_entries,
		    zone->uz_bucket_size_max);
		bucket->ub_seq = SMR_SEQ_INVALID;
		CTR3(KTR_UMA, "bucket_alloc: zone %s(%p) allocated bucket %p",
		    zone->uz_name, zone, bucket);
	}

	return (bucket);
}

static void
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucket_zone *ubz;

	if (bucket->ub_cnt != 0)
		bucket_drain(zone, bucket);

	KASSERT(bucket->ub_cnt == 0,
	    ("bucket_free: Freeing a non free bucket."));
	KASSERT(bucket->ub_seq == SMR_SEQ_INVALID,
	    ("bucket_free: Freeing an SMR bucket."));
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	ubz = bucket_zone_lookup(bucket->ub_entries);
	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
}

static void
bucket_zone_drain(int domain)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		uma_zone_reclaim_domain(ubz->ubz_zone, UMA_RECLAIM_DRAIN,
		    domain);
}

/*
 * Acquire the domain lock and record contention.
 */
static uma_zone_domain_t
zone_domain_lock(uma_zone_t zone, int domain)
{
	uma_zone_domain_t zdom;
	bool lockfail;

	zdom = ZDOM_GET(zone, domain);
	lockfail = false;
	if (ZDOM_OWNED(zdom))
		lockfail = true;
	ZDOM_LOCK(zdom);

	/*
	 * This is unsynchronized.  The counter does not need to be precise.
	 */
	if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max)
		zone->uz_bucket_size++;
	return (zdom);
}

/*
 * Search for the domain with the least cached items and return it if it
 * is out of balance with the preferred domain.
 */
static __noinline int
zone_domain_lowest(uma_zone_t zone, int pref)
{
	long least, nitems, prefitems;
	int domain;
	int i;

	prefitems = least = LONG_MAX;
	domain = 0;
	for (i = 0; i < vm_ndomains; i++) {
		nitems = ZDOM_GET(zone, i)->uzd_nitems;
		if (nitems < least) {
			domain = i;
			least = nitems;
		}
		if (domain == pref)
			prefitems = nitems;
	}
	if (prefitems < least * 2)
		return (pref);

	return (domain);
}

/*
 * Search for the domain with the most cached items and return it or the
 * preferred domain if it has enough to proceed.
 */
static __noinline int
zone_domain_highest(uma_zone_t zone, int pref)
{
	long most, nitems;
	int domain;
	int i;

	if (ZDOM_GET(zone, pref)->uzd_nitems > BUCKET_MAX)
		return (pref);

	most = 0;
	domain = 0;
	for (i = 0; i < vm_ndomains; i++) {
		nitems = ZDOM_GET(zone, i)->uzd_nitems;
		if (nitems > most) {
			domain = i;
			most = nitems;
		}
	}

	return (domain);
}

/*
 * Set the maximum imax value.
 */
static void
zone_domain_imax_set(uma_zone_domain_t zdom, int nitems)
{
	long old;

	old = zdom->uzd_imax;
	do {
		if (old >= nitems)
			return;
	} while (atomic_fcmpset_long(&zdom->uzd_imax, &old, nitems) == 0);

	/*
	 * We are at new maximum, so do the last WSS update for the old
	 * bimin and prepare to measure next allocation batch.
	 */
	if (zdom->uzd_wss < old - zdom->uzd_bimin)
		zdom->uzd_wss = old - zdom->uzd_bimin;
	zdom->uzd_bimin = nitems;
}

/*
 * Attempt to satisfy an allocation by retrieving a full bucket from one of the
 * zone's caches.  If a bucket is found the zone is not locked on return.
 */
static uma_bucket_t
zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, bool reclaim)
{
	uma_bucket_t bucket;
	long cnt;
	int i;
	bool dtor = false;

	ZDOM_LOCK_ASSERT(zdom);

	if ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) == NULL)
		return (NULL);

	/* SMR Buckets can not be re-used until readers expire. */
	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
	    bucket->ub_seq != SMR_SEQ_INVALID) {
		if (!smr_poll(zone->uz_smr, bucket->ub_seq, false))
			return (NULL);
		bucket->ub_seq = SMR_SEQ_INVALID;
		dtor = (zone->uz_dtor != NULL) || UMA_ALWAYS_CTORDTOR;
		if (STAILQ_NEXT(bucket, ub_link) != NULL)
			zdom->uzd_seq = STAILQ_NEXT(bucket, ub_link)->ub_seq;
	}
	STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link);

	KASSERT(zdom->uzd_nitems >= bucket->ub_cnt,
	    ("%s: item count underflow (%ld, %d)",
	    __func__, zdom->uzd_nitems, bucket->ub_cnt));
	KASSERT(bucket->ub_cnt > 0,
	    ("%s: empty bucket in bucket cache", __func__));
	zdom->uzd_nitems -= bucket->ub_cnt;

	if (reclaim) {
		/*
		 * Shift the bounds of the current WSS interval to avoid
		 * perturbing the estimates.
		 */
		cnt = lmin(zdom->uzd_bimin, bucket->ub_cnt);
		atomic_subtract_long(&zdom->uzd_imax, cnt);
		zdom->uzd_bimin -= cnt;
		zdom->uzd_imin -= lmin(zdom->uzd_imin, bucket->ub_cnt);
		if (zdom->uzd_limin >= bucket->ub_cnt) {
			zdom->uzd_limin -= bucket->ub_cnt;
		} else {
			zdom->uzd_limin = 0;
			zdom->uzd_timin = 0;
		}
	} else if (zdom->uzd_bimin > zdom->uzd_nitems) {
		zdom->uzd_bimin = zdom->uzd_nitems;
		if (zdom->uzd_imin > zdom->uzd_nitems)
			zdom->uzd_imin = zdom->uzd_nitems;
	}

	ZDOM_UNLOCK(zdom);
	if (dtor)
		for (i = 0; i < bucket->ub_cnt; i++)
			item_dtor(zone, bucket->ub_bucket[i], zone->uz_size,
			    NULL, SKIP_NONE);

	return (bucket);
}

/*
 * Insert a full bucket into the specified cache.  The "ws" parameter indicates
 * whether the bucket's contents should be counted as part of the zone's
 * working set.  The bucket may be freed if it exceeds the bucket limit.
 */
static void
zone_put_bucket(uma_zone_t zone, int domain, uma_bucket_t bucket, void *udata,
    const bool ws)
{
	uma_zone_domain_t zdom;

	/* We don't cache empty buckets.  This can happen after a reclaim. */
	if (bucket->ub_cnt == 0)
		goto out;
	zdom = zone_domain_lock(zone, domain);

	/*
	 * Conditionally set the maximum number of items.
	 */
	zdom->uzd_nitems += bucket->ub_cnt;
	if (__predict_true(zdom->uzd_nitems < zone->uz_bucket_max)) {
		if (ws) {
			zone_domain_imax_set(zdom, zdom->uzd_nitems);
		} else {
			/*
			 * Shift the bounds of the current WSS interval to
			 * avoid perturbing the estimates.
			 */
			atomic_add_long(&zdom->uzd_imax, bucket->ub_cnt);
			zdom->uzd_imin += bucket->ub_cnt;
			zdom->uzd_bimin += bucket->ub_cnt;
			zdom->uzd_limin += bucket->ub_cnt;
		}
		if (STAILQ_EMPTY(&zdom->uzd_buckets))
			zdom->uzd_seq = bucket->ub_seq;

		/*
		 * Try to promote reuse of recently used items.  For items
		 * protected by SMR, try to defer reuse to minimize polling.
		 */
		if (bucket->ub_seq == SMR_SEQ_INVALID)
			STAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
		else
			STAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
		ZDOM_UNLOCK(zdom);
		return;
	}
	zdom->uzd_nitems -= bucket->ub_cnt;
	ZDOM_UNLOCK(zdom);
out:
	bucket_free(zone, bucket, udata);
}

/* Pops an item out of a per-cpu cache bucket. */
static inline void *
cache_bucket_pop(uma_cache_t cache, uma_cache_bucket_t bucket)
{
	void *item;

	CRITICAL_ASSERT(curthread);

	bucket->ucb_cnt--;
	item = bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt];
#ifdef INVARIANTS
	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = NULL;
	KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
#endif
	cache->uc_allocs++;

	return (item);
}

/* Pushes an item into a per-cpu cache bucket. */
static inline void
cache_bucket_push(uma_cache_t cache, uma_cache_bucket_t bucket, void *item)
{

	CRITICAL_ASSERT(curthread);
	KASSERT(bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] == NULL,
	    ("uma_zfree: Freeing to non free bucket index."));

	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = item;
	bucket->ucb_cnt++;
	cache->uc_frees++;
}
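
/*
 * Note (illustrative): cache_bucket_pop() and cache_bucket_push() treat the
 * per-CPU bucket as a simple LIFO stack of item pointers indexed by ucb_cnt,
 * so a free followed immediately by an allocation on the same CPU returns
 * the cache-warm item that was just released.  Both run within a critical
 * section, which is why no lock is taken here.
 */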
792 */ 793static inline uma_bucket_t 794cache_bucket_unload(uma_cache_bucket_t bucket) 795{ 796 uma_bucket_t b; 797 798 b = bucket->ucb_bucket; 799 if (b != NULL) { 800 MPASS(b->ub_entries == bucket->ucb_entries); 801 b->ub_cnt = bucket->ucb_cnt; 802 bucket->ucb_bucket = NULL; 803 bucket->ucb_entries = bucket->ucb_cnt = 0; 804 } 805 806 return (b); 807} 808 809static inline uma_bucket_t 810cache_bucket_unload_alloc(uma_cache_t cache) 811{ 812 813 return (cache_bucket_unload(&cache->uc_allocbucket)); 814} 815 816static inline uma_bucket_t 817cache_bucket_unload_free(uma_cache_t cache) 818{ 819 820 return (cache_bucket_unload(&cache->uc_freebucket)); 821} 822 823static inline uma_bucket_t 824cache_bucket_unload_cross(uma_cache_t cache) 825{ 826 827 return (cache_bucket_unload(&cache->uc_crossbucket)); 828} 829 830/* 831 * Load a bucket into a per-cpu cache bucket. 832 */ 833static inline void 834cache_bucket_load(uma_cache_bucket_t bucket, uma_bucket_t b) 835{ 836 837 CRITICAL_ASSERT(curthread); 838 MPASS(bucket->ucb_bucket == NULL); 839 MPASS(b->ub_seq == SMR_SEQ_INVALID); 840 841 bucket->ucb_bucket = b; 842 bucket->ucb_cnt = b->ub_cnt; 843 bucket->ucb_entries = b->ub_entries; 844} 845 846static inline void 847cache_bucket_load_alloc(uma_cache_t cache, uma_bucket_t b) 848{ 849 850 cache_bucket_load(&cache->uc_allocbucket, b); 851} 852 853static inline void 854cache_bucket_load_free(uma_cache_t cache, uma_bucket_t b) 855{ 856 857 cache_bucket_load(&cache->uc_freebucket, b); 858} 859 860#ifdef NUMA 861static inline void 862cache_bucket_load_cross(uma_cache_t cache, uma_bucket_t b) 863{ 864 865 cache_bucket_load(&cache->uc_crossbucket, b); 866} 867#endif 868 869/* 870 * Copy and preserve ucb_spare. 871 */ 872static inline void 873cache_bucket_copy(uma_cache_bucket_t b1, uma_cache_bucket_t b2) 874{ 875 876 b1->ucb_bucket = b2->ucb_bucket; 877 b1->ucb_entries = b2->ucb_entries; 878 b1->ucb_cnt = b2->ucb_cnt; 879} 880 881/* 882 * Swap two cache buckets. 883 */ 884static inline void 885cache_bucket_swap(uma_cache_bucket_t b1, uma_cache_bucket_t b2) 886{ 887 struct uma_cache_bucket b3; 888 889 CRITICAL_ASSERT(curthread); 890 891 cache_bucket_copy(&b3, b1); 892 cache_bucket_copy(b1, b2); 893 cache_bucket_copy(b2, &b3); 894} 895 896/* 897 * Attempt to fetch a bucket from a zone on behalf of the current cpu cache. 898 */ 899static uma_bucket_t 900cache_fetch_bucket(uma_zone_t zone, uma_cache_t cache, int domain) 901{ 902 uma_zone_domain_t zdom; 903 uma_bucket_t bucket; 904 905 /* 906 * Avoid the lock if possible. 907 */ 908 zdom = ZDOM_GET(zone, domain); 909 if (zdom->uzd_nitems == 0) 910 return (NULL); 911 912 if ((cache_uz_flags(cache) & UMA_ZONE_SMR) != 0 && 913 !smr_poll(zone->uz_smr, zdom->uzd_seq, false)) 914 return (NULL); 915 916 /* 917 * Check the zone's cache of buckets. 
	zdom = zone_domain_lock(zone, domain);
	if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL)
		return (bucket);
	ZDOM_UNLOCK(zdom);

	return (NULL);
}

static void
zone_log_warning(uma_zone_t zone)
{
	static const struct timeval warninterval = { 300, 0 };

	if (!zone_warnings || zone->uz_warning == NULL)
		return;

	if (ratecheck(&zone->uz_ratecheck, &warninterval))
		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
}

static inline void
zone_maxaction(uma_zone_t zone)
{

	if (zone->uz_maxaction.ta_func != NULL)
		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations.  (stats, hash size, etc.)
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout, NULL);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

/*
 * Update the working set size estimates for the zone's bucket cache.
 * The constants chosen here are somewhat arbitrary.
 */
static void
zone_domain_update_wss(uma_zone_domain_t zdom)
{
	long m;

	ZDOM_LOCK_ASSERT(zdom);
	MPASS(zdom->uzd_imax >= zdom->uzd_nitems);
	MPASS(zdom->uzd_nitems >= zdom->uzd_bimin);
	MPASS(zdom->uzd_bimin >= zdom->uzd_imin);

	/*
	 * Estimate WSS as modified moving average of biggest allocation
	 * batches for each period over few minutes (UMA_TIMEOUT of 20s).
	 */
	zdom->uzd_wss = lmax(zdom->uzd_wss * 3 / 4,
	    zdom->uzd_imax - zdom->uzd_bimin);

	/*
	 * Estimate longtime minimum item count as a combination of recent
	 * minimum item count, adjusted by WSS for safety, and the modified
	 * moving average over the last several hours (UMA_TIMEOUT of 20s).
	 * timin measures time since limin tried to go negative, that means
	 * we were dangerously close to or got out of cache.
	 */
	m = zdom->uzd_imin - zdom->uzd_wss;
	if (m >= 0) {
		if (zdom->uzd_limin >= m)
			zdom->uzd_limin = m;
		else
			zdom->uzd_limin = (m + zdom->uzd_limin * 255) / 256;
		zdom->uzd_timin++;
	} else {
		zdom->uzd_limin = 0;
		zdom->uzd_timin = 0;
	}

	/* To reduce period edge effects on WSS keep half of the imax. */
	atomic_subtract_long(&zdom->uzd_imax,
	    (zdom->uzd_imax - zdom->uzd_nitems + 1) / 2);
	zdom->uzd_imin = zdom->uzd_bimin = zdom->uzd_nitems;
}
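
/*
 * Worked example (illustrative numbers): with UMA_TIMEOUT = 20s, suppose a
 * domain ended the previous period with uzd_wss = 100 and this period's
 * cache peaked 60 items above its starting level (uzd_imax - uzd_bimin).
 * The new estimate is lmax(100 * 3 / 4, 60) = 75, so the WSS decays by a
 * quarter per period unless a larger allocation batch is seen.  Likewise
 * uzd_limin tracks m = uzd_imin - uzd_wss through the
 * (m + limin * 255) / 256 moving average, giving a time constant of roughly
 * 256 periods (about an hour and a half) and several hours to converge; it
 * is reset to zero whenever m goes negative.
 */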

/*
 * Routine to perform timeout driven calculations.  This expands the
 * hashes and does per cpu statistics aggregation.
 *
 * Returns nothing.
 */
static void
zone_timeout(uma_zone_t zone, void *unused)
{
	uma_keg_t keg;
	u_int slabs, pages;

	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
		goto trim;

	keg = zone->uz_keg;

	/*
	 * Hash zones are non-numa by definition so the first domain
	 * is the only one present.
	 */
	KEG_LOCK(keg, 0);
	pages = keg->uk_domain[0].ud_pages;

	/*
	 * Expand the keg hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is completely reduce collisions.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */
	if ((slabs = pages / keg->uk_ppera) > keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		KEG_UNLOCK(keg, 0);
		ret = hash_alloc(&newhash, 1 << fls(slabs));
		KEG_LOCK(keg, 0);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg, 0);
			hash_free(&oldhash);
			goto trim;
		}
	}
	KEG_UNLOCK(keg, 0);

trim:
	/* Trim caches not used for a long time. */
	for (int i = 0; i < vm_ndomains; i++) {
		if (bucket_cache_reclaim_domain(zone, false, false, i) &&
		    (zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
			keg_drain(zone->uz_keg, i);
	}
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash, u_int size)
{
	size_t alloc;

	KASSERT(powerof2(size), ("hash size must be power of 2"));
	if (size > UMA_HASH_SIZE_INIT)  {
		hash->uh_hashsize = size;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = malloc(alloc, M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    UMA_ANYDOMAIN, M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	Nothing
 *
 * Discussion:
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_hash_slab_t slab;
	u_int hval;
	u_int idx;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
		while (!LIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
			slab = LIST_FIRST(&oldhash->uh_slab_hash[idx]);
			LIST_REMOVE(slab, uhs_hlink);
			hval = UMA_HASH(newhash, slab->uhs_data);
			LIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, uhs_hlink);
		}

	return (1);
}
1162 * 1163 * Arguments: 1164 * slab_hash The hash bucket we're freeing 1165 * hashsize The number of entries in that hash bucket 1166 * 1167 * Returns: 1168 * Nothing 1169 */ 1170static void 1171hash_free(struct uma_hash *hash) 1172{ 1173 if (hash->uh_slab_hash == NULL) 1174 return; 1175 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT) 1176 zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE); 1177 else 1178 free(hash->uh_slab_hash, M_UMAHASH); 1179} 1180 1181/* 1182 * Frees all outstanding items in a bucket 1183 * 1184 * Arguments: 1185 * zone The zone to free to, must be unlocked. 1186 * bucket The free/alloc bucket with items. 1187 * 1188 * Returns: 1189 * Nothing 1190 */ 1191static void 1192bucket_drain(uma_zone_t zone, uma_bucket_t bucket) 1193{ 1194 int i; 1195 1196 if (bucket->ub_cnt == 0) 1197 return; 1198 1199 if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && 1200 bucket->ub_seq != SMR_SEQ_INVALID) { 1201 smr_wait(zone->uz_smr, bucket->ub_seq); 1202 bucket->ub_seq = SMR_SEQ_INVALID; 1203 for (i = 0; i < bucket->ub_cnt; i++) 1204 item_dtor(zone, bucket->ub_bucket[i], 1205 zone->uz_size, NULL, SKIP_NONE); 1206 } 1207 if (zone->uz_fini) 1208 for (i = 0; i < bucket->ub_cnt; i++) 1209 zone->uz_fini(bucket->ub_bucket[i], zone->uz_size); 1210 zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt); 1211 if (zone->uz_max_items > 0) 1212 zone_free_limit(zone, bucket->ub_cnt); 1213#ifdef INVARIANTS 1214 bzero(bucket->ub_bucket, sizeof(void *) * bucket->ub_cnt); 1215#endif 1216 bucket->ub_cnt = 0; 1217} 1218 1219/* 1220 * Drains the per cpu caches for a zone. 1221 * 1222 * NOTE: This may only be called while the zone is being torn down, and not 1223 * during normal operation. This is necessary in order that we do not have 1224 * to migrate CPUs to drain the per-CPU caches. 1225 * 1226 * Arguments: 1227 * zone The zone to drain, must be unlocked. 1228 * 1229 * Returns: 1230 * Nothing 1231 */ 1232static void 1233cache_drain(uma_zone_t zone) 1234{ 1235 uma_cache_t cache; 1236 uma_bucket_t bucket; 1237 smr_seq_t seq; 1238 int cpu; 1239 1240 /* 1241 * XXX: It is safe to not lock the per-CPU caches, because we're 1242 * tearing down the zone anyway. I.e., there will be no further use 1243 * of the caches at this point. 1244 * 1245 * XXX: It would good to be able to assert that the zone is being 1246 * torn down to prevent improper use of cache_drain(). 
	seq = SMR_SEQ_INVALID;
	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
		seq = smr_advance(zone->uz_smr);
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket = cache_bucket_unload_alloc(cache);
		if (bucket != NULL)
			bucket_free(zone, bucket, NULL);
		bucket = cache_bucket_unload_free(cache);
		if (bucket != NULL) {
			bucket->ub_seq = seq;
			bucket_free(zone, bucket, NULL);
		}
		bucket = cache_bucket_unload_cross(cache);
		if (bucket != NULL) {
			bucket->ub_seq = seq;
			bucket_free(zone, bucket, NULL);
		}
	}
	bucket_cache_reclaim(zone, true, UMA_ANYDOMAIN);
}

static void
cache_shrink(uma_zone_t zone, void *unused)
{

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	ZONE_LOCK(zone);
	zone->uz_bucket_size =
	    (zone->uz_bucket_size_min + zone->uz_bucket_size) / 2;
	ZONE_UNLOCK(zone);
}

static void
cache_drain_safe_cpu(uma_zone_t zone, void *unused)
{
	uma_cache_t cache;
	uma_bucket_t b1, b2, b3;
	int domain;

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	b1 = b2 = b3 = NULL;
	critical_enter();
	cache = &zone->uz_cpu[curcpu];
	domain = PCPU_GET(domain);
	b1 = cache_bucket_unload_alloc(cache);

	/*
	 * Don't flush SMR zone buckets.  This leaves the zone without a
	 * bucket and forces every free to synchronize().
	 */
	if ((zone->uz_flags & UMA_ZONE_SMR) == 0) {
		b2 = cache_bucket_unload_free(cache);
		b3 = cache_bucket_unload_cross(cache);
	}
	critical_exit();

	if (b1 != NULL)
		zone_free_bucket(zone, b1, NULL, domain, false);
	if (b2 != NULL)
		zone_free_bucket(zone, b2, NULL, domain, false);
	if (b3 != NULL) {
		/* Adjust the domain so it goes to zone_free_cross. */
		domain = (domain + 1) % vm_ndomains;
		zone_free_bucket(zone, b3, NULL, domain, false);
	}
}

/*
 * Safely drain per-CPU caches of a zone(s) to alloc bucket.
 * This is an expensive call because it needs to bind to all CPUs
 * one by one and enter a critical section on each of them in order
 * to safely access their cache buckets.
 * Zone lock must not be held on call to this function.
 */
static void
pcpu_cache_drain_safe(uma_zone_t zone)
{
	int cpu;

	/*
	 * Polite bucket sizes shrinking was not enough, shrink aggressively.
	 */
	if (zone)
		cache_shrink(zone, NULL);
	else
		zone_foreach(cache_shrink, NULL);

	CPU_FOREACH(cpu) {
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);

		if (zone)
			cache_drain_safe_cpu(zone, NULL);
		else
			zone_foreach(cache_drain_safe_cpu, NULL);
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}

/*
 * Reclaim cached buckets from a zone.  All buckets are reclaimed if the caller
 * requested a drain, otherwise the per-domain caches are trimmed to their
 * estimated working set size.
 */
static bool
bucket_cache_reclaim_domain(uma_zone_t zone, bool drain, bool trim, int domain)
{
	uma_zone_domain_t zdom;
	uma_bucket_t bucket;
	long target;
	bool done = false;

	/*
	 * The cross bucket is partially filled and not part of
	 * the item count.  Reclaim it individually here.
	 */
	zdom = ZDOM_GET(zone, domain);
	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 || drain) {
		ZONE_CROSS_LOCK(zone);
		bucket = zdom->uzd_cross;
		zdom->uzd_cross = NULL;
		ZONE_CROSS_UNLOCK(zone);
		if (bucket != NULL)
			bucket_free(zone, bucket, NULL);
	}

	/*
	 * If we were asked to drain the zone, we are done only once
	 * this bucket cache is empty.  If trim, we reclaim items in
	 * excess of the zone's estimated working set size.  Multiple
	 * consecutive calls will shrink the WSS and so reclaim more.
	 * If neither drain nor trim, then voluntarily reclaim 1/4
	 * (to reduce first spike) of items not used for a long time.
	 */
	ZDOM_LOCK(zdom);
	zone_domain_update_wss(zdom);
	if (drain)
		target = 0;
	else if (trim)
		target = zdom->uzd_wss;
	else if (zdom->uzd_timin > 900 / UMA_TIMEOUT)
		target = zdom->uzd_nitems - zdom->uzd_limin / 4;
	else {
		ZDOM_UNLOCK(zdom);
		return (done);
	}
	while ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) != NULL &&
	    zdom->uzd_nitems >= target + bucket->ub_cnt) {
		bucket = zone_fetch_bucket(zone, zdom, true);
		if (bucket == NULL)
			break;
		bucket_free(zone, bucket, NULL);
		done = true;
		ZDOM_LOCK(zdom);
	}
	ZDOM_UNLOCK(zdom);
	return (done);
}

static void
bucket_cache_reclaim(uma_zone_t zone, bool drain, int domain)
{
	int i;

	/*
	 * Shrink the zone bucket size to ensure that the per-CPU caches
	 * don't grow too large.
	 */
	if (zone->uz_bucket_size > zone->uz_bucket_size_min)
		zone->uz_bucket_size--;

	if (domain != UMA_ANYDOMAIN &&
	    (zone->uz_flags & UMA_ZONE_ROUNDROBIN) == 0) {
		bucket_cache_reclaim_domain(zone, drain, true, domain);
	} else {
		for (i = 0; i < vm_ndomains; i++)
			bucket_cache_reclaim_domain(zone, drain, true, i);
	}
}
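
/*
 * Illustrative numbers: with UMA_TIMEOUT = 20, the voluntary trim above only
 * starts once uzd_timin exceeds 45 periods, i.e. the domain has stayed at or
 * above its long-term minimum for roughly 15 minutes.  If uzd_nitems = 1000
 * and uzd_limin = 400, the target becomes 1000 - 400 / 4 = 900, so about a
 * quarter of the long-idle items are released per timeout rather than all of
 * them at once.
 */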

static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);

	mem = slab_data(slab, keg);
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
#ifdef INVARIANTS
		/*
		 * trash_fini implies that dtor was trash_dtor.  trash_fini
		 * would check that memory hasn't been modified since free,
		 * which executed trash_dtor.
		 * That's why we need to run uma_dbg_kskip() check here,
		 * albeit we don't make skip check for other init/fini
		 * invocations.
		 */
		if (!uma_dbg_kskip(keg, slab_item(slab, keg, i)) ||
		    keg->uk_fini != trash_fini)
#endif
			keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
		zone_free_item(slabzone(keg->uk_ipers), slab_tohashslab(slab),
		    NULL, SKIP_NONE);
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
}

static void
keg_drain_domain(uma_keg_t keg, int domain)
{
	struct slabhead freeslabs;
	uma_domain_t dom;
	uma_slab_t slab, tmp;
	uint32_t i, stofree, stokeep, partial;

	dom = &keg->uk_domain[domain];
	LIST_INIT(&freeslabs);

	CTR4(KTR_UMA, "keg_drain %s(%p) domain %d free items: %u",
	    keg->uk_name, keg, domain, dom->ud_free_items);

	KEG_LOCK(keg, domain);

	/*
	 * Are the free items in partially allocated slabs sufficient to meet
	 * the reserve?  If not, compute the number of fully free slabs that
	 * must be kept.
	 */
	partial = dom->ud_free_items - dom->ud_free_slabs * keg->uk_ipers;
	if (partial < keg->uk_reserve) {
		stokeep = min(dom->ud_free_slabs,
		    howmany(keg->uk_reserve - partial, keg->uk_ipers));
	} else {
		stokeep = 0;
	}
	stofree = dom->ud_free_slabs - stokeep;

	/*
	 * Partition the free slabs into two sets: those that must be kept in
	 * order to maintain the reserve, and those that may be released back
	 * to the system.  Since one set may be much larger than the other,
	 * populate the smaller of the two sets and swap them if necessary.
	 */
	for (i = min(stofree, stokeep); i > 0; i--) {
		slab = LIST_FIRST(&dom->ud_free_slab);
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&freeslabs, slab, us_link);
	}
	if (stofree > stokeep)
		LIST_SWAP(&freeslabs, &dom->ud_free_slab, uma_slab, us_link);

	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0) {
		LIST_FOREACH(slab, &freeslabs, us_link)
			UMA_HASH_REMOVE(&keg->uk_hash, slab);
	}
	dom->ud_free_items -= stofree * keg->uk_ipers;
	dom->ud_free_slabs -= stofree;
	dom->ud_pages -= stofree * keg->uk_ppera;
	KEG_UNLOCK(keg, domain);

	LIST_FOREACH_SAFE(slab, &freeslabs, us_link, tmp)
		keg_free_slab(keg, slab, keg->uk_ipers);
}
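
/*
 * Worked example for the partitioning above (hypothetical values): with
 * uk_ipers = 8, uk_reserve = 10, ud_free_slabs = 5 and ud_free_items = 44,
 * the partially allocated slabs hold 44 - 5 * 8 = 4 reserve-eligible items,
 * so stokeep = min(5, howmany(10 - 4, 8)) = 1 fully free slab is retained
 * and stofree = 4 are released.  Because stokeep < stofree, only the single
 * kept slab is moved onto the temporary list before the two lists are
 * swapped.
 */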

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg, int domain)
{
	int i;

	if ((keg->uk_flags & UMA_ZONE_NOFREE) != 0)
		return;
	if (domain != UMA_ANYDOMAIN) {
		keg_drain_domain(keg, domain);
	} else {
		for (i = 0; i < vm_ndomains; i++)
			keg_drain_domain(keg, i);
	}
}

static void
zone_reclaim(uma_zone_t zone, int domain, int waitok, bool drain)
{
	/*
	 * Count active reclaim operations in order to interlock with
	 * zone_dtor(), which removes the zone from global lists before
	 * attempting to reclaim items itself.
	 *
	 * The zone may be destroyed while sleeping, so only zone_dtor() should
	 * specify M_WAITOK.
	 */
	ZONE_LOCK(zone);
	if (waitok == M_WAITOK) {
		while (zone->uz_reclaimers > 0)
			msleep(zone, ZONE_LOCKPTR(zone), PVM, "zonedrain", 1);
	}
	zone->uz_reclaimers++;
	ZONE_UNLOCK(zone);
	bucket_cache_reclaim(zone, drain, domain);

	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
		keg_drain(zone->uz_keg, domain);
	ZONE_LOCK(zone);
	zone->uz_reclaimers--;
	if (zone->uz_reclaimers == 0)
		wakeup(zone);
	ZONE_UNLOCK(zone);
}

static void
zone_drain(uma_zone_t zone, void *arg)
{
	int domain;

	domain = (int)(uintptr_t)arg;
	zone_reclaim(zone, domain, M_NOWAIT, true);
}

static void
zone_trim(uma_zone_t zone, void *arg)
{
	int domain;

	domain = (int)(uintptr_t)arg;
	zone_reclaim(zone, domain, M_NOWAIT, false);
}

/*
 * Allocate a new slab for a keg and insert it into the partial slab list.
 * The keg should be unlocked on entry.  If the allocation succeeds it will
 * be locked on return.
 *
 * Arguments:
 *	flags   Wait flags for the item initialization routine
 *	aflags  Wait flags for the slab allocation
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
    int aflags)
{
	uma_domain_t dom;
	uma_alloc allocf;
	uma_slab_t slab;
	unsigned long size;
	uint8_t *mem;
	uint8_t sflags;
	int i;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("keg_alloc_slab: domain %d out of range", domain));

	allocf = keg->uk_allocf;
	slab = NULL;
	mem = NULL;
	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
		uma_hash_slab_t hslab;
		hslab = zone_alloc_item(slabzone(keg->uk_ipers), NULL,
		    domain, aflags);
		if (hslab == NULL)
			goto fail;
		slab = &hslab->uhs_slab;
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		aflags |= M_ZERO;
	else
		aflags &= ~M_ZERO;

	if (keg->uk_flags & UMA_ZONE_NODUMP)
		aflags |= M_NODUMP;

	/* zone is passed for legacy reasons. */
	size = keg->uk_ppera * PAGE_SIZE;
	mem = allocf(zone, size, domain, &sflags, aflags);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
			zone_free_item(slabzone(keg->uk_ipers),
			    slab_tohashslab(slab), NULL, SKIP_NONE);
		goto fail;
	}
	uma_total_inc(size);

	/* For HASH zones all pages go to the same uma_domain. */
	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
		domain = 0;

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE))
		slab = (uma_slab_t )(mem + keg->uk_pgoff);
	else
		slab_tohashslab(slab)->uhs_data = mem;

	if (keg->uk_flags & UMA_ZFLAG_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
			    zone, slab);

	slab->us_freecount = keg->uk_ipers;
	slab->us_flags = sflags;
	slab->us_domain = domain;

	BIT_FILL(keg->uk_ipers, &slab->us_free);
#ifdef INVARIANTS
	BIT_ZERO(keg->uk_ipers, slab_dbg_bits(slab, keg));
#endif

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab_item(slab, keg, i),
			    keg->uk_size, flags) != 0)
				break;
		if (i != keg->uk_ipers) {
			keg_free_slab(keg, slab, i);
			goto fail;
		}
	}
	KEG_LOCK(keg, domain);

	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
	    slab, keg->uk_name, keg);

	if (keg->uk_flags & UMA_ZFLAG_HASH)
		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

	/*
	 * If we got a slab here it's safe to mark it partially used
	 * and return.  We assume that the caller is going to remove
	 * at least one item.
	 */
	dom = &keg->uk_domain[domain];
	LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
	dom->ud_pages += keg->uk_ppera;
	dom->ud_free_items += keg->uk_ipers;

	return (slab);

fail:
	return (NULL);
}

/*
 * This function is intended to be used early on in place of page_alloc().  It
 * performs contiguous physical memory allocations and uses a bump allocator
 * for KVA, so it is usable before the kernel map is initialized.
 */
static void *
startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vm_paddr_t pa;
	vm_page_t m;
	void *mem;
	int pages;
	int i;

	pages = howmany(bytes, PAGE_SIZE);
	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));

	*pflag = UMA_SLAB_BOOT;
	m = vm_page_alloc_contig_domain(NULL, 0, domain,
	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED, pages,
	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT);
	if (m == NULL)
		return (NULL);

	pa = VM_PAGE_TO_PHYS(m);
	for (i = 0; i < pages; i++, pa += PAGE_SIZE) {
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
    defined(__riscv) || defined(__powerpc64__)
		if ((wait & M_NODUMP) == 0)
			dump_add_page(pa);
#endif
	}

	/* Allocate KVA and indirectly advance bootmem. */
	mem = (void *)pmap_map(&bootmem, m->phys_addr,
	    m->phys_addr + (pages * PAGE_SIZE), VM_PROT_READ | VM_PROT_WRITE);
	if ((wait & M_ZERO) != 0)
		bzero(mem, pages * PAGE_SIZE);

	return (mem);
}

static void
startup_free(void *mem, vm_size_t bytes)
{
	vm_offset_t va;
	vm_page_t m;

	va = (vm_offset_t)mem;
	m = PHYS_TO_VM_PAGE(pmap_kextract(va));

	/*
	 * startup_alloc() returns direct-mapped slabs on some platforms.
	 * Avoid unmapping ranges of the direct map.
	 */
	if (va >= bootstart && va + bytes <= bootmem)
		pmap_remove(kernel_pmap, va, va + bytes);
	for (; bytes != 0; bytes -= PAGE_SIZE, m++) {
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
    defined(__riscv) || defined(__powerpc64__)
		dump_drop_page(VM_PAGE_TO_PHYS(m));
#endif
		vm_page_unwire_noq(m);
		vm_page_free(m);
	}
}

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait  Shall we wait?
 *
 * Returns:
 *	A pointer to the alloced memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KERNEL;
	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);

	return (p);
}

static void *
pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	struct pglist alloctail;
	vm_offset_t addr, zkva;
	int cpu, flags;
	vm_page_t p, p_next;
#ifdef NUMA
	struct pcpu *pc;
#endif

	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);

	TAILQ_INIT(&alloctail);
	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
	    malloc2vm_flags(wait);
	*pflag = UMA_SLAB_KERNEL;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu)) {
			p = vm_page_alloc(NULL, 0, flags);
		} else {
#ifndef NUMA
			p = vm_page_alloc(NULL, 0, flags);
#else
			pc = pcpu_find(cpu);
			if (__predict_false(VM_DOMAIN_EMPTY(pc->pc_domain)))
				p = NULL;
			else
				p = vm_page_alloc_domain(NULL, 0,
				    pc->pc_domain, flags);
			if (__predict_false(p == NULL))
				p = vm_page_alloc(NULL, 0, flags);
#endif
		}
		if (__predict_false(p == NULL))
			goto fail;
		TAILQ_INSERT_TAIL(&alloctail, p, listq);
	}
	if ((addr = kva_alloc(bytes)) == 0)
		goto fail;
	zkva = addr;
	TAILQ_FOREACH(p, &alloctail, listq) {
		pmap_qenter(zkva, &p, 1);
		zkva += PAGE_SIZE;
	}
	return ((void*)addr);
fail:
	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
		vm_page_unwire_noq(p);
		vm_page_free(p);
	}
	return (NULL);
}

/*
 * Allocates a number of pages from within an object
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the alloced memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
    int wait)
{
	TAILQ_HEAD(, vm_page) alloctail;
	u_long npages;
	vm_offset_t retkva, zkva;
	vm_page_t p, p_next;
	uma_keg_t keg;

	TAILQ_INIT(&alloctail);
	keg = zone->uz_keg;

	npages = howmany(bytes, PAGE_SIZE);
	while (npages > 0) {
		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
		    VM_ALLOC_NOWAIT));
		if (p != NULL) {
			/*
			 * Since the page does not belong to an object, its
			 * listq is unused.
			 */
			TAILQ_INSERT_TAIL(&alloctail, p, listq);
			npages--;
			continue;
		}
		/*
		 * Page allocation failed, free intermediate pages and
		 * exit.
		 */
		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
			vm_page_unwire_noq(p);
			vm_page_free(p);
		}
		return (NULL);
	}
	*flags = UMA_SLAB_PRIV;
	zkva = keg->uk_kva +
	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
	retkva = zkva;
	TAILQ_FOREACH(p, &alloctail, listq) {
		pmap_qenter(zkva, &p, 1);
		zkva += PAGE_SIZE;
	}

	return ((void *)retkva);
}

/*
 * Allocate physically contiguous pages.
 */
static void *
contig_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{

	*pflag = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
	    bytes, wait, 0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}

/*
 * Frees a number of pages to the system
 *
 * Arguments:
 *	mem   A pointer to the memory to be freed
 *	size  The size of the memory being freed
 *	flags The original p->us_flags field
 *
 * Returns:
 *	Nothing
 */
static void
page_free(void *mem, vm_size_t size, uint8_t flags)
{

	if ((flags & UMA_SLAB_BOOT) != 0) {
		startup_free(mem, size);
		return;
	}

	KASSERT((flags & UMA_SLAB_KERNEL) != 0,
	    ("UMA: page_free used with invalid flags %x", flags));

	kmem_free((vm_offset_t)mem, size);
}

/*
 * Frees pcpu zone allocations
 *
 * Arguments:
 *	mem   A pointer to the memory to be freed
 *	size  The size of the memory being freed
 *	flags The original p->us_flags field
 *
 * Returns:
 *	Nothing
 */
static void
pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
{
	vm_offset_t sva, curva;
	vm_paddr_t paddr;
	vm_page_t m;

	MPASS(size == (mp_maxid+1)*PAGE_SIZE);

	if ((flags & UMA_SLAB_BOOT) != 0) {
		startup_free(mem, size);
		return;
	}

	sva = (vm_offset_t)mem;
	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
		paddr = pmap_kextract(curva);
		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_unwire_noq(m);
		vm_page_free(m);
	}
	pmap_qremove(sva, size >> PAGE_SHIFT);
	kva_free(sva, size);
}

/*
 * Zero fill initializer
 *
 * Arguments/Returns follow uma_init specifications
 */
static int
zero_init(void *mem, int size, int flags)
{
	bzero(mem, size);
	return (0);
}

#ifdef INVARIANTS
static struct noslabbits *
slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
{

	return ((void *)((char *)&slab->us_free + BITSET_SIZE(keg->uk_ipers)));
}
#endif

/*
 * Actual size of embedded struct slab (!OFFPAGE).
 */
static size_t
slab_sizeof(int nitems)
{
	size_t s;

	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems) * SLAB_BITSETS;
	return (roundup(s, UMA_ALIGN_PTR + 1));
}

#define	UMA_FIXPT_SHIFT	31
#define	UMA_FRAC_FIXPT(n, d)						\
	((uint32_t)(((uint64_t)(n) << UMA_FIXPT_SHIFT) / (d)))
#define	UMA_FIXPT_PCT(f)						\
	((u_int)(((uint64_t)100 * (f)) >> UMA_FIXPT_SHIFT))
#define	UMA_PCT_FIXPT(pct)	UMA_FRAC_FIXPT((pct), 100)
#define	UMA_MIN_EFF	UMA_PCT_FIXPT(100 - UMA_MAX_WASTE)
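
/*
 * Illustrative arithmetic: UMA_FRAC_FIXPT(n, d) stores n/d as a 31-bit
 * fixed-point fraction, so UMA_FRAC_FIXPT(1, 2) is 1 << 30 and
 * UMA_FIXPT_PCT(UMA_FRAC_FIXPT(1, 2)) recovers 50 (percent).  With
 * UMA_MAX_WASTE at its usual value of 10, UMA_MIN_EFF corresponds to a 90%
 * slab space efficiency threshold used by keg_layout() below.
 */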
2047 */
2048 static u_int
2049 slab_ipers_hdr(u_int size, u_int rsize, u_int slabsize, bool hdr)
2050 {
2051 u_int ipers;
2052 u_int padpi;
2053
2054 /* The padding between items is not needed after the last item. */
2055 padpi = rsize - size;
2056
2057 if (hdr) {
2058 /*
2059 * Start with the maximum item count and remove items until
2060 * the slab header first fits alongside the allocatable memory.
2061 */
2062 for (ipers = MIN(SLAB_MAX_SETSIZE,
2063 (slabsize + padpi - slab_sizeof(1)) / rsize);
2064 ipers > 0 &&
2065 ipers * rsize - padpi + slab_sizeof(ipers) > slabsize;
2066 ipers--)
2067 continue;
2068 } else {
2069 ipers = MIN((slabsize + padpi) / rsize, SLAB_MAX_SETSIZE);
2070 }
2071
2072 return (ipers);
2073 }
2074
2075 struct keg_layout_result {
2076 u_int format;
2077 u_int slabsize;
2078 u_int ipers;
2079 u_int eff;
2080 };
2081
2082 static void
2083 keg_layout_one(uma_keg_t keg, u_int rsize, u_int slabsize, u_int fmt,
2084 struct keg_layout_result *kl)
2085 {
2086 u_int total;
2087
2088 kl->format = fmt;
2089 kl->slabsize = slabsize;
2090
2091 /* Handle INTERNAL as inline with an extra page. */
2092 if ((fmt & UMA_ZFLAG_INTERNAL) != 0) {
2093 kl->format &= ~UMA_ZFLAG_INTERNAL;
2094 kl->slabsize += PAGE_SIZE;
2095 }
2096
2097 kl->ipers = slab_ipers_hdr(keg->uk_size, rsize, kl->slabsize,
2098 (fmt & UMA_ZFLAG_OFFPAGE) == 0);
2099
2100 /* Account for memory used by an offpage slab header. */
2101 total = kl->slabsize;
2102 if ((fmt & UMA_ZFLAG_OFFPAGE) != 0)
2103 total += slabzone(kl->ipers)->uz_keg->uk_rsize;
2104
2105 kl->eff = UMA_FRAC_FIXPT(kl->ipers * rsize, total);
2106 }
2107
2108 /*
2109 * Determine the format of a uma keg.  This determines where the slab header
2110 * will be placed (inline or offpage) and calculates ipers, rsize, and ppera.
2111 *
2112 * Arguments
2113 * keg  The keg we should initialize
2114 *
2115 * Returns
2116 * Nothing
2117 */
2118 static void
2119 keg_layout(uma_keg_t keg)
2120 {
2121 struct keg_layout_result kl = {}, kl_tmp;
2122 u_int fmts[2];
2123 u_int alignsize;
2124 u_int nfmt;
2125 u_int pages;
2126 u_int rsize;
2127 u_int slabsize;
2128 u_int i, j;
2129
2130 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
2131 (keg->uk_size <= UMA_PCPU_ALLOC_SIZE &&
2132 (keg->uk_flags & UMA_ZONE_CACHESPREAD) == 0),
2133 ("%s: cannot configure for PCPU: keg=%s, size=%u, flags=0x%b",
2134 __func__, keg->uk_name, keg->uk_size, keg->uk_flags,
2135 PRINT_UMA_ZFLAGS));
2136 KASSERT((keg->uk_flags & (UMA_ZFLAG_INTERNAL | UMA_ZONE_VM)) == 0 ||
2137 (keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0,
2138 ("%s: incompatible flags 0x%b", __func__, keg->uk_flags,
2139 PRINT_UMA_ZFLAGS));
2140
2141 alignsize = keg->uk_align + 1;
2142
2143 /*
2144 * Calculate the size of each allocation (rsize) according to
2145 * alignment.  If the requested size is smaller than we have
2146 * allocation bits for, we round it up.
2147 */
2148 rsize = MAX(keg->uk_size, UMA_SMALLEST_UNIT);
2149 rsize = roundup2(rsize, alignsize);
2150
2151 if ((keg->uk_flags & UMA_ZONE_CACHESPREAD) != 0) {
2152 /*
2153 * We want one item to start on every align boundary in a page.
2154 * To do this we will span pages.  We will also extend the item
2155 * by the size of align if it is an even multiple of align.
2156 * Otherwise, it would fall on the same boundary every time.
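 *
 * For example (assuming 4 KB pages and 64-byte cache-line alignment): a
 * 100-byte item first rounds up to rsize 128, an even multiple of the
 * alignment, so it is bumped to 192.  The candidate slab is then
 * 192 * (4096 / 64) = 12288 bytes (three pages); successive item
 * offsets 0, 192, 384, ... cycle through every 64-byte boundary of a
 * page before repeating, whereas with rsize 128 only every other
 * boundary would ever be used.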
2157 */ 2158 if ((rsize & alignsize) == 0) 2159 rsize += alignsize; 2160 slabsize = rsize * (PAGE_SIZE / alignsize); 2161 slabsize = MIN(slabsize, rsize * SLAB_MAX_SETSIZE); 2162 slabsize = MIN(slabsize, UMA_CACHESPREAD_MAX_SIZE); 2163 slabsize = round_page(slabsize); 2164 } else { 2165 /* 2166 * Start with a slab size of as many pages as it takes to 2167 * represent a single item. We will try to fit as many 2168 * additional items into the slab as possible. 2169 */ 2170 slabsize = round_page(keg->uk_size); 2171 } 2172 2173 /* Build a list of all of the available formats for this keg. */ 2174 nfmt = 0; 2175 2176 /* Evaluate an inline slab layout. */ 2177 if ((keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0) 2178 fmts[nfmt++] = 0; 2179 2180 /* TODO: vm_page-embedded slab. */ 2181 2182 /* 2183 * We can't do OFFPAGE if we're internal or if we've been 2184 * asked to not go to the VM for buckets. If we do this we 2185 * may end up going to the VM for slabs which we do not want 2186 * to do if we're UMA_ZONE_VM, which clearly forbids it. 2187 * In those cases, evaluate a pseudo-format called INTERNAL 2188 * which has an inline slab header and one extra page to 2189 * guarantee that it fits. 2190 * 2191 * Otherwise, see if using an OFFPAGE slab will improve our 2192 * efficiency. 2193 */ 2194 if ((keg->uk_flags & (UMA_ZFLAG_INTERNAL | UMA_ZONE_VM)) != 0) 2195 fmts[nfmt++] = UMA_ZFLAG_INTERNAL; 2196 else 2197 fmts[nfmt++] = UMA_ZFLAG_OFFPAGE; 2198 2199 /* 2200 * Choose a slab size and format which satisfy the minimum efficiency. 2201 * Prefer the smallest slab size that meets the constraints. 2202 * 2203 * Start with a minimum slab size, to accommodate CACHESPREAD. Then, 2204 * for small items (up to PAGE_SIZE), the iteration increment is one 2205 * page; and for large items, the increment is one item. 2206 */ 2207 i = (slabsize + rsize - keg->uk_size) / MAX(PAGE_SIZE, rsize); 2208 KASSERT(i >= 1, ("keg %s(%p) flags=0x%b slabsize=%u, rsize=%u, i=%u", 2209 keg->uk_name, keg, keg->uk_flags, PRINT_UMA_ZFLAGS, slabsize, 2210 rsize, i)); 2211 for ( ; ; i++) { 2212 slabsize = (rsize <= PAGE_SIZE) ? ptoa(i) : 2213 round_page(rsize * (i - 1) + keg->uk_size); 2214 2215 for (j = 0; j < nfmt; j++) { 2216 /* Only if we have no viable format yet. */ 2217 if ((fmts[j] & UMA_ZFLAG_INTERNAL) != 0 && 2218 kl.ipers > 0) 2219 continue; 2220 2221 keg_layout_one(keg, rsize, slabsize, fmts[j], &kl_tmp); 2222 if (kl_tmp.eff <= kl.eff) 2223 continue; 2224 2225 kl = kl_tmp; 2226 2227 CTR6(KTR_UMA, "keg %s layout: format %#x " 2228 "(ipers %u * rsize %u) / slabsize %#x = %u%% eff", 2229 keg->uk_name, kl.format, kl.ipers, rsize, 2230 kl.slabsize, UMA_FIXPT_PCT(kl.eff)); 2231 2232 /* Stop when we reach the minimum efficiency. */ 2233 if (kl.eff >= UMA_MIN_EFF) 2234 break; 2235 } 2236 2237 if (kl.eff >= UMA_MIN_EFF || !multipage_slabs || 2238 slabsize >= SLAB_MAX_SETSIZE * rsize || 2239 (keg->uk_flags & (UMA_ZONE_PCPU | UMA_ZONE_CONTIG)) != 0) 2240 break; 2241 } 2242 2243 pages = atop(kl.slabsize); 2244 if ((keg->uk_flags & UMA_ZONE_PCPU) != 0) 2245 pages *= mp_maxid + 1; 2246 2247 keg->uk_rsize = rsize; 2248 keg->uk_ipers = kl.ipers; 2249 keg->uk_ppera = pages; 2250 keg->uk_flags |= kl.format; 2251 2252 /* 2253 * How do we find the slab header if it is offpage or if not all item 2254 * start addresses are in the same page? We could solve the latter 2255 * case with vaddr alignment, but we don't. 
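 *
 * With UMA_ZFLAG_VTOSLAB the header is recovered through the vm_page
 * backing the item's address, which presumes memory the VM can map back
 * to a page it knows about; kegs flagged UMA_ZONE_NOTPAGE cannot make
 * that assumption and instead fall back to a hash table keyed on the
 * slab's data address (UMA_ZFLAG_HASH), as chosen below.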
2256 */ 2257 if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0 || 2258 (keg->uk_ipers - 1) * rsize >= PAGE_SIZE) { 2259 if ((keg->uk_flags & UMA_ZONE_NOTPAGE) != 0) 2260 keg->uk_flags |= UMA_ZFLAG_HASH; 2261 else 2262 keg->uk_flags |= UMA_ZFLAG_VTOSLAB; 2263 } 2264 2265 CTR6(KTR_UMA, "%s: keg=%s, flags=%#x, rsize=%u, ipers=%u, ppera=%u", 2266 __func__, keg->uk_name, keg->uk_flags, rsize, keg->uk_ipers, 2267 pages); 2268 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE, 2269 ("%s: keg=%s, flags=0x%b, rsize=%u, ipers=%u, ppera=%u", __func__, 2270 keg->uk_name, keg->uk_flags, PRINT_UMA_ZFLAGS, rsize, 2271 keg->uk_ipers, pages)); 2272} 2273 2274/* 2275 * Keg header ctor. This initializes all fields, locks, etc. And inserts 2276 * the keg onto the global keg list. 2277 * 2278 * Arguments/Returns follow uma_ctor specifications 2279 * udata Actually uma_kctor_args 2280 */ 2281static int 2282keg_ctor(void *mem, int size, void *udata, int flags) 2283{ 2284 struct uma_kctor_args *arg = udata; 2285 uma_keg_t keg = mem; 2286 uma_zone_t zone; 2287 int i; 2288 2289 bzero(keg, size); 2290 keg->uk_size = arg->size; 2291 keg->uk_init = arg->uminit; 2292 keg->uk_fini = arg->fini; 2293 keg->uk_align = arg->align; 2294 keg->uk_reserve = 0; 2295 keg->uk_flags = arg->flags; 2296 2297 /* 2298 * We use a global round-robin policy by default. Zones with 2299 * UMA_ZONE_FIRSTTOUCH set will use first-touch instead, in which 2300 * case the iterator is never run. 2301 */ 2302 keg->uk_dr.dr_policy = DOMAINSET_RR(); 2303 keg->uk_dr.dr_iter = 0; 2304 2305 /* 2306 * The primary zone is passed to us at keg-creation time. 2307 */ 2308 zone = arg->zone; 2309 keg->uk_name = zone->uz_name; 2310 2311 if (arg->flags & UMA_ZONE_ZINIT) 2312 keg->uk_init = zero_init; 2313 2314 if (arg->flags & UMA_ZONE_MALLOC) 2315 keg->uk_flags |= UMA_ZFLAG_VTOSLAB; 2316 2317#ifndef SMP 2318 keg->uk_flags &= ~UMA_ZONE_PCPU; 2319#endif 2320 2321 keg_layout(keg); 2322 2323 /* 2324 * Use a first-touch NUMA policy for kegs that pmap_extract() will 2325 * work on. Use round-robin for everything else. 2326 * 2327 * Zones may override the default by specifying either. 2328 */ 2329#ifdef NUMA 2330 if ((keg->uk_flags & 2331 (UMA_ZONE_ROUNDROBIN | UMA_ZFLAG_CACHE | UMA_ZONE_NOTPAGE)) == 0) 2332 keg->uk_flags |= UMA_ZONE_FIRSTTOUCH; 2333 else if ((keg->uk_flags & UMA_ZONE_FIRSTTOUCH) == 0) 2334 keg->uk_flags |= UMA_ZONE_ROUNDROBIN; 2335#endif 2336 2337 /* 2338 * If we haven't booted yet we need allocations to go through the 2339 * startup cache until the vm is ready. 2340 */ 2341#ifdef UMA_MD_SMALL_ALLOC 2342 if (keg->uk_ppera == 1) 2343 keg->uk_allocf = uma_small_alloc; 2344 else 2345#endif 2346 if (booted < BOOT_KVA) 2347 keg->uk_allocf = startup_alloc; 2348 else if (keg->uk_flags & UMA_ZONE_PCPU) 2349 keg->uk_allocf = pcpu_page_alloc; 2350 else if ((keg->uk_flags & UMA_ZONE_CONTIG) != 0 && keg->uk_ppera > 1) 2351 keg->uk_allocf = contig_alloc; 2352 else 2353 keg->uk_allocf = page_alloc; 2354#ifdef UMA_MD_SMALL_ALLOC 2355 if (keg->uk_ppera == 1) 2356 keg->uk_freef = uma_small_free; 2357 else 2358#endif 2359 if (keg->uk_flags & UMA_ZONE_PCPU) 2360 keg->uk_freef = pcpu_page_free; 2361 else 2362 keg->uk_freef = page_free; 2363 2364 /* 2365 * Initialize keg's locks. 2366 */ 2367 for (i = 0; i < vm_ndomains; i++) 2368 KEG_LOCK_INIT(keg, i, (arg->flags & UMA_ZONE_MTXCLASS)); 2369 2370 /* 2371 * If we're putting the slab header in the actual page we need to 2372 * figure out where in each page it goes. See slab_sizeof 2373 * definition. 
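 *
 * The header ends up at the tail of the allocation:
 * uk_pgoff = PAGE_SIZE * uk_ppera - slab_sizeof(uk_ipers), e.g. offset
 * 4000 for a single-page slab with a (hypothetical) 96-byte header on a
 * 4 KB page.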
2374 */ 2375 if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE)) { 2376 size_t shsize; 2377 2378 shsize = slab_sizeof(keg->uk_ipers); 2379 keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - shsize; 2380 /* 2381 * The only way the following is possible is if with our 2382 * UMA_ALIGN_PTR adjustments we are now bigger than 2383 * UMA_SLAB_SIZE. I haven't checked whether this is 2384 * mathematically possible for all cases, so we make 2385 * sure here anyway. 2386 */ 2387 KASSERT(keg->uk_pgoff + shsize <= PAGE_SIZE * keg->uk_ppera, 2388 ("zone %s ipers %d rsize %d size %d slab won't fit", 2389 zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size)); 2390 } 2391 2392 if (keg->uk_flags & UMA_ZFLAG_HASH) 2393 hash_alloc(&keg->uk_hash, 0); 2394 2395 CTR3(KTR_UMA, "keg_ctor %p zone %s(%p)", keg, zone->uz_name, zone); 2396 2397 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 2398 2399 rw_wlock(&uma_rwlock); 2400 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 2401 rw_wunlock(&uma_rwlock); 2402 return (0); 2403} 2404 2405static void 2406zone_kva_available(uma_zone_t zone, void *unused) 2407{ 2408 uma_keg_t keg; 2409 2410 if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0) 2411 return; 2412 KEG_GET(zone, keg); 2413 2414 if (keg->uk_allocf == startup_alloc) { 2415 /* Switch to the real allocator. */ 2416 if (keg->uk_flags & UMA_ZONE_PCPU) 2417 keg->uk_allocf = pcpu_page_alloc; 2418 else if ((keg->uk_flags & UMA_ZONE_CONTIG) != 0 && 2419 keg->uk_ppera > 1) 2420 keg->uk_allocf = contig_alloc; 2421 else 2422 keg->uk_allocf = page_alloc; 2423 } 2424} 2425 2426static void 2427zone_alloc_counters(uma_zone_t zone, void *unused) 2428{ 2429 2430 zone->uz_allocs = counter_u64_alloc(M_WAITOK); 2431 zone->uz_frees = counter_u64_alloc(M_WAITOK); 2432 zone->uz_fails = counter_u64_alloc(M_WAITOK); 2433 zone->uz_xdomain = counter_u64_alloc(M_WAITOK); 2434} 2435 2436static void 2437zone_alloc_sysctl(uma_zone_t zone, void *unused) 2438{ 2439 uma_zone_domain_t zdom; 2440 uma_domain_t dom; 2441 uma_keg_t keg; 2442 struct sysctl_oid *oid, *domainoid; 2443 int domains, i, cnt; 2444 static const char *nokeg = "cache zone"; 2445 char *c; 2446 2447 /* 2448 * Make a sysctl safe copy of the zone name by removing 2449 * any special characters and handling dups by appending 2450 * an index. 2451 */ 2452 if (zone->uz_namecnt != 0) { 2453 /* Count the number of decimal digits and '_' separator. */ 2454 for (i = 1, cnt = zone->uz_namecnt; cnt != 0; i++) 2455 cnt /= 10; 2456 zone->uz_ctlname = malloc(strlen(zone->uz_name) + i + 1, 2457 M_UMA, M_WAITOK); 2458 sprintf(zone->uz_ctlname, "%s_%d", zone->uz_name, 2459 zone->uz_namecnt); 2460 } else 2461 zone->uz_ctlname = strdup(zone->uz_name, M_UMA); 2462 for (c = zone->uz_ctlname; *c != '\0'; c++) 2463 if (strchr("./\\ -", *c) != NULL) 2464 *c = '_'; 2465 2466 /* 2467 * Basic parameters at the root. 
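 *
 * For a zone named "foo", for example, the nodes created below appear
 * as vm.uma.foo.size, vm.uma.foo.flags, vm.uma.foo.bucket_size and
 * vm.uma.foo.bucket_size_max, with the name run through the
 * sanitization above (duplicates get an "_<n>" suffix and characters
 * such as '/' or ' ' become '_').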
2468 */ 2469 zone->uz_oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_vm_uma), 2470 OID_AUTO, zone->uz_ctlname, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2471 oid = zone->uz_oid; 2472 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2473 "size", CTLFLAG_RD, &zone->uz_size, 0, "Allocation size"); 2474 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2475 "flags", CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE, 2476 zone, 0, sysctl_handle_uma_zone_flags, "A", 2477 "Allocator configuration flags"); 2478 SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2479 "bucket_size", CTLFLAG_RD, &zone->uz_bucket_size, 0, 2480 "Desired per-cpu cache size"); 2481 SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2482 "bucket_size_max", CTLFLAG_RD, &zone->uz_bucket_size_max, 0, 2483 "Maximum allowed per-cpu cache size"); 2484 2485 /* 2486 * keg if present. 2487 */ 2488 if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0) 2489 domains = vm_ndomains; 2490 else 2491 domains = 1; 2492 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO, 2493 "keg", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2494 keg = zone->uz_keg; 2495 if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0) { 2496 SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2497 "name", CTLFLAG_RD, keg->uk_name, "Keg name"); 2498 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2499 "rsize", CTLFLAG_RD, &keg->uk_rsize, 0, 2500 "Real object size with alignment"); 2501 SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2502 "ppera", CTLFLAG_RD, &keg->uk_ppera, 0, 2503 "pages per-slab allocation"); 2504 SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2505 "ipers", CTLFLAG_RD, &keg->uk_ipers, 0, 2506 "items available per-slab"); 2507 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2508 "align", CTLFLAG_RD, &keg->uk_align, 0, 2509 "item alignment mask"); 2510 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2511 "reserve", CTLFLAG_RD, &keg->uk_reserve, 0, 2512 "number of reserved items"); 2513 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2514 "efficiency", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE, 2515 keg, 0, sysctl_handle_uma_slab_efficiency, "I", 2516 "Slab utilization (100 - internal fragmentation %)"); 2517 domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(oid), 2518 OID_AUTO, "domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2519 for (i = 0; i < domains; i++) { 2520 dom = &keg->uk_domain[i]; 2521 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid), 2522 OID_AUTO, VM_DOMAIN(i)->vmd_name, 2523 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2524 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2525 "pages", CTLFLAG_RD, &dom->ud_pages, 0, 2526 "Total pages currently allocated from VM"); 2527 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2528 "free_items", CTLFLAG_RD, &dom->ud_free_items, 0, 2529 "items free in the slab layer"); 2530 } 2531 } else 2532 SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2533 "name", CTLFLAG_RD, nokeg, "Keg name"); 2534 2535 /* 2536 * Information about zone limits. 
2537 */ 2538 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO, 2539 "limit", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2540 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2541 "items", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE, 2542 zone, 0, sysctl_handle_uma_zone_items, "QU", 2543 "Current number of allocated items if limit is set"); 2544 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2545 "max_items", CTLFLAG_RD, &zone->uz_max_items, 0, 2546 "Maximum number of allocated and cached items"); 2547 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2548 "sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0, 2549 "Number of threads sleeping at limit"); 2550 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2551 "sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0, 2552 "Total zone limit sleeps"); 2553 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2554 "bucket_max", CTLFLAG_RD, &zone->uz_bucket_max, 0, 2555 "Maximum number of items in each domain's bucket cache"); 2556 2557 /* 2558 * Per-domain zone information. 2559 */ 2560 domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), 2561 OID_AUTO, "domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2562 for (i = 0; i < domains; i++) { 2563 zdom = ZDOM_GET(zone, i); 2564 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid), 2565 OID_AUTO, VM_DOMAIN(i)->vmd_name, 2566 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2567 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2568 "nitems", CTLFLAG_RD, &zdom->uzd_nitems, 2569 "number of items in this domain"); 2570 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2571 "imax", CTLFLAG_RD, &zdom->uzd_imax, 2572 "maximum item count in this period"); 2573 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2574 "imin", CTLFLAG_RD, &zdom->uzd_imin, 2575 "minimum item count in this period"); 2576 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2577 "bimin", CTLFLAG_RD, &zdom->uzd_bimin, 2578 "Minimum item count in this batch"); 2579 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2580 "wss", CTLFLAG_RD, &zdom->uzd_wss, 2581 "Working set size"); 2582 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2583 "limin", CTLFLAG_RD, &zdom->uzd_limin, 2584 "Long time minimum item count"); 2585 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2586 "timin", CTLFLAG_RD, &zdom->uzd_timin, 0, 2587 "Time since zero long time minimum item count"); 2588 } 2589 2590 /* 2591 * General statistics. 
2592 */ 2593 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO, 2594 "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2595 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2596 "current", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE, 2597 zone, 1, sysctl_handle_uma_zone_cur, "I", 2598 "Current number of allocated items"); 2599 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2600 "allocs", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE, 2601 zone, 0, sysctl_handle_uma_zone_allocs, "QU", 2602 "Total allocation calls"); 2603 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2604 "frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE, 2605 zone, 0, sysctl_handle_uma_zone_frees, "QU", 2606 "Total free calls"); 2607 SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2608 "fails", CTLFLAG_RD, &zone->uz_fails, 2609 "Number of allocation failures"); 2610 SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2611 "xdomain", CTLFLAG_RD, &zone->uz_xdomain, 2612 "Free calls from the wrong domain"); 2613} 2614 2615struct uma_zone_count { 2616 const char *name; 2617 int count; 2618}; 2619 2620static void 2621zone_count(uma_zone_t zone, void *arg) 2622{ 2623 struct uma_zone_count *cnt; 2624 2625 cnt = arg; 2626 /* 2627 * Some zones are rapidly created with identical names and 2628 * destroyed out of order. This can lead to gaps in the count. 2629 * Use one greater than the maximum observed for this name. 2630 */ 2631 if (strcmp(zone->uz_name, cnt->name) == 0) 2632 cnt->count = MAX(cnt->count, 2633 zone->uz_namecnt + 1); 2634} 2635 2636static void 2637zone_update_caches(uma_zone_t zone) 2638{ 2639 int i; 2640 2641 for (i = 0; i <= mp_maxid; i++) { 2642 cache_set_uz_size(&zone->uz_cpu[i], zone->uz_size); 2643 cache_set_uz_flags(&zone->uz_cpu[i], zone->uz_flags); 2644 } 2645} 2646 2647/* 2648 * Zone header ctor. This initializes all fields, locks, etc. 2649 * 2650 * Arguments/Returns follow uma_ctor specifications 2651 * udata Actually uma_zctor_args 2652 */ 2653static int 2654zone_ctor(void *mem, int size, void *udata, int flags) 2655{ 2656 struct uma_zone_count cnt; 2657 struct uma_zctor_args *arg = udata; 2658 uma_zone_domain_t zdom; 2659 uma_zone_t zone = mem; 2660 uma_zone_t z; 2661 uma_keg_t keg; 2662 int i; 2663 2664 bzero(zone, size); 2665 zone->uz_name = arg->name; 2666 zone->uz_ctor = arg->ctor; 2667 zone->uz_dtor = arg->dtor; 2668 zone->uz_init = NULL; 2669 zone->uz_fini = NULL; 2670 zone->uz_sleeps = 0; 2671 zone->uz_bucket_size = 0; 2672 zone->uz_bucket_size_min = 0; 2673 zone->uz_bucket_size_max = BUCKET_MAX; 2674 zone->uz_flags = (arg->flags & UMA_ZONE_SMR); 2675 zone->uz_warning = NULL; 2676 /* The domain structures follow the cpu structures. */ 2677 zone->uz_bucket_max = ULONG_MAX; 2678 timevalclear(&zone->uz_ratecheck); 2679 2680 /* Count the number of duplicate names. */ 2681 cnt.name = arg->name; 2682 cnt.count = 0; 2683 zone_foreach(zone_count, &cnt); 2684 zone->uz_namecnt = cnt.count; 2685 ZONE_CROSS_LOCK_INIT(zone); 2686 2687 for (i = 0; i < vm_ndomains; i++) { 2688 zdom = ZDOM_GET(zone, i); 2689 ZDOM_LOCK_INIT(zone, zdom, (arg->flags & UMA_ZONE_MTXCLASS)); 2690 STAILQ_INIT(&zdom->uzd_buckets); 2691 } 2692 2693#ifdef INVARIANTS 2694 if (arg->uminit == trash_init && arg->fini == trash_fini) 2695 zone->uz_flags |= UMA_ZFLAG_TRASH | UMA_ZFLAG_CTORDTOR; 2696#endif 2697 2698 /* 2699 * This is a pure cache zone, no kegs. 
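 *
 * Such zones are created with uma_zcache_create(): the caller-supplied
 * import callback refills buckets from its own backing store and the
 * release callback returns items to it, so there is no keg or slab
 * layer behind the per-CPU caches.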
2700 */ 2701 if (arg->import) { 2702 KASSERT((arg->flags & UMA_ZFLAG_CACHE) != 0, 2703 ("zone_ctor: Import specified for non-cache zone.")); 2704 zone->uz_flags = arg->flags; 2705 zone->uz_size = arg->size; 2706 zone->uz_import = arg->import; 2707 zone->uz_release = arg->release; 2708 zone->uz_arg = arg->arg; 2709#ifdef NUMA 2710 /* 2711 * Cache zones are round-robin unless a policy is 2712 * specified because they may have incompatible 2713 * constraints. 2714 */ 2715 if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) == 0) 2716 zone->uz_flags |= UMA_ZONE_ROUNDROBIN; 2717#endif 2718 rw_wlock(&uma_rwlock); 2719 LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); 2720 rw_wunlock(&uma_rwlock); 2721 goto out; 2722 } 2723 2724 /* 2725 * Use the regular zone/keg/slab allocator. 2726 */ 2727 zone->uz_import = zone_import; 2728 zone->uz_release = zone_release; 2729 zone->uz_arg = zone; 2730 keg = arg->keg; 2731 2732 if (arg->flags & UMA_ZONE_SECONDARY) { 2733 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0, 2734 ("Secondary zone requested UMA_ZFLAG_INTERNAL")); 2735 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 2736 zone->uz_init = arg->uminit; 2737 zone->uz_fini = arg->fini; 2738 zone->uz_flags |= UMA_ZONE_SECONDARY; 2739 rw_wlock(&uma_rwlock); 2740 ZONE_LOCK(zone); 2741 LIST_FOREACH(z, &keg->uk_zones, uz_link) { 2742 if (LIST_NEXT(z, uz_link) == NULL) { 2743 LIST_INSERT_AFTER(z, zone, uz_link); 2744 break; 2745 } 2746 } 2747 ZONE_UNLOCK(zone); 2748 rw_wunlock(&uma_rwlock); 2749 } else if (keg == NULL) { 2750 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 2751 arg->align, arg->flags)) == NULL) 2752 return (ENOMEM); 2753 } else { 2754 struct uma_kctor_args karg; 2755 int error; 2756 2757 /* We should only be here from uma_startup() */ 2758 karg.size = arg->size; 2759 karg.uminit = arg->uminit; 2760 karg.fini = arg->fini; 2761 karg.align = arg->align; 2762 karg.flags = (arg->flags & ~UMA_ZONE_SMR); 2763 karg.zone = zone; 2764 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 2765 flags); 2766 if (error) 2767 return (error); 2768 } 2769 2770 /* Inherit properties from the keg. */ 2771 zone->uz_keg = keg; 2772 zone->uz_size = keg->uk_size; 2773 zone->uz_flags |= (keg->uk_flags & 2774 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); 2775 2776out: 2777 if (booted >= BOOT_PCPU) { 2778 zone_alloc_counters(zone, NULL); 2779 if (booted >= BOOT_RUNNING) 2780 zone_alloc_sysctl(zone, NULL); 2781 } else { 2782 zone->uz_allocs = EARLY_COUNTER; 2783 zone->uz_frees = EARLY_COUNTER; 2784 zone->uz_fails = EARLY_COUNTER; 2785 } 2786 2787 /* Caller requests a private SMR context. */ 2788 if ((zone->uz_flags & UMA_ZONE_SMR) != 0) 2789 zone->uz_smr = smr_create(zone->uz_name, 0, 0); 2790 2791 KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) != 2792 (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET), 2793 ("Invalid zone flag combination")); 2794 if (arg->flags & UMA_ZFLAG_INTERNAL) 2795 zone->uz_bucket_size_max = zone->uz_bucket_size = 0; 2796 if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0) 2797 zone->uz_bucket_size = BUCKET_MAX; 2798 else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0) 2799 zone->uz_bucket_size = 0; 2800 else 2801 zone->uz_bucket_size = bucket_select(zone->uz_size); 2802 zone->uz_bucket_size_min = zone->uz_bucket_size; 2803 if (zone->uz_dtor != NULL || zone->uz_ctor != NULL) 2804 zone->uz_flags |= UMA_ZFLAG_CTORDTOR; 2805 zone_update_caches(zone); 2806 2807 return (0); 2808} 2809 2810/* 2811 * Keg header dtor. 
This frees all data, destroys locks, frees the hash 2812 * table and removes the keg from the global list. 2813 * 2814 * Arguments/Returns follow uma_dtor specifications 2815 * udata unused 2816 */ 2817static void 2818keg_dtor(void *arg, int size, void *udata) 2819{ 2820 uma_keg_t keg; 2821 uint32_t free, pages; 2822 int i; 2823 2824 keg = (uma_keg_t)arg; 2825 free = pages = 0; 2826 for (i = 0; i < vm_ndomains; i++) { 2827 free += keg->uk_domain[i].ud_free_items; 2828 pages += keg->uk_domain[i].ud_pages; 2829 KEG_LOCK_FINI(keg, i); 2830 } 2831 if (pages != 0) 2832 printf("Freed UMA keg (%s) was not empty (%u items). " 2833 " Lost %u pages of memory.\n", 2834 keg->uk_name ? keg->uk_name : "", 2835 pages / keg->uk_ppera * keg->uk_ipers - free, pages); 2836 2837 hash_free(&keg->uk_hash); 2838} 2839 2840/* 2841 * Zone header dtor. 2842 * 2843 * Arguments/Returns follow uma_dtor specifications 2844 * udata unused 2845 */ 2846static void 2847zone_dtor(void *arg, int size, void *udata) 2848{ 2849 uma_zone_t zone; 2850 uma_keg_t keg; 2851 int i; 2852 2853 zone = (uma_zone_t)arg; 2854 2855 sysctl_remove_oid(zone->uz_oid, 1, 1); 2856 2857 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) 2858 cache_drain(zone); 2859 2860 rw_wlock(&uma_rwlock); 2861 LIST_REMOVE(zone, uz_link); 2862 rw_wunlock(&uma_rwlock); 2863 if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) { 2864 keg = zone->uz_keg; 2865 keg->uk_reserve = 0; 2866 } 2867 zone_reclaim(zone, UMA_ANYDOMAIN, M_WAITOK, true); 2868 2869 /* 2870 * We only destroy kegs from non secondary/non cache zones. 2871 */ 2872 if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) { 2873 keg = zone->uz_keg; 2874 rw_wlock(&uma_rwlock); 2875 LIST_REMOVE(keg, uk_link); 2876 rw_wunlock(&uma_rwlock); 2877 zone_free_item(kegs, keg, NULL, SKIP_NONE); 2878 } 2879 counter_u64_free(zone->uz_allocs); 2880 counter_u64_free(zone->uz_frees); 2881 counter_u64_free(zone->uz_fails); 2882 counter_u64_free(zone->uz_xdomain); 2883 free(zone->uz_ctlname, M_UMA); 2884 for (i = 0; i < vm_ndomains; i++) 2885 ZDOM_LOCK_FINI(ZDOM_GET(zone, i)); 2886 ZONE_CROSS_LOCK_FINI(zone); 2887} 2888 2889static void 2890zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *arg), void *arg) 2891{ 2892 uma_keg_t keg; 2893 uma_zone_t zone; 2894 2895 LIST_FOREACH(keg, &uma_kegs, uk_link) { 2896 LIST_FOREACH(zone, &keg->uk_zones, uz_link) 2897 zfunc(zone, arg); 2898 } 2899 LIST_FOREACH(zone, &uma_cachezones, uz_link) 2900 zfunc(zone, arg); 2901} 2902 2903/* 2904 * Traverses every zone in the system and calls a callback 2905 * 2906 * Arguments: 2907 * zfunc A pointer to a function which accepts a zone 2908 * as an argument. 2909 * 2910 * Returns: 2911 * Nothing 2912 */ 2913static void 2914zone_foreach(void (*zfunc)(uma_zone_t, void *arg), void *arg) 2915{ 2916 2917 rw_rlock(&uma_rwlock); 2918 zone_foreach_unlocked(zfunc, arg); 2919 rw_runlock(&uma_rwlock); 2920} 2921 2922/* 2923 * Initialize the kernel memory allocator. This is done after pages can be 2924 * allocated but before general KVA is available. 
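 *
 * This is the first of several boot stages visible below:
 * uma_startup2() switches kegs away from startup_alloc() once KVA is
 * available, uma_startup_pcpu() attaches real per-CPU counters at
 * SI_SUB_COUNTER, and uma_startup3() registers the sysctl nodes and
 * starts the periodic uma_timeout() callout.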
2925 */ 2926void 2927uma_startup1(vm_offset_t virtual_avail) 2928{ 2929 struct uma_zctor_args args; 2930 size_t ksize, zsize, size; 2931 uma_keg_t primarykeg; 2932 uintptr_t m; 2933 int domain; 2934 uint8_t pflag; 2935 2936 bootstart = bootmem = virtual_avail; 2937 2938 rw_init(&uma_rwlock, "UMA lock"); 2939 sx_init(&uma_reclaim_lock, "umareclaim"); 2940 2941 ksize = sizeof(struct uma_keg) + 2942 (sizeof(struct uma_domain) * vm_ndomains); 2943 ksize = roundup(ksize, UMA_SUPER_ALIGN); 2944 zsize = sizeof(struct uma_zone) + 2945 (sizeof(struct uma_cache) * (mp_maxid + 1)) + 2946 (sizeof(struct uma_zone_domain) * vm_ndomains); 2947 zsize = roundup(zsize, UMA_SUPER_ALIGN); 2948 2949 /* Allocate the zone of zones, zone of kegs, and zone of zones keg. */ 2950 size = (zsize * 2) + ksize; 2951 for (domain = 0; domain < vm_ndomains; domain++) { 2952 m = (uintptr_t)startup_alloc(NULL, size, domain, &pflag, 2953 M_NOWAIT | M_ZERO); 2954 if (m != 0) 2955 break; 2956 } 2957 zones = (uma_zone_t)m; 2958 m += zsize; 2959 kegs = (uma_zone_t)m; 2960 m += zsize; 2961 primarykeg = (uma_keg_t)m; 2962 2963 /* "manually" create the initial zone */ 2964 memset(&args, 0, sizeof(args)); 2965 args.name = "UMA Kegs"; 2966 args.size = ksize; 2967 args.ctor = keg_ctor; 2968 args.dtor = keg_dtor; 2969 args.uminit = zero_init; 2970 args.fini = NULL; 2971 args.keg = primarykeg; 2972 args.align = UMA_SUPER_ALIGN - 1; 2973 args.flags = UMA_ZFLAG_INTERNAL; 2974 zone_ctor(kegs, zsize, &args, M_WAITOK); 2975 2976 args.name = "UMA Zones"; 2977 args.size = zsize; 2978 args.ctor = zone_ctor; 2979 args.dtor = zone_dtor; 2980 args.uminit = zero_init; 2981 args.fini = NULL; 2982 args.keg = NULL; 2983 args.align = UMA_SUPER_ALIGN - 1; 2984 args.flags = UMA_ZFLAG_INTERNAL; 2985 zone_ctor(zones, zsize, &args, M_WAITOK); 2986 2987 /* Now make zones for slab headers */ 2988 slabzones[0] = uma_zcreate("UMA Slabs 0", SLABZONE0_SIZE, 2989 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 2990 slabzones[1] = uma_zcreate("UMA Slabs 1", SLABZONE1_SIZE, 2991 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 2992 2993 hashzone = uma_zcreate("UMA Hash", 2994 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, 2995 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 2996 2997 bucket_init(); 2998 smr_init(); 2999} 3000 3001#ifndef UMA_MD_SMALL_ALLOC 3002extern void vm_radix_reserve_kva(void); 3003#endif 3004 3005/* 3006 * Advertise the availability of normal kva allocations and switch to 3007 * the default back-end allocator. Marks the KVA we consumed on startup 3008 * as used in the map. 3009 */ 3010void 3011uma_startup2(void) 3012{ 3013 3014 if (bootstart != bootmem) { 3015 vm_map_lock(kernel_map); 3016 (void)vm_map_insert(kernel_map, NULL, 0, bootstart, bootmem, 3017 VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT); 3018 vm_map_unlock(kernel_map); 3019 } 3020 3021#ifndef UMA_MD_SMALL_ALLOC 3022 /* Set up radix zone to use noobj_alloc. */ 3023 vm_radix_reserve_kva(); 3024#endif 3025 3026 booted = BOOT_KVA; 3027 zone_foreach_unlocked(zone_kva_available, NULL); 3028 bucket_enable(); 3029} 3030 3031/* 3032 * Allocate counters as early as possible so that boot-time allocations are 3033 * accounted more precisely. 3034 */ 3035static void 3036uma_startup_pcpu(void *arg __unused) 3037{ 3038 3039 zone_foreach_unlocked(zone_alloc_counters, NULL); 3040 booted = BOOT_PCPU; 3041} 3042SYSINIT(uma_startup_pcpu, SI_SUB_COUNTER, SI_ORDER_ANY, uma_startup_pcpu, NULL); 3043 3044/* 3045 * Finish our initialization steps. 
3046 */ 3047static void 3048uma_startup3(void *arg __unused) 3049{ 3050 3051#ifdef INVARIANTS 3052 TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor); 3053 uma_dbg_cnt = counter_u64_alloc(M_WAITOK); 3054 uma_skip_cnt = counter_u64_alloc(M_WAITOK); 3055#endif 3056 zone_foreach_unlocked(zone_alloc_sysctl, NULL); 3057 callout_init(&uma_callout, 1); 3058 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 3059 booted = BOOT_RUNNING; 3060 3061 EVENTHANDLER_REGISTER(shutdown_post_sync, uma_shutdown, NULL, 3062 EVENTHANDLER_PRI_FIRST); 3063} 3064SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL); 3065 3066static void 3067uma_shutdown(void) 3068{ 3069 3070 booted = BOOT_SHUTDOWN; 3071} 3072 3073static uma_keg_t 3074uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, 3075 int align, uint32_t flags) 3076{ 3077 struct uma_kctor_args args; 3078 3079 args.size = size; 3080 args.uminit = uminit; 3081 args.fini = fini; 3082 args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align; 3083 args.flags = flags; 3084 args.zone = zone; 3085 return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK)); 3086} 3087 3088/* Public functions */ 3089/* See uma.h */ 3090void 3091uma_set_align(int align) 3092{ 3093 3094 if (align != UMA_ALIGN_CACHE) 3095 uma_align_cache = align; 3096} 3097 3098/* See uma.h */ 3099uma_zone_t 3100uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor, 3101 uma_init uminit, uma_fini fini, int align, uint32_t flags) 3102 3103{ 3104 struct uma_zctor_args args; 3105 uma_zone_t res; 3106 3107 KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"", 3108 align, name)); 3109 3110 /* This stuff is essential for the zone ctor */ 3111 memset(&args, 0, sizeof(args)); 3112 args.name = name; 3113 args.size = size; 3114 args.ctor = ctor; 3115 args.dtor = dtor; 3116 args.uminit = uminit; 3117 args.fini = fini; 3118#ifdef INVARIANTS 3119 /* 3120 * Inject procedures which check for memory use after free if we are 3121 * allowed to scramble the memory while it is not allocated. This 3122 * requires that: UMA is actually able to access the memory, no init 3123 * or fini procedures, no dependency on the initial value of the 3124 * memory, and no (legitimate) use of the memory after free. Note, 3125 * the ctor and dtor do not need to be empty. 
3126 */ 3127 if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOTOUCH | 3128 UMA_ZONE_NOFREE))) && uminit == NULL && fini == NULL) { 3129 args.uminit = trash_init; 3130 args.fini = trash_fini; 3131 } 3132#endif 3133 args.align = align; 3134 args.flags = flags; 3135 args.keg = NULL; 3136 3137 sx_xlock(&uma_reclaim_lock); 3138 res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); 3139 sx_xunlock(&uma_reclaim_lock); 3140 3141 return (res); 3142} 3143 3144/* See uma.h */ 3145uma_zone_t 3146uma_zsecond_create(const char *name, uma_ctor ctor, uma_dtor dtor, 3147 uma_init zinit, uma_fini zfini, uma_zone_t primary) 3148{ 3149 struct uma_zctor_args args; 3150 uma_keg_t keg; 3151 uma_zone_t res; 3152 3153 keg = primary->uz_keg; 3154 memset(&args, 0, sizeof(args)); 3155 args.name = name; 3156 args.size = keg->uk_size; 3157 args.ctor = ctor; 3158 args.dtor = dtor; 3159 args.uminit = zinit; 3160 args.fini = zfini; 3161 args.align = keg->uk_align; 3162 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; 3163 args.keg = keg; 3164 3165 sx_xlock(&uma_reclaim_lock); 3166 res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); 3167 sx_xunlock(&uma_reclaim_lock); 3168 3169 return (res); 3170} 3171 3172/* See uma.h */ 3173uma_zone_t 3174uma_zcache_create(const char *name, int size, uma_ctor ctor, uma_dtor dtor, 3175 uma_init zinit, uma_fini zfini, uma_import zimport, uma_release zrelease, 3176 void *arg, int flags) 3177{ 3178 struct uma_zctor_args args; 3179 3180 memset(&args, 0, sizeof(args)); 3181 args.name = name; 3182 args.size = size; 3183 args.ctor = ctor; 3184 args.dtor = dtor; 3185 args.uminit = zinit; 3186 args.fini = zfini; 3187 args.import = zimport; 3188 args.release = zrelease; 3189 args.arg = arg; 3190 args.align = 0; 3191 args.flags = flags | UMA_ZFLAG_CACHE; 3192 3193 return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK)); 3194} 3195 3196/* See uma.h */ 3197void 3198uma_zdestroy(uma_zone_t zone) 3199{ 3200 3201 /* 3202 * Large slabs are expensive to reclaim, so don't bother doing 3203 * unnecessary work if we're shutting down. 3204 */ 3205 if (booted == BOOT_SHUTDOWN && 3206 zone->uz_fini == NULL && zone->uz_release == zone_release) 3207 return; 3208 sx_xlock(&uma_reclaim_lock); 3209 zone_free_item(zones, zone, NULL, SKIP_NONE); 3210 sx_xunlock(&uma_reclaim_lock); 3211} 3212 3213void 3214uma_zwait(uma_zone_t zone) 3215{ 3216 3217 if ((zone->uz_flags & UMA_ZONE_SMR) != 0) 3218 uma_zfree_smr(zone, uma_zalloc_smr(zone, M_WAITOK)); 3219 else if ((zone->uz_flags & UMA_ZONE_PCPU) != 0) 3220 uma_zfree_pcpu(zone, uma_zalloc_pcpu(zone, M_WAITOK)); 3221 else 3222 uma_zfree(zone, uma_zalloc(zone, M_WAITOK)); 3223} 3224 3225void * 3226uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags) 3227{ 3228 void *item, *pcpu_item; 3229#ifdef SMP 3230 int i; 3231 3232 MPASS(zone->uz_flags & UMA_ZONE_PCPU); 3233#endif 3234 item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO); 3235 if (item == NULL) 3236 return (NULL); 3237 pcpu_item = zpcpu_base_to_offset(item); 3238 if (flags & M_ZERO) { 3239#ifdef SMP 3240 for (i = 0; i <= mp_maxid; i++) 3241 bzero(zpcpu_get_cpu(pcpu_item, i), zone->uz_size); 3242#else 3243 bzero(item, zone->uz_size); 3244#endif 3245 } 3246 return (pcpu_item); 3247} 3248 3249/* 3250 * A stub while both regular and pcpu cases are identical. 
3251 */ 3252void 3253uma_zfree_pcpu_arg(uma_zone_t zone, void *pcpu_item, void *udata) 3254{ 3255 void *item; 3256 3257#ifdef SMP 3258 MPASS(zone->uz_flags & UMA_ZONE_PCPU); 3259#endif 3260 3261 /* uma_zfree_pcu_*(..., NULL) does nothing, to match free(9). */ 3262 if (pcpu_item == NULL) 3263 return; 3264 3265 item = zpcpu_offset_to_base(pcpu_item); 3266 uma_zfree_arg(zone, item, udata); 3267} 3268 3269static inline void * 3270item_ctor(uma_zone_t zone, int uz_flags, int size, void *udata, int flags, 3271 void *item) 3272{ 3273#ifdef INVARIANTS 3274 bool skipdbg; 3275 3276 skipdbg = uma_dbg_zskip(zone, item); 3277 if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 && 3278 zone->uz_ctor != trash_ctor) 3279 trash_ctor(item, size, udata, flags); 3280#endif 3281 /* Check flags before loading ctor pointer. */ 3282 if (__predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0) && 3283 __predict_false(zone->uz_ctor != NULL) && 3284 zone->uz_ctor(item, size, udata, flags) != 0) { 3285 counter_u64_add(zone->uz_fails, 1); 3286 zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT); 3287 return (NULL); 3288 } 3289#ifdef INVARIANTS 3290 if (!skipdbg) 3291 uma_dbg_alloc(zone, NULL, item); 3292#endif 3293 if (__predict_false(flags & M_ZERO)) 3294 return (memset(item, 0, size)); 3295 3296 return (item); 3297} 3298 3299static inline void 3300item_dtor(uma_zone_t zone, void *item, int size, void *udata, 3301 enum zfreeskip skip) 3302{ 3303#ifdef INVARIANTS 3304 bool skipdbg; 3305 3306 skipdbg = uma_dbg_zskip(zone, item); 3307 if (skip == SKIP_NONE && !skipdbg) { 3308 if ((zone->uz_flags & UMA_ZONE_MALLOC) != 0) 3309 uma_dbg_free(zone, udata, item); 3310 else 3311 uma_dbg_free(zone, NULL, item); 3312 } 3313#endif 3314 if (__predict_true(skip < SKIP_DTOR)) { 3315 if (zone->uz_dtor != NULL) 3316 zone->uz_dtor(item, size, udata); 3317#ifdef INVARIANTS 3318 if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 && 3319 zone->uz_dtor != trash_dtor) 3320 trash_dtor(item, size, udata); 3321#endif 3322 } 3323} 3324 3325#ifdef NUMA 3326static int 3327item_domain(void *item) 3328{ 3329 int domain; 3330 3331 domain = vm_phys_domain(vtophys(item)); 3332 KASSERT(domain >= 0 && domain < vm_ndomains, 3333 ("%s: unknown domain for item %p", __func__, item)); 3334 return (domain); 3335} 3336#endif 3337 3338#if defined(INVARIANTS) || defined(DEBUG_MEMGUARD) || defined(WITNESS) 3339#define UMA_ZALLOC_DEBUG 3340static int 3341uma_zalloc_debug(uma_zone_t zone, void **itemp, void *udata, int flags) 3342{ 3343 int error; 3344 3345 error = 0; 3346#ifdef WITNESS 3347 if (flags & M_WAITOK) { 3348 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3349 "uma_zalloc_debug: zone \"%s\"", zone->uz_name); 3350 } 3351#endif 3352 3353#ifdef INVARIANTS 3354 KASSERT((flags & M_EXEC) == 0, 3355 ("uma_zalloc_debug: called with M_EXEC")); 3356 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 3357 ("uma_zalloc_debug: called within spinlock or critical section")); 3358 KASSERT((zone->uz_flags & UMA_ZONE_PCPU) == 0 || (flags & M_ZERO) == 0, 3359 ("uma_zalloc_debug: allocating from a pcpu zone with M_ZERO")); 3360#endif 3361 3362#ifdef DEBUG_MEMGUARD 3363 if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && memguard_cmp_zone(zone)) { 3364 void *item; 3365 item = memguard_alloc(zone->uz_size, flags); 3366 if (item != NULL) { 3367 error = EJUSTRETURN; 3368 if (zone->uz_init != NULL && 3369 zone->uz_init(item, zone->uz_size, flags) != 0) { 3370 *itemp = NULL; 3371 return (error); 3372 } 3373 if (zone->uz_ctor != NULL && 3374 zone->uz_ctor(item, 
zone->uz_size, udata, 3375 flags) != 0) { 3376 counter_u64_add(zone->uz_fails, 1); 3377 zone->uz_fini(item, zone->uz_size); 3378 *itemp = NULL; 3379 return (error); 3380 } 3381 *itemp = item; 3382 return (error); 3383 } 3384 /* This is unfortunate but should not be fatal. */ 3385 } 3386#endif 3387 return (error); 3388} 3389 3390static int 3391uma_zfree_debug(uma_zone_t zone, void *item, void *udata) 3392{ 3393 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 3394 ("uma_zfree_debug: called with spinlock or critical section held")); 3395 3396#ifdef DEBUG_MEMGUARD 3397 if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && is_memguard_addr(item)) { 3398 if (zone->uz_dtor != NULL) 3399 zone->uz_dtor(item, zone->uz_size, udata); 3400 if (zone->uz_fini != NULL) 3401 zone->uz_fini(item, zone->uz_size); 3402 memguard_free(item); 3403 return (EJUSTRETURN); 3404 } 3405#endif 3406 return (0); 3407} 3408#endif 3409 3410static inline void * 3411cache_alloc_item(uma_zone_t zone, uma_cache_t cache, uma_cache_bucket_t bucket, 3412 void *udata, int flags) 3413{ 3414 void *item; 3415 int size, uz_flags; 3416 3417 item = cache_bucket_pop(cache, bucket); 3418 size = cache_uz_size(cache); 3419 uz_flags = cache_uz_flags(cache); 3420 critical_exit(); 3421 return (item_ctor(zone, uz_flags, size, udata, flags, item)); 3422} 3423 3424static __noinline void * 3425cache_alloc_retry(uma_zone_t zone, uma_cache_t cache, void *udata, int flags) 3426{ 3427 uma_cache_bucket_t bucket; 3428 int domain; 3429 3430 while (cache_alloc(zone, cache, udata, flags)) { 3431 cache = &zone->uz_cpu[curcpu]; 3432 bucket = &cache->uc_allocbucket; 3433 if (__predict_false(bucket->ucb_cnt == 0)) 3434 continue; 3435 return (cache_alloc_item(zone, cache, bucket, udata, flags)); 3436 } 3437 critical_exit(); 3438 3439 /* 3440 * We can not get a bucket so try to return a single item. 3441 */ 3442 if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH) 3443 domain = PCPU_GET(domain); 3444 else 3445 domain = UMA_ANYDOMAIN; 3446 return (zone_alloc_item(zone, udata, domain, flags)); 3447} 3448 3449/* See uma.h */ 3450void * 3451uma_zalloc_smr(uma_zone_t zone, int flags) 3452{ 3453 uma_cache_bucket_t bucket; 3454 uma_cache_t cache; 3455 3456#ifdef UMA_ZALLOC_DEBUG 3457 void *item; 3458 3459 KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0, 3460 ("uma_zalloc_arg: called with non-SMR zone.")); 3461 if (uma_zalloc_debug(zone, &item, NULL, flags) == EJUSTRETURN) 3462 return (item); 3463#endif 3464 3465 critical_enter(); 3466 cache = &zone->uz_cpu[curcpu]; 3467 bucket = &cache->uc_allocbucket; 3468 if (__predict_false(bucket->ucb_cnt == 0)) 3469 return (cache_alloc_retry(zone, cache, NULL, flags)); 3470 return (cache_alloc_item(zone, cache, bucket, NULL, flags)); 3471} 3472 3473/* See uma.h */ 3474void * 3475uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) 3476{ 3477 uma_cache_bucket_t bucket; 3478 uma_cache_t cache; 3479 3480 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 3481 random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); 3482 3483 /* This is the fast path allocation */ 3484 CTR3(KTR_UMA, "uma_zalloc_arg zone %s(%p) flags %d", zone->uz_name, 3485 zone, flags); 3486 3487#ifdef UMA_ZALLOC_DEBUG 3488 void *item; 3489 3490 KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0, 3491 ("uma_zalloc_arg: called with SMR zone.")); 3492 if (uma_zalloc_debug(zone, &item, udata, flags) == EJUSTRETURN) 3493 return (item); 3494#endif 3495 3496 /* 3497 * If possible, allocate from the per-CPU cache. 
There are two 3498 * requirements for safe access to the per-CPU cache: (1) the thread 3499 * accessing the cache must not be preempted or yield during access, 3500 * and (2) the thread must not migrate CPUs without switching which 3501 * cache it accesses. We rely on a critical section to prevent 3502 * preemption and migration. We release the critical section in 3503 * order to acquire the zone mutex if we are unable to allocate from 3504 * the current cache; when we re-acquire the critical section, we 3505 * must detect and handle migration if it has occurred. 3506 */ 3507 critical_enter(); 3508 cache = &zone->uz_cpu[curcpu]; 3509 bucket = &cache->uc_allocbucket; 3510 if (__predict_false(bucket->ucb_cnt == 0)) 3511 return (cache_alloc_retry(zone, cache, udata, flags)); 3512 return (cache_alloc_item(zone, cache, bucket, udata, flags)); 3513} 3514 3515/* 3516 * Replenish an alloc bucket and possibly restore an old one. Called in 3517 * a critical section. Returns in a critical section. 3518 * 3519 * A false return value indicates an allocation failure. 3520 * A true return value indicates success and the caller should retry. 3521 */ 3522static __noinline bool 3523cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags) 3524{ 3525 uma_bucket_t bucket; 3526 int curdomain, domain; 3527 bool new; 3528 3529 CRITICAL_ASSERT(curthread); 3530 3531 /* 3532 * If we have run out of items in our alloc bucket see 3533 * if we can switch with the free bucket. 3534 * 3535 * SMR Zones can't re-use the free bucket until the sequence has 3536 * expired. 3537 */ 3538 if ((cache_uz_flags(cache) & UMA_ZONE_SMR) == 0 && 3539 cache->uc_freebucket.ucb_cnt != 0) { 3540 cache_bucket_swap(&cache->uc_freebucket, 3541 &cache->uc_allocbucket); 3542 return (true); 3543 } 3544 3545 /* 3546 * Discard any empty allocation bucket while we hold no locks. 3547 */ 3548 bucket = cache_bucket_unload_alloc(cache); 3549 critical_exit(); 3550 3551 if (bucket != NULL) { 3552 KASSERT(bucket->ub_cnt == 0, 3553 ("cache_alloc: Entered with non-empty alloc bucket.")); 3554 bucket_free(zone, bucket, udata); 3555 } 3556 3557 /* 3558 * Attempt to retrieve the item from the per-CPU cache has failed, so 3559 * we must go back to the zone. This requires the zdom lock, so we 3560 * must drop the critical section, then re-acquire it when we go back 3561 * to the cache. Since the critical section is released, we may be 3562 * preempted or migrate. As such, make sure not to maintain any 3563 * thread-local state specific to the cache from prior to releasing 3564 * the critical section. 3565 */ 3566 domain = PCPU_GET(domain); 3567 if ((cache_uz_flags(cache) & UMA_ZONE_ROUNDROBIN) != 0 || 3568 VM_DOMAIN_EMPTY(domain)) 3569 domain = zone_domain_highest(zone, domain); 3570 bucket = cache_fetch_bucket(zone, cache, domain); 3571 if (bucket == NULL && zone->uz_bucket_size != 0 && !bucketdisable) { 3572 bucket = zone_alloc_bucket(zone, udata, domain, flags); 3573 new = true; 3574 } else { 3575 new = false; 3576 } 3577 3578 CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p", 3579 zone->uz_name, zone, bucket); 3580 if (bucket == NULL) { 3581 critical_enter(); 3582 return (false); 3583 } 3584 3585 /* 3586 * See if we lost the race or were migrated. Cache the 3587 * initialized bucket to make this less likely or claim 3588 * the memory directly. 
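 *
 * "Lost the race" here means that, while we were outside the critical
 * section, another thread refilled this CPU's alloc bucket, or we
 * migrated to a CPU whose domain no longer matches the bucket we just
 * obtained; in that case the bucket is handed to the per-domain cache
 * below instead of being installed locally.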
3589 */ 3590 critical_enter(); 3591 cache = &zone->uz_cpu[curcpu]; 3592 if (cache->uc_allocbucket.ucb_bucket == NULL && 3593 ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) == 0 || 3594 (curdomain = PCPU_GET(domain)) == domain || 3595 VM_DOMAIN_EMPTY(curdomain))) { 3596 if (new) 3597 atomic_add_long(&ZDOM_GET(zone, domain)->uzd_imax, 3598 bucket->ub_cnt); 3599 cache_bucket_load_alloc(cache, bucket); 3600 return (true); 3601 } 3602 3603 /* 3604 * We lost the race, release this bucket and start over. 3605 */ 3606 critical_exit(); 3607 zone_put_bucket(zone, domain, bucket, udata, !new); 3608 critical_enter(); 3609 3610 return (true); 3611} 3612 3613void * 3614uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags) 3615{ 3616#ifdef NUMA 3617 uma_bucket_t bucket; 3618 uma_zone_domain_t zdom; 3619 void *item; 3620#endif 3621 3622 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 3623 random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); 3624 3625 /* This is the fast path allocation */ 3626 CTR4(KTR_UMA, "uma_zalloc_domain zone %s(%p) domain %d flags %d", 3627 zone->uz_name, zone, domain, flags); 3628 3629 if (flags & M_WAITOK) { 3630 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3631 "uma_zalloc_domain: zone \"%s\"", zone->uz_name); 3632 } 3633 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 3634 ("uma_zalloc_domain: called with spinlock or critical section held")); 3635 KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0, 3636 ("uma_zalloc_domain: called with SMR zone.")); 3637#ifdef NUMA 3638 KASSERT((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0, 3639 ("uma_zalloc_domain: called with non-FIRSTTOUCH zone.")); 3640 3641 if (vm_ndomains == 1) 3642 return (uma_zalloc_arg(zone, udata, flags)); 3643 3644 /* 3645 * Try to allocate from the bucket cache before falling back to the keg. 3646 * We could try harder and attempt to allocate from per-CPU caches or 3647 * the per-domain cross-domain buckets, but the complexity is probably 3648 * not worth it. It is more important that frees of previous 3649 * cross-domain allocations do not blow up the cache. 3650 */ 3651 zdom = zone_domain_lock(zone, domain); 3652 if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL) { 3653 item = bucket->ub_bucket[bucket->ub_cnt - 1]; 3654#ifdef INVARIANTS 3655 bucket->ub_bucket[bucket->ub_cnt - 1] = NULL; 3656#endif 3657 bucket->ub_cnt--; 3658 zone_put_bucket(zone, domain, bucket, udata, true); 3659 item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata, 3660 flags, item); 3661 if (item != NULL) { 3662 KASSERT(item_domain(item) == domain, 3663 ("%s: bucket cache item %p from wrong domain", 3664 __func__, item)); 3665 counter_u64_add(zone->uz_allocs, 1); 3666 } 3667 return (item); 3668 } 3669 ZDOM_UNLOCK(zdom); 3670 return (zone_alloc_item(zone, udata, domain, flags)); 3671#else 3672 return (uma_zalloc_arg(zone, udata, flags)); 3673#endif 3674} 3675 3676/* 3677 * Find a slab with some space. Prefer slabs that are partially used over those 3678 * that are totally full. This helps to reduce fragmentation. 3679 * 3680 * If 'rr' is 1, search all domains starting from 'domain'. Otherwise check 3681 * only 'domain'. 
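 *
 * In the 'rr' case the scan simply wraps around: starting at domain 2
 * of 4, for example, it visits 2, 3, 0, 1 and gives up only once it is
 * back at the starting domain.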
3682 */ 3683static uma_slab_t 3684keg_first_slab(uma_keg_t keg, int domain, bool rr) 3685{ 3686 uma_domain_t dom; 3687 uma_slab_t slab; 3688 int start; 3689 3690 KASSERT(domain >= 0 && domain < vm_ndomains, 3691 ("keg_first_slab: domain %d out of range", domain)); 3692 KEG_LOCK_ASSERT(keg, domain); 3693 3694 slab = NULL; 3695 start = domain; 3696 do { 3697 dom = &keg->uk_domain[domain]; 3698 if ((slab = LIST_FIRST(&dom->ud_part_slab)) != NULL) 3699 return (slab); 3700 if ((slab = LIST_FIRST(&dom->ud_free_slab)) != NULL) { 3701 LIST_REMOVE(slab, us_link); 3702 dom->ud_free_slabs--; 3703 LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); 3704 return (slab); 3705 } 3706 if (rr) 3707 domain = (domain + 1) % vm_ndomains; 3708 } while (domain != start); 3709 3710 return (NULL); 3711} 3712 3713/* 3714 * Fetch an existing slab from a free or partial list. Returns with the 3715 * keg domain lock held if a slab was found or unlocked if not. 3716 */ 3717static uma_slab_t 3718keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags) 3719{ 3720 uma_slab_t slab; 3721 uint32_t reserve; 3722 3723 /* HASH has a single free list. */ 3724 if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0) 3725 domain = 0; 3726 3727 KEG_LOCK(keg, domain); 3728 reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve; 3729 if (keg->uk_domain[domain].ud_free_items <= reserve || 3730 (slab = keg_first_slab(keg, domain, rr)) == NULL) { 3731 KEG_UNLOCK(keg, domain); 3732 return (NULL); 3733 } 3734 return (slab); 3735} 3736 3737static uma_slab_t 3738keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags) 3739{ 3740 struct vm_domainset_iter di; 3741 uma_slab_t slab; 3742 int aflags, domain; 3743 bool rr; 3744 3745restart: 3746 /* 3747 * Use the keg's policy if upper layers haven't already specified a 3748 * domain (as happens with first-touch zones). 3749 * 3750 * To avoid races we run the iterator with the keg lock held, but that 3751 * means that we cannot allow the vm_domainset layer to sleep. Thus, 3752 * clear M_WAITOK and handle low memory conditions locally. 3753 */ 3754 rr = rdomain == UMA_ANYDOMAIN; 3755 if (rr) { 3756 aflags = (flags & ~M_WAITOK) | M_NOWAIT; 3757 vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, 3758 &aflags); 3759 } else { 3760 aflags = flags; 3761 domain = rdomain; 3762 } 3763 3764 for (;;) { 3765 slab = keg_fetch_free_slab(keg, domain, rr, flags); 3766 if (slab != NULL) 3767 return (slab); 3768 3769 /* 3770 * M_NOVM means don't ask at all! 3771 */ 3772 if (flags & M_NOVM) 3773 break; 3774 3775 slab = keg_alloc_slab(keg, zone, domain, flags, aflags); 3776 if (slab != NULL) 3777 return (slab); 3778 if (!rr && (flags & M_WAITOK) == 0) 3779 break; 3780 if (rr && vm_domainset_iter_policy(&di, &domain) != 0) { 3781 if ((flags & M_WAITOK) != 0) { 3782 vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0); 3783 goto restart; 3784 } 3785 break; 3786 } 3787 } 3788 3789 /* 3790 * We might not have been able to get a slab but another cpu 3791 * could have while we were unlocked. Check again before we 3792 * fail. 
3793 */ 3794 if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) 3795 return (slab); 3796 3797 return (NULL); 3798} 3799 3800static void * 3801slab_alloc_item(uma_keg_t keg, uma_slab_t slab) 3802{ 3803 uma_domain_t dom; 3804 void *item; 3805 int freei; 3806 3807 KEG_LOCK_ASSERT(keg, slab->us_domain); 3808 3809 dom = &keg->uk_domain[slab->us_domain]; 3810 freei = BIT_FFS(keg->uk_ipers, &slab->us_free) - 1; 3811 BIT_CLR(keg->uk_ipers, freei, &slab->us_free); 3812 item = slab_item(slab, keg, freei); 3813 slab->us_freecount--; 3814 dom->ud_free_items--; 3815 3816 /* 3817 * Move this slab to the full list. It must be on the partial list, so 3818 * we do not need to update the free slab count. In particular, 3819 * keg_fetch_slab() always returns slabs on the partial list. 3820 */ 3821 if (slab->us_freecount == 0) { 3822 LIST_REMOVE(slab, us_link); 3823 LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link); 3824 } 3825 3826 return (item); 3827} 3828 3829static int 3830zone_import(void *arg, void **bucket, int max, int domain, int flags) 3831{ 3832 uma_domain_t dom; 3833 uma_zone_t zone; 3834 uma_slab_t slab; 3835 uma_keg_t keg; 3836#ifdef NUMA 3837 int stripe; 3838#endif 3839 int i; 3840 3841 zone = arg; 3842 slab = NULL; 3843 keg = zone->uz_keg; 3844 /* Try to keep the buckets totally full */ 3845 for (i = 0; i < max; ) { 3846 if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL) 3847 break; 3848#ifdef NUMA 3849 stripe = howmany(max, vm_ndomains); 3850#endif 3851 dom = &keg->uk_domain[slab->us_domain]; 3852 do { 3853 bucket[i++] = slab_alloc_item(keg, slab); 3854 if (dom->ud_free_items <= keg->uk_reserve) { 3855 /* 3856 * Avoid depleting the reserve after a 3857 * successful item allocation, even if 3858 * M_USE_RESERVE is specified. 3859 */ 3860 KEG_UNLOCK(keg, slab->us_domain); 3861 goto out; 3862 } 3863#ifdef NUMA 3864 /* 3865 * If the zone is striped we pick a new slab for every 3866 * N allocations. Eliminating this conditional will 3867 * instead pick a new domain for each bucket rather 3868 * than stripe within each bucket. The current option 3869 * produces more fragmentation and requires more cpu 3870 * time but yields better distribution. 3871 */ 3872 if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0 && 3873 vm_ndomains > 1 && --stripe == 0) 3874 break; 3875#endif 3876 } while (slab->us_freecount != 0 && i < max); 3877 KEG_UNLOCK(keg, slab->us_domain); 3878 3879 /* Don't block if we allocated any successfully. */ 3880 flags &= ~M_WAITOK; 3881 flags |= M_NOWAIT; 3882 } 3883out: 3884 return i; 3885} 3886 3887static int 3888zone_alloc_limit_hard(uma_zone_t zone, int count, int flags) 3889{ 3890 uint64_t old, new, total, max; 3891 3892 /* 3893 * The hard case. We're going to sleep because there were existing 3894 * sleepers or because we ran out of items. This routine enforces 3895 * fairness by keeping fifo order. 3896 * 3897 * First release our ill gotten gains and make some noise. 3898 */ 3899 for (;;) { 3900 zone_free_limit(zone, count); 3901 zone_log_warning(zone); 3902 zone_maxaction(zone); 3903 if (flags & M_NOWAIT) 3904 return (0); 3905 3906 /* 3907 * We need to allocate an item or set ourself as a sleeper 3908 * while the sleepq lock is held to avoid wakeup races. This 3909 * is essentially a home rolled semaphore. 3910 */ 3911 sleepq_lock(&zone->uz_max_items); 3912 old = zone->uz_items; 3913 do { 3914 MPASS(UZ_ITEMS_SLEEPERS(old) < UZ_ITEMS_SLEEPERS_MAX); 3915 /* Cache the max since we will evaluate twice. 
 */
			max = zone->uz_max_items;
			if (UZ_ITEMS_SLEEPERS(old) != 0 ||
			    UZ_ITEMS_COUNT(old) >= max)
				new = old + UZ_ITEMS_SLEEPER;
			else
				new = old + MIN(count, max - old);
		} while (atomic_fcmpset_64(&zone->uz_items, &old, new) == 0);

		/* We may have successfully allocated under the sleepq lock. */
		if (UZ_ITEMS_SLEEPERS(new) == 0) {
			sleepq_release(&zone->uz_max_items);
			return (new - old);
		}

		/*
		 * This is in a different cacheline from uz_items so that we
		 * don't constantly invalidate the fastpath cacheline when we
		 * adjust item counts. This could be limited to toggling on
		 * transitions.
		 */
		atomic_add_32(&zone->uz_sleepers, 1);
		atomic_add_64(&zone->uz_sleeps, 1);

		/*
		 * We have added ourselves as a sleeper. The sleepq lock
		 * protects us from wakeup races. Sleep now and then retry.
		 */
		sleepq_add(&zone->uz_max_items, NULL, "zonelimit", 0, 0);
		sleepq_wait(&zone->uz_max_items, PVM);

		/*
		 * After wakeup, remove ourselves as a sleeper and try
		 * again. We no longer have the sleepq lock for protection.
		 *
		 * Subtract ourselves as a sleeper while attempting to add
		 * our count.
		 */
		atomic_subtract_32(&zone->uz_sleepers, 1);
		old = atomic_fetchadd_64(&zone->uz_items,
		    -(UZ_ITEMS_SLEEPER - count));
		/* We're no longer a sleeper. */
		old -= UZ_ITEMS_SLEEPER;

		/*
		 * If we're still at the limit, restart. Notably do not
		 * block on other sleepers. Cache the max value to protect
		 * against changes via sysctl.
		 */
		total = UZ_ITEMS_COUNT(old);
		max = zone->uz_max_items;
		if (total >= max)
			continue;
		/* Truncate if necessary, otherwise wake other sleepers. */
		if (total + count > max) {
			zone_free_limit(zone, total + count - max);
			count = max - total;
		} else if (total + count < max && UZ_ITEMS_SLEEPERS(old) != 0)
			wakeup_one(&zone->uz_max_items);

		return (count);
	}
}

/*
 * Allocate 'count' items from our max_items limit. Returns the number
 * available. If M_NOWAIT is not specified it will sleep until at least
 * one item can be allocated.
 */
static int
zone_alloc_limit(uma_zone_t zone, int count, int flags)
{
	uint64_t old;
	uint64_t max;

	max = zone->uz_max_items;
	MPASS(max > 0);

	/*
	 * We expect normal allocations to succeed with a simple
	 * fetchadd.
	 */
	old = atomic_fetchadd_64(&zone->uz_items, count);
	if (__predict_true(old + count <= max))
		return (count);

	/*
	 * If we had some items and no sleepers just return the
	 * truncated value. We have to release the excess space
	 * though because that may wake sleepers who weren't woken
	 * because we were temporarily over the limit.
	 */
	if (old < max) {
		zone_free_limit(zone, (old + count) - max);
		return (max - old);
	}
	return (zone_alloc_limit_hard(zone, count, flags));
}

/*
 * Free a number of items back to the limit.
 */
static void
zone_free_limit(uma_zone_t zone, int count)
{
	uint64_t old;

	MPASS(count > 0);

	/*
	 * In the common case we either have no sleepers or
	 * are still over the limit and can just return.
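	 *
	 * For example (illustrative numbers): with uz_max_items = 100, one
	 * thread asleep in zone_alloc_limit_hard() and UZ_ITEMS_COUNT()
	 * currently 100, freeing a single item brings the count to 99,
	 * below the limit, so the wakeup_one() below gives that sleeper a
	 * chance to claim the freed slot.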
4027 */ 4028 old = atomic_fetchadd_64(&zone->uz_items, -count); 4029 if (__predict_true(UZ_ITEMS_SLEEPERS(old) == 0 || 4030 UZ_ITEMS_COUNT(old) - count >= zone->uz_max_items)) 4031 return; 4032 4033 /* 4034 * Moderate the rate of wakeups. Sleepers will continue 4035 * to generate wakeups if necessary. 4036 */ 4037 wakeup_one(&zone->uz_max_items); 4038} 4039 4040static uma_bucket_t 4041zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags) 4042{ 4043 uma_bucket_t bucket; 4044 int maxbucket, cnt; 4045 4046 CTR3(KTR_UMA, "zone_alloc_bucket zone %s(%p) domain %d", zone->uz_name, 4047 zone, domain); 4048 4049 /* Avoid allocs targeting empty domains. */ 4050 if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain)) 4051 domain = UMA_ANYDOMAIN; 4052 else if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0) 4053 domain = UMA_ANYDOMAIN; 4054 4055 if (zone->uz_max_items > 0) 4056 maxbucket = zone_alloc_limit(zone, zone->uz_bucket_size, 4057 M_NOWAIT); 4058 else 4059 maxbucket = zone->uz_bucket_size; 4060 if (maxbucket == 0) 4061 return (false); 4062 4063 /* Don't wait for buckets, preserve caller's NOVM setting. */ 4064 bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); 4065 if (bucket == NULL) { 4066 cnt = 0; 4067 goto out; 4068 } 4069 4070 bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, 4071 MIN(maxbucket, bucket->ub_entries), domain, flags); 4072 4073 /* 4074 * Initialize the memory if necessary. 4075 */ 4076 if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { 4077 int i; 4078 4079 for (i = 0; i < bucket->ub_cnt; i++) 4080 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, 4081 flags) != 0) 4082 break; 4083 /* 4084 * If we couldn't initialize the whole bucket, put the 4085 * rest back onto the freelist. 4086 */ 4087 if (i != bucket->ub_cnt) { 4088 zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i], 4089 bucket->ub_cnt - i); 4090#ifdef INVARIANTS 4091 bzero(&bucket->ub_bucket[i], 4092 sizeof(void *) * (bucket->ub_cnt - i)); 4093#endif 4094 bucket->ub_cnt = i; 4095 } 4096 } 4097 4098 cnt = bucket->ub_cnt; 4099 if (bucket->ub_cnt == 0) { 4100 bucket_free(zone, bucket, udata); 4101 counter_u64_add(zone->uz_fails, 1); 4102 bucket = NULL; 4103 } 4104out: 4105 if (zone->uz_max_items > 0 && cnt < maxbucket) 4106 zone_free_limit(zone, maxbucket - cnt); 4107 4108 return (bucket); 4109} 4110 4111/* 4112 * Allocates a single item from a zone. 4113 * 4114 * Arguments 4115 * zone The zone to alloc for. 4116 * udata The data to be passed to the constructor. 4117 * domain The domain to allocate from or UMA_ANYDOMAIN. 4118 * flags M_WAITOK, M_NOWAIT, M_ZERO. 4119 * 4120 * Returns 4121 * NULL if there is no memory and M_NOWAIT is set 4122 * An item if successful 4123 */ 4124 4125static void * 4126zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags) 4127{ 4128 void *item; 4129 4130 if (zone->uz_max_items > 0 && zone_alloc_limit(zone, 1, flags) == 0) { 4131 counter_u64_add(zone->uz_fails, 1); 4132 return (NULL); 4133 } 4134 4135 /* Avoid allocs targeting empty domains. */ 4136 if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain)) 4137 domain = UMA_ANYDOMAIN; 4138 4139 if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1) 4140 goto fail_cnt; 4141 4142 /* 4143 * We have to call both the zone's init (not the keg's init) 4144 * and the zone's ctor. This is because the item is going from 4145 * a keg slab directly to the user, and the user is expecting it 4146 * to be both zone-init'd as well as zone-ctor'd. 
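	 *
	 * (The keg's init already ran when the backing slab was created in
	 * keg_alloc_slab(), so only the zone-level init remains to be
	 * applied here.)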
4147 */ 4148 if (zone->uz_init != NULL) { 4149 if (zone->uz_init(item, zone->uz_size, flags) != 0) { 4150 zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT); 4151 goto fail_cnt; 4152 } 4153 } 4154 item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata, flags, 4155 item); 4156 if (item == NULL) 4157 goto fail; 4158 4159 counter_u64_add(zone->uz_allocs, 1); 4160 CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item, 4161 zone->uz_name, zone); 4162 4163 return (item); 4164 4165fail_cnt: 4166 counter_u64_add(zone->uz_fails, 1); 4167fail: 4168 if (zone->uz_max_items > 0) 4169 zone_free_limit(zone, 1); 4170 CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)", 4171 zone->uz_name, zone); 4172 4173 return (NULL); 4174} 4175 4176/* See uma.h */ 4177void 4178uma_zfree_smr(uma_zone_t zone, void *item) 4179{ 4180 uma_cache_t cache; 4181 uma_cache_bucket_t bucket; 4182 int itemdomain, uz_flags; 4183 4184#ifdef UMA_ZALLOC_DEBUG 4185 KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0, 4186 ("uma_zfree_smr: called with non-SMR zone.")); 4187 KASSERT(item != NULL, ("uma_zfree_smr: Called with NULL pointer.")); 4188 SMR_ASSERT_NOT_ENTERED(zone->uz_smr); 4189 if (uma_zfree_debug(zone, item, NULL) == EJUSTRETURN) 4190 return; 4191#endif 4192 cache = &zone->uz_cpu[curcpu]; 4193 uz_flags = cache_uz_flags(cache); 4194 itemdomain = 0; 4195#ifdef NUMA 4196 if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) 4197 itemdomain = item_domain(item); 4198#endif 4199 critical_enter(); 4200 do { 4201 cache = &zone->uz_cpu[curcpu]; 4202 /* SMR Zones must free to the free bucket. */ 4203 bucket = &cache->uc_freebucket; 4204#ifdef NUMA 4205 if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 && 4206 PCPU_GET(domain) != itemdomain) { 4207 bucket = &cache->uc_crossbucket; 4208 } 4209#endif 4210 if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) { 4211 cache_bucket_push(cache, bucket, item); 4212 critical_exit(); 4213 return; 4214 } 4215 } while (cache_free(zone, cache, NULL, item, itemdomain)); 4216 critical_exit(); 4217 4218 /* 4219 * If nothing else caught this, we'll just do an internal free. 4220 */ 4221 zone_free_item(zone, item, NULL, SKIP_NONE); 4222} 4223 4224/* See uma.h */ 4225void 4226uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 4227{ 4228 uma_cache_t cache; 4229 uma_cache_bucket_t bucket; 4230 int itemdomain, uz_flags; 4231 4232 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 4233 random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); 4234 4235 CTR2(KTR_UMA, "uma_zfree_arg zone %s(%p)", zone->uz_name, zone); 4236 4237#ifdef UMA_ZALLOC_DEBUG 4238 KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0, 4239 ("uma_zfree_arg: called with SMR zone.")); 4240 if (uma_zfree_debug(zone, item, udata) == EJUSTRETURN) 4241 return; 4242#endif 4243 /* uma_zfree(..., NULL) does nothing, to match free(9). */ 4244 if (item == NULL) 4245 return; 4246 4247 /* 4248 * We are accessing the per-cpu cache without a critical section to 4249 * fetch size and flags. This is acceptable, if we are preempted we 4250 * will simply read another cpu's line. 4251 */ 4252 cache = &zone->uz_cpu[curcpu]; 4253 uz_flags = cache_uz_flags(cache); 4254 if (UMA_ALWAYS_CTORDTOR || 4255 __predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0)) 4256 item_dtor(zone, item, cache_uz_size(cache), udata, SKIP_NONE); 4257 4258 /* 4259 * The race here is acceptable. If we miss it we'll just have to wait 4260 * a little longer for the limits to be reset. 
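	 *
	 * When sleepers are present the free below bypasses the per-CPU
	 * cache entirely, so the item is returned through zone_free_item()
	 * and zone_free_limit(), which is what wakes threads waiting in
	 * zone_alloc_limit_hard().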
 */
	if (__predict_false(uz_flags & UMA_ZFLAG_LIMIT)) {
		if (atomic_load_32(&zone->uz_sleepers) > 0)
			goto zfree_item;
	}

	/*
	 * If possible, free to the per-CPU cache. There are two
	 * requirements for safe access to the per-CPU cache: (1) the thread
	 * accessing the cache must not be preempted or yield during access,
	 * and (2) the thread must not migrate CPUs without switching which
	 * cache it accesses. We rely on a critical section to prevent
	 * preemption and migration. We release the critical section in
	 * order to acquire the zone mutex if we are unable to free to the
	 * current cache; when we re-acquire the critical section, we must
	 * detect and handle migration if it has occurred.
	 */
	itemdomain = 0;
#ifdef NUMA
	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
		itemdomain = item_domain(item);
#endif
	critical_enter();
	do {
		cache = &zone->uz_cpu[curcpu];
		/*
		 * Try to free into the allocbucket first to give LIFO
		 * ordering for cache-hot data structures. Spill over
		 * into the freebucket if necessary. Alloc will swap
		 * them if one runs dry.
		 */
		bucket = &cache->uc_allocbucket;
#ifdef NUMA
		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
		    PCPU_GET(domain) != itemdomain) {
			bucket = &cache->uc_crossbucket;
		} else
#endif
		if (bucket->ucb_cnt == bucket->ucb_entries &&
		   cache->uc_freebucket.ucb_cnt <
		   cache->uc_freebucket.ucb_entries)
			cache_bucket_swap(&cache->uc_freebucket,
			    &cache->uc_allocbucket);
		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
			cache_bucket_push(cache, bucket, item);
			critical_exit();
			return;
		}
	} while (cache_free(zone, cache, udata, item, itemdomain));
	critical_exit();

	/*
	 * If nothing else caught this, we'll just do an internal free.
	 */
zfree_item:
	zone_free_item(zone, item, udata, SKIP_DTOR);
}

#ifdef NUMA
/*
 * Sort cross-domain free buckets into the correct per-domain buckets and
 * cache them.
 */
static void
zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucketlist emptybuckets, fullbuckets;
	uma_zone_domain_t zdom;
	uma_bucket_t b;
	smr_seq_t seq;
	void *item;
	int domain;

	CTR3(KTR_UMA,
	    "uma_zfree: zone %s(%p) draining cross bucket %p",
	    zone->uz_name, zone, bucket);

	/*
	 * It is possible for buckets to arrive here out of order so we fetch
	 * the current smr seq rather than accepting the bucket's.
	 */
	seq = SMR_SEQ_INVALID;
	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
		seq = smr_advance(zone->uz_smr);

	/*
	 * To avoid having ndomain * ndomain buckets for sorting we have a
	 * lock on the current crossfree bucket. A full matrix with
	 * per-domain locking could be used if necessary.
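	 *
	 * Concretely: each destination domain has a single pending
	 * uzd_cross bucket guarded by the zone-wide cross lock. Items are
	 * appended to the bucket of their home domain; buckets that fill
	 * up are queued on "fullbuckets" and handed to the per-domain
	 * caches only after the cross lock has been dropped.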
4350 */ 4351 STAILQ_INIT(&emptybuckets); 4352 STAILQ_INIT(&fullbuckets); 4353 ZONE_CROSS_LOCK(zone); 4354 for (; bucket->ub_cnt > 0; bucket->ub_cnt--) { 4355 item = bucket->ub_bucket[bucket->ub_cnt - 1]; 4356 domain = item_domain(item); 4357 zdom = ZDOM_GET(zone, domain); 4358 if (zdom->uzd_cross == NULL) { 4359 if ((b = STAILQ_FIRST(&emptybuckets)) != NULL) { 4360 STAILQ_REMOVE_HEAD(&emptybuckets, ub_link); 4361 zdom->uzd_cross = b; 4362 } else { 4363 /* 4364 * Avoid allocating a bucket with the cross lock 4365 * held, since allocation can trigger a 4366 * cross-domain free and bucket zones may 4367 * allocate from each other. 4368 */ 4369 ZONE_CROSS_UNLOCK(zone); 4370 b = bucket_alloc(zone, udata, M_NOWAIT); 4371 if (b == NULL) 4372 goto out; 4373 ZONE_CROSS_LOCK(zone); 4374 if (zdom->uzd_cross != NULL) { 4375 STAILQ_INSERT_HEAD(&emptybuckets, b, 4376 ub_link); 4377 } else { 4378 zdom->uzd_cross = b; 4379 } 4380 } 4381 } 4382 b = zdom->uzd_cross; 4383 b->ub_bucket[b->ub_cnt++] = item; 4384 b->ub_seq = seq; 4385 if (b->ub_cnt == b->ub_entries) { 4386 STAILQ_INSERT_HEAD(&fullbuckets, b, ub_link); 4387 if ((b = STAILQ_FIRST(&emptybuckets)) != NULL) 4388 STAILQ_REMOVE_HEAD(&emptybuckets, ub_link); 4389 zdom->uzd_cross = b; 4390 } 4391 } 4392 ZONE_CROSS_UNLOCK(zone); 4393out: 4394 if (bucket->ub_cnt == 0) 4395 bucket->ub_seq = SMR_SEQ_INVALID; 4396 bucket_free(zone, bucket, udata); 4397 4398 while ((b = STAILQ_FIRST(&emptybuckets)) != NULL) { 4399 STAILQ_REMOVE_HEAD(&emptybuckets, ub_link); 4400 bucket_free(zone, b, udata); 4401 } 4402 while ((b = STAILQ_FIRST(&fullbuckets)) != NULL) { 4403 STAILQ_REMOVE_HEAD(&fullbuckets, ub_link); 4404 domain = item_domain(b->ub_bucket[0]); 4405 zone_put_bucket(zone, domain, b, udata, true); 4406 } 4407} 4408#endif 4409 4410static void 4411zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata, 4412 int itemdomain, bool ws) 4413{ 4414 4415#ifdef NUMA 4416 /* 4417 * Buckets coming from the wrong domain will be entirely for the 4418 * only other domain on two domain systems. In this case we can 4419 * simply cache them. Otherwise we need to sort them back to 4420 * correct domains. 4421 */ 4422 if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 && 4423 vm_ndomains > 2 && PCPU_GET(domain) != itemdomain) { 4424 zone_free_cross(zone, bucket, udata); 4425 return; 4426 } 4427#endif 4428 4429 /* 4430 * Attempt to save the bucket in the zone's domain bucket cache. 4431 */ 4432 CTR3(KTR_UMA, 4433 "uma_zfree: zone %s(%p) putting bucket %p on free list", 4434 zone->uz_name, zone, bucket); 4435 /* ub_cnt is pointing to the last free item */ 4436 if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0) 4437 itemdomain = zone_domain_lowest(zone, itemdomain); 4438 zone_put_bucket(zone, itemdomain, bucket, udata, ws); 4439} 4440 4441/* 4442 * Populate a free or cross bucket for the current cpu cache. Free any 4443 * existing full bucket either to the zone cache or back to the slab layer. 4444 * 4445 * Enters and returns in a critical section. false return indicates that 4446 * we can not satisfy this free in the cache layer. true indicates that 4447 * the caller should retry. 
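 *
 * Although cache_free() enters and exits in a critical section, that
 * section is dropped and re-entered internally (around bucket allocation
 * and the hand-off of the full bucket back to the zone), so the caller
 * must re-fetch its per-CPU cache pointer before retrying.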
4448 */ 4449static __noinline bool 4450cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, void *item, 4451 int itemdomain) 4452{ 4453 uma_cache_bucket_t cbucket; 4454 uma_bucket_t newbucket, bucket; 4455 4456 CRITICAL_ASSERT(curthread); 4457 4458 if (zone->uz_bucket_size == 0) 4459 return false; 4460 4461 cache = &zone->uz_cpu[curcpu]; 4462 newbucket = NULL; 4463 4464 /* 4465 * FIRSTTOUCH domains need to free to the correct zdom. When 4466 * enabled this is the zdom of the item. The bucket is the 4467 * cross bucket if the current domain and itemdomain do not match. 4468 */ 4469 cbucket = &cache->uc_freebucket; 4470#ifdef NUMA 4471 if ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) != 0) { 4472 if (PCPU_GET(domain) != itemdomain) { 4473 cbucket = &cache->uc_crossbucket; 4474 if (cbucket->ucb_cnt != 0) 4475 counter_u64_add(zone->uz_xdomain, 4476 cbucket->ucb_cnt); 4477 } 4478 } 4479#endif 4480 bucket = cache_bucket_unload(cbucket); 4481 KASSERT(bucket == NULL || bucket->ub_cnt == bucket->ub_entries, 4482 ("cache_free: Entered with non-full free bucket.")); 4483 4484 /* We are no longer associated with this CPU. */ 4485 critical_exit(); 4486 4487 /* 4488 * Don't let SMR zones operate without a free bucket. Force 4489 * a synchronize and re-use this one. We will only degrade 4490 * to a synchronize every bucket_size items rather than every 4491 * item if we fail to allocate a bucket. 4492 */ 4493 if ((zone->uz_flags & UMA_ZONE_SMR) != 0) { 4494 if (bucket != NULL) 4495 bucket->ub_seq = smr_advance(zone->uz_smr); 4496 newbucket = bucket_alloc(zone, udata, M_NOWAIT); 4497 if (newbucket == NULL && bucket != NULL) { 4498 bucket_drain(zone, bucket); 4499 newbucket = bucket; 4500 bucket = NULL; 4501 } 4502 } else if (!bucketdisable) 4503 newbucket = bucket_alloc(zone, udata, M_NOWAIT); 4504 4505 if (bucket != NULL) 4506 zone_free_bucket(zone, bucket, udata, itemdomain, true); 4507 4508 critical_enter(); 4509 if ((bucket = newbucket) == NULL) 4510 return (false); 4511 cache = &zone->uz_cpu[curcpu]; 4512#ifdef NUMA 4513 /* 4514 * Check to see if we should be populating the cross bucket. If it 4515 * is already populated we will fall through and attempt to populate 4516 * the free bucket. 4517 */ 4518 if ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) != 0) { 4519 if (PCPU_GET(domain) != itemdomain && 4520 cache->uc_crossbucket.ucb_bucket == NULL) { 4521 cache_bucket_load_cross(cache, bucket); 4522 return (true); 4523 } 4524 } 4525#endif 4526 /* 4527 * We may have lost the race to fill the bucket or switched CPUs. 4528 */ 4529 if (cache->uc_freebucket.ucb_bucket != NULL) { 4530 critical_exit(); 4531 bucket_free(zone, bucket, udata); 4532 critical_enter(); 4533 } else 4534 cache_bucket_load_free(cache, bucket); 4535 4536 return (true); 4537} 4538 4539static void 4540slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item) 4541{ 4542 uma_keg_t keg; 4543 uma_domain_t dom; 4544 int freei; 4545 4546 keg = zone->uz_keg; 4547 KEG_LOCK_ASSERT(keg, slab->us_domain); 4548 4549 /* Do we need to remove from any lists? */ 4550 dom = &keg->uk_domain[slab->us_domain]; 4551 if (slab->us_freecount + 1 == keg->uk_ipers) { 4552 LIST_REMOVE(slab, us_link); 4553 LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link); 4554 dom->ud_free_slabs++; 4555 } else if (slab->us_freecount == 0) { 4556 LIST_REMOVE(slab, us_link); 4557 LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); 4558 } 4559 4560 /* Slab management. 
*/ 4561 freei = slab_item_index(slab, keg, item); 4562 BIT_SET(keg->uk_ipers, freei, &slab->us_free); 4563 slab->us_freecount++; 4564 4565 /* Keg statistics. */ 4566 dom->ud_free_items++; 4567} 4568 4569static void 4570zone_release(void *arg, void **bucket, int cnt) 4571{ 4572 struct mtx *lock; 4573 uma_zone_t zone; 4574 uma_slab_t slab; 4575 uma_keg_t keg; 4576 uint8_t *mem; 4577 void *item; 4578 int i; 4579 4580 zone = arg; 4581 keg = zone->uz_keg; 4582 lock = NULL; 4583 if (__predict_false((zone->uz_flags & UMA_ZFLAG_HASH) != 0)) 4584 lock = KEG_LOCK(keg, 0); 4585 for (i = 0; i < cnt; i++) { 4586 item = bucket[i]; 4587 if (__predict_true((zone->uz_flags & UMA_ZFLAG_VTOSLAB) != 0)) { 4588 slab = vtoslab((vm_offset_t)item); 4589 } else { 4590 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); 4591 if ((zone->uz_flags & UMA_ZFLAG_HASH) != 0) 4592 slab = hash_sfind(&keg->uk_hash, mem); 4593 else 4594 slab = (uma_slab_t)(mem + keg->uk_pgoff); 4595 } 4596 if (lock != KEG_LOCKPTR(keg, slab->us_domain)) { 4597 if (lock != NULL) 4598 mtx_unlock(lock); 4599 lock = KEG_LOCK(keg, slab->us_domain); 4600 } 4601 slab_free_item(zone, slab, item); 4602 } 4603 if (lock != NULL) 4604 mtx_unlock(lock); 4605} 4606 4607/* 4608 * Frees a single item to any zone. 4609 * 4610 * Arguments: 4611 * zone The zone to free to 4612 * item The item we're freeing 4613 * udata User supplied data for the dtor 4614 * skip Skip dtors and finis 4615 */ 4616static __noinline void 4617zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip) 4618{ 4619 4620 /* 4621 * If a free is sent directly to an SMR zone we have to 4622 * synchronize immediately because the item can instantly 4623 * be reallocated. This should only happen in degenerate 4624 * cases when no memory is available for per-cpu caches. 4625 */ 4626 if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && skip == SKIP_NONE) 4627 smr_synchronize(zone->uz_smr); 4628 4629 item_dtor(zone, item, zone->uz_size, udata, skip); 4630 4631 if (skip < SKIP_FINI && zone->uz_fini) 4632 zone->uz_fini(item, zone->uz_size); 4633 4634 zone->uz_release(zone->uz_arg, &item, 1); 4635 4636 if (skip & SKIP_CNT) 4637 return; 4638 4639 counter_u64_add(zone->uz_frees, 1); 4640 4641 if (zone->uz_max_items > 0) 4642 zone_free_limit(zone, 1); 4643} 4644 4645/* See uma.h */ 4646int 4647uma_zone_set_max(uma_zone_t zone, int nitems) 4648{ 4649 4650 /* 4651 * If the limit is small, we may need to constrain the maximum per-CPU 4652 * cache size, or disable caching entirely. 4653 */ 4654 uma_zone_set_maxcache(zone, nitems); 4655 4656 /* 4657 * XXX This can misbehave if the zone has any allocations with 4658 * no limit and a limit is imposed. There is currently no 4659 * way to clear a limit. 4660 */ 4661 ZONE_LOCK(zone); 4662 zone->uz_max_items = nitems; 4663 zone->uz_flags |= UMA_ZFLAG_LIMIT; 4664 zone_update_caches(zone); 4665 /* We may need to wake waiters. */ 4666 wakeup(&zone->uz_max_items); 4667 ZONE_UNLOCK(zone); 4668 4669 return (nitems); 4670} 4671 4672/* See uma.h */ 4673void 4674uma_zone_set_maxcache(uma_zone_t zone, int nitems) 4675{ 4676 int bpcpu, bpdom, bsize, nb; 4677 4678 ZONE_LOCK(zone); 4679 4680 /* 4681 * Compute a lower bound on the number of items that may be cached in 4682 * the zone. Each CPU gets at least two buckets, and for cross-domain 4683 * frees we use an additional bucket per CPU and per domain. Select the 4684 * largest bucket size that does not exceed half of the requested limit, 4685 * with the left over space given to the full bucket cache. 
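	 *
	 * For example (illustrative values): with 8 CPUs, a single domain
	 * and nitems = 1000, nb = 2 * 8 = 16 buckets, bsize = 1000 / 16 / 2
	 * = 31 items per bucket, and uz_bucket_max = 1000 - 16 * 31 = 504
	 * items are left for the per-domain full bucket cache.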
4686 */ 4687 bpdom = 0; 4688 bpcpu = 2; 4689#ifdef NUMA 4690 if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 && vm_ndomains > 1) { 4691 bpcpu++; 4692 bpdom++; 4693 } 4694#endif 4695 nb = bpcpu * mp_ncpus + bpdom * vm_ndomains; 4696 bsize = nitems / nb / 2; 4697 if (bsize > BUCKET_MAX) 4698 bsize = BUCKET_MAX; 4699 else if (bsize == 0 && nitems / nb > 0) 4700 bsize = 1; 4701 zone->uz_bucket_size_max = zone->uz_bucket_size = bsize; 4702 if (zone->uz_bucket_size_min > zone->uz_bucket_size_max) 4703 zone->uz_bucket_size_min = zone->uz_bucket_size_max; 4704 zone->uz_bucket_max = nitems - nb * bsize; 4705 ZONE_UNLOCK(zone); 4706} 4707 4708/* See uma.h */ 4709int 4710uma_zone_get_max(uma_zone_t zone) 4711{ 4712 int nitems; 4713 4714 nitems = atomic_load_64(&zone->uz_max_items); 4715 4716 return (nitems); 4717} 4718 4719/* See uma.h */ 4720void 4721uma_zone_set_warning(uma_zone_t zone, const char *warning) 4722{ 4723 4724 ZONE_ASSERT_COLD(zone); 4725 zone->uz_warning = warning; 4726} 4727 4728/* See uma.h */ 4729void 4730uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction) 4731{ 4732 4733 ZONE_ASSERT_COLD(zone); 4734 TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone); 4735} 4736 4737/* See uma.h */ 4738int 4739uma_zone_get_cur(uma_zone_t zone) 4740{ 4741 int64_t nitems; 4742 u_int i; 4743 4744 nitems = 0; 4745 if (zone->uz_allocs != EARLY_COUNTER && zone->uz_frees != EARLY_COUNTER) 4746 nitems = counter_u64_fetch(zone->uz_allocs) - 4747 counter_u64_fetch(zone->uz_frees); 4748 CPU_FOREACH(i) 4749 nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs) - 4750 atomic_load_64(&zone->uz_cpu[i].uc_frees); 4751 4752 return (nitems < 0 ? 0 : nitems); 4753} 4754 4755static uint64_t 4756uma_zone_get_allocs(uma_zone_t zone) 4757{ 4758 uint64_t nitems; 4759 u_int i; 4760 4761 nitems = 0; 4762 if (zone->uz_allocs != EARLY_COUNTER) 4763 nitems = counter_u64_fetch(zone->uz_allocs); 4764 CPU_FOREACH(i) 4765 nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs); 4766 4767 return (nitems); 4768} 4769 4770static uint64_t 4771uma_zone_get_frees(uma_zone_t zone) 4772{ 4773 uint64_t nitems; 4774 u_int i; 4775 4776 nitems = 0; 4777 if (zone->uz_frees != EARLY_COUNTER) 4778 nitems = counter_u64_fetch(zone->uz_frees); 4779 CPU_FOREACH(i) 4780 nitems += atomic_load_64(&zone->uz_cpu[i].uc_frees); 4781 4782 return (nitems); 4783} 4784 4785#ifdef INVARIANTS 4786/* Used only for KEG_ASSERT_COLD(). 
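 *
 * (The assertion treats a keg as cold only while this sum is still zero,
 * i.e. before any zone backed by the keg has satisfied an allocation.)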
*/ 4787static uint64_t 4788uma_keg_get_allocs(uma_keg_t keg) 4789{ 4790 uma_zone_t z; 4791 uint64_t nitems; 4792 4793 nitems = 0; 4794 LIST_FOREACH(z, &keg->uk_zones, uz_link) 4795 nitems += uma_zone_get_allocs(z); 4796 4797 return (nitems); 4798} 4799#endif 4800 4801/* See uma.h */ 4802void 4803uma_zone_set_init(uma_zone_t zone, uma_init uminit) 4804{ 4805 uma_keg_t keg; 4806 4807 KEG_GET(zone, keg); 4808 KEG_ASSERT_COLD(keg); 4809 keg->uk_init = uminit; 4810} 4811 4812/* See uma.h */ 4813void 4814uma_zone_set_fini(uma_zone_t zone, uma_fini fini) 4815{ 4816 uma_keg_t keg; 4817 4818 KEG_GET(zone, keg); 4819 KEG_ASSERT_COLD(keg); 4820 keg->uk_fini = fini; 4821} 4822 4823/* See uma.h */ 4824void 4825uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) 4826{ 4827 4828 ZONE_ASSERT_COLD(zone); 4829 zone->uz_init = zinit; 4830} 4831 4832/* See uma.h */ 4833void 4834uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) 4835{ 4836 4837 ZONE_ASSERT_COLD(zone); 4838 zone->uz_fini = zfini; 4839} 4840 4841/* See uma.h */ 4842void 4843uma_zone_set_freef(uma_zone_t zone, uma_free freef) 4844{ 4845 uma_keg_t keg; 4846 4847 KEG_GET(zone, keg); 4848 KEG_ASSERT_COLD(keg); 4849 keg->uk_freef = freef; 4850} 4851 4852/* See uma.h */ 4853void 4854uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) 4855{ 4856 uma_keg_t keg; 4857 4858 KEG_GET(zone, keg); 4859 KEG_ASSERT_COLD(keg); 4860 keg->uk_allocf = allocf; 4861} 4862 4863/* See uma.h */ 4864void 4865uma_zone_set_smr(uma_zone_t zone, smr_t smr) 4866{ 4867 4868 ZONE_ASSERT_COLD(zone); 4869 4870 KASSERT(smr != NULL, ("Got NULL smr")); 4871 KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0, 4872 ("zone %p (%s) already uses SMR", zone, zone->uz_name)); 4873 zone->uz_flags |= UMA_ZONE_SMR; 4874 zone->uz_smr = smr; 4875 zone_update_caches(zone); 4876} 4877 4878smr_t 4879uma_zone_get_smr(uma_zone_t zone) 4880{ 4881 4882 return (zone->uz_smr); 4883} 4884 4885/* See uma.h */ 4886void 4887uma_zone_reserve(uma_zone_t zone, int items) 4888{ 4889 uma_keg_t keg; 4890 4891 KEG_GET(zone, keg); 4892 KEG_ASSERT_COLD(keg); 4893 keg->uk_reserve = items; 4894} 4895 4896/* See uma.h */ 4897int 4898uma_zone_reserve_kva(uma_zone_t zone, int count) 4899{ 4900 uma_keg_t keg; 4901 vm_offset_t kva; 4902 u_int pages; 4903 4904 KEG_GET(zone, keg); 4905 KEG_ASSERT_COLD(keg); 4906 ZONE_ASSERT_COLD(zone); 4907 4908 pages = howmany(count, keg->uk_ipers) * keg->uk_ppera; 4909 4910#ifdef UMA_MD_SMALL_ALLOC 4911 if (keg->uk_ppera > 1) { 4912#else 4913 if (1) { 4914#endif 4915 kva = kva_alloc((vm_size_t)pages * PAGE_SIZE); 4916 if (kva == 0) 4917 return (0); 4918 } else 4919 kva = 0; 4920 4921 MPASS(keg->uk_kva == 0); 4922 keg->uk_kva = kva; 4923 keg->uk_offset = 0; 4924 zone->uz_max_items = pages * keg->uk_ipers; 4925#ifdef UMA_MD_SMALL_ALLOC 4926 keg->uk_allocf = (keg->uk_ppera > 1) ? 
noobj_alloc : uma_small_alloc; 4927#else 4928 keg->uk_allocf = noobj_alloc; 4929#endif 4930 keg->uk_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE; 4931 zone->uz_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE; 4932 zone_update_caches(zone); 4933 4934 return (1); 4935} 4936 4937/* See uma.h */ 4938void 4939uma_prealloc(uma_zone_t zone, int items) 4940{ 4941 struct vm_domainset_iter di; 4942 uma_domain_t dom; 4943 uma_slab_t slab; 4944 uma_keg_t keg; 4945 int aflags, domain, slabs; 4946 4947 KEG_GET(zone, keg); 4948 slabs = howmany(items, keg->uk_ipers); 4949 while (slabs-- > 0) { 4950 aflags = M_NOWAIT; 4951 vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, 4952 &aflags); 4953 for (;;) { 4954 slab = keg_alloc_slab(keg, zone, domain, M_WAITOK, 4955 aflags); 4956 if (slab != NULL) { 4957 dom = &keg->uk_domain[slab->us_domain]; 4958 /* 4959 * keg_alloc_slab() always returns a slab on the 4960 * partial list. 4961 */ 4962 LIST_REMOVE(slab, us_link); 4963 LIST_INSERT_HEAD(&dom->ud_free_slab, slab, 4964 us_link); 4965 dom->ud_free_slabs++; 4966 KEG_UNLOCK(keg, slab->us_domain); 4967 break; 4968 } 4969 if (vm_domainset_iter_policy(&di, &domain) != 0) 4970 vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0); 4971 } 4972 } 4973} 4974 4975/* 4976 * Returns a snapshot of memory consumption in bytes. 4977 */ 4978size_t 4979uma_zone_memory(uma_zone_t zone) 4980{ 4981 size_t sz; 4982 int i; 4983 4984 sz = 0; 4985 if (zone->uz_flags & UMA_ZFLAG_CACHE) { 4986 for (i = 0; i < vm_ndomains; i++) 4987 sz += ZDOM_GET(zone, i)->uzd_nitems; 4988 return (sz * zone->uz_size); 4989 } 4990 for (i = 0; i < vm_ndomains; i++) 4991 sz += zone->uz_keg->uk_domain[i].ud_pages; 4992 4993 return (sz * PAGE_SIZE); 4994} 4995 4996/* See uma.h */ 4997void 4998uma_reclaim(int req) 4999{ 5000 uma_reclaim_domain(req, UMA_ANYDOMAIN); 5001} 5002 5003void 5004uma_reclaim_domain(int req, int domain) 5005{ 5006 void *arg; 5007 5008 bucket_enable(); 5009 5010 arg = (void *)(uintptr_t)domain; 5011 sx_slock(&uma_reclaim_lock); 5012 switch (req) { 5013 case UMA_RECLAIM_TRIM: 5014 zone_foreach(zone_trim, arg); 5015 break; 5016 case UMA_RECLAIM_DRAIN: 5017 zone_foreach(zone_drain, arg); 5018 break; 5019 case UMA_RECLAIM_DRAIN_CPU: 5020 zone_foreach(zone_drain, arg); 5021 pcpu_cache_drain_safe(NULL); 5022 zone_foreach(zone_drain, arg); 5023 break; 5024 default: 5025 panic("unhandled reclamation request %d", req); 5026 } 5027 5028 /* 5029 * Some slabs may have been freed but this zone will be visited early 5030 * we visit again so that we can free pages that are empty once other 5031 * zones are drained. We have to do the same for buckets. 5032 */ 5033 zone_drain(slabzones[0], arg); 5034 zone_drain(slabzones[1], arg); 5035 bucket_zone_drain(domain); 5036 sx_sunlock(&uma_reclaim_lock); 5037} 5038 5039static volatile int uma_reclaim_needed; 5040 5041void 5042uma_reclaim_wakeup(void) 5043{ 5044 5045 if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0) 5046 wakeup(uma_reclaim); 5047} 5048 5049void 5050uma_reclaim_worker(void *arg __unused) 5051{ 5052 5053 for (;;) { 5054 sx_xlock(&uma_reclaim_lock); 5055 while (atomic_load_int(&uma_reclaim_needed) == 0) 5056 sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl", 5057 hz); 5058 sx_xunlock(&uma_reclaim_lock); 5059 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM); 5060 uma_reclaim(UMA_RECLAIM_DRAIN_CPU); 5061 atomic_store_int(&uma_reclaim_needed, 0); 5062 /* Don't fire more than once per-second. 
*/ 5063 pause("umarclslp", hz); 5064 } 5065} 5066 5067/* See uma.h */ 5068void 5069uma_zone_reclaim(uma_zone_t zone, int req) 5070{ 5071 uma_zone_reclaim_domain(zone, req, UMA_ANYDOMAIN); 5072} 5073 5074void 5075uma_zone_reclaim_domain(uma_zone_t zone, int req, int domain) 5076{ 5077 void *arg; 5078 5079 arg = (void *)(uintptr_t)domain; 5080 switch (req) { 5081 case UMA_RECLAIM_TRIM: 5082 zone_trim(zone, arg); 5083 break; 5084 case UMA_RECLAIM_DRAIN: 5085 zone_drain(zone, arg); 5086 break; 5087 case UMA_RECLAIM_DRAIN_CPU: 5088 pcpu_cache_drain_safe(zone); 5089 zone_drain(zone, arg); 5090 break; 5091 default: 5092 panic("unhandled reclamation request %d", req); 5093 } 5094} 5095 5096/* See uma.h */ 5097int 5098uma_zone_exhausted(uma_zone_t zone) 5099{ 5100 5101 return (atomic_load_32(&zone->uz_sleepers) > 0); 5102} 5103 5104unsigned long 5105uma_limit(void) 5106{ 5107 5108 return (uma_kmem_limit); 5109} 5110 5111void 5112uma_set_limit(unsigned long limit) 5113{ 5114 5115 uma_kmem_limit = limit; 5116} 5117 5118unsigned long 5119uma_size(void) 5120{ 5121 5122 return (atomic_load_long(&uma_kmem_total)); 5123} 5124 5125long 5126uma_avail(void) 5127{ 5128 5129 return (uma_kmem_limit - uma_size()); 5130} 5131 5132#ifdef DDB 5133/* 5134 * Generate statistics across both the zone and its per-cpu cache's. Return 5135 * desired statistics if the pointer is non-NULL for that statistic. 5136 * 5137 * Note: does not update the zone statistics, as it can't safely clear the 5138 * per-CPU cache statistic. 5139 * 5140 */ 5141static void 5142uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp, 5143 uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp) 5144{ 5145 uma_cache_t cache; 5146 uint64_t allocs, frees, sleeps, xdomain; 5147 int cachefree, cpu; 5148 5149 allocs = frees = sleeps = xdomain = 0; 5150 cachefree = 0; 5151 CPU_FOREACH(cpu) { 5152 cache = &z->uz_cpu[cpu]; 5153 cachefree += cache->uc_allocbucket.ucb_cnt; 5154 cachefree += cache->uc_freebucket.ucb_cnt; 5155 xdomain += cache->uc_crossbucket.ucb_cnt; 5156 cachefree += cache->uc_crossbucket.ucb_cnt; 5157 allocs += cache->uc_allocs; 5158 frees += cache->uc_frees; 5159 } 5160 allocs += counter_u64_fetch(z->uz_allocs); 5161 frees += counter_u64_fetch(z->uz_frees); 5162 xdomain += counter_u64_fetch(z->uz_xdomain); 5163 sleeps += z->uz_sleeps; 5164 if (cachefreep != NULL) 5165 *cachefreep = cachefree; 5166 if (allocsp != NULL) 5167 *allocsp = allocs; 5168 if (freesp != NULL) 5169 *freesp = frees; 5170 if (sleepsp != NULL) 5171 *sleepsp = sleeps; 5172 if (xdomainp != NULL) 5173 *xdomainp = xdomain; 5174} 5175#endif /* DDB */ 5176 5177static int 5178sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) 5179{ 5180 uma_keg_t kz; 5181 uma_zone_t z; 5182 int count; 5183 5184 count = 0; 5185 rw_rlock(&uma_rwlock); 5186 LIST_FOREACH(kz, &uma_kegs, uk_link) { 5187 LIST_FOREACH(z, &kz->uk_zones, uz_link) 5188 count++; 5189 } 5190 LIST_FOREACH(z, &uma_cachezones, uz_link) 5191 count++; 5192 5193 rw_runlock(&uma_rwlock); 5194 return (sysctl_handle_int(oidp, &count, 0, req)); 5195} 5196 5197static void 5198uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf, 5199 struct uma_percpu_stat *ups, bool internal) 5200{ 5201 uma_zone_domain_t zdom; 5202 uma_cache_t cache; 5203 int i; 5204 5205 for (i = 0; i < vm_ndomains; i++) { 5206 zdom = ZDOM_GET(z, i); 5207 uth->uth_zone_free += zdom->uzd_nitems; 5208 } 5209 uth->uth_allocs = counter_u64_fetch(z->uz_allocs); 5210 uth->uth_frees = counter_u64_fetch(z->uz_frees); 5211 uth->uth_fails = 
	    counter_u64_fetch(z->uz_fails);
	uth->uth_xdomain = counter_u64_fetch(z->uz_xdomain);
	uth->uth_sleeps = z->uz_sleeps;

	for (i = 0; i < mp_maxid + 1; i++) {
		bzero(&ups[i], sizeof(*ups));
		if (internal || CPU_ABSENT(i))
			continue;
		cache = &z->uz_cpu[i];
		ups[i].ups_cache_free += cache->uc_allocbucket.ucb_cnt;
		ups[i].ups_cache_free += cache->uc_freebucket.ucb_cnt;
		ups[i].ups_cache_free += cache->uc_crossbucket.ucb_cnt;
		ups[i].ups_allocs = cache->uc_allocs;
		ups[i].ups_frees = cache->uc_frees;
	}
}

static int
sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
{
	struct uma_stream_header ush;
	struct uma_type_header uth;
	struct uma_percpu_stat *ups;
	struct sbuf sbuf;
	uma_keg_t kz;
	uma_zone_t z;
	uint64_t items;
	uint32_t kfree, pages;
	int count, error, i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);

	count = 0;
	rw_rlock(&uma_rwlock);
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link)
			count++;
	}

	LIST_FOREACH(z, &uma_cachezones, uz_link)
		count++;

	/*
	 * Insert stream header.
	 */
	bzero(&ush, sizeof(ush));
	ush.ush_version = UMA_STREAM_VERSION;
	ush.ush_maxcpus = (mp_maxid + 1);
	ush.ush_count = count;
	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));

	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		kfree = pages = 0;
		for (i = 0; i < vm_ndomains; i++) {
			kfree += kz->uk_domain[i].ud_free_items;
			pages += kz->uk_domain[i].ud_pages;
		}
		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
			bzero(&uth, sizeof(uth));
			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
			uth.uth_align = kz->uk_align;
			uth.uth_size = kz->uk_size;
			uth.uth_rsize = kz->uk_rsize;
			if (z->uz_max_items > 0) {
				items = UZ_ITEMS_COUNT(z->uz_items);
				uth.uth_pages = (items / kz->uk_ipers) *
				    kz->uk_ppera;
			} else
				uth.uth_pages = pages;
			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
			    kz->uk_ppera;
			uth.uth_limit = z->uz_max_items;
			uth.uth_keg_free = kfree;

			/*
			 * A zone is secondary if it is not the first entry
			 * on the keg's zone list.
5293 */ 5294 if ((z->uz_flags & UMA_ZONE_SECONDARY) && 5295 (LIST_FIRST(&kz->uk_zones) != z)) 5296 uth.uth_zone_flags = UTH_ZONE_SECONDARY; 5297 uma_vm_zone_stats(&uth, z, &sbuf, ups, 5298 kz->uk_flags & UMA_ZFLAG_INTERNAL); 5299 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); 5300 for (i = 0; i < mp_maxid + 1; i++) 5301 (void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i])); 5302 } 5303 } 5304 LIST_FOREACH(z, &uma_cachezones, uz_link) { 5305 bzero(&uth, sizeof(uth)); 5306 strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); 5307 uth.uth_size = z->uz_size; 5308 uma_vm_zone_stats(&uth, z, &sbuf, ups, false); 5309 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); 5310 for (i = 0; i < mp_maxid + 1; i++) 5311 (void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i])); 5312 } 5313 5314 rw_runlock(&uma_rwlock); 5315 error = sbuf_finish(&sbuf); 5316 sbuf_delete(&sbuf); 5317 free(ups, M_TEMP); 5318 return (error); 5319} 5320 5321int 5322sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS) 5323{ 5324 uma_zone_t zone = *(uma_zone_t *)arg1; 5325 int error, max; 5326 5327 max = uma_zone_get_max(zone); 5328 error = sysctl_handle_int(oidp, &max, 0, req); 5329 if (error || !req->newptr) 5330 return (error); 5331 5332 uma_zone_set_max(zone, max); 5333 5334 return (0); 5335} 5336 5337int 5338sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS) 5339{ 5340 uma_zone_t zone; 5341 int cur; 5342 5343 /* 5344 * Some callers want to add sysctls for global zones that 5345 * may not yet exist so they pass a pointer to a pointer. 5346 */ 5347 if (arg2 == 0) 5348 zone = *(uma_zone_t *)arg1; 5349 else 5350 zone = arg1; 5351 cur = uma_zone_get_cur(zone); 5352 return (sysctl_handle_int(oidp, &cur, 0, req)); 5353} 5354 5355static int 5356sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS) 5357{ 5358 uma_zone_t zone = arg1; 5359 uint64_t cur; 5360 5361 cur = uma_zone_get_allocs(zone); 5362 return (sysctl_handle_64(oidp, &cur, 0, req)); 5363} 5364 5365static int 5366sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS) 5367{ 5368 uma_zone_t zone = arg1; 5369 uint64_t cur; 5370 5371 cur = uma_zone_get_frees(zone); 5372 return (sysctl_handle_64(oidp, &cur, 0, req)); 5373} 5374 5375static int 5376sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS) 5377{ 5378 struct sbuf sbuf; 5379 uma_zone_t zone = arg1; 5380 int error; 5381 5382 sbuf_new_for_sysctl(&sbuf, NULL, 0, req); 5383 if (zone->uz_flags != 0) 5384 sbuf_printf(&sbuf, "0x%b", zone->uz_flags, PRINT_UMA_ZFLAGS); 5385 else 5386 sbuf_printf(&sbuf, "0"); 5387 error = sbuf_finish(&sbuf); 5388 sbuf_delete(&sbuf); 5389 5390 return (error); 5391} 5392 5393static int 5394sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS) 5395{ 5396 uma_keg_t keg = arg1; 5397 int avail, effpct, total; 5398 5399 total = keg->uk_ppera * PAGE_SIZE; 5400 if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0) 5401 total += slabzone(keg->uk_ipers)->uz_keg->uk_rsize; 5402 /* 5403 * We consider the client's requested size and alignment here, not the 5404 * real size determination uk_rsize, because we also adjust the real 5405 * size for internal implementation reasons (max bitset size). 
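	 *
	 * For example (illustrative keg): uk_ipers = 11, uk_size = 360 with
	 * 8-byte alignment (uk_align = 7) and uk_ppera = 1 on 4KB pages
	 * gives avail = 11 * roundup2(360, 8) = 3960 out of total = 4096,
	 * so effpct is reported as 96.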
5406 */ 5407 avail = keg->uk_ipers * roundup2(keg->uk_size, keg->uk_align + 1); 5408 if ((keg->uk_flags & UMA_ZONE_PCPU) != 0) 5409 avail *= mp_maxid + 1; 5410 effpct = 100 * avail / total; 5411 return (sysctl_handle_int(oidp, &effpct, 0, req)); 5412} 5413 5414static int 5415sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS) 5416{ 5417 uma_zone_t zone = arg1; 5418 uint64_t cur; 5419 5420 cur = UZ_ITEMS_COUNT(atomic_load_64(&zone->uz_items)); 5421 return (sysctl_handle_64(oidp, &cur, 0, req)); 5422} 5423 5424#ifdef INVARIANTS 5425static uma_slab_t 5426uma_dbg_getslab(uma_zone_t zone, void *item) 5427{ 5428 uma_slab_t slab; 5429 uma_keg_t keg; 5430 uint8_t *mem; 5431 5432 /* 5433 * It is safe to return the slab here even though the 5434 * zone is unlocked because the item's allocation state 5435 * essentially holds a reference. 5436 */ 5437 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); 5438 if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0) 5439 return (NULL); 5440 if (zone->uz_flags & UMA_ZFLAG_VTOSLAB) 5441 return (vtoslab((vm_offset_t)mem)); 5442 keg = zone->uz_keg; 5443 if ((keg->uk_flags & UMA_ZFLAG_HASH) == 0) 5444 return ((uma_slab_t)(mem + keg->uk_pgoff)); 5445 KEG_LOCK(keg, 0); 5446 slab = hash_sfind(&keg->uk_hash, mem); 5447 KEG_UNLOCK(keg, 0); 5448 5449 return (slab); 5450} 5451 5452static bool 5453uma_dbg_zskip(uma_zone_t zone, void *mem) 5454{ 5455 5456 if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0) 5457 return (true); 5458 5459 return (uma_dbg_kskip(zone->uz_keg, mem)); 5460} 5461 5462static bool 5463uma_dbg_kskip(uma_keg_t keg, void *mem) 5464{ 5465 uintptr_t idx; 5466 5467 if (dbg_divisor == 0) 5468 return (true); 5469 5470 if (dbg_divisor == 1) 5471 return (false); 5472 5473 idx = (uintptr_t)mem >> PAGE_SHIFT; 5474 if (keg->uk_ipers > 1) { 5475 idx *= keg->uk_ipers; 5476 idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize; 5477 } 5478 5479 if ((idx / dbg_divisor) * dbg_divisor != idx) { 5480 counter_u64_add(uma_skip_cnt, 1); 5481 return (true); 5482 } 5483 counter_u64_add(uma_dbg_cnt, 1); 5484 5485 return (false); 5486} 5487 5488/* 5489 * Set up the slab's freei data such that uma_dbg_free can function. 5490 * 5491 */ 5492static void 5493uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item) 5494{ 5495 uma_keg_t keg; 5496 int freei; 5497 5498 if (slab == NULL) { 5499 slab = uma_dbg_getslab(zone, item); 5500 if (slab == NULL) 5501 panic("uma: item %p did not belong to zone %s", 5502 item, zone->uz_name); 5503 } 5504 keg = zone->uz_keg; 5505 freei = slab_item_index(slab, keg, item); 5506 5507 if (BIT_TEST_SET_ATOMIC(keg->uk_ipers, freei, 5508 slab_dbg_bits(slab, keg))) 5509 panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)", 5510 item, zone, zone->uz_name, slab, freei); 5511} 5512 5513/* 5514 * Verifies freed addresses. Checks for alignment, valid slab membership 5515 * and duplicate frees. 
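 *
 * Panics on an out-of-range item index, on a pointer that does not fall
 * on an item boundary within the slab, and on a clear bit in the debug
 * bitset, which indicates the item was already free.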
5516 * 5517 */ 5518static void 5519uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item) 5520{ 5521 uma_keg_t keg; 5522 int freei; 5523 5524 if (slab == NULL) { 5525 slab = uma_dbg_getslab(zone, item); 5526 if (slab == NULL) 5527 panic("uma: Freed item %p did not belong to zone %s", 5528 item, zone->uz_name); 5529 } 5530 keg = zone->uz_keg; 5531 freei = slab_item_index(slab, keg, item); 5532 5533 if (freei >= keg->uk_ipers) 5534 panic("Invalid free of %p from zone %p(%s) slab %p(%d)", 5535 item, zone, zone->uz_name, slab, freei); 5536 5537 if (slab_item(slab, keg, freei) != item) 5538 panic("Unaligned free of %p from zone %p(%s) slab %p(%d)", 5539 item, zone, zone->uz_name, slab, freei); 5540 5541 if (!BIT_TEST_CLR_ATOMIC(keg->uk_ipers, freei, 5542 slab_dbg_bits(slab, keg))) 5543 panic("Duplicate free of %p from zone %p(%s) slab %p(%d)", 5544 item, zone, zone->uz_name, slab, freei); 5545} 5546#endif /* INVARIANTS */ 5547 5548#ifdef DDB 5549static int64_t 5550get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used, 5551 uint64_t *sleeps, long *cachefree, uint64_t *xdomain) 5552{ 5553 uint64_t frees; 5554 int i; 5555 5556 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) { 5557 *allocs = counter_u64_fetch(z->uz_allocs); 5558 frees = counter_u64_fetch(z->uz_frees); 5559 *sleeps = z->uz_sleeps; 5560 *cachefree = 0; 5561 *xdomain = 0; 5562 } else 5563 uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps, 5564 xdomain); 5565 for (i = 0; i < vm_ndomains; i++) { 5566 *cachefree += ZDOM_GET(z, i)->uzd_nitems; 5567 if (!((z->uz_flags & UMA_ZONE_SECONDARY) && 5568 (LIST_FIRST(&kz->uk_zones) != z))) 5569 *cachefree += kz->uk_domain[i].ud_free_items; 5570 } 5571 *used = *allocs - frees; 5572 return (((int64_t)*used + *cachefree) * kz->uk_size); 5573} 5574 5575DB_SHOW_COMMAND(uma, db_show_uma) 5576{ 5577 const char *fmt_hdr, *fmt_entry; 5578 uma_keg_t kz; 5579 uma_zone_t z; 5580 uint64_t allocs, used, sleeps, xdomain; 5581 long cachefree; 5582 /* variables for sorting */ 5583 uma_keg_t cur_keg; 5584 uma_zone_t cur_zone, last_zone; 5585 int64_t cur_size, last_size, size; 5586 int ties; 5587 5588 /* /i option produces machine-parseable CSV output */ 5589 if (modif[0] == 'i') { 5590 fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n"; 5591 fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n"; 5592 } else { 5593 fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n"; 5594 fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n"; 5595 } 5596 5597 db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests", 5598 "Sleeps", "Bucket", "Total Mem", "XFree"); 5599 5600 /* Sort the zones with largest size first. */ 5601 last_zone = NULL; 5602 last_size = INT64_MAX; 5603 for (;;) { 5604 cur_zone = NULL; 5605 cur_size = -1; 5606 ties = 0; 5607 LIST_FOREACH(kz, &uma_kegs, uk_link) { 5608 LIST_FOREACH(z, &kz->uk_zones, uz_link) { 5609 /* 5610 * In the case of size ties, print out zones 5611 * in the order they are encountered. That is, 5612 * when we encounter the most recently output 5613 * zone, we have already printed all preceding 5614 * ties, and we must print all following ties. 
5615 */ 5616 if (z == last_zone) { 5617 ties = 1; 5618 continue; 5619 } 5620 size = get_uma_stats(kz, z, &allocs, &used, 5621 &sleeps, &cachefree, &xdomain); 5622 if (size > cur_size && size < last_size + ties) 5623 { 5624 cur_size = size; 5625 cur_zone = z; 5626 cur_keg = kz; 5627 } 5628 } 5629 } 5630 if (cur_zone == NULL) 5631 break; 5632 5633 size = get_uma_stats(cur_keg, cur_zone, &allocs, &used, 5634 &sleeps, &cachefree, &xdomain); 5635 db_printf(fmt_entry, cur_zone->uz_name, 5636 (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree, 5637 (uintmax_t)allocs, (uintmax_t)sleeps, 5638 (unsigned)cur_zone->uz_bucket_size, (intmax_t)size, 5639 xdomain); 5640 5641 if (db_pager_quit) 5642 return; 5643 last_zone = cur_zone; 5644 last_size = cur_size; 5645 } 5646} 5647 5648DB_SHOW_COMMAND(umacache, db_show_umacache) 5649{ 5650 uma_zone_t z; 5651 uint64_t allocs, frees; 5652 long cachefree; 5653 int i; 5654 5655 db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free", 5656 "Requests", "Bucket"); 5657 LIST_FOREACH(z, &uma_cachezones, uz_link) { 5658 uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL); 5659 for (i = 0; i < vm_ndomains; i++) 5660 cachefree += ZDOM_GET(z, i)->uzd_nitems; 5661 db_printf("%18s %8ju %8jd %8ld %12ju %8u\n", 5662 z->uz_name, (uintmax_t)z->uz_size, 5663 (intmax_t)(allocs - frees), cachefree, 5664 (uintmax_t)allocs, z->uz_bucket_size); 5665 if (db_pager_quit) 5666 return; 5667 } 5668} 5669#endif /* DDB */ 5670