/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/subr_vmem.c 282361 2015-05-03 07:13:14Z mav $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>

#include "opt_vm.h"

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>

#define	VMEM_OPTORDER		5
#define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
#define	VMEM_MAXORDER						\
	(VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)

#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT)

#define	VMEM_FLAGS						\
	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | M_BESTFIT | M_FIRSTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16

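/*
 * Illustration: with VMEM_OPTORDER == 5 and a 64-bit vmem_size_t (an
 * assumption, not something this file requires), VMEM_OPTVALUE is 32 and
 * VMEM_MAXORDER is 31 + 64 - 5 == 90.  Orders 0..30 then describe exact
 * sizes of 1..31 quanta and the higher orders describe power-of-two size
 * ranges; see the freelist table further below.
 */
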
/*
 * Data structures private to vmem.
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16

/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};

/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};

#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

#if defined(DIAGNOSTIC)
static int enable_vmem_check = 1;
SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
    &enable_vmem_check, 0, "Enable vmem check");
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);

/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)


#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);

#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
	(vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
#define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
	(flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))

/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.  Two may be required to import.  Another two may be
 * required to clip edges.
 */
#define	BT_MAXALLOC	4

/*
 * Max free limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define	BT_MAXFREE	(BT_MAXALLOC * 8)

/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* boot time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem kmem_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
vmem_t *kernel_arena = &kernel_arena_storage;
vmem_t *kmem_arena = &kmem_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static int
bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Only allow the kmem arena to dip into reserve tags.  It is the
	 * vmem where new tags come from.
	 */
	flags &= BT_FLAGS;
	if (vm != kmem_arena)
		flags &= ~M_USE_RESERVE;

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL && (flags & M_NOWAIT) != 0)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return ENOMEM;

	return 0;
}

/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

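/*
 * Illustration of how bt_fill() and bt_alloc() pair up; this is the pattern
 * used by vmem_xalloc() and vmem_add() below, not an additional interface:
 *
 *	VMEM_LOCK(vm);
 *	if (vm->vm_nfreetags < BT_MAXALLOC && bt_fill(vm, flags) != 0)
 *		return (ENOMEM);
 *	bt = bt_alloc(vm);	(cannot fail once bt_fill() has succeeded)
 */
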
/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 2]
 *  :
 * freelist[29] ... [30, 30]
 * freelist[30] ... [31, 31]
 * freelist[31] ... [32, 63]
 * freelist[32] ... [64, 127]
 *  :
 * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list in which any blocks are large enough
 * for the requested size.  Otherwise, return the list which can have blocks
 * large enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

/*
 * Import from the arena into the quantum cache in UMA.
 */
static int
qc_import(void *arg, void **store, int cnt, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	qc = arg;
	flags |= M_BESTFIT;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
		/* Only guarantee one allocation. */
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT;
	}
	return i;
}

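/*
 * Illustration of the quantum cache sizing performed by qc_init() below,
 * using hypothetical numbers: for an arena with a 4096-byte quantum and a
 * qcache_max of 8 * 4096, qcache_idx_max is 8, so UMA caches named
 * "<name>-4096" through "<name>-32768" serve allocations of 1..8 quanta
 * and anything larger falls through to vmem_xalloc().
 */
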
/*
 * Release memory from the UMA cache to the arena.
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}

static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
		    UMA_ZONE_VM);
		MPASS(qc->qc_cache);
	}
}

static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		zone_drain(vm->vm_qcache[i].qc_cache);
}

#ifndef UMA_MD_SMALL_ALLOC

static struct mtx_padalign vmem_bt_lock;

/*
 * vmem_bt_alloc:  Allocate a new page of boundary tags.
 *
 * On architectures with uma_small_alloc there is no recursion; no address
 * space need be allocated to allocate boundary tags.  For the others, we
 * must handle recursion.  Boundary tags are necessary to allocate new
 * boundary tags.
 *
 * UMA guarantees that enough tags are held in reserve to allocate a new
 * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
 * when allocating the page to hold new boundary tags.  In this way the
 * reserve is automatically filled by the allocation that uses the reserve.
 *
 * We still have to guarantee that the new tags are allocated atomically since
 * many threads may try concurrently.  The bt_lock provides this guarantee.
 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
 * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
 * loop again after checking to see if we lost the race to allocate.
 *
 * There is a small race between vmem_bt_alloc() returning the page and the
 * zone lock being acquired to add the page to the zone.  For WAITOK
 * allocations we just pause briefly.  NOWAIT may experience a transient
 * failure.  To alleviate this we permit a small number of simultaneous
 * fills to proceed concurrently so NOWAIT is less likely to fail unless
 * we are really out of KVA.
 */
static void *
vmem_bt_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
	vmem_addr_t addr;

	*pflag = UMA_SLAB_KMEM;

	/*
	 * Single thread boundary tag allocation so that the address space
	 * and memory are added in one atomic operation.
	 */
	mtx_lock(&vmem_bt_lock);
	if (vmem_xalloc(kmem_arena, bytes, 0, 0, 0, VMEM_ADDR_MIN,
	    VMEM_ADDR_MAX, M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT,
	    &addr) == 0) {
		if (kmem_back(kmem_object, addr, bytes,
		    M_NOWAIT | M_USE_RESERVE) == 0) {
			mtx_unlock(&vmem_bt_lock);
			return ((void *)addr);
		}
		vmem_xfree(kmem_arena, addr, bytes);
		mtx_unlock(&vmem_bt_lock);
		/*
		 * Out of memory, not address space.  This may not even be
		 * possible due to M_USE_RESERVE page allocation.
		 */
		if (wait & M_WAITOK)
			VM_WAIT;
		return (NULL);
	}
	mtx_unlock(&vmem_bt_lock);
	/*
	 * We're either out of address space or lost a fill race.
	 */
	if (wait & M_WAITOK)
		pause("btalloc", 1);

	return (NULL);
}
#endif

void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
#ifndef UMA_MD_SMALL_ALLOC
	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
	/*
	 * Reserve enough tags to allocate new tags.  We allow multiple
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.
	 */
	uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}

/* ---- rehash */

static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0) {
		free(oldhashlist, M_VMEM);
	}

	return 0;
}

static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		if (enable_vmem_check == 1) {
			VMEM_LOCK(vm);
			vmem_check(vm);
			VMEM_UNLOCK(vm);
		}
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);

		/*
		 * Periodically wake up threads waiting for resources,
		 * so they could ask for reclamation again.
		 */
		VMEM_CONDVAR_BROADCAST(vm);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}

static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, CALLOUT_MPSAFE);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btspan;
	bt_t *btfree;

	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
	MPASS((size & vm->vm_quantum_mask) == 0);

	btspan = bt_alloc(vm);
	btspan->bt_type = type;
	btspan->bt_start = addr;
	btspan->bt_size = size;
	bt_insseg_tail(vm, btspan);

	btfree = bt_alloc(vm);
	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);

	vm->vm_size += size;
}

static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Drain per-cpu quantum caches.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	free(vm, M_VMEM);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return EINVAL;

	/*
	 * To make sure we get a span that meets the alignment we double it
	 * and add the size to the tail.  This slightly overestimates.
	 */
	if (align != vm->vm_quantum_mask + 1)
		size = (align * 2) + size;
	size = roundup(size, vm->vm_import_quantum);

	/*
	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
	 * span and the tag we want to allocate from it.
	 */
	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
	vm->vm_nfreetags -= BT_MAXALLOC;
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	vm->vm_nfreetags += BT_MAXALLOC;
	if (error)
		return ENOMEM;

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return 0;
}

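/*
 * Illustration of the import overestimate in vmem_import() above, with
 * hypothetical numbers: for a 4096-byte quantum, align == 8192,
 * size == 12288 and an import quantum of 16384, the request becomes
 * 2 * 8192 + 12288 == 28672 and is rounded up to 32768, which is always
 * enough to carve out an 8192-aligned 12288-byte allocation.
 */
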
/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * it's a caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}

/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		bt = btnew;
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
	bt->bt_type = BT_TYPE_BUSY;
}

/* ---- vmem API */

void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
    vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}

/*
 * vmem_init: Initializes vmem arena.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	int i;

	MPASS(quantum > 0);
	MPASS((quantum & (quantum - 1)) == 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = flsl(quantum) - 1;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}

/*
 * vmem_create: create an arena.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{

	vmem_t *vm;

	vm = malloc(sizeof(*vm), M_VMEM, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL) {
		free(vm, M_VMEM);
		return (NULL);
	}
	return (vm);
}

void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

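/*
 * Illustration of the public API implemented in this file; the arena name
 * and numbers are hypothetical:
 *
 *	vmem_t *arena;
 *	vmem_addr_t addr;
 *
 *	arena = vmem_create("example", 0x10000000, 0x100000, PAGE_SIZE,
 *	    32 * PAGE_SIZE, M_WAITOK);
 *	if (vmem_alloc(arena, PAGE_SIZE, M_BESTFIT | M_WAITOK, &addr) == 0)
 *		vmem_free(arena, addr, PAGE_SIZE);
 *	vmem_destroy(arena);
 */
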
/*
 * vmem_alloc: allocate resource from the arena.
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
		if (*addrp == 0)
			return (ENOMEM);
		return (0);
	}

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
    vmem_addr_t *addrp)
{
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	vmem_size_t avail;
	bt_t *bt;
	int error;
	int strat;

	flags &= VMEM_FLAGS;
	strat = flags & VMEM_FITMASK;
	MPASS(size0 > 0);
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
	MPASS((align & vm->vm_quantum_mask) == 0);
	MPASS((align & (align - 1)) == 0);
	MPASS((phase & vm->vm_quantum_mask) == 0);
	MPASS((nocross & vm->vm_quantum_mask) == 0);
	MPASS((nocross & (nocross - 1)) == 0);
	MPASS((align == 0 && phase == 0) || phase < align);
	MPASS(nocross == 0 || nocross >= size);
	MPASS(minaddr <= maxaddr);
	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0)
		align = vm->vm_quantum_mask + 1;

	*addrp = 0;
	end = &vm->vm_freelist[VMEM_MAXORDER];
	/*
	 * choose a free block from which we allocate.
	 */
	first = bt_freehead_toalloc(vm, size, strat);
	VMEM_LOCK(vm);
	for (;;) {
		/*
		 * Make sure we have enough tags to complete the
		 * operation.
		 */
		if (vm->vm_nfreetags < BT_MAXALLOC &&
		    bt_fill(vm, flags) != 0) {
			error = ENOMEM;
			break;
		}
		/*
		 * Scan freelists looking for a tag that satisfies the
		 * allocation.  If we're doing BESTFIT we may encounter
		 * sizes below the request.  If we're doing FIRSTFIT we
		 * inspect only the first element from each list.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					error = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, addrp);
					if (error == 0) {
						vmem_clip(vm, bt, *addrp, size);
						goto out;
					}
				}
				/* FIRST skips to the next list. */
				if (strat == M_FIRSTFIT)
					break;
			}
		}
		/*
		 * Retry if the fast algorithm failed.
		 */
		if (strat == M_FIRSTFIT) {
			strat = M_BESTFIT;
			first = bt_freehead_toalloc(vm, size, strat);
			continue;
		}
		/*
		 * XXX it is possible to fail to meet restrictions with the
		 * imported region.  It is up to the user to specify the
		 * import quantum such that it can satisfy any allocation.
		 */
		if (vmem_import(vm, size, align, flags) == 0)
			continue;

		/*
		 * Try to free some space from the quantum cache or reclaim
		 * functions if available.
		 */
		if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
			avail = vm->vm_size - vm->vm_inuse;
			VMEM_UNLOCK(vm);
			if (vm->vm_qcache_max != 0)
				qc_drain(vm);
			if (vm->vm_reclaimfn != NULL)
				vm->vm_reclaimfn(vm, flags);
			VMEM_LOCK(vm);
			/* If we were successful retry even NOWAIT. */
			if (vm->vm_size - vm->vm_inuse > avail)
				continue;
		}
		if ((flags & M_NOWAIT) != 0) {
			error = ENOMEM;
			break;
		}
		VMEM_CONDVAR_WAIT(vm);
	}
out:
	VMEM_UNLOCK(vm);
	if (error != 0 && (flags & M_NOWAIT) == 0)
		panic("failed to allocate waiting allocation\n");

	return (error);
}

/*
 * vmem_free: free the resource to the arena.
 */
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	qcache_t *qc;
	MPASS(size > 0);

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		uma_zfree(qc->qc_cache, (void *)addr);
	} else
		vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	MPASS(size > 0);

	VMEM_LOCK(vm);
	bt = bt_lookupbusy(vm, addr);
	MPASS(bt != NULL);
	MPASS(bt->bt_start == addr);
	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	MPASS(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(t != NULL);
	MPASS(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_remseg(vm, t);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
	}
}

/*
 * vmem_add: add a static span of resource to the arena.
 */
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
	int error;

	error = 0;
	flags &= VMEM_FLAGS;
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
	else
		error = ENOMEM;
	VMEM_UNLOCK(vm);

	return (error);
}

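/*
 * Illustration of growing and querying an existing arena; the addresses are
 * hypothetical and "arena" is assumed to have been created as shown above:
 *
 *	vmem_size_t free, largest;
 *
 *	if (vmem_add(arena, 0x20000000, 0x100000, M_NOWAIT) == 0) {
 *		free = vmem_size(arena, VMEM_FREE);
 *		largest = vmem_size(arena, VMEM_MAXFREE);
 *	}
 */
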
/*
 * vmem_size: information about the arena's size
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{
	int i;

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	case VMEM_MAXFREE:
		VMEM_LOCK(vm);
		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
			if (LIST_EMPTY(&vm->vm_freelist[i]))
				continue;
			VMEM_UNLOCK(vm);
			return ((vmem_size_t)ORDER2SIZE(i) <<
			    vm->vm_quantum_shift);
		}
		VMEM_UNLOCK(vm);
		return (0);
	default:
		panic("vmem_size");
	}
}

/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
"allocated" : "free"); 1443 } 1444} 1445 1446void 1447vmem_printall(const char *modif, int (*pr)(const char *, ...)) 1448{ 1449 const vmem_t *vm; 1450 1451 LIST_FOREACH(vm, &vmem_list, vm_alllist) { 1452 vmem_dump(vm, pr); 1453 } 1454} 1455 1456void 1457vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...)) 1458{ 1459 const vmem_t *vm = (const void *)addr; 1460 1461 vmem_dump(vm, pr); 1462} 1463#endif /* defined(DDB) */ 1464 1465#define vmem_printf printf 1466 1467#if defined(DIAGNOSTIC) 1468 1469static bool 1470vmem_check_sanity(vmem_t *vm) 1471{ 1472 const bt_t *bt, *bt2; 1473 1474 MPASS(vm != NULL); 1475 1476 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1477 if (bt->bt_start > BT_END(bt)) { 1478 printf("corrupted tag\n"); 1479 bt_dump(bt, vmem_printf); 1480 return false; 1481 } 1482 } 1483 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1484 TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) { 1485 if (bt == bt2) { 1486 continue; 1487 } 1488 if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) { 1489 continue; 1490 } 1491 if (bt->bt_start <= BT_END(bt2) && 1492 bt2->bt_start <= BT_END(bt)) { 1493 printf("overwrapped tags\n"); 1494 bt_dump(bt, vmem_printf); 1495 bt_dump(bt2, vmem_printf); 1496 return false; 1497 } 1498 } 1499 } 1500 1501 return true; 1502} 1503 1504static void 1505vmem_check(vmem_t *vm) 1506{ 1507 1508 if (!vmem_check_sanity(vm)) { 1509 panic("insanity vmem %p", vm); 1510 } 1511} 1512 1513#endif /* defined(DIAGNOSTIC) */ 1514