subr_vmem.c revision 254025
/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_vmem.c 254025 2013-08-07 06:21:20Z jeff $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * NBBY)

#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT)

#define	VMEM_FLAGS						\
    (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | M_BESTFIT | M_FIRSTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16

/*
 * Data structures private to vmem.
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16

/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};

/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};

#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

#if defined(DIAGNOSTIC)
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);

/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)


#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);

#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)flsl(size) - 1)
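
/*
 * A few concrete values, for illustration: SIZE2ORDER() is floor(log2(size)),
 * ORDER2SIZE() is its inverse for exact powers of two, and VMEM_ALIGNUP()
 * rounds an address up to the next multiple of a power-of-two alignment.
 *
 *	SIZE2ORDER(1) == 0		ORDER2SIZE(0) == 1
 *	SIZE2ORDER(7) == 2		ORDER2SIZE(2) == 4
 *	SIZE2ORDER(8) == 3		ORDER2SIZE(3) == 8
 *	VMEM_ALIGNUP(0x1234, 0x1000) == 0x2000
 *	VMEM_ALIGNUP(0x2000, 0x1000) == 0x2000
 */
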
/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.  Two may be required to import.  Another two may be
 * required to clip edges.
 */
#define	BT_MAXALLOC	4

/*
 * Max free limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define	BT_MAXFREE	(BT_MAXALLOC * 8)

/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* boot time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem kmem_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
vmem_t *kernel_arena = &kernel_arena_storage;
vmem_t *kmem_arena = &kmem_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static int
bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Only allow the kmem arena to dip into reserve tags.  It is the
	 * vmem where new tags come from.
	 */
	flags &= BT_FLAGS;
	if (vm != kmem_arena)
		flags &= ~M_USE_RESERVE;

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL && (flags & M_NOWAIT) != 0)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return ENOMEM;

	return 0;
}

/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list in which any blocks are large enough
 * for the requested size.  Otherwise, return the list which can have blocks
 * large enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}
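
/*
 * To make the freelist indexing above concrete, assume a 4 KB quantum
 * (vm_quantum_shift == 12); the sizes below are illustrative only:
 *
 *	size 0x1000  -> qsize 1  -> freelist[0]
 *	size 0x5000  -> qsize 5  -> freelist[2]	(covers [4, 7] quanta)
 *	size 0x20000 -> qsize 32 -> freelist[5]	(covers [32, 63] quanta)
 *
 * bt_insfree() files a free tag on the list for its exact order via
 * bt_freehead_tofree(), while bt_freehead_toalloc() with M_FIRSTFIT rounds
 * up to the next order (unless the size is an exact power of two) so that
 * every block on the chosen list is large enough.
 */
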
/* ---- vmem internal functions */

/*
 * Import from the arena into the quantum cache in UMA.
 */
static int
qc_import(void *arg, void **store, int cnt, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	qc = arg;
	flags |= M_BESTFIT;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
		/* Only guarantee one allocation. */
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT;
	}
	return i;
}

/*
 * Release memory from the UMA cache to the arena.
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}

static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
		    UMA_ZONE_VM);
		MPASS(qc->qc_cache);
	}
}

static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		zone_drain(vm->vm_qcache[i].qc_cache);
}

#ifndef UMA_MD_SMALL_ALLOC

static struct mtx_padalign vmem_bt_lock;

/*
 * vmem_bt_alloc:  Allocate a new page of boundary tags.
 *
 * On architectures with uma_small_alloc there is no recursion; no address
 * space need be allocated to allocate boundary tags.  For the others, we
 * must handle recursion.  Boundary tags are necessary to allocate new
 * boundary tags.
 *
 * UMA guarantees that enough tags are held in reserve to allocate a new
 * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
 * when allocating the page to hold new boundary tags.  In this way the
 * reserve is automatically filled by the allocation that uses the reserve.
 *
 * We still have to guarantee that the new tags are allocated atomically since
 * many threads may try concurrently.  The bt_lock provides this guarantee.
 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
 * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
 * loop again after checking to see if we lost the race to allocate.
 *
 * There is a small race between vmem_bt_alloc() returning the page and the
 * zone lock being acquired to add the page to the zone.  For WAITOK
 * allocations we just pause briefly.  NOWAIT may experience a transient
 * failure.  To alleviate this we permit a small number of simultaneous
 * fills to proceed concurrently so NOWAIT is less likely to fail unless
 * we are really out of KVA.
 */
static void *
vmem_bt_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
	vmem_addr_t addr;

	*pflag = UMA_SLAB_KMEM;

	/*
	 * Single thread boundary tag allocation so that the address space
	 * and memory are added in one atomic operation.
	 */
	mtx_lock(&vmem_bt_lock);
	if (vmem_xalloc(kmem_arena, bytes, 0, 0, 0, VMEM_ADDR_MIN,
	    VMEM_ADDR_MAX, M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT,
	    &addr) == 0) {
		if (kmem_back(kmem_object, addr, bytes,
		    M_NOWAIT | M_USE_RESERVE) == 0) {
			mtx_unlock(&vmem_bt_lock);
			return ((void *)addr);
		}
		vmem_xfree(kmem_arena, addr, bytes);
		mtx_unlock(&vmem_bt_lock);
		/*
		 * Out of memory, not address space.  This may not even be
		 * possible due to M_USE_RESERVE page allocation.
		 */
		if (wait & M_WAITOK)
			VM_WAIT;
		return (NULL);
	}
	mtx_unlock(&vmem_bt_lock);
	/*
	 * We're either out of address space or lost a fill race.
	 */
	if (wait & M_WAITOK)
		pause("btalloc", 1);

	return (NULL);
}
#endif

void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
#ifndef UMA_MD_SMALL_ALLOC
	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
	/*
	 * Reserve enough tags to allocate new tags.  We allow multiple
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.
	 */
	uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}

/* ---- rehash */

static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0) {
		free(oldhashlist, M_VMEM);
	}

	return 0;
}

static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		VMEM_LOCK(vm);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}

static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, CALLOUT_MPSAFE);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btspan;
	bt_t *btfree;

	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm);
	btspan->bt_type = type;
	btspan->bt_start = addr;
	btspan->bt_size = size;
	bt_insseg_tail(vm, btspan);

	btfree = bt_alloc(vm);
	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);

	vm->vm_size += size;
}

static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Drain per-cpu quantum caches.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	free(vm, M_VMEM);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return EINVAL;

	size = roundup(size, vm->vm_import_quantum);

	/*
	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
	 * span and the tag we want to allocate from it.
	 */
	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
	vm->vm_nfreetags -= BT_MAXALLOC;
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	vm->vm_nfreetags += BT_MAXALLOC;
	if (error)
		return ENOMEM;

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * It is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integers of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}

/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		bt = btnew;
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
	bt->bt_type = BT_TYPE_BUSY;
}

/* ---- vmem API */

void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
    vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}
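
/*
 * Illustrative sketch, not part of the original source: a client might stack
 * a private sub-arena on top of kernel_arena.  The names foo_arena and
 * foo_import are hypothetical; the callback signature follows the
 * vm_importfn invocation in vmem_import() above.
 *
 *	static int
 *	foo_import(void *arg, vmem_size_t size, int flags, vmem_addr_t *addrp)
 *	{
 *
 *		return (vmem_alloc(arg, size, flags, addrp));
 *	}
 *
 *	foo_arena = vmem_create("foo", 0, 0, PAGE_SIZE, 0, M_WAITOK);
 *	vmem_set_import(foo_arena, foo_import, NULL, kernel_arena,
 *	    16 * PAGE_SIZE);
 *
 * With a 16-page import quantum, the first allocation from foo_arena pulls
 * an entire 16-page span from kernel_arena and later requests are carved
 * from that span; with a NULL releasefn the spans are never handed back.
 */
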
/*
 * vmem_init: Initializes vmem arena.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	int i;

	MPASS(quantum > 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	MPASS(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}

/*
 * vmem_create: create an arena.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{

	vmem_t *vm;

	vm = malloc(sizeof(*vm), M_VMEM, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL) {
		free(vm, M_VMEM);
		return (NULL);
	}
	return (vm);
}

void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
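
/*
 * Illustrative sketch, not part of the original source: the basic lifecycle
 * for a standalone arena.  Here an arena manages an arbitrary integer range
 * (0x1000 - 0x1fff) with a quantum of 1 and no quantum cache; bar_arena and
 * off are hypothetical names.
 *
 *	vmem_t *bar_arena;
 *	vmem_addr_t off;
 *
 *	bar_arena = vmem_create("bar", 0x1000, 0x1000, 1, 0, M_WAITOK);
 *	if (vmem_alloc(bar_arena, 64, M_BESTFIT | M_WAITOK, &off) == 0) {
 *		... use [off, off + 63] ...
 *		vmem_free(bar_arena, off, 64);
 *	}
 *	vmem_destroy(bar_arena);
 *
 * Because the quantum is 1, vmem_roundup_size() leaves the size untouched;
 * arenas with a larger quantum round every request up to a quantum multiple.
 */
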
/*
 * vmem_alloc: allocate resource from the arena.
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
		if (*addrp == 0)
			return (ENOMEM);
		return (0);
	}

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
    vmem_addr_t *addrp)
{
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	vmem_size_t avail;
	bt_t *bt;
	int error;
	int strat;

	flags &= VMEM_FLAGS;
	strat = flags & VMEM_FITMASK;
	MPASS(size0 > 0);
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
	MPASS((align & vm->vm_quantum_mask) == 0);
	MPASS((align & (align - 1)) == 0);
	MPASS((phase & vm->vm_quantum_mask) == 0);
	MPASS((nocross & vm->vm_quantum_mask) == 0);
	MPASS((nocross & (nocross - 1)) == 0);
	MPASS((align == 0 && phase == 0) || phase < align);
	MPASS(nocross == 0 || nocross >= size);
	MPASS(minaddr <= maxaddr);
	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0)
		align = vm->vm_quantum_mask + 1;

	*addrp = 0;
	end = &vm->vm_freelist[VMEM_MAXORDER];
	/*
	 * choose a free block from which we allocate.
	 */
	first = bt_freehead_toalloc(vm, size, strat);
	VMEM_LOCK(vm);
	for (;;) {
		/*
		 * Make sure we have enough tags to complete the
		 * operation.
		 */
		if (vm->vm_nfreetags < BT_MAXALLOC &&
		    bt_fill(vm, flags) != 0) {
			error = ENOMEM;
			break;
		}
		/*
		 * Scan freelists looking for a tag that satisfies the
		 * allocation.  If we're doing BESTFIT we may encounter
		 * sizes below the request.  If we're doing FIRSTFIT we
		 * inspect only the first element from each list.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					error = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, addrp);
					if (error == 0) {
						vmem_clip(vm, bt, *addrp, size);
						goto out;
					}
				}
				/* FIRST skips to the next list. */
				if (strat == M_FIRSTFIT)
					break;
			}
		}
		/*
		 * Retry if the fast algorithm failed.
		 */
		if (strat == M_FIRSTFIT) {
			strat = M_BESTFIT;
			first = bt_freehead_toalloc(vm, size, strat);
			continue;
		}
		/*
		 * XXX it is possible to fail to meet restrictions with the
		 * imported region.  It is up to the user to specify the
		 * import quantum such that it can satisfy any allocation.
		 */
		if (vmem_import(vm, size, flags) == 0)
			continue;

		/*
		 * Try to free some space from the quantum cache or reclaim
		 * functions if available.
		 */
		if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
			avail = vm->vm_size - vm->vm_inuse;
			VMEM_UNLOCK(vm);
			if (vm->vm_qcache_max != 0)
				qc_drain(vm);
			if (vm->vm_reclaimfn != NULL)
				vm->vm_reclaimfn(vm, flags);
			VMEM_LOCK(vm);
			/* If we were successful retry even NOWAIT. */
			if (vm->vm_size - vm->vm_inuse > avail)
				continue;
		}
		if ((flags & M_NOWAIT) != 0) {
			error = ENOMEM;
			break;
		}
		VMEM_CONDVAR_WAIT(vm);
	}
out:
	VMEM_UNLOCK(vm);
	if (error != 0 && (flags & M_NOWAIT) == 0)
		panic("failed to allocate waiting allocation\n");

	return (error);
}

/*
 * vmem_free: free the resource to the arena.
 */
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	qcache_t *qc;
	MPASS(size > 0);

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		uma_zfree(qc->qc_cache, (void *)addr);
	} else
		vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	MPASS(size > 0);

	VMEM_LOCK(vm);
	bt = bt_lookupbusy(vm, addr);
	MPASS(bt != NULL);
	MPASS(bt->bt_start == addr);
	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	MPASS(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(t != NULL);
	MPASS(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_remseg(vm, t);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
	}
}

/*
 * vmem_add: add a static span to the arena.
 */
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
	int error;

	error = 0;
	flags &= VMEM_FLAGS;
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
	else
		error = ENOMEM;
	VMEM_UNLOCK(vm);

	return (error);
}
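
/*
 * Illustrative sketch, not part of the original source: a constrained
 * allocation through vmem_xalloc().  This asks for 8 KB that is 16 KB
 * aligned and does not cross a 64 KB boundary; addr and error are
 * hypothetical locals.
 *
 *	vmem_addr_t addr;
 *	int error;
 *
 *	error = vmem_xalloc(kernel_arena, 8 * 1024, 16 * 1024, 0, 64 * 1024,
 *	    VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &addr);
 *	if (error == 0)
 *		vmem_xfree(kernel_arena, addr, 8 * 1024);
 *
 * The assertions at the top of vmem_xalloc() spell out the contract: align
 * and nocross must be powers of two and multiples of the quantum, phase must
 * be smaller than align, and nocross must be at least as large as the
 * request.
 */
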
/*
 * vmem_size: information about the arena's size.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	default:
		panic("vmem_size");
	}
}

/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}
#endif /* defined(DDB) */

#define vmem_printf printf

#if defined(DIAGNOSTIC)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	MPASS(vm != NULL);

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overwrapped tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(DIAGNOSTIC) */