subr_vmem.c revision 254543
/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_vmem.c 254543 2013-08-19 23:02:39Z jeff $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>

#include "opt_vm.h"

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * NBBY)

#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT)

#define	VMEM_FLAGS						\
    (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | M_BESTFIT | M_FIRSTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16

/*
 * Data structures private to vmem.
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16

/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};

/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};

#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

#if defined(DIAGNOSTIC)
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);

/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)


#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);

#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)flsl(size) - 1)

/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.
 * Two may be required to import.  Another two may be required to clip
 * edges.
 */
#define	BT_MAXALLOC	4

/*
 * Max free limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define	BT_MAXFREE	(BT_MAXALLOC * 8)

/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* boot time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem kmem_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
vmem_t *kernel_arena = &kernel_arena_storage;
vmem_t *kmem_arena = &kmem_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static int
bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Only allow the kmem arena to dip into reserve tags.  It is the
	 * vmem where new tags come from.
	 */
	flags &= BT_FLAGS;
	if (vm != kmem_arena)
		flags &= ~M_USE_RESERVE;

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL && (flags & M_NOWAIT) != 0)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return ENOMEM;

	return 0;
}

/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */
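
/*
 * For example, with a quantum of one 4K page a free segment of three
 * pages has qsize 3 and SIZE2ORDER(3) == 1, so it sits on freelist[1].
 * A first-fit lookup for three pages instead starts at freelist[2],
 * where every segment is at least ORDER2SIZE(2) == 4 pages, while a
 * best-fit lookup starts at freelist[1] and may have to skip entries
 * that are smaller than the request.
 */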

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list in which any block is large enough
 * for the requested size.  Otherwise, return the list which can have blocks
 * large enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */
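
/*
 * For example, an arena created with a 4K quantum and a qcache_max of
 * 64K gets qcache_idx_max == 16 UMA caches, one per request size from
 * one to sixteen quanta.  vmem_alloc() and vmem_free() satisfy requests
 * up to vm_qcache_max from these caches and fall back to vmem_xalloc()
 * and vmem_xfree() only for larger sizes.
 */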

/*
 * Import from the arena into the quantum cache in UMA.
 */
static int
qc_import(void *arg, void **store, int cnt, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	qc = arg;
	flags |= M_BESTFIT;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
		/* Only guarantee one allocation. */
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT;
	}
	return i;
}

/*
 * Release memory from the UMA cache to the arena.
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}

static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
		    UMA_ZONE_VM);
		MPASS(qc->qc_cache);
	}
}

static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		zone_drain(vm->vm_qcache[i].qc_cache);
}

#ifndef UMA_MD_SMALL_ALLOC

static struct mtx_padalign vmem_bt_lock;

/*
 * vmem_bt_alloc:  Allocate a new page of boundary tags.
 *
 * On architectures with uma_small_alloc there is no recursion; no address
 * space need be allocated to allocate boundary tags.  For the others, we
 * must handle recursion.  Boundary tags are necessary to allocate new
 * boundary tags.
 *
 * UMA guarantees that enough tags are held in reserve to allocate a new
 * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
 * when allocating the page to hold new boundary tags.  In this way the
 * reserve is automatically filled by the allocation that uses the reserve.
 *
 * We still have to guarantee that the new tags are allocated atomically since
 * many threads may try concurrently.  The bt_lock provides this guarantee.
 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
 * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
 * loop again after checking to see if we lost the race to allocate.
 *
 * There is a small race between vmem_bt_alloc() returning the page and the
 * zone lock being acquired to add the page to the zone.  For WAITOK
 * allocations we just pause briefly.  NOWAIT may experience a transient
 * failure.  To alleviate this we permit a small number of simultaneous
 * fills to proceed concurrently so NOWAIT is less likely to fail unless
 * we are really out of KVA.
 */
static void *
vmem_bt_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
	vmem_addr_t addr;

	*pflag = UMA_SLAB_KMEM;

	/*
	 * Single thread boundary tag allocation so that the address space
	 * and memory are added in one atomic operation.
	 */
	mtx_lock(&vmem_bt_lock);
	if (vmem_xalloc(kmem_arena, bytes, 0, 0, 0, VMEM_ADDR_MIN,
	    VMEM_ADDR_MAX, M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT,
	    &addr) == 0) {
		if (kmem_back(kmem_object, addr, bytes,
		    M_NOWAIT | M_USE_RESERVE) == 0) {
			mtx_unlock(&vmem_bt_lock);
			return ((void *)addr);
		}
		vmem_xfree(kmem_arena, addr, bytes);
		mtx_unlock(&vmem_bt_lock);
		/*
		 * Out of memory, not address space.  This may not even be
		 * possible due to M_USE_RESERVE page allocation.
		 */
		if (wait & M_WAITOK)
			VM_WAIT;
		return (NULL);
	}
	mtx_unlock(&vmem_bt_lock);
	/*
	 * We're either out of address space or lost a fill race.
	 */
	if (wait & M_WAITOK)
		pause("btalloc", 1);

	return (NULL);
}
#endif

void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
#ifndef UMA_MD_SMALL_ALLOC
	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
	/*
	 * Reserve enough tags to allocate new tags.  We allow multiple
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.
	 */
	uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}

/* ---- rehash */

static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0) {
		free(oldhashlist, M_VMEM);
	}

	return 0;
}

static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		VMEM_LOCK(vm);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}

static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, CALLOUT_MPSAFE);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btspan;
	bt_t *btfree;

	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
	MPASS((size & vm->vm_quantum_mask) == 0);

	btspan = bt_alloc(vm);
	btspan->bt_type = type;
	btspan->bt_start = addr;
	btspan->bt_size = size;
	bt_insseg_tail(vm, btspan);

	btfree = bt_alloc(vm);
	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);

	vm->vm_size += size;
}

static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Drain per-cpu quantum caches.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	free(vm, M_VMEM);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return EINVAL;

	/*
	 * To make sure we get a span that meets the alignment we double it
	 * and add the size to the tail.  This slightly overestimates.
	 */
	if (align != vm->vm_quantum_mask + 1)
		size = (align * 2) + size;
	size = roundup(size, vm->vm_import_quantum);

	/*
	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
	 * span and the tag we want to allocate from it.
	 */
	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
	vm->vm_nfreetags -= BT_MAXALLOC;
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	vm->vm_nfreetags += BT_MAXALLOC;
	if (error)
		return ENOMEM;

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return 0;
}
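
/*
 * For example, importing three pages with an eight page alignment from
 * an arena with a 4K quantum and a sixteen page import quantum requests
 * roundup(8 * 2 + 3, 16) == 32 pages from the import function.  Any
 * 32 page span necessarily contains an eight page aligned range of at
 * least three pages, at the cost of importing more than is strictly
 * needed.
 */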

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * It is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}

/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		bt = btnew;
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
	bt->bt_type = BT_TYPE_BUSY;
}

/* ---- vmem API */

void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
    vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}

/*
 * vmem_init: Initializes vmem arena.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	int i;

	MPASS(quantum > 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	MPASS(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}

/*
 * vmem_create: create an arena.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{

	vmem_t *vm;

	vm = malloc(sizeof(*vm), M_VMEM, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL) {
		free(vm, M_VMEM);
		return (NULL);
	}
	return (vm);
}

void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
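
/*
 * Typical arena usage, as an illustrative sketch only (my_import and
 * my_release stand in for whatever backing-store callbacks a real
 * consumer would supply):
 *
 *	vmem_t *arena;
 *	vmem_addr_t addr;
 *
 *	arena = vmem_create("example", 0, 0, PAGE_SIZE, 0, M_WAITOK);
 *	vmem_set_import(arena, my_import, my_release, NULL,
 *	    16 * PAGE_SIZE);
 *	if (vmem_alloc(arena, 3 * PAGE_SIZE, M_BESTFIT | M_WAITOK,
 *	    &addr) == 0)
 *		vmem_free(arena, addr, 3 * PAGE_SIZE);
 *	vmem_destroy(arena);
 */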

/*
 * vmem_alloc: allocate resource from the arena.
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
		if (*addrp == 0)
			return (ENOMEM);
		return (0);
	}

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
    vmem_addr_t *addrp)
{
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	vmem_size_t avail;
	bt_t *bt;
	int error;
	int strat;

	flags &= VMEM_FLAGS;
	strat = flags & VMEM_FITMASK;
	MPASS(size0 > 0);
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
	MPASS((align & vm->vm_quantum_mask) == 0);
	MPASS((align & (align - 1)) == 0);
	MPASS((phase & vm->vm_quantum_mask) == 0);
	MPASS((nocross & vm->vm_quantum_mask) == 0);
	MPASS((nocross & (nocross - 1)) == 0);
	MPASS((align == 0 && phase == 0) || phase < align);
	MPASS(nocross == 0 || nocross >= size);
	MPASS(minaddr <= maxaddr);
	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0)
		align = vm->vm_quantum_mask + 1;

	*addrp = 0;
	end = &vm->vm_freelist[VMEM_MAXORDER];
	/*
	 * choose a free block from which we allocate.
	 */
	first = bt_freehead_toalloc(vm, size, strat);
	VMEM_LOCK(vm);
	for (;;) {
		/*
		 * Make sure we have enough tags to complete the
		 * operation.
		 */
		if (vm->vm_nfreetags < BT_MAXALLOC &&
		    bt_fill(vm, flags) != 0) {
			error = ENOMEM;
			break;
		}
		/*
		 * Scan freelists looking for a tag that satisfies the
		 * allocation.  If we're doing BESTFIT we may encounter
		 * sizes below the request.  If we're doing FIRSTFIT we
		 * inspect only the first element from each list.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					error = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, addrp);
					if (error == 0) {
						vmem_clip(vm, bt, *addrp, size);
						goto out;
					}
				}
				/* FIRST skips to the next list. */
				if (strat == M_FIRSTFIT)
					break;
			}
		}
		/*
		 * Retry if the fast algorithm failed.
		 */
		if (strat == M_FIRSTFIT) {
			strat = M_BESTFIT;
			first = bt_freehead_toalloc(vm, size, strat);
			continue;
		}
		/*
		 * XXX it is possible to fail to meet restrictions with the
		 * imported region.  It is up to the user to specify the
		 * import quantum such that it can satisfy any allocation.
		 */
		if (vmem_import(vm, size, align, flags) == 0)
			continue;

		/*
		 * Try to free some space from the quantum cache or reclaim
		 * functions if available.
		 */
		if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
			avail = vm->vm_size - vm->vm_inuse;
			VMEM_UNLOCK(vm);
			if (vm->vm_qcache_max != 0)
				qc_drain(vm);
			if (vm->vm_reclaimfn != NULL)
				vm->vm_reclaimfn(vm, flags);
			VMEM_LOCK(vm);
			/* If we were successful retry even NOWAIT. */
			if (vm->vm_size - vm->vm_inuse > avail)
				continue;
		}
		if ((flags & M_NOWAIT) != 0) {
			error = ENOMEM;
			break;
		}
		VMEM_CONDVAR_WAIT(vm);
	}
out:
	VMEM_UNLOCK(vm);
	if (error != 0 && (flags & M_NOWAIT) == 0)
		panic("failed to allocate waiting allocation\n");

	return (error);
}

/*
 * vmem_free: free the resource to the arena.
 */
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	qcache_t *qc;
	MPASS(size > 0);

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		uma_zfree(qc->qc_cache, (void *)addr);
	} else
		vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	MPASS(size > 0);

	VMEM_LOCK(vm);
	bt = bt_lookupbusy(vm, addr);
	MPASS(bt != NULL);
	MPASS(bt->bt_start == addr);
	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	MPASS(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(t != NULL);
	MPASS(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_remseg(vm, t);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
	}
}

/*
 * vmem_add: add a span of memory to the arena.
 */
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
	int error;

	error = 0;
	flags &= VMEM_FLAGS;
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
	else
		error = ENOMEM;
	VMEM_UNLOCK(vm);

	return (error);
}

/*
 * vmem_size: information about the arena's size.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	default:
		panic("vmem_size");
	}
}

/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
"allocated" : "free"); 1411 } 1412} 1413 1414void 1415vmem_printall(const char *modif, int (*pr)(const char *, ...)) 1416{ 1417 const vmem_t *vm; 1418 1419 LIST_FOREACH(vm, &vmem_list, vm_alllist) { 1420 vmem_dump(vm, pr); 1421 } 1422} 1423 1424void 1425vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...)) 1426{ 1427 const vmem_t *vm = (const void *)addr; 1428 1429 vmem_dump(vm, pr); 1430} 1431#endif /* defined(DDB) */ 1432 1433#define vmem_printf printf 1434 1435#if defined(DIAGNOSTIC) 1436 1437static bool 1438vmem_check_sanity(vmem_t *vm) 1439{ 1440 const bt_t *bt, *bt2; 1441 1442 MPASS(vm != NULL); 1443 1444 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1445 if (bt->bt_start > BT_END(bt)) { 1446 printf("corrupted tag\n"); 1447 bt_dump(bt, vmem_printf); 1448 return false; 1449 } 1450 } 1451 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1452 TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) { 1453 if (bt == bt2) { 1454 continue; 1455 } 1456 if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) { 1457 continue; 1458 } 1459 if (bt->bt_start <= BT_END(bt2) && 1460 bt2->bt_start <= BT_END(bt)) { 1461 printf("overwrapped tags\n"); 1462 bt_dump(bt, vmem_printf); 1463 bt_dump(bt2, vmem_printf); 1464 return false; 1465 } 1466 } 1467 } 1468 1469 return true; 1470} 1471 1472static void 1473vmem_check(vmem_t *vm) 1474{ 1475 1476 if (!vmem_check_sanity(vm)) { 1477 panic("insanity vmem %p", vm); 1478 } 1479} 1480 1481#endif /* defined(DIAGNOSTIC) */ 1482